diff --git a/kmod-dinghai.spec b/kmod-dinghai.spec index ee95db71656693f9bfc7b96cf9ebed0412e40191..bc4de313e4c1fcd1b5b9111e8d2796960acff967 100644 --- a/kmod-dinghai.spec +++ b/kmod-dinghai.spec @@ -52,10 +52,14 @@ pushd src/net/build -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP \ --ksrc /usr/src/kernels/%{kernel}.%{_arch} popd +pushd src/rdma +./build.sh --ksrc /usr/src/kernels/%{kernel}.%{_arch} --use_rel_net_path +popd %install mkdir -p %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/crypto/zsda/ mkdir -p %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/ +mkdir -p %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/infiniband/hw/ %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/crypto/zsda/ src/crypto/zsda/accdevice/zsda_common/zsda_common.ko %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/crypto/zsda/ src/crypto/zsda/accdevice/zsda_pf/zsda_pf.ko %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/crypto/zsda/ src/crypto/zsda/accdevice/zsda_vf/zsda_vf.ko @@ -64,7 +68,7 @@ mkdir -p %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/e %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/ src/net/drivers/net/ethernet/dinghai/zxdh_ptp.ko %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/ src/net/drivers/net/ethernet/dinghai/zxdh_pf.ko %{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/ src/net/drivers/net/ethernet/dinghai/zxdh_en_aux.ko - +%{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/infiniband/hw/ src/rdma/src/zrdma.ko # Make .ko objects temporarily executable for automatic stripping find %{buildroot}/lib/modules -type f -name \*.ko -exec chmod u+x \{\} \+ @@ -91,6 +95,7 @@ if [ -x "/usr/sbin/weak-modules" ]; then printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_ptp.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_pf.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_en_aux.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules + printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/infiniband/hw/zrdma.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules fi %preun @@ -102,6 +107,7 @@ echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_np echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_ptp.ko" >> /var/run/rpm-%{pkg}-modules.list echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_pf.ko" >> /var/run/rpm-%{pkg}-modules.list echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/net/ethernet/zxdh_en_aux.ko" >> /var/run/rpm-%{pkg}-modules.list +echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/dinghai/infiniband/hw/zrdma.ko" >> /var/run/rpm-%{pkg}-modules.list %postun depmod -a > /dev/null 2>&1 @@ -120,3 +126,4 @@ rm /var/run/rpm-%{pkg}-modules.list %changelog + diff --git a/src/rdma/COPYING b/src/rdma/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..d511905c1647a1e311e8b20d5930a37a9c2531cd --- /dev/null +++ 
b/src/rdma/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. 
(Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/src/rdma/build.sh b/src/rdma/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..4ed02ae7b2d5df9d182b5dd45089f233a632b00f --- /dev/null +++ b/src/rdma/build.sh @@ -0,0 +1,240 @@ +# SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +#!/bin/bash +# Copyright (c) 2023 - 2024 ZTE Corporation + +print_usage() { + echo + echo "usage: $0 {ofed} {noinstall} { ...}" + echo " ofed - compile using OFED 4.17 or above modules" + echo " noinstall - skip driver installation" + echo " dir - extra directory to be searched for header files" + exit 1 +} + +get_suse_local_ver() { + CONFIG_SUSE_KERNEL=`grep " CONFIG_SUSE_KERNEL " $1 | cut -d' ' -f3` + if [ "$CONFIG_SUSE_KERNEL" == "1" ]; then + LV=`grep " CONFIG_LOCALVERSION " $1 | cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//'` + LV_A=`echo $LV | cut -d'.' -f1` + LV_B=`echo $LV | cut -s -d'.' -f2` + LV_C=`echo $LV | cut -s -d'.' -f3` + SLE_LOCALVERSION_CODE=$((LV_A * 65536 + LV_B * 256 + LV_C)) + else + SLE_LOCALVERSION_CODE=0 + fi +} + +cmd_initrd() { + echo "Updating initramfs..." + if which dracut > /dev/null 2>&1 ; then + echo "dracut --force" + dracut --force --omit-drivers "i40iw" + elif which update-initramfs > /dev/null 2>&1 ; then + echo "update-initramfs -u" + update-initramfs -u + else + echo "Unable to update initramfs. You may need to do this manually." + fi +} + +RDMADIR="$( cd "$( dirname "$0" )" && pwd )" +NETDIR="/zte/zxdh_kernel/" + +USE_OFED=0 +NO_INSTALL=0 +EXTRA_INCS= +INSTALL_MODULE=0 +CLEAN=0 +CLEAN_KO=0 +USE_COMPILE=0 + +while [ ! -z "$1" ] +do + case "$1" in + --ksrc) + KSRC=$2 + shift + ;; + + --cross_compile) + USE_COMPILE=1 + CROSS_COMPILE=$2 + export CROSS_COMPILE + ARCH=arm64 + export ARCH + shift + ;; + + --use_rel_net_path) + NETDIR=$RDMADIR/../net/ + ;; + + --use_abs_net_path) + NETDIR=$2 + shift + ;; + + ofed) + USE_OFED=1 + ;; + + noinstall) + NO_INSTALL=1 + ;; + + installmodule) + INSTALL_MODULE=1 + ;; + + clean) + CLEAN=1 + ;; + + cleanko) + CLEAN_KO=1 + ;; + + *) + EXTRA_INCS=$1 + ;; + esac + shift +done + +echo "Using net path: $NETDIR" + +# Use KSRC if defined. 
+if [ -z "$KSRC" ]; then + + if [ -z "$BUILD_KERNEL" ]; then + BUILD_KERNEL=`uname -r` + fi + + #BUILD_KERNEL="5.10.134-13.1.zncgsl6.x86_64" + + if [ -e /usr/src/kernels/linux-$BUILD_KERNEL/include/config ]; then + KSRC="/usr/src/kernels/linux-$BUILD_KERNEL/" + elif [ -e /usr/src/kernels/$BUILD_KERNEL/include/config ]; then + KSRC="/usr/src/kernels/$BUILD_KERNEL/" + elif [ -e /lib/modules/$BUILD_KERNEL/build/include/config ]; then + KSRC="/lib/modules/$BUILD_KERNEL/build/" + fi + + if [ -z "$KSRC" ]; then + BUILD_KERNEL=`uname -r | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/'` + if [ -e /usr/src/kernels/linux-$BUILD_KERNEL/include/config ]; then + KSRC="/usr/src/kernels/linux-$BUILD_KERNEL/" + elif [ -e /usr/src/kernels/$BUILD_KERNEL/include/config ]; then + KSRC="/usr/src/kernels/$BUILD_KERNEL/" + elif [ -e /lib/modules/$BUILD_KERNEL/build/include/config ]; then + KSRC="/lib/modules/$BUILD_KERNEL/build/" + fi + fi + export KSRC +fi + +if [ -e ${KSRC}/include/linux/kconfig.h ]; then + INCLUDE_KCONF_HDR="-include ${KSRC}/include/linux/kconfig.h" + export INCLUDE_KCONF_HDR +fi + +if [ -e ${KSRC}/include/generated/autoconf.h ]; then + INCLUDE_AUTOCONF_HDR="-include ${KSRC}/include/generated/autoconf.h" + export INCLUDE_AUTOCONF_HDR + get_suse_local_ver "${KSRC}/include/generated/autoconf.h" +elif [ -e ${KSRC}/include/linux/autoconf.h ]; then + INCLUDE_AUTOCONF_HDR="-include ${KSRC}/include/linux/autoconf.h" + export INCLUDE_AUTOCONF_HDR + get_suse_local_ver "${KSRC}/include/linux/autoconf.h" +fi + +if [ -e ${KSRC}/include/generated/utsrelease.h ]; then + UTSRELEASE_HDR="-include ${KSRC}/include/generated/utsrelease.h" + export UTSRELEASE_HDR +fi + +make -C $KSRC CFLAGS_MODULE="${EXTRA_INCS}" M=$PWD/src clean + +which nproc > /dev/null 2>&1 +if [ $? -ne 0 ]; then + nproc=1 +else + nproc=`nproc` +fi + +if [ -e "/lib/modules/$BUILD_KERNEL/extern-symvers/auxiliary.symvers" ]; then + KBUILD_EXTRA_SYMBOLS="/lib/modules/$BUILD_KERNEL/extern-symvers/auxiliary.symvers" + export KBUILD_EXTRA_SYMBOLS +fi + +KBUILD_EXTRA_SYMBOLS="$NETDIR/Module.symvers" +export KBUILD_EXTRA_SYMBOLS +INCLUDE_ZXDH_HDR="-include -I$NETDIR/include/dinghai" +export INCLUDE_ZXDH_HDR + + +if [ "$USE_OFED" == "1" ]; then + if [ -z "$OFED_OPENIB_PATH" ]; then + OFED_OPENIB_PATH="/usr/src/openib" + fi + if [ ! -e $OFED_OPENIB_PATH ]; then + echo "Please install OFED development package" + print_usage + fi + + if [ -z "$OFED_VERSION_CODE" ]; then + V1=$(ofed_info | head -1 | cut -d '-' -f 2 | cut -d '.' -f 1) + V2=$(ofed_info | head -1 | cut -d '-' -f 2 | cut -d '.' 
-f 2 | cut -d ':' -f 1) + OFED_VERSION_CODE=$(( ($V1 << 16) + ($V2 << 8) )) + fi + + if [ ${OFED_VERSION_CODE} -lt $(( (4 << 16) + (8 << 8) )) ]; then + echo "Unsupported OFED version installed, requires 4.8 or above" + exit 1 + fi + + KBUILD_EXTRA_SYMBOLS+=" $OFED_OPENIB_PATH/Module.symvers" + export KBUILD_EXTRA_SYMBOLS + + INCLUDE_COMPAT_HDR="-include $OFED_OPENIB_PATH/include/linux/compat-2.6.h -I$OFED_OPENIB_PATH/include -I$OFED_OPENIB_PATH/include/uapi" + export INCLUDE_COMPAT_HDR + + if [ ${OFED_VERSION_CODE} == $(( (4 << 16) + (8 << 8) )) ]; then + make "CFLAGS_MODULE=-DMODULE -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} -D__OFED_4_8__ -DOFED_VERSION_CODE=${OFED_VERSION_CODE} ${EXTRA_INCS}" -j$nproc -C $KSRC M=$PWD/src W=1 + else + make "CFLAGS_MODULE=-DMODULE -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} -D__OFED_BUILD__ -DOFED_VERSION_CODE=${OFED_VERSION_CODE} ${EXTRA_INCS}" -j$nproc -C $KSRC M=$PWD/src W=1 + fi +else + make "CFLAGS_MODULE=-DMODULE -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE} -DOFED_VERSION_CODE=${OFED_VERSION_CODE} ${EXTRA_INCS}" -j$nproc -C $KSRC M=$PWD/src W=1 +fi + +if [ $? -ne 0 ]; then + echo "make failed, exiting..." + exit 1 +fi + +if [ "$INSTALL_MODULE" == "1" ]; then + if [ "$NO_INSTALL" == "0" ]; then + echo "Installing module is not currently supported." + exit 1 + fi +fi + +if [ "$CLEAN" == "1" ]; then + current_dir=$PWD + cd $PWD/src + rm -rf *.order *.symvers *.mod *.o *.cmd *.o.cmd .cm* \ + .configfs* .ctrl* .debugfs* .hmc* .icrdma* .zrdma* \ + .main* .Module* .pble* .puda* .trace* .uda* .utils* .verbs* \ + .vf* .virtchnl* .ws* .hw* .modules* .uk* .cache.mk .ft_debug.o.cmd \ + .tmp_versions + cd $current_dir +fi + +if [ "$CLEAN_KO" == "1" ]; then + current_dir=$PWD + cd $PWD/src + rm zrdma.ko + cd $current_dir +fi + diff --git a/src/rdma/src/Kbuild b/src/rdma/src/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..bf59df4d2305f8e1585f31e313a1c4f090d7d038 --- /dev/null +++ b/src/rdma/src/Kbuild @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +# Copyright (c) 2023 - 2024 ZTE Corporation +# Makefile for the ZTE(R) Ethernet Protocol Driver for RDMA +# +ifneq (${LINUXINCLUDE},) +LINUXINCLUDE := \ + ${INCLUDE_KCONF_HDR} \ + ${INCLUDE_AUTOCONF_HDR} \ + ${INCLUDE_COMPAT_HDR} \ + ${UTSRELEASE_HDR} \ + ${LINUXINCLUDE} +endif + +ccflags-y := -Werror -Wno-cast-function-type -DZ_DH_DEBUG -DMSIX_SUPPORT #-DMSIX_DEBUG #-DDCQCN_INFO -DZXDH_DEBUG +obj-m += zrdma.o + +zrdma-objs := main.o \ + manager.o \ + hw.o \ + cm.o \ + ctrl.o \ + hmc.o \ + pble.o \ + puda.o \ + uk.o \ + utils.o \ + verbs.o \ + uda.o \ + trace.o \ + icrdma_hw.o \ + vf.o \ + virtchnl.o \ + zrdma_kcompat.o \ + tc_hmcdma.o \ + srq.o \ + restrack.o \ + private_verbs_cmd.o \ + dbgfs.o \ + smmu/kernel/adk_mmu600.o \ + smmu/kernel/cmdk_mmu600.o \ + smmu/kernel/cmdk_pagetable.o + + +CFLAGS_trace.o = -I$(src) diff --git a/src/rdma/src/cm.c b/src/rdma/src/cm.c new file mode 100644 index 0000000000000000000000000000000000000000..ab4e73db296f820669ece104b431b29db26e108a --- /dev/null +++ b/src/rdma/src/cm.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "trace.h" + +/** + * zxdh_copy_ip_htonl - copy IP address from host to network order + * @dst: IP address in network order (big endian) + * @src: IP address in host order + */ +void zxdh_copy_ip_htonl(__be32 *dst, u32 *src) +{ + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst++ = 
htonl(*src++); + *dst = htonl(*src); +} + +/** + * zxdh_netdev_vlan_ipv6 - Gets the netdev and mac + * @addr: local IPv6 address + * @vlan_id: vlan id for the given IPv6 address + * @mac: mac address for the given IPv6 address + * + * Returns the net_device of the IPv6 address and also sets the + * vlan id and mac for that address. + */ +struct net_device *zxdh_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac) +{ + struct net_device *ip_dev = NULL; + struct in6_addr laddr6; + + if (!IS_ENABLED(CONFIG_IPV6)) + return NULL; + + zxdh_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr); + if (vlan_id) + *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */ + if (mac) + eth_zero_addr(mac); + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + if (ip_dev->dev_addr && mac) + ether_addr_copy(mac, ip_dev->dev_addr); + break; + } + } + rcu_read_unlock(); + + return ip_dev; +} + +/** + * zxdh_get_vlan_ipv4 - Returns the vlan_id for IPv4 address + * @addr: local IPv4 address + */ +u16 zxdh_get_vlan_ipv4(u32 *addr) +{ + struct net_device *netdev; + u16 vlan_id = 0xFFFF; + + netdev = ip_dev_find(&init_net, htonl(addr[0])); + if (netdev) { + vlan_id = rdma_vlan_dev_vlan_id(netdev); + dev_put(netdev); + } + + return vlan_id; +} + +/** + * zxdh_ipv4_is_lpb - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +bool zxdh_ipv4_is_lpb(u32 loc_addr, u32 rem_addr) +{ + return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); +} + +/** + * zxdh_ipv6_is_lpb - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +bool zxdh_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr) +{ + struct in6_addr raddr6; + + zxdh_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); + + return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); +} + +/** + * zxdh_aeq_qp_event - called by worker thread to disconnect qp + * @iwqp: associate qp for the connection + */ +static void zxdh_aeq_qp_event(struct zxdh_qp *iwqp) +{ + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + unsigned long flags; + struct ib_qp_attr attr; + + spin_lock_irqsave(&iwqp->lock, flags); + + if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) { + spin_unlock_irqrestore(&iwqp->lock, flags); + return; + } + spin_unlock_irqrestore(&iwqp->lock, flags); + + attr.qp_state = IB_QPS_ERR; + zxdh_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + zxdh_ib_qp_event(iwqp, qp->event_type); +} + +/** + * zxdh_aeq_qp_worker - worker for aeq handle qp + * @work: points or disconn structure + */ +static void zxdh_aeq_qp_worker(struct work_struct *work) +{ + struct aeq_qp_work *dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + + kfree(dwork); + zxdh_aeq_qp_event(iwqp); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_qp_disconn - when a connection is being closed + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_qp_disconn(struct zxdh_qp *iwqp) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct aeq_qp_work *work; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", 
iwqp->ibqp.qp_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_qp_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} + +/** + * zxdh_aeq_entry_err_worker - worker for aeq 8f5 handle qpc + * @work: work task structure + */ +static void zxdh_aeq_entry_err_worker(struct work_struct *work) +{ + struct aeq_qp_work *dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_dma_mem qpc_buf = {}; + u64 temp; + u32 tx_last_ack_psn; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwqp->iwdev->rf->sc_dev.hw->device, + qpc_buf.size, &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("no memory\n"); + return; + } + + kfree(dwork); + zxdh_query_qpc(qp, &qpc_buf); + get_64bit_val((__le64 *)qpc_buf.va, 0, &temp); + tx_last_ack_psn = FIELD_GET(RDMAQPC_TX_LAST_ACK_PSN, temp); + if (tx_last_ack_psn != qp->aeq_entry_err_last_psn) { + // qp restart success + qp->entry_err_cnt = 0; + } + qp->aeq_entry_err_last_psn = tx_last_ack_psn; + + if (qp->entry_err_cnt >= ZXDH_AEQ_RETRY_LIMIT) { + // AEQ reported. counts out of limit. + zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + } else { + // AEQ not reported + pr_info("8f5 entry_err_cnt: %d\n", qp->entry_err_cnt); + qp->entry_err_cnt++; + } + + dma_free_coherent(iwqp->iwdev->rf->sc_dev.hw->device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_process_entry_err - query qpc when aeq 8f5 is triggered + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_process_entry_err(struct zxdh_qp *iwqp) +{ + struct aeq_qp_work *work; + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + + if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", iwqp->ibqp.qp_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_entry_err_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} + +/** + * zxdh_aeq_entry_err_worker - worker for aeq 8f3 handle qpc + * @work: work task structure + */ +static void zxdh_aeq_retry_err_worker(struct work_struct *work) +{ + struct aeq_qp_work *dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_dma_mem qpc_buf = {}; + u64 temp; + u32 ack_err_flag, tx_last_ack_psn, retry_cqe_sq_opcode, recv_err_flag; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwqp->iwdev->rf->sc_dev.hw->device, + qpc_buf.size, &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("no memory\n"); + return; + } + + kfree(dwork); + zxdh_query_qpc(qp, &qpc_buf); + get_64bit_val((__le64 *)qpc_buf.va, 0, &temp); + tx_last_ack_psn = FIELD_GET(RDMAQPC_TX_LAST_ACK_PSN, temp); + get_64bit_val((__le64 *)qpc_buf.va, 56, &temp); + retry_cqe_sq_opcode = + FIELD_GET(RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG, temp); + get_64bit_val((__le64 *)qpc_buf.va, 48, 
&temp); + recv_err_flag = FIELD_GET(RDMAQPC_TX_RECV_ERR_FLAG, temp); + get_64bit_val((__le64 *)qpc_buf.va, 40, &temp); + ack_err_flag = FIELD_GET(BIT_ULL(48), temp); + + if (ack_err_flag != 1) { + pr_info("qp %d has been restarted!\n", qp->qp_uk.qp_id); + goto free_rsrc; + } + + if (!((retry_cqe_sq_opcode >= 32) && + (recv_err_flag == 1 || recv_err_flag == 2))) { + pr_info("Timeout! 800f3 aeq reported!\n"); + zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + goto free_rsrc; + } + + if (tx_last_ack_psn != qp->aeq_retry_err_last_psn) { + // qp restart success + pr_info("retry_err_cnt reset\n"); + qp->retry_err_cnt = 0; + } + qp->aeq_retry_err_last_psn = tx_last_ack_psn; + + if (qp->retry_err_cnt >= ZXDH_AEQ_RETRY_LIMIT) { + // AEQ reported. counts out of limit. + zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + } else { + // AEQ not reported + pr_info("8f3 retry_err_cnt: %d\n", qp->retry_err_cnt); + qp->retry_err_cnt++; + } +free_rsrc: + dma_free_coherent(iwqp->iwdev->rf->sc_dev.hw->device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_process_retry_err - query qpc when aeq 8f3 is triggered + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_process_retry_err(struct zxdh_qp *iwqp) +{ + struct aeq_qp_work *work; + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + + if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", iwqp->ibqp.qp_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_retry_err_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} diff --git a/src/rdma/src/cm.h b/src/rdma/src/cm.h new file mode 100644 index 0000000000000000000000000000000000000000..48018955c98aef6aa66733fab0d72465362d28c9 --- /dev/null +++ b/src/rdma/src/cm.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_CM_H +#define ZXDH_CM_H + +#define ZXDH_MPA_REQUEST_ACCEPT 1 +#define ZXDH_MPA_REQUEST_REJECT 2 + +/* IETF MPA -- defines */ +#define IEFT_MPA_KEY_REQ "MPA ID Req Frame" +#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" +#define IETF_MPA_KEY_SIZE 16 +#define IETF_MPA_VER 1 +#define IETF_MAX_PRIV_DATA_LEN 512 +#define IETF_MPA_FRAME_SIZE 20 +#define IETF_RTR_MSG_SIZE 4 +#define IETF_MPA_V2_FLAG 0x10 +#define SNDMARKER_SEQNMASK 0x000001ff +#define ZXDH_MAX_IETF_SIZE 32 + +/* IETF RTR MSG Fields */ +#define IETF_PEER_TO_PEER 0x8000 +#define IETF_FLPDU_ZERO_LEN 0x4000 +#define IETF_RDMA0_WRITE 0x8000 +#define IETF_RDMA0_READ 0x4000 +#define IETF_NO_IRD_ORD 0x3fff + +#define MAX_PORTS 65536 + +#define ZXDH_PASSIVE_STATE_INDICATED 0 +#define ZXDH_DO_NOT_SEND_RESET_EVENT 1 +#define ZXDH_SEND_RESET_EVENT 2 + +#define MAX_ZXDH_IFS 4 + +#define SET_ACK 1 +#define SET_SYN 2 +#define SET_FIN 4 +#define SET_RST 8 + +#define TCP_OPTIONS_PADDING 3 + +#define ZXDH_DEFAULT_RETRYS 64 +#define ZXDH_DEFAULT_RETRANS 8 +#define ZXDH_DEFAULT_TTL 0x40 +#define ZXDH_DEFAULT_RTT_VAR 6 +#define ZXDH_DEFAULT_SS_THRESH 0x3fffffff +#define ZXDH_DEFAULT_REXMIT_THRESH 8 + +#define ZXDH_RETRY_TIMEOUT HZ +#define 
ZXDH_SHORT_TIME 10 +#define ZXDH_LONG_TIME (2 * HZ) +#define ZXDH_MAX_TIMEOUT ((unsigned long)(12 * HZ)) + +#define ZXDH_CM_HASHTABLE_SIZE 1024 +#define ZXDH_CM_TCP_TIMER_INTERVAL 3000 +#define ZXDH_CM_DEFAULT_MTU 1540 +#define ZXDH_CM_DEFAULT_FRAME_CNT 10 +#define ZXDH_CM_THREAD_STACK_SIZE 256 +#define ZXDH_CM_DEFAULT_RCV_WND 64240 +#define ZXDH_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC +#define ZXDH_CM_DEFAULT_RCV_WND_SCALE 2 +#define ZXDH_CM_DEFAULT_FREE_PKTS 10 +#define ZXDH_CM_FREE_PKT_LO_WATERMARK 2 +#define ZXDH_CM_DEFAULT_MSS 536 +#define ZXDH_CM_DEFAULT_MPA_VER 2 +#define ZXDH_CM_DEFAULT_SEQ 0x159bf75f +#define ZXDH_CM_DEFAULT_LOCAL_ID 0x3b47 +#define ZXDH_CM_DEFAULT_SEQ2 0x18ed5740 +#define ZXDH_CM_DEFAULT_LOCAL_ID2 0xb807 +#define ZXDH_MAX_CM_BUF (ZXDH_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN) + +/* cm node transition states */ +enum zxdh_cm_node_state { + ZXDH_CM_STATE_UNKNOWN, + ZXDH_CM_STATE_INITED, + ZXDH_CM_STATE_LISTENING, + ZXDH_CM_STATE_SYN_RCVD, + ZXDH_CM_STATE_SYN_SENT, + ZXDH_CM_STATE_ONE_SIDE_ESTABLISHED, + ZXDH_CM_STATE_ESTABLISHED, + ZXDH_CM_STATE_ACCEPTING, + ZXDH_CM_STATE_MPAREQ_SENT, + ZXDH_CM_STATE_MPAREQ_RCVD, + ZXDH_CM_STATE_MPAREJ_RCVD, + ZXDH_CM_STATE_OFFLOADED, + ZXDH_CM_STATE_FIN_WAIT1, + ZXDH_CM_STATE_FIN_WAIT2, + ZXDH_CM_STATE_CLOSE_WAIT, + ZXDH_CM_STATE_TIME_WAIT, + ZXDH_CM_STATE_LAST_ACK, + ZXDH_CM_STATE_CLOSING, + ZXDH_CM_STATE_LISTENER_DESTROYED, + ZXDH_CM_STATE_CLOSED, +}; + +enum mpa_frame_ver { + IETF_MPA_V1 = 1, + IETF_MPA_V2 = 2, +}; + +enum mpa_frame_key { + MPA_KEY_REQUEST, + MPA_KEY_REPLY, +}; + +enum send_rdma0 { + SEND_RDMA_READ_ZERO = 1, + SEND_RDMA_WRITE_ZERO = 2, +}; + +enum zxdh_tcpip_pkt_type { + ZXDH_PKT_TYPE_UNKNOWN, + ZXDH_PKT_TYPE_SYN, + ZXDH_PKT_TYPE_SYNACK, + ZXDH_PKT_TYPE_ACK, + ZXDH_PKT_TYPE_FIN, + ZXDH_PKT_TYPE_RST, +}; + +enum zxdh_cm_listener_state { + ZXDH_CM_LISTENER_PASSIVE_STATE = 1, + ZXDH_CM_LISTENER_ACTIVE_STATE = 2, + ZXDH_CM_LISTENER_EITHER_STATE = 3, +}; + +/* CM event codes */ +enum zxdh_cm_event_type { + ZXDH_CM_EVENT_UNKNOWN, + ZXDH_CM_EVENT_ESTABLISHED, + ZXDH_CM_EVENT_MPA_REQ, + ZXDH_CM_EVENT_MPA_CONNECT, + ZXDH_CM_EVENT_MPA_ACCEPT, + ZXDH_CM_EVENT_MPA_REJECT, + ZXDH_CM_EVENT_MPA_ESTABLISHED, + ZXDH_CM_EVENT_CONNECTED, + ZXDH_CM_EVENT_RESET, + ZXDH_CM_EVENT_ABORTED, +}; + +struct ietf_mpa_v1 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + u8 priv_data[]; +}; + +struct ietf_rtr_msg { + __be16 ctrl_ird; + __be16 ctrl_ord; +}; + +struct ietf_mpa_v2 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + struct ietf_rtr_msg rtr_msg; + u8 priv_data[]; +}; + +struct option_base { + u8 optionnum; + u8 len; +}; + +struct option_mss { + u8 optionnum; + u8 len; + __be16 mss; +}; + +struct option_windowscale { + u8 optionnum; + u8 len; + u8 shiftcount; +}; + +union all_known_options { + char eol; + struct option_base base; + struct option_mss mss; + struct option_windowscale windowscale; +}; + +struct zxdh_timer_entry { + struct list_head list; + unsigned long timetosend; /* jiffies */ + struct zxdh_puda_buf *sqbuf; + u32 type; + u32 retrycount; + u32 retranscount; + u32 context; + u32 send_retrans; + int close_when_complete; +}; + +/* CM context params */ +struct zxdh_cm_tcp_context { + u8 client; + u32 loc_seq_num; + u32 loc_ack_num; + u32 rem_ack_num; + u32 rcv_nxt; + u32 loc_id; + u32 rem_id; + u32 snd_wnd; + u32 max_snd_wnd; + u32 rcv_wnd; + u32 mss; + u8 snd_wscale; + u8 rcv_wscale; +}; + +struct zxdh_apbvt_entry { + struct hlist_node hlist; + u32 use_cnt; + u16 port; 
+}; + +struct zxdh_cm_listener { + struct list_head list; + struct iw_cm_id *cm_id; + struct zxdh_cm_core *cm_core; + struct zxdh_device *iwdev; + struct list_head child_listen_list; + struct zxdh_apbvt_entry *apbvt_entry; + enum zxdh_cm_listener_state listener_state; + refcount_t refcnt; + atomic_t pend_accepts_cnt; + u32 loc_addr[4]; + u32 reused_node; + int backlog; + u16 loc_port; + u16 vlan_id; + u8 loc_mac[ETH_ALEN]; + u8 user_pri; + u8 tos; + u8 qhash_set : 1; + u8 ipv4 : 1; +}; + +struct zxdh_kmem_info { + void *addr; + u32 size; +}; + +struct zxdh_mpa_priv_info { + const void *addr; + u32 size; +}; + +struct zxdh_cm_node { + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct zxdh_cm_tcp_context tcp_cntxt; + struct zxdh_cm_core *cm_core; + struct zxdh_timer_entry *send_entry; + struct zxdh_timer_entry *close_entry; + struct zxdh_cm_listener *listener; + struct list_head timer_entry; + struct list_head reset_entry; + struct list_head teardown_entry; + struct zxdh_apbvt_entry *apbvt_entry; + struct rcu_head rcu_head; + struct zxdh_mpa_priv_info pdata; + struct zxdh_sc_ah *ah; + struct ietf_mpa_v2 mpa_v2_frame; + struct zxdh_kmem_info mpa_hdr; + struct iw_cm_id *cm_id; + struct hlist_node list; + struct completion establish_comp; + spinlock_t retrans_list_lock; /* protect CM node rexmit updates*/ + atomic_t passive_state; + refcount_t refcnt; + enum zxdh_cm_node_state state; + enum send_rdma0 send_rdma0_op; + enum mpa_frame_ver mpa_frame_rev; + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + int apbvt_set; + int accept_pend; + u16 vlan_id; + u16 ird_size; + u16 ord_size; + u16 mpav2_ird_ord; + u16 lsmm_size; + u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN]; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + u8 user_pri; + u8 tos; + u8 ack_rcvd : 1; + u8 qhash_set : 1; + u8 ipv4 : 1; + u8 snd_mark_en : 1; + u8 rcv_mark_en : 1; + u8 do_lpb : 1; + u8 accelerated : 1; +}; + +struct zxdh_cm_core { + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct list_head listen_list; + DECLARE_HASHTABLE(cm_hash_tbl, 8); + DECLARE_HASHTABLE(apbvt_hash_tbl, 8); + struct timer_list tcp_timer; + struct workqueue_struct *event_wq; + spinlock_t ht_lock; /* protect CM node (active side) list */ + spinlock_t listen_list_lock; /* protect listener list */ + spinlock_t apbvt_lock; /*serialize apbvt add/del entries*/ + u64 stats_nodes_created; + u64 stats_nodes_destroyed; + u64 stats_listen_created; + u64 stats_listen_destroyed; + u64 stats_listen_nodes_created; + u64 stats_listen_nodes_destroyed; + u64 stats_lpbs; + u64 stats_accepts; + u64 stats_rejects; + u64 stats_connect_errs; + u64 stats_passive_errs; + u64 stats_pkt_retrans; + u64 stats_backlog_drops; + struct zxdh_puda_buf *(*form_cm_frame)(struct zxdh_cm_node *cm_node, + struct zxdh_kmem_info *options, + struct zxdh_kmem_info *hdr, + struct zxdh_mpa_priv_info *pdata, + u8 flags); + int (*cm_create_ah)(struct zxdh_cm_node *cm_node, bool wait); + void (*cm_free_ah)(struct zxdh_cm_node *cm_node); +}; + +bool zxdh_ipv4_is_lpb(u32 loc_addr, u32 rem_addr); +bool zxdh_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr); +#endif /* ZXDH_CM_H */ diff --git a/src/rdma/src/configfs.c b/src/rdma/src/configfs.c new file mode 100644 index 0000000000000000000000000000000000000000..61cb82cfaffb17119624ef4f8d264a891b917aaf --- /dev/null +++ b/src/rdma/src/configfs.c @@ -0,0 +1,1474 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include +#include +#include +#include +#include 
"main.h" +#ifdef __OFED_4_8__ +#include +#include +#endif + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +enum zxdh_configfs_attr_type { + ZXDH_ATTR_IW_DCTCP, + ZXDH_ATTR_IW_TIMELY, + ZXDH_ATTR_IW_ECN, + ZXDH_ATTR_ROCE_TIMELY, + ZXDH_ATTR_ROCE_DCQCN, + ZXDH_ATTR_ROCE_DCTCP, + ZXDH_ATTR_ROCE_ENABLE, + ZXDH_ATTR_IW_OOO, + ZXDH_ATTR_ROCE_NO_ICRC, + ZXDH_ATTR_ENABLE_UP_MAP, +}; + +struct zxdh_vsi_grp { + struct config_group group; + struct zxdh_device *iwdev; +}; + +/** + * zxdh_find_device_by_name - find a vsi device given a name + * @name: name of iwdev + */ +static struct zxdh_device *zxdh_find_device_by_name(const char *name) +{ + struct zxdh_handler *hdl; + struct zxdh_device *iwdev; + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_for_each_entry(hdl, &zxdh_handlers, list) { + iwdev = hdl->iwdev; + if (!strcmp(name, iwdev->ibdev.name)) { + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + return iwdev; + } + } + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + + return NULL; +} + +#ifdef __OFED_4_8__ +static int zxdh_configfs_set_vsi_attr(struct zxdh_vsi_grp *grp, const char *buf, + enum zxdh_configfs_attr_type attr_type) +{ +#else +/* + * zxdh_configfs_set_vsi_attr - set vsi configfs attribute + * @item_name: config item name + * @buf: buffer + * @zxdh_configfs_type_attr: vsi attribute type to set + */ +static int zxdh_configfs_set_vsi_attr(struct config_item *item, const char *buf, + enum zxdh_configfs_attr_type attr_type) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + struct zxdh_up_info up_map_info = {}; + bool enable; + int ret = 0; + + if (strtobool(buf, &enable)) { + ret = -EINVAL; + goto done; + } + + switch (attr_type) { + case ZXDH_ATTR_IW_DCTCP: + iwdev->iwarp_dctcp_en = enable; + iwdev->iwarp_ecn_en = !enable; + break; + case ZXDH_ATTR_IW_TIMELY: + iwdev->iwarp_timely_en = enable; + break; + case ZXDH_ATTR_IW_ECN: + iwdev->iwarp_ecn_en = enable; + break; + case ZXDH_ATTR_ENABLE_UP_MAP: + iwdev->up_map_en = enable; + if (enable) { + *((u64 *)up_map_info.map) = iwdev->up_up_map; + up_map_info.use_cnp_up_override = true; + up_map_info.cnp_up_override = iwdev->cnp_up_override; + } else { + *((u64 *)up_map_info.map) = ZXDH_DEFAULT_UP_UP_MAP; + up_map_info.use_cnp_up_override = false; + } + up_map_info.hmc_fcn_idx = iwdev->rf->sc_dev.hmc_fn_id; + zxdh_cqp_up_map_cmd(&iwdev->rf->sc_dev, ZXDH_OP_SET_UP_MAP, + &up_map_info); + break; + case ZXDH_ATTR_ROCE_NO_ICRC: + iwdev->roce_no_icrc_en = enable; + break; + case ZXDH_ATTR_ROCE_TIMELY: + iwdev->roce_timely_en = enable; + break; + case ZXDH_ATTR_ROCE_DCQCN: + iwdev->roce_dcqcn_en = enable; + break; + case ZXDH_ATTR_ROCE_DCTCP: + iwdev->roce_dctcp_en = enable; + break; + case ZXDH_ATTR_ROCE_ENABLE: + //rf->roce_en = enable; FIXME: Add when roce/iwarp in configFS + break; + case ZXDH_ATTR_IW_OOO: + iwdev->iw_ooo = enable; + iwdev->override_ooo = true; + break; + default: + ret = -EINVAL; + } + +done: + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_push_mode(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * push_mode_show - Show the value of push_mode for device + * @item: config item + * @buf: buffer to write to + */ +static ssize_t push_mode_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", 
iwdev->push_mode); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_push_mode(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * push_mode_store - Store value for push_mode + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t push_mode_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + bool enable; + + if (strtobool(buf, &enable)) + return -EINVAL; + + iwdev->push_mode = enable; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_cwnd(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * roce_cwnd_show - Show the value of RoCE cwnd + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_cwnd_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_cwnd); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_cwnd(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_cwnd_store - Store value for roce_cwnd + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_cwnd_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rsrc_cwnd; + + if (kstrtou32(buf, 0, &rsrc_cwnd)) + return -EINVAL; + + if (!rsrc_cwnd) + return -EINVAL; + + iwdev->roce_cwnd = rsrc_cwnd; + iwdev->override_cwnd = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_rd_fence_rate(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/* + * roce_rd_fence_rate_show - Show RoCE read fence rate + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_rd_fence_rate_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rd_fence_rate); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_rd_fence_rate(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_rd_fence_rate_store - Store RoCE read fence rate + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_rd_fence_rate_store(struct config_item *item, + const char *buf, size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rd_fence_rate; + + if (kstrtou32(buf, 0, &rd_fence_rate)) + return -EINVAL; + + if (rd_fence_rate > 256) + return -EINVAL; + + iwdev->rd_fence_rate = rd_fence_rate; + iwdev->override_rd_fence_rate = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_ackcreds(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * roce_ackcreds_show - Show the value of RoCE ack_creds + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_ackcreds_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + 
container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_ackcreds); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_ackcreds(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_ackcreds_store - Store value for roce_ackcreds + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_ackcreds_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rsrc_ackcreds; + + if (kstrtou32(buf, 0, &rsrc_ackcreds)) + return -EINVAL; + + if (!rsrc_ackcreds || rsrc_ackcreds > 0x1E) + return -EINVAL; + + iwdev->roce_ackcreds = rsrc_ackcreds; + iwdev->override_ackcreds = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_cnp_up_override(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * cnp_up_override_store - Store value for CNP override + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t cnp_up_override_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 cnp_override; + + if (kstrtou8(buf, 0, &cnp_override)) + return -EINVAL; + + if (cnp_override > 0x3F) + return -EINVAL; + + iwdev->cnp_up_override = cnp_override; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_cnp_up_override(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * cnp_up_override_show - Show value of CNP UP override + * @item: config item + * @buf: buffer to write to + */ +static ssize_t cnp_up_override_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->cnp_up_override); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_ceq_itr(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * ceq_itr_store - Set interrupt Throttling(ITR) value + * @item: config item + * @buf: buffer to read from + * @count: size of buffer + */ +static ssize_t ceq_itr_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 itr; + + if (kstrtou32(buf, 0, &itr)) + return -EINVAL; + +#define ZXDH_MAX_ITR 8160 + if (itr > 8160) + return -EINVAL; + + iwdev->rf->sc_dev.ceq_itr = itr; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_ceq_itr(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * ceq_itr_show - Show interrupt Throttling(ITR) value + * @item: config item + * @buf: buffer to write to + */ +static ssize_t ceq_itr_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rf->sc_dev.ceq_itr); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_ceq_intrl(struct 
zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * ceq_intrl_store - Set the interrupt rate limit value + * @item: config item + * @buf: buffer to read from + * @count: size of buffer + */ +static ssize_t ceq_intrl_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + struct zxdh_msix_vector *msix_vec; + u32 intrl, interval = 0; + int i; + + if (kstrtou32(buf, 0, &intrl)) + return -EINVAL; + +#define ZXDH_MIN_INT_RATE_LIMIT 4237 +#define ZXDH_MAX_INT_RATE_LIMIT 250000 +#define ZXDH_USECS_PER_SEC 1000000 +#define ZXDH_USECS_PER_UNIT 4 +#define ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL 59 /* 59 * 4 = 236 us */ + + if (intrl && intrl < ZXDH_MIN_INT_RATE_LIMIT) + intrl = ZXDH_MIN_INT_RATE_LIMIT; + if (intrl > ZXDH_MAX_INT_RATE_LIMIT) + intrl = ZXDH_MAX_INT_RATE_LIMIT; + + iwdev->ceq_intrl = intrl; + if (intrl) { + interval = (ZXDH_USECS_PER_SEC / intrl) / ZXDH_USECS_PER_UNIT; + + ibdev_info( + &iwdev->ibdev, + "CEQ Interrupt rate Limit enabled with interval = %d\n", + interval); + } else { + ibdev_info(&iwdev->ibdev, + "CEQ Interrupt rate Limit disabled\n"); + } + msix_vec = &iwdev->rf->iw_msixtbl[2]; + for (i = 1; i < iwdev->rf->ceqs_count; i++, msix_vec++) + zxdh_set_irq_rate_limit(&iwdev->rf->sc_dev, msix_vec->idx, + interval); + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_ceq_intrl(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * ceq_intrl_show - Show the interrupt rate limit value + * @item: config item + * @buf: buffer to write to + */ +static ssize_t ceq_intrl_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->ceq_intrl); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_up_up_map(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * up_up_map_store - Store value for UP-UP map + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t up_up_map_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u64 up_map; + + if (kstrtou64(buf, 0, &up_map)) + return -EINVAL; + + iwdev->up_up_map = up_map; + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_up_up_map(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * up_up_map_show - Show value of IP-UP map + * @item: config item + * @buf: buffer to write to + */ +static ssize_t up_up_map_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "0x%llx\n", iwdev->up_up_map); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_rcv_wnd(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * rcv_wnd_show - Show the value of TCP receive window + * @item: config item + * @buf: buffer to write to + */ +static ssize_t rcv_wnd_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = 
grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rcv_wnd); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_rcv_wnd(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * rcv_wnd_store - Store value for rcv_wnd + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t rcv_wnd_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rsrc_rcv_wnd; + + if (kstrtou32(buf, 0, &rsrc_rcv_wnd)) + return -EINVAL; + + if (rsrc_rcv_wnd < 65536) + return -EINVAL; + + iwdev->rcv_wnd = rsrc_rcv_wnd; + iwdev->override_rcv_wnd = true; + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_rcv_wscale(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * rcv_wscale_show - Show value of TCP receive window scale + * @item: config item + * @buf: buffer to write to + */ +static ssize_t rcv_wscale_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rcv_wscale); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_rcv_wscale(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * rcv_wscale_store - Store value for recv_wscale + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t rcv_wscale_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rsrc_rcv_wscale; + + if (kstrtou8(buf, 0, &rsrc_rcv_wscale)) + return -EINVAL; + + if (rsrc_rcv_wscale > 16) + return -EINVAL; + + iwdev->rcv_wscale = rsrc_rcv_wscale; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_dctcp_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * iw_dctcp_enable_show - Show the value of dctcp_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_dctcp_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_dctcp_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_dctcp_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_dctcp_enable_store - Store value of dctcp_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_dctcp_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_DCTCP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_DCTCP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_ecn_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_ecn_enable_show - Show the value of ecn_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_ecn_enable_show(struct config_item *item, char *buf) +{ + 
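+	/* Non-OFED path: recover the owning zxdh_vsi_grp from the config_item;
+	 * the __OFED_4_8__ variant above is handed the group directly.
+	 */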
struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_ecn_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_ecn_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_ecn_enable_store - Store value of ecn_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_ecn_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_ECN); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_ECN); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_timely_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * iw_timely_enable_show - Show value of iwarp_timely_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_timely_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_timely_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_timely_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_timely_enable_store - Store value of iwarp_timely_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_timely_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_TIMELY); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_TIMELY); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_rtomin(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_rtomin_show - Show the value of rtomin for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_rtomin_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_rtomin); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_rtomin(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * iw_rtomin_store - Store value of iwarp_rtomin for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_rtomin_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rtomin; + + if (kstrtou8(buf, 0, &rtomin)) + return -EINVAL; + + iwdev->iwarp_rtomin = rtomin; + iwdev->override_rtomin = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_rtomin(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * roce_rtomin_show - Show the value of roce_rtomin for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_rtomin_show(struct config_item *item, char *buf) +{ + 
struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_rtomin); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_rtomin(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_rtomin_store - Store value of roce_rtomin for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_rtomin_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rtomin; + + if (kstrtou8(buf, 0, &rtomin)) + return -EINVAL; + + iwdev->roce_rtomin = rtomin; + iwdev->override_rtomin = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_timely_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_timely_enable_show - Show value of roce_timely_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_timely_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_timely_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_timely_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_timely_enable_store - Store value of roce_timely_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_timely_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_TIMELY); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_TIMELY); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_no_icrc_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_no_icrc_enable_show - Show value of no_icrc for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_no_icrc_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_no_icrc_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_no_icrc_enable(struct zxdh_vsi_grp *grp, + const char *buf, + size_t count) +#else +/** + * roce_no_icrc_enable_store - Store value of roce_no_icrc for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_no_icrc_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_NO_ICRC); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_NO_ICRC); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_up_map_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * up_map_enable_show - Show value of up_map_enable for PF + * @item: config item + * @buf: buffer to write 
to + */ +static ssize_t up_map_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->up_map_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_up_map_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * up_map_enable_store - Store value of up_map_enable for PF + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t up_map_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ENABLE_UP_MAP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ENABLE_UP_MAP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_ooo_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_ooo_enable_show - Show the value of iw_ooo_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_ooo_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iw_ooo); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_ooo_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_ooo_enable_store - Store value of iw_ooo_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_ooo_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_OOO); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_OOO); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_dcqcn_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_dcqcn_enable_show - Show the value of roce_dcqcn_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_dcqcn_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_dcqcn_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_dcqcn_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_dcqcn_enable_store - Store value of roce_dcqcn_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_dcqcn_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_DCQCN); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_DCQCN); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_dctcp_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/* roce_dctcp_enable_show - Show the value of roce_dctcp_enable for vsi + * @item: config item + * @buf: buffer to write to + 
*/ +static ssize_t roce_dctcp_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_dctcp_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_dctcp_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_dctcp_enable_store - Store value of roce_dctcp_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_dctcp_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_DCTCP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_DCTCP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +CONFIGFS_EATTR_STRUCT(zrdma, zxdh_vsi_grp); +#define CFG_CONFIG_DESC_ITEM_ATTR(name) \ + static struct zxdh_attribute name = __CONFIGFS_EATTR( \ + name, S_IRUGO | S_IWUSR, zxdh_show_attr_##name, \ + zxdh_store_attr_##name) + +CFG_CONFIG_DESC_ITEM_ATTR(iw_dctcp_enable); +CFG_CONFIG_DESC_ITEM_ATTR(push_mode); +CFG_CONFIG_DESC_ITEM_ATTR(iw_timely_enable); +CFG_CONFIG_DESC_ITEM_ATTR(iw_ecn_enable); +CFG_CONFIG_DESC_ITEM_ATTR(iw_rtomin); +CFG_CONFIG_DESC_ITEM_ATTR(rcv_wnd); +CFG_CONFIG_DESC_ITEM_ATTR(rcv_wscale); +CFG_CONFIG_DESC_ITEM_ATTR(iw_ooo_enable); +CFG_CONFIG_DESC_ITEM_ATTR(cnp_up_override); +CFG_CONFIG_DESC_ITEM_ATTR(up_map_enable); +CFG_CONFIG_DESC_ITEM_ATTR(up_up_map); +CFG_CONFIG_DESC_ITEM_ATTR(ceq_itr); +CFG_CONFIG_DESC_ITEM_ATTR(ceq_intrl); +CFG_CONFIG_DESC_ITEM_ATTR(roce_cwnd); +CFG_CONFIG_DESC_ITEM_ATTR(roce_rd_fence_rate); +CFG_CONFIG_DESC_ITEM_ATTR(roce_ackcreds); +CFG_CONFIG_DESC_ITEM_ATTR(roce_timely_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_no_icrc_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_dcqcn_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_dctcp_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_rtomin); + +CONFIGFS_EATTR_OPS(zrdma, zxdh_vsi_grp, group); + +static struct configfs_attribute *zxdh_gen1_iw_vsi_attrs[] = { + &rcv_wnd.attr, + &rcv_wscale.attr, + NULL, +}; + +static struct configfs_attribute *zxdh_iw_vsi_attrs[] = { + &push_mode.attr, &iw_dctcp_enable.attr, + &iw_timely_enable.attr, &iw_ecn_enable.attr, + &iw_rtomin.attr, &rcv_wnd.attr, + &rcv_wscale.attr, &iw_ooo_enable.attr, + &cnp_up_override.attr, &up_map_enable.attr, + &up_up_map.attr, &ceq_itr.attr, + &ceq_intrl.attr, NULL, +}; + +static struct configfs_attribute *zxdh_roce_vsi_attrs[] = { + &push_mode.attr, + &roce_cwnd.attr, + &roce_rd_fence_rate.attr, + &roce_ackcreds.attr, + &roce_timely_enable.attr, + &roce_no_icrc_enable.attr, + &roce_dcqcn_enable.attr, + &roce_dctcp_enable.attr, + &roce_rtomin.attr, + &cnp_up_override.attr, + &up_map_enable.attr, + &up_up_map.attr, + &ceq_itr.attr, + &ceq_intrl.attr, + NULL, +}; +#else /* OFED_4_8 */ +CONFIGFS_ATTR(, push_mode); +CONFIGFS_ATTR(, iw_dctcp_enable); +CONFIGFS_ATTR(, iw_timely_enable); +CONFIGFS_ATTR(, iw_ecn_enable); +CONFIGFS_ATTR(, iw_rtomin); +CONFIGFS_ATTR(, rcv_wnd); +CONFIGFS_ATTR(, rcv_wscale); +CONFIGFS_ATTR(, iw_ooo_enable); +CONFIGFS_ATTR(, up_map_enable); +CONFIGFS_ATTR(, cnp_up_override); +CONFIGFS_ATTR(, up_up_map); +CONFIGFS_ATTR(, ceq_itr); +CONFIGFS_ATTR(, ceq_intrl); +CONFIGFS_ATTR(, roce_timely_enable); +CONFIGFS_ATTR(, roce_no_icrc_enable); +CONFIGFS_ATTR(, roce_dcqcn_enable); 
+CONFIGFS_ATTR(, roce_dctcp_enable); +CONFIGFS_ATTR(, roce_cwnd); +CONFIGFS_ATTR(, roce_rd_fence_rate); +CONFIGFS_ATTR(, roce_ackcreds); +CONFIGFS_ATTR(, roce_rtomin); + +static struct configfs_attribute *zxdh_gen1_iw_vsi_attrs[] = { + &attr_rcv_wnd, + &attr_rcv_wscale, + NULL, +}; + +static struct configfs_attribute *zxdh_iw_vsi_attrs[] = { + &attr_push_mode, &attr_iw_dctcp_enable, + &attr_iw_timely_enable, &attr_iw_ecn_enable, + &attr_iw_rtomin, &attr_rcv_wnd, + &attr_rcv_wscale, &attr_iw_ooo_enable, + &attr_cnp_up_override, &attr_up_map_enable, + &attr_up_up_map, &attr_ceq_itr, + &attr_ceq_intrl, NULL, +}; + +static struct configfs_attribute *zxdh_roce_vsi_attrs[] = { + &attr_push_mode, + &attr_roce_cwnd, + &attr_roce_rd_fence_rate, + &attr_roce_ackcreds, + &attr_roce_timely_enable, + &attr_roce_no_icrc_enable, + &attr_roce_dcqcn_enable, + &attr_roce_dctcp_enable, + &attr_roce_rtomin, + &attr_cnp_up_override, + &attr_up_map_enable, + &attr_up_up_map, + &attr_ceq_itr, + &attr_ceq_intrl, + NULL, +}; +#endif /* OFED_4_8 */ + +static void zxdh_release_vsi_grp(struct config_item *item) +{ + struct config_group *group = + container_of(item, struct config_group, cg_item); + struct zxdh_vsi_grp *vsi_grp = + container_of(group, struct zxdh_vsi_grp, group); + + kfree(vsi_grp); +} + +static struct configfs_item_operations zxdh_vsi_ops = { +#ifdef __OFED_4_8__ + .show_attribute = zxdh_attr_show, + .store_attribute = zxdh_attr_store, +#endif + .release = zxdh_release_vsi_grp +}; + +static struct config_item_type zxdh_iw_vsi_type = { + .ct_attrs = zxdh_iw_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type zxdh_roce_vsi_type = { + .ct_attrs = zxdh_roce_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type zxdh_gen1_iw_vsi_type = { + .ct_attrs = zxdh_gen1_iw_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +/** + * zxdh_vsi_make_group - Creation of subsystem groups + * @group: config group + * @name: name of the group + */ +static struct config_group *zxdh_vsi_make_group(struct config_group *group, + const char *name) +{ + struct zxdh_vsi_grp *vsi_grp; + struct zxdh_device *iwdev; + u8 hw_ver; + + iwdev = zxdh_find_device_by_name(name); + if (!iwdev) + return ERR_PTR(-ENODEV); + + hw_ver = iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev; + + vsi_grp = kzalloc(sizeof(*vsi_grp), GFP_KERNEL); + if (!vsi_grp) + return ERR_PTR(-ENOMEM); + + vsi_grp->iwdev = iwdev; + + config_group_init(&vsi_grp->group); + + if (hw_ver == ZXDH_GEN_1) { + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_gen1_iw_vsi_type); + } else { + if (iwdev->rf->protocol_used == ZXDH_ROCE_PROTOCOL_ONLY) + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_roce_vsi_type); + else + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_iw_vsi_type); + } + + return &vsi_grp->group; +} + +static struct configfs_group_operations zxdh_vsi_group_ops = { + .make_group = zxdh_vsi_make_group, +}; + +static struct config_item_type zxdh_subsys_type = { + .ct_group_ops = &zxdh_vsi_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem cfs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "zrdma", + .ci_type = &zxdh_subsys_type, + }, + }, +}; + +int zxdh_configfs_init(void) +{ + config_group_init(&cfs_subsys.su_group); + mutex_init(&cfs_subsys.su_mutex); + return configfs_register_subsystem(&cfs_subsys); +} + +void zxdh_configfs_exit(void) +{ + 
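+	/* Tear down what zxdh_configfs_init() registered: the "zrdma" subsystem
+	 * disappears from the configfs mount (typically /sys/kernel/config), and
+	 * per-device groups created there via mkdir are released through
+	 * zxdh_release_vsi_grp() once userspace removes them.
+	 */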
configfs_unregister_subsystem(&cfs_subsys); +} +#endif /* CONFIG_CONFIGFS_FS */ diff --git a/src/rdma/src/ctrl.c b/src/rdma/src/ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..b739054b982aef5a4b3463489f2b98eb452762fd --- /dev/null +++ b/src/rdma/src/ctrl.c @@ -0,0 +1,5345 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "ws.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" +#include "srq.h" + +/** + * zxdh_get_qp_from_list - get next qp from a list + * @head: Listhead of qp's + * @qp: current qp + */ +struct zxdh_sc_qp *zxdh_get_qp_from_list(struct list_head *head, + struct zxdh_sc_qp *qp) +{ + struct list_head *lastentry; + struct list_head *entry = NULL; + + if (list_empty(head)) + return NULL; + + if (!qp) { + entry = head->next; + } else { + lastentry = &qp->list; + entry = lastentry->next; + if (entry == head) + return NULL; + } + + return container_of(entry, struct zxdh_sc_qp, list); +} + +#ifdef Z_CONFIG_RDMA_VSI +/** + * zxdh_qp_rem_qos - remove qp from qos lists during destroy qp + * @qp: qp to be removed from qos + */ +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_vsi *vsi = qp->vsi; + + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + if (qp->on_qoslist) { + qp->on_qoslist = false; + list_del(&qp->list); + } + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +} + +/** + * zxdh_qp_add_qos - called during setctx for qp to be added to qos + * @qp: qp to be added to qos + */ +void zxdh_qp_add_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_vsi *vsi = qp->vsi; + + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + if (!qp->on_qoslist) { + list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); + qp->on_qoslist = true; + qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; + } + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +} +#else +/** + * zxdh_qp_rem_qos - remove qp from qos lists during destroy qp + * @qp: qp to be removed from qos + */ +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_dev *dev = qp->dev; + + mutex_lock(&dev->qos[qp->user_pri].qos_mutex); + if (qp->on_qoslist) { + qp->on_qoslist = false; + list_del(&qp->list); + } + mutex_unlock(&dev->qos[qp->user_pri].qos_mutex); +} + +/** + * zxdh_qp_add_qos - called during setctx for qp to be added to qos + * @qp: qp to be added to qos + */ +void zxdh_qp_add_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_dev *dev = qp->dev; + + mutex_lock(&dev->qos[qp->user_pri].qos_mutex); + if (!qp->on_qoslist) { + list_add(&qp->list, &dev->qos[qp->user_pri].qplist); + qp->on_qoslist = true; + qp->qs_handle = dev->qos[qp->user_pri].qs_handle; + } + mutex_unlock(&dev->qos[qp->user_pri].qos_mutex); +} +#endif + +/** + * zxdh_sc_pd_init - initialize sc pd struct + * @dev: sc device struct + * @pd: sc pd ptr + * @pd_id: pd_id for allocated pd + * @abi_ver: User/Kernel ABI version + */ +void zxdh_sc_pd_init(struct zxdh_sc_dev *dev, struct zxdh_sc_pd *pd, u32 pd_id, + int abi_ver) +{ + pd->pd_id = pd_id; + pd->abi_ver = abi_ver; + pd->dev = dev; +} + +/** + * zxdh_sc_add_arp_cache_entry - cqp wqe add arp cache entry + * @cqp: struct for cqp hw + * @info: arp entry information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int +zxdh_sc_add_arp_cache_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_add_arp_cache_entry_info 
*info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 temp, hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + set_64bit_val(wqe, 8, info->reach_max); + + temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); + set_64bit_val(wqe, 16, temp); + + hdr = info->arp_index | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_ARP) | + FIELD_PREP(ZXDH_CQPSQ_MAT_PERMANENT, info->permanent) | + FIELD_PREP(ZXDH_CQPSQ_MAT_ENTRYVALID, true) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_del_arp_cache_entry - dele arp cache entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_del_arp_cache_entry(struct zxdh_sc_cqp *cqp, u64 scratch, + u16 arp_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = arp_index | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_ARP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_apbvt_entry - for adding and deleting apbvt entries + * @cqp: struct for cqp hw + * @info: info for apbvt entry to add or delete + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_apbvt_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_apbvt_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->port); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_APBVT) | + FIELD_PREP(ZXDH_CQPSQ_MAPT_ADDPORT, info->add) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_qhash_table_entry - manage quad hash entries + * @cqp: struct for cqp hw + * @info: info for quad hash to manage + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + * + * This is called before connection establishment is started. + * For passive connections, when listener is created, it will + * call with entry type of ZXDH_QHASH_TYPE_TCP_SYN with local + * ip address and tcp port. When SYN is received (passive + * connections) or sent (active connections), this routine is + * called with entry type of ZXDH_QHASH_TYPE_TCP_ESTABLISHED + * and quad is passed in info. 
+ * + * When iwarp connection is done and its state moves to RTS, the + * quad hash entry in the hardware will point to iwarp's qp + * number and requires no calls from the driver. + */ +static int zxdh_sc_manage_qhash_table_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_qhash_table_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 qw1 = 0; + u64 qw2 = 0; + u64 temp; + struct zxdh_sc_vsi *vsi = info->vsi; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); + set_64bit_val(wqe, 0, temp); + + qw1 = FIELD_PREP(ZXDH_CQPSQ_QHASH_QPN, info->qp_num) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_DEST_PORT, info->dest_port); + if (info->ipv4_valid) { + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->dest_ip[0])); + } else { + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR0, + info->dest_ip[0]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR1, + info->dest_ip[1])); + + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR2, + info->dest_ip[2]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->dest_ip[3])); + } + qw2 = FIELD_PREP(ZXDH_CQPSQ_QHASH_QS_HANDLE, + vsi->qos[info->user_pri].qs_handle); + if (info->vlan_valid) + qw2 |= FIELD_PREP(ZXDH_CQPSQ_QHASH_VLANID, info->vlan_id); + set_64bit_val(wqe, 16, qw2); + if (info->entry_type == ZXDH_QHASH_TYPE_TCP_ESTABLISHED) { + qw1 |= FIELD_PREP(ZXDH_CQPSQ_QHASH_SRC_PORT, info->src_port); + if (!info->ipv4_valid) { + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR0, + info->src_ip[0]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR1, + info->src_ip[1])); + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR2, + info->src_ip[2]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->src_ip[3])); + } else { + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->src_ip[0])); + } + } + + set_64bit_val(wqe, 8, qw1); + temp = FIELD_PREP(ZXDH_CQPSQ_QHASH_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_OPCODE, + ZXDH_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_MANAGE, info->manage) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_VLANVALID, info->vlan_valid) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ENTRYTYPE, info->entry_type); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_init - initialize qp + * @qp: sc qp + * @info: initialization qp info + */ +int zxdh_sc_qp_init(struct zxdh_sc_qp *qp, struct zxdh_qp_init_info *info) +{ + int ret_code; + u32 pble_obj_cnt; + u16 wqe_size; + struct zxdh_qp *iwqp = container_of(qp, struct zxdh_qp, sc_qp); + + if (iwqp->is_srq == false) { + if (info->qp_uk_init_info.max_sq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags || + info->qp_uk_init_info.max_rq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + } else { + if (info->qp_uk_init_info.max_sq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + } + + qp->dev = info->dev; +#ifdef Z_CONFIG_RDMA_VSI + qp->vsi = info->vsi; +#endif + qp->sq_pa = info->sq_pa; + if (iwqp->is_srq == false) + qp->rq_pa = info->rq_pa; + qp->hw_host_ctx_pa = 
info->host_ctx_pa; + qp->shadow_area_pa = info->shadow_area_pa; + qp->pd = info->pd; + qp->hw_host_ctx = info->host_ctx; + info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db; + qp->is_nvmeof_ioq = false; + qp->is_nvmeof_tgt = false; + qp->nvmeof_qid = 0xffff; + qp->entry_err_cnt = 0; + qp->retry_err_cnt = 0; + qp->aeq_entry_err_last_psn = 0; + qp->aeq_retry_err_last_psn = 0; + + ret_code = zxdh_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info); + if (ret_code) + return ret_code; + + qp->virtual_map = info->virtual_map; + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (iwqp->is_srq == false) { + if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) || + (info->virtual_map && info->rq_pa >= pble_obj_cnt)) + return -EINVAL; + } else { + if ((info->virtual_map && info->sq_pa >= pble_obj_cnt)) + return -EINVAL; + } + + qp->hw_sq_size = zxdh_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, + ZXDH_QUEUE_TYPE_SQ_RQ); + + ret_code = zxdh_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, + &wqe_size); + if (ret_code) + return ret_code; + + if (iwqp->is_srq == false) { + qp->hw_rq_size = zxdh_get_encoded_wqe_size( + qp->qp_uk.rq_size, ZXDH_QUEUE_TYPE_SQ_RQ); + } + + return 0; +} + +/** + * zxdh_sc_qp_create - create qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_create(struct zxdh_sc_qp *qp, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + + cqp = qp->dev->cqp; + + if (qp->qp_ctx_num < qp->dev->base_qpn || + qp->qp_ctx_num > + (qp->dev->base_qpn + + cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].max_cnt - 1)) + return -EINVAL; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 24, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 32, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 40, RDMAQPC_MASK_INIT); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_modify - modify qp cqp wqe + * @qp: sc qp + * @info: modify qp info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_modify(struct zxdh_sc_qp *qp, struct zxdh_modify_qp_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, info->qpc_tx_mask_low); + set_64bit_val(wqe, 24, info->qpc_tx_mask_high); + set_64bit_val(wqe, 32, info->qpc_rx_mask_low); + set_64bit_val(wqe, 40, info->qpc_rx_mask_high); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + 
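+	/* Dump the completed WQE for debugging, then ring the CQP doorbell
+	 * if the caller asked for the send queue to be posted immediately.
+	 */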
print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_destroy - cqp destroy qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @ignore_mw_bnd: memory window bind flag + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_destroy(struct zxdh_sc_qp *qp, u64 scratch, bool ignore_mw_bnd, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, RDMAQPC_TX_MASKL_DESTROY); + set_64bit_val(wqe, 24, RDMAQPC_TX_MASKH_QP_STATE); + set_64bit_val(wqe, 32, RDMAQPC_RX_MASKL_DESTROY); + set_64bit_val(wqe, 40, RDMAQPC_RX_MASKH_DEST_IP); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_get_encoded_ird_size - + * @ird_size: IRD size + */ +static u8 zxdh_sc_get_encoded_ird_size(u16 ird_size) +{ + u8 encoded_size = 0; + + while (ird_size >>= 1) + encoded_size++; + + return encoded_size; +} + +/** + * zxdh_sc_qp_resetctx_roce - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + */ +void zxdh_sc_qp_resetctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx) +{ + memset(qp_ctx, 0, ZXDH_QP_CTX_SIZE); + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_HIGH, + RS_64_1(IRDMAQPC_HW_SQ_TAIL_INIT, 11))); + set_64bit_val(qp_ctx, 280, FIELD_PREP(RDMAQPC_RX_IRD_RXNUM, 511)); + set_64bit_val(qp_ctx, 384, + FIELD_PREP(RDMAQPC_RX_VHCA_ID, qp->dev->vhca_id)); +} + +u16 zxdh_get_rc_gqp_id(u16 ws_index, u16 vhca_id, u32 total_vhca) +{ + u16 gqp_start = 0; + u16 gqp_offset = 0; + + if (total_vhca <= 34) { + gqp_start = 6 * 8 * vhca_id + 1; + gqp_offset = ws_index % (6 * 8); + } else if (total_vhca <= 66) { + gqp_start = 3 * 8 * vhca_id + 1; + gqp_offset = ws_index % (3 * 8); + } else if (total_vhca <= 130) { + gqp_start = 3 * 4 * vhca_id + 1; + gqp_offset = ws_index % (3 * 4); + } else if (total_vhca <= 258) { + gqp_start = 3 * 2 * vhca_id + 1; + gqp_offset = ws_index % (3 * 2); + } + + return (gqp_start + gqp_offset); +} + +u16 get_ud_gqp_id(u16 vhca_id, u32 total_vhca) +{ + u16 ud_gqp_offset = 0; + u16 ud_gqp_id = 0; + + if (total_vhca <= 34) { + ud_gqp_offset = 6 * 8 * total_vhca + 1; + } else if (total_vhca <= 66) { + ud_gqp_offset = 3 * 8 * total_vhca + 1; + } else if (total_vhca <= 130) { + ud_gqp_offset = 3 * 4 * total_vhca + 1; + } else if (total_vhca <= 258) { + ud_gqp_offset = 3 * 2 * total_vhca + 1; + } + + ud_gqp_id = ud_gqp_offset + vhca_id; + return ud_gqp_id; +} + +void zxdh_sc_qp_modify_ctx_udp_sport(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info) +{ + struct zxdh_udp_offload_info *udp; + u64 hdr; + + udp = info->udp_info; + + hdr = FIELD_PREP(RDMAQPC_TX_SRC_PORTNUM, udp->src_port); + set_64bit_val(qp_ctx, 96, hdr); + dma_wmb(); + set_64bit_val(qp_ctx, 368, + FIELD_PREP(RDMAQPC_RX_SRC_PORTNUM, udp->src_port)); + dma_wmb(); + print_hex_dump_debug("WQE: QP_HOST 
CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +void zxdh_sc_qp_modify_private_cmd_qpc(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_modify_qpc_item *info) +{ + u64 hdr; + + hdr = FIELD_PREP(RDMAQPC_TX_CUR_RETRY_CNT, info->cur_retry_count) | + FIELD_PREP(RDMAQPC_TX_READ_RETRY_FLAG, info->read_retry_flag) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_PSN, info->tx_last_ack_psn); + set_64bit_val(qp_ctx, 0, hdr); + dma_wmb(); + set_64bit_val(qp_ctx, 8, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_FLAG, + info->rnr_retry_flag) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_TIME_L, + info->rnr_retry_time_l) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_THRESHOLD, + info->rnr_retry_threshold)); + set_64bit_val(qp_ctx, 16, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_TIME_H, + info->rnr_retry_time_h)); + dma_wmb(); + + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_RETRY_FLAG, info->retry_flag)); + dma_wmb(); + + set_64bit_val(qp_ctx, 40, + FIELD_PREP(RDMAQPC_TX_ERR_FLAG, info->err_flag) | + FIELD_PREP(RDMAQPC_TX_ACK_ERR_FLAG, + info->ack_err_flag) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_WQE_OFFSET, + info->last_ack_wqe_offset) | + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_UNA, + info->hw_sq_tail_una) | + FIELD_PREP(RDMAQPC_TX_RDWQE_PYLD_LENGTH_L, + info->rdwqe_pyld_length_l) | + FIELD_PREP(RDMAQPC_TX_RDWQE_PYLD_LENGTH_H, + info->rdwqe_pyld_length_h)); + + dma_wmb(); + + set_64bit_val(qp_ctx, 48, + FIELD_PREP(RDMAQPC_TX_PACKAGE_ERR_FLAG, + info->package_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_READ_FLAG, + info->recv_read_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_ERR_FLAG, + info->recv_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_FLAG, + info->recv_rd_msg_loss_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_CNT, + info->recv_rd_msg_loss_err_cnt) | + FIELD_PREP(RDMAQPC_TX_RD_MSG_LOSS_ERR_FLAG, + info->rd_msg_loss_err_flag) | + FIELD_PREP(RDMAQPC_TX_PKTCHK_RD_MSG_LOSS_ERR_CNT, + info->pktchk_rd_msg_loss_err_cnt)); + dma_wmb(); + + set_64bit_val(qp_ctx, 56, + FIELD_PREP(RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG, + info->retry_cqe_sq_opcode)); + dma_wmb(); + + print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +/** + * zxdh_sc_qp_setctx_roce - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + * @info: ctx info + */ +void zxdh_sc_qp_setctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info) +{ + struct zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp; + u64 mac; + u64 dmac; + u64 hdr; + u8 service_type; + u16 header_len; + u16 gqp_id; + + roce_info = info->roce_info; + udp = info->udp_info; + + if (roce_info->dcqcn_en || roce_info->dctcp_en) { + udp->tos &= ~ECN_CODE_PT_MASK; + udp->tos |= ECN_CODE_PT_VAL; + } + + mac = LS_64_1(roce_info->mac_addr[5], 0) | + LS_64_1(roce_info->mac_addr[4], 8) | + LS_64_1(roce_info->mac_addr[3], 16) | + LS_64_1(roce_info->mac_addr[2], 24) | + LS_64_1(roce_info->mac_addr[1], 32) | + LS_64_1(roce_info->mac_addr[0], 40); + + dmac = LS_64_1(udp->dest_mac[5], 0) | LS_64_1(udp->dest_mac[4], 8) | + LS_64_1(udp->dest_mac[3], 16) | LS_64_1(udp->dest_mac[2], 24) | + LS_64_1(udp->dest_mac[1], 32) | LS_64_1(udp->dest_mac[0], 40); + + qp->user_pri = info->user_pri; + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) { + service_type = ZXDH_QP_SERVICE_TYPE_RC; + gqp_id = zxdh_get_rc_gqp_id(qp->qp_uk.ws_index, + qp->dev->vhca_id, + qp->dev->total_vhca); + } else { + service_type = ZXDH_QP_SERVICE_TYPE_UD; + gqp_id = get_ud_gqp_id(qp->dev->vhca_id, qp->dev->total_vhca); + } + + if 
(udp->ipv4) + header_len = udp->insert_vlan_tag ? 70 : 66; + else + header_len = udp->insert_vlan_tag ? 90 : 86; + + roce_info->is_qp1 = qp->qp_uk.qp_id == 1 ? 1 : 0; + + set_64bit_val(qp_ctx, 0, + FIELD_PREP(RDMAQPC_TX_RETRY_CNT, udp->rexmit_thresh) | + FIELD_PREP(RDMAQPC_TX_CUR_RETRY_CNT, + udp->rexmit_thresh) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_PSN, + udp->psn_max) | + FIELD_PREP(RDMAQPC_TX_LSN_LOW1, udp->lsn)); + set_64bit_val(qp_ctx, 8, + FIELD_PREP(RDMAQPC_TX_LSN_HIGH23, RS_64_1(udp->lsn, 1)) | + FIELD_PREP(RDMAQPC_TX_ACKCREDITS, + (info->use_srq || qp->is_nvmeof_ioq) ? + 0x1f : + roce_info->ack_credits) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_THRESHOLD, + udp->rnr_nak_thresh)); + set_64bit_val(qp_ctx, 16, FIELD_PREP(RDMAQPC_TX_SSN, 1)); + set_64bit_val(qp_ctx, 24, + FIELD_PREP(RDMAQPC_TX_PSN_MAX, udp->psn_max) | + FIELD_PREP(RDMAQPC_TX_PSN_NEXT, udp->psn_nxt)); + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_HIGH, + RS_64_1(IRDMAQPC_HW_SQ_TAIL_INIT, 11)) | + FIELD_PREP(RDMAQPC_TX_LOCAL_ACK_TIMEOUT, + udp->timeout)); + set_64bit_val(qp_ctx, 40, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_UNA, + IRDMAQPC_HW_SQ_TAIL_INIT)); + set_64bit_val(qp_ctx, 48, 0); + set_64bit_val(qp_ctx, 56, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_CNT, + udp->rnr_nak_thresh) | + FIELD_PREP(RDMAQPC_TX_RNR_CUR_RETRY_CNT, + udp->rnr_nak_thresh)); + hdr = FIELD_PREP(RDMAQPC_TX_SERVICE_TYPE, service_type) | + FIELD_PREP(RDMAQPC_TX_SQ_VMAP, qp->virtual_map) | + FIELD_PREP(RDMAQPC_TX_SQ_LPBL_SIZE, qp->virtual_map ? 1 : 0) | + FIELD_PREP(RDMAQPC_TX_IS_QP1, roce_info->is_qp1) | + FIELD_PREP(RDMAQPC_TX_IPV4, udp->ipv4) | + FIELD_PREP(RDMAQPC_TX_FAST_REG_EN, roce_info->fast_reg_en) | + FIELD_PREP(RDMAQPC_TX_BIND_EN, roce_info->bind_en) | + FIELD_PREP(RDMAQPC_TX_INSERT_VLANTAG, udp->insert_vlan_tag) | + FIELD_PREP(RDMAQPC_TX_VLANTAG, udp->vlan_tag) | + FIELD_PREP(RDMAQPC_TX_PD_INDEX, roce_info->pd_id) | + FIELD_PREP(RDMAQPC_TX_RSV_LKEY_EN, roce_info->priv_mode_en) | + FIELD_PREP(RDMAQPC_TX_ECN_EN, roce_info->ecn_en); + dma_wmb(); + + set_64bit_val(qp_ctx, 64, hdr); + set_64bit_val(qp_ctx, 72, qp->sq_pa); + set_64bit_val(qp_ctx, 80, + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR3, + udp->dest_ip_addr[3]) | + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR2, + udp->dest_ip_addr[2])); + set_64bit_val(qp_ctx, 88, + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR1, + udp->dest_ip_addr[1]) | + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR0, + udp->dest_ip_addr[0])); + hdr = FIELD_PREP(RDMAQPC_TX_SRC_PORTNUM, udp->src_port) | + FIELD_PREP(RDMAQPC_TX_DEST_PORTNUM, udp->dst_port) | + FIELD_PREP(RDMAQPC_TX_FLOWLABEL, udp->flow_label) | + FIELD_PREP(RDMAQPC_TX_TTL, udp->ttl) | + FIELD_PREP(RDMAQPC_TX_ROCE_TVER, roce_info->roce_tver); + dma_wmb(); + + set_64bit_val(qp_ctx, 96, hdr); + set_64bit_val( + qp_ctx, 104, + FIELD_PREP(RDMAQPC_TX_QKEY, roce_info->qkey) | + FIELD_PREP(RDMAQPC_TX_DEST_QP, roce_info->dest_qp) | + FIELD_PREP(RDMAQPC_TX_ORD_SIZE, roce_info->ord_size)); + set_64bit_val(qp_ctx, 112, + FIELD_PREP(RDMAQPC_TX_DEST_MAC, dmac) | + FIELD_PREP(RDMAQPC_TX_PKEY, roce_info->p_key)); + set_64bit_val(qp_ctx, 120, info->qp_compl_ctx); + set_64bit_val(qp_ctx, 128, + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR3, + udp->local_ipaddr[3]) | + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR2, + udp->local_ipaddr[2])); + set_64bit_val(qp_ctx, 136, + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR1, + udp->local_ipaddr[1]) | + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR0, + udp->local_ipaddr[0])); + set_64bit_val(qp_ctx, 144, + FIELD_PREP(RDMAQPC_TX_SRC_MAC, mac) | + FIELD_PREP(RDMAQPC_TX_PMTU, udp->pmtu) | + 
FIELD_PREP(RDMAQPC_TX_ACK_TIMEOUT, udp->timeout) | + FIELD_PREP(RDMAQPC_TX_LOG_SQSIZE, + qp->hw_sq_size)); + hdr = FIELD_PREP(RDMAQPC_TX_CQN, info->send_cq_num) | + FIELD_PREP(RDMAQPC_TX_NVMEOF_QID, qp->nvmeof_qid) | + FIELD_PREP(RDMAQPC_TX_IS_NVMEOF_TGT, qp->is_nvmeof_tgt) | + FIELD_PREP(RDMAQPC_TX_IS_NVMEOF_IOQ, qp->is_nvmeof_ioq) | + FIELD_PREP(RDMAQPC_TX_DCQCN_ID, + gqp_id) | //todo:映射方案确定后按方案修改 + FIELD_PREP(RDMAQPC_TX_DCQCN_EN, roce_info->dcqcn_en) | + FIELD_PREP(RDMAQPC_TX_QUEUE_TC, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + ZXDH_QP_UD_QUEUE_TC : + qp->qp_uk.user_pri); + dma_wmb(); + + set_64bit_val(qp_ctx, 152, hdr); + set_64bit_val( + qp_ctx, 160, + FIELD_PREP(RDMAQPC_TX_QPN, qp->qp_uk.qp_id) | + FIELD_PREP(RDMAQPC_TX_TOS, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + ZXDH_QP_UD_TOS : + udp->tos) | + FIELD_PREP(RDMAQPC_TX_VHCA_ID_LOW6, qp->dev->vhca_id)); + set_64bit_val( + qp_ctx, 168, + FIELD_PREP(RDMAQPC_TX_VHCA_ID_HIGH4, + RS_64_1(qp->dev->vhca_id, 6)) | + FIELD_PREP(RDMAQPC_TX_QP_FLOW_SET, qp->qp_uk.ws_index) | + FIELD_PREP(RDMAQPC_TX_QPSTATE, info->next_qp_state) | + FIELD_PREP(RDMAQPC_TX_DEBUG_SET, qp->dev->vhca_id)); + + set_64bit_val(qp_ctx, 256, FIELD_PREP(RDMAQPC_RX_LAST_OPCODE, 4)); + set_64bit_val(qp_ctx, 264, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + roce_info->qkey : + 0); + set_64bit_val(qp_ctx, 272, FIELD_PREP(RDMAQPC_RX_EPSN, udp->epsn)); + set_64bit_val(qp_ctx, 280, FIELD_PREP(RDMAQPC_RX_IRD_RXNUM, 511)); + set_64bit_val(qp_ctx, 288, 0); + set_64bit_val(qp_ctx, 296, 0); + set_64bit_val(qp_ctx, 304, 0); + set_64bit_val(qp_ctx, 312, 0); + set_64bit_val(qp_ctx, 320, + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR3, + udp->local_ipaddr[3]) | + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR2, + udp->local_ipaddr[2])); + set_64bit_val(qp_ctx, 328, + FIELD_PREP(RDMAQPC_RX_SRC_MAC_HIGH16, RS_64_1(mac, 32)) | + FIELD_PREP(RDMAQPC_RX_DEST_MAC, dmac)); + + hdr = FIELD_PREP(RDMAQPC_RX_IS_NVMEOF_IOQ, qp->is_nvmeof_ioq) | + FIELD_PREP(RDMAQPC_RX_INSERT_VLANTAG, udp->insert_vlan_tag) | + FIELD_PREP(RDMAQPC_RX_PMTU, udp->pmtu) | + FIELD_PREP(RDMAQPC_RX_SERVICE_TYPE, service_type) | + FIELD_PREP(RDMAQPC_RX_IPV4, udp->ipv4) | + FIELD_PREP(RDMAQPC_RX_PD_INDEX, roce_info->pd_id) | + FIELD_PREP(RDMAQPC_RX_QPSTATE, info->next_qp_state) | + FIELD_PREP(RDMAQPC_RX_SRC_MAC_LOW32, mac); + dma_wmb(); + + set_64bit_val(qp_ctx, 336, hdr); + hdr = FIELD_PREP(RDMAQPC_RX_DEST_QP_HIGH12, + RS_64_1(roce_info->dest_qp, 12)) | + FIELD_PREP(RDMAQPC_RX_FLOWLABEL, udp->flow_label) | + FIELD_PREP(RDMAQPC_RX_TTL, udp->ttl) | + FIELD_PREP(RDMAQPC_RX_TOS, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? 
+ ZXDH_QP_UD_TOS : + udp->tos) | + FIELD_PREP(RDMAQPC_RX_VLANTAG, udp->vlan_tag); + dma_wmb(); + + set_64bit_val(qp_ctx, 344, hdr); + + if (info->use_srq) { + set_64bit_val(qp_ctx, 352, + FIELD_PREP(RDMAQPC_RX_SRQN, + qp->srq->srq_uk.srq_id)); + } else if (qp->is_nvmeof_ioq) { + set_64bit_val(qp_ctx, 352, + FIELD_PREP(RDMAQPC_RX_NVMEOF_QID, + qp->nvmeof_qid) | + FIELD_PREP(RDMAQPC_RX_IS_NVMEOF_TGT, + qp->is_nvmeof_tgt)); + } else { + set_64bit_val(qp_ctx, 352, qp->rq_pa); + } + + set_64bit_val(qp_ctx, 360, qp->shadow_area_pa); + set_64bit_val(qp_ctx, 368, + FIELD_PREP(RDMAQPC_RX_HDR_LEN, header_len) | + FIELD_PREP(RDMAQPC_RX_PKEY, roce_info->p_key) | + FIELD_PREP(RDMAQPC_RX_SRC_PORTNUM, + udp->src_port)); + hdr = FIELD_PREP(RDMAQPC_RX_WQE_SIGN_EN, 0) | + FIELD_PREP(RDMAQPC_RX_RQ_VMAP, qp->virtual_map) | + FIELD_PREP(RDMAQPC_RX_IRD_SIZE, + zxdh_sc_get_encoded_ird_size(roce_info->ird_size)) | + FIELD_PREP(RDMAQPC_RX_LOG_RQSIZE, qp->hw_rq_size) | + FIELD_PREP(RDMAQPC_RX_SEND_EN, 1) | + FIELD_PREP(RDMAQPC_RX_WRITE_EN, roce_info->wr_rdresp_en) | + FIELD_PREP(RDMAQPC_RX_READ_EN, roce_info->rd_en) | + FIELD_PREP(RDMAQPC_RX_LOG_RQE_SIZE, qp->qp_uk.rq_wqe_size) | + FIELD_PREP(RDMAQPC_RX_USE_SRQ, info->use_srq) | + FIELD_PREP(RDMAQPC_RX_CQN, info->rcv_cq_num) | + FIELD_PREP(RDMAQPC_RX_DEST_QP_LOW12, roce_info->dest_qp) | + FIELD_PREP(RDMAQPC_RX_RQ_LPBL_SIZE, qp->virtual_map ? 1 : 0) | + FIELD_PREP(RDMAQPC_RX_RSV_LKEY_EN, roce_info->priv_mode_en) | + FIELD_PREP(RDMAQPC_RX_RNR_TIMER, udp->min_rnr_timer) | + FIELD_PREP(RDMAQPC_RX_ACK_CREDITS, + (info->use_srq || qp->is_nvmeof_ioq) ? 1 : 0); + dma_wmb(); + + set_64bit_val(qp_ctx, 376, hdr); + set_64bit_val( + qp_ctx, 384, + FIELD_PREP(RDMAQPC_RX_QP_GROUP_NUM, + gqp_id) | //todo:映射方案确定后按方案修改 + FIELD_PREP(RDMAQPC_RX_QP_FLOW_SET, qp->qp_uk.ws_index) | + FIELD_PREP(RDMAQPC_RX_DEBUG_SET, qp->dev->vhca_id) | + FIELD_PREP(RDMAQPC_RX_VHCA_ID, qp->dev->vhca_id) | + FIELD_PREP(RDMAQPC_RX_QUEUE_TC, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? 
+ ZXDH_QP_UD_QUEUE_TC : + qp->qp_uk.user_pri)); + set_64bit_val(qp_ctx, 392, info->qp_compl_ctx); + set_64bit_val(qp_ctx, 400, + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR1, + udp->dest_ip_addr[1]) | + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR0, + udp->dest_ip_addr[0])); + set_64bit_val(qp_ctx, 408, + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR3, + udp->dest_ip_addr[3]) | + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR2, + udp->dest_ip_addr[2])); + set_64bit_val(qp_ctx, 416, + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR1, + udp->local_ipaddr[1]) | + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR0, + udp->local_ipaddr[0])); + + print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +/** + * zxdh_sc_alloc_stag - mr stag alloc + * @dev: sc device struct + * @info: stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_alloc_stag(struct zxdh_sc_dev *dev, + struct zxdh_allocate_stag_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + u32 pd_h, pd_l; + enum zxdh_page_size page_size; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else + page_size = ZXDH_PAGE_SIZE_4K; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + if (info->chunk_size) + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX, + info->first_pm_pbl_idx)); + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l) | + FIELD_PREP(ZXDH_CQPSQ_STAG_STAGLEN, + info->total_len)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_ALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_ARIGHTS, info->access_rights) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HPAGESIZE, page_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_INVALID_EN, 1); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_mr_reg_non_shared - non-shared mr registration + * @dev: sc device struct + * @info: mr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_mr_reg_non_shared(struct zxdh_sc_dev *dev, + struct zxdh_reg_ns_stag_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 fbo; + struct zxdh_sc_cqp *cqp; + u64 hdr; + u32 pble_obj_cnt, pd_h, pd_l; + u8 addr_type; + enum zxdh_page_size page_size; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else if (info->page_size == 0x1000) + page_size = ZXDH_PAGE_SIZE_4K; + else + return -EINVAL; + + pble_obj_cnt = dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt) + return -EINVAL; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + 
return -ENOSPC; + fbo = info->va & (info->page_size - 1); + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + set_64bit_val(wqe, 8, + (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED ? info->va : + fbo)); + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_KEY, info->stag_key) | + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + if (!info->chunk_size) { + set_64bit_val(wqe, 32, info->reg_addr_pa); + } else { + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX, + info->first_pm_pbl_index)); + } + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STAG_STAGLEN, info->total_len) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + addr_type = (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED) ? 1 : 0; + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_REG_MR) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_VABASEDTO, addr_type) | + FIELD_PREP(ZXDH_CQPSQ_STAG_SHARED, 0) | + FIELD_PREP(ZXDH_CQPSQ_STAG_ARIGHTS, info->access_rights) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HPAGESIZE, page_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_INVALID_EN, 0) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_FORCE_DEL, 0); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_dealloc_stag - deallocate stag + * @dev: sc device struct + * @info: dealloc stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_dealloc_stag(struct zxdh_sc_dev *dev, + struct zxdh_dealloc_stag_info *info, + u64 scratch, bool post_sq) +{ + u64 hdr; + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u32 pd_h, pd_l; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DEALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, info->mr) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_FORCE_DEL, 0); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_mw_alloc - mw allocate + * @dev: sc device struct + * @info: memory window allocation information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_mw_alloc(struct zxdh_sc_dev *dev, + struct zxdh_mw_alloc_info *info, u64 scratch, + bool post_sq) +{ + u64 hdr; + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u32 pd_h, pd_l; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 
0x03; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_ALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MWTYPE, info->mw_wide) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY, + info->mw1_bind_dont_vldt_key) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 0); + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->mw_stag_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp + * @qp: sc qp struct + * @info: fast mr info + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_mr_fast_register(struct zxdh_sc_qp *qp, + struct zxdh_fast_reg_stag_info *info, bool post_sq) +{ + u64 temp, hdr; + __le64 *wqe; + u32 wqe_idx; + bool local_fence = true; + enum zxdh_page_size page_size; + struct zxdh_post_sq_info sq_info = {}; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else + page_size = ZXDH_PAGE_SIZE_4K; + + sq_info.wr_id = info->wr_id; + sq_info.signaled = info->signaled; + + wqe = zxdh_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, + ZXDH_QP_WQE_MIN_QUANTA, 0, &sq_info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(&qp->qp_uk, wqe_idx); + + temp = (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED) ? + (uintptr_t)info->va : + info->fbo; + set_64bit_val(wqe, 8, temp); + + set_64bit_val(wqe, 16, + info->total_len | FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, + info->first_pm_pbl_index)); + + temp = info->first_pm_pbl_index >> 16; + + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) | + FIELD_PREP(IRDMAQPSQ_PBLADDR, + info->reg_addr_pa >> + ZXDH_HW_PAGE_SHIFT)); + + hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) | + FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) | + FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) | + FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) | + FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) | + FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_FAST_REG_MR) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_QP_WQE_MIN_SIZE, false); + + if (post_sq) + zxdh_uk_qp_post_wr(&qp->qp_uk); + + return 0; +} + +/** + * zxdh_sc_dev_qplist_init - Init the qos qplist + * @dev: pointer to dev + */ +void zxdh_sc_dev_qplist_init(struct zxdh_sc_dev *dev) +{ + u8 i; + + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + mutex_init(&dev->qos[i].qos_mutex); + INIT_LIST_HEAD(&dev->qos[i].qplist); + } +} + +/** + * zxdh_get_encoded_wqe_size - given wq size, returns hardware encoded size + * @wqsize: size of the wq (sq, rq) to encoded_size + * @queue_type: queue type selected for the calculation algorithm + */ +u8 zxdh_get_encoded_wqe_size(u32 wqsize, enum zxdh_queue_type queue_type) +{ + u8 encoded_size = 0; + + /* cqp sq's 
hw coded value starts from 1 for size of 4 + * while it starts from 0 for qp' wq's. + */ + if (queue_type == ZXDH_QUEUE_TYPE_CQP) + encoded_size = 1; + while (wqsize >>= 1) + encoded_size++; + + return encoded_size; +} + +/** + * zxdh_sc_gather_stats - collect the statistics + * @cqp: struct for cqp hw + * @info: gather stats info structure + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_gather_stats(struct zxdh_sc_cqp *cqp, + struct zxdh_stats_gather_info *info, + u64 scratch) +{ + __le64 *wqe; + u64 temp; + + if (info->stats_buff_mem.size < ZXDH_GATHER_STATS_BUF_SIZE) + return -ENOSPC; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STATS_HMC_FCN_INDEX, + info->hmc_fcn_index)); + set_64bit_val(wqe, 32, info->stats_buff_mem.pa); + + temp = FIELD_PREP(ZXDH_CQPSQ_STATS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_INST, info->use_stats_inst) | + FIELD_PREP(ZXDH_CQPSQ_STATS_INST_INDEX, info->stats_inst_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX, + info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_OP, ZXDH_CQP_OP_GATHER_STATS); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_stats_inst - allocate or free stats instance + * @cqp: struct for cqp hw + * @info: stats info structure + * @alloc: alloc vs. delete flag + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_manage_stats_inst(struct zxdh_sc_cqp *cqp, + struct zxdh_stats_inst_info *info, + bool alloc, u64 scratch) +{ + __le64 *wqe; + u64 temp; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STATS_HMC_FCN_INDEX, + info->hmc_fn_id)); + temp = FIELD_PREP(ZXDH_CQPSQ_STATS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STATS_ALLOC_INST, alloc) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX, + info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_INST_INDEX, info->stats_idx) | + FIELD_PREP(ZXDH_CQPSQ_STATS_OP, ZXDH_CQP_OP_MANAGE_STATS); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_set_up_map - set the up map table + * @cqp: struct for cqp hw + * @info: User priority map info + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_set_up_map(struct zxdh_sc_cqp *cqp, + struct zxdh_up_info *info, u64 scratch) +{ + __le64 *wqe; + u64 temp; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = info->map[0] | LS_64_1(info->map[1], 8) | + LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) | + LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) | + LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56); + + set_64bit_val(wqe, 0, temp); + set_64bit_val( + wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) | + FIELD_PREP(ZXDH_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx)); + + temp = FIELD_PREP(ZXDH_CQPSQ_UP_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_UP_USEVLAN, info->use_vlan) | + 
FIELD_PREP(ZXDH_CQPSQ_UP_USEOVERRIDE, + info->use_cnp_up_override) | + FIELD_PREP(ZXDH_CQPSQ_UP_OP, ZXDH_CQP_OP_UP_MAP); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_ws_node - create/modify/destroy WS node + * @cqp: struct for cqp hw + * @info: node info structure + * @node_op: 0 for add 1 for modify, 2 for delete + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_manage_ws_node(struct zxdh_sc_cqp *cqp, + struct zxdh_ws_node_info *info, + enum zxdh_ws_node_op node_op, u64 scratch) +{ + __le64 *wqe; + u64 temp = 0; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_WS_VSI, info->vsi) | + FIELD_PREP(ZXDH_CQPSQ_WS_WEIGHT, info->weight)); + + temp = FIELD_PREP(ZXDH_CQPSQ_WS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODEOP, node_op) | + FIELD_PREP(ZXDH_CQPSQ_WS_ENABLENODE, info->enable) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODETYPE, info->type_leaf) | + FIELD_PREP(ZXDH_CQPSQ_WS_PRIOTYPE, info->prio_type) | + FIELD_PREP(ZXDH_CQPSQ_WS_TC, info->tc) | + FIELD_PREP(ZXDH_CQPSQ_WS_OP, ZXDH_CQP_OP_WORK_SCHED_NODE) | + FIELD_PREP(ZXDH_CQPSQ_WS_PARENTID, info->parent_id) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODEID, info->id); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_flush_wqes - flush qp's wqe + * @qp: sc qp + * @info: dlush information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_flush_wqes(struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, u64 scratch, + bool post_sq) +{ + u64 temp = 0; + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + bool flush_sq = false, flush_rq = false; + + if (info->rq && !qp->flush_rq) + flush_rq = true; + if (info->sq && !qp->flush_sq) + flush_sq = true; + qp->flush_sq |= flush_sq; + qp->flush_rq |= flush_rq; + + if (!flush_sq && !flush_rq) { + pr_err("CQP: Additional flush request ignored for qp %x\n", + qp->qp_uk.qp_id); + return -EALREADY; + } + + cqp = qp->pd->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + if (info->userflushcode) { + if (flush_rq) + temp |= FIELD_PREP(ZXDH_CQPSQ_FWQE_RQMNERR, + info->rq_minor_code) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_RQMJERR, + info->rq_major_code); + if (flush_sq) + temp |= FIELD_PREP(ZXDH_CQPSQ_FWQE_SQMNERR, + info->sq_minor_code) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_SQMJERR, + info->sq_major_code); + } + set_64bit_val(wqe, 8, temp); + + temp = (info->generate_ae) ? 
+ info->ae_code | FIELD_PREP(ZXDH_CQPSQ_FWQE_AESOURCE, + info->ae_src) : + 0; + set_64bit_val(wqe, 16, temp); + + hdr = qp->qp_uk.qp_id | + FIELD_PREP(ZXDH_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_USERFLCODE, info->userflushcode) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_FLUSHSQ, flush_sq) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_FLUSHRQ, flush_rq) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_FLUSH_WQES); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_gen_ae - generate AE, uses flush WQE CQP OP + * @qp: sc qp + * @info: gen ae information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_gen_ae(struct zxdh_sc_qp *qp, struct zxdh_gen_ae_info *info, + u64 scratch, bool post_sq) +{ + u64 temp; + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->pd->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = info->ae_code | + FIELD_PREP(ZXDH_CQPSQ_FWQE_AESOURCE, info->ae_src); + set_64bit_val(wqe, 8, temp); + + hdr = qp->qp_uk.qp_id | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_GEN_AE) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_GENERATE_AE, 1) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/*** zxdh_sc_qp_upload_context - upload qp's context + * @dev: sc device struct + * @info: upload context info ptr for return + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_qp_upload_context(struct zxdh_sc_dev *dev, + struct zxdh_upload_context_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->buf_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_UCTX_QPID, info->qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_UPLOAD_QPC) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_QPTYPE, info->qp_type) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_RAWFORMAT, info->raw_format) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_push_page - Handle push page + * @cqp: struct for cqp hw + * @info: push page info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_push_page(struct zxdh_sc_cqp *cqp, + struct zxdh_cqp_manage_push_page_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->qs_handle); + hdr = FIELD_PREP(ZXDH_CQPSQ_MPP_PPTYPE, info->push_page_type) | + 
FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_PUSH_PAGES) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_MPP_FREE_PAGE, info->free_page); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_suspend_qp - suspend qp for param change + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_suspend_qp(struct zxdh_sc_cqp *cqp, struct zxdh_sc_qp *qp, + u64 scratch) +{ + u64 hdr; + __le64 *wqe; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_SUSPEND_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_resume_qp - resume qp after suspend + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_resume_qp(struct zxdh_sc_cqp *cqp, struct zxdh_sc_qp *qp, + u64 scratch) +{ + u64 hdr; + __le64 *wqe; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_RESUME_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_init - initialize completion q + * @cq: cq struct + * @info: cq initialization info + */ +int zxdh_sc_cq_init(struct zxdh_sc_cq *cq, struct zxdh_cq_init_info *info) +{ + u32 pble_obj_cnt; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cq->cq_pa = info->cq_base_pa; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + cq->ceq_index = info->ceq_index; + info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; + zxdh_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); + + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->ceqe_mask = info->ceqe_mask; + cq->cq_type = (info->type) ? 
info->type : ZXDH_CQ_TYPE_IO; + cq->shadow_area_pa = info->shadow_area_pa; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + return 0; +} + +/** + * zxdh_sc_cq_create - create completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_cq_create(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 temp; + u64 hdr; + struct zxdh_sc_ceq *ceq; + int ret_code = 0; + + cqp = cq->dev->cqp; + if (cq->cq_uk.cq_id > + (cqp->dev->base_cqn + + cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].max_cnt - 1)) + return -EINVAL; + + if (cq->ceq_index > (cq->dev->max_ceqs - 1)) + return -EINVAL; + + ceq = cq->dev->ceq[cq->ceq_index]; + if (ceq && ceq->reg_cq) + ret_code = zxdh_sc_add_cq_ctx(ceq, cq); + + if (ret_code) + return ret_code; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) { + if (ceq && ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, cq); + return -ENOSPC; + } + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 1) | + FIELD_PREP(ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, cq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | + FIELD_PREP(ZXDH_CQPSQ_CQ_DEBUG_SET, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VHCAID, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(cq->shadow_area_pa, 6)); + + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CEQ_ID, + (cq->ceq_id_valid ? cq->ceq_id : 0)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ST, cq->cq_st) | + FIELD_PREP(ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT, cq->is_in_list_cnt) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, cq->cq_uk.cq_log_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + cq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); // hw self-maintenance field + set_64bit_val(wqe, 48, + cq->virtual_map ? 
cq->first_pm_pbl_idx : + RS_64_1(cq->cq_pa, 8)); + set_64bit_val(wqe, 56, RS_64_1(cq, 1)); + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_destroy - destroy completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_cq_destroy(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + struct zxdh_sc_ceq *ceq; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + ceq = cq->dev->ceq[cq->ceq_index]; + if (ceq && ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, cq); + + if (cq->cq_overflow_locked_flag) + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + else + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_CQ_STATE)); + + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, 0); + set_64bit_val(wqe, 32, 0); + set_64bit_val(wqe, 40, 0); + set_64bit_val(wqe, 48, 0); + set_64bit_val(wqe, 56, 0); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_resize - set resized cq buffer info + * @cq: resized cq + * @info: resized cq buffer info + */ +void zxdh_sc_cq_resize(struct zxdh_sc_cq *cq, struct zxdh_modify_cq_info *info) +{ + cq->virtual_map = info->virtual_map; + cq->cq_pa = info->cq_pa; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->pbl_chunk_size = info->pbl_chunk_size; + zxdh_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); +} + +/** + * zxdh_sc_cq_modify - modify a Completion Queue + * @cq: cq struct + * @info: modification info struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag to post to sq + */ +static int zxdh_sc_cq_modify(struct zxdh_sc_cq *cq, + struct zxdh_modify_cq_info *info, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + u64 temp; + u32 pble_obj_cnt; + + pble_obj_cnt = cq->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + if (info->cq_resize && info->virtual_map && + info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_RESIZE)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 1) | + FIELD_PREP(ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, cq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | + FIELD_PREP(ZXDH_CQPSQ_CQ_DEBUG_SET, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VHCAID, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + 
FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(cq->shadow_area_pa, 6)); + + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CEQ_ID, + (cq->ceq_id_valid ? cq->ceq_id : 0)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ST, cq->cq_st) | + FIELD_PREP(ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT, cq->is_in_list_cnt) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, + zxdh_num_to_log(info->cq_size)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + cq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); // hw self-maintenance field + set_64bit_val(wqe, 48, + info->virtual_map ? info->first_pm_pbl_idx : + RS_64_1(info->cq_pa, 8)); + set_64bit_val(wqe, 56, RS_64_1(cq, 1)); + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_CQ_MODIFY_SIZE, 1) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_modify_cq_moderation - modify cq_count and cq_period of a Completion Queue + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag to post to sq + */ +static int zxdh_sc_modify_cq_moderation(struct zxdh_sc_cq *cq, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + u64 temp; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_MODIFY)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_CQ_MODIFY_SIZE, 0) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_check_cqp_progress - check cqp processing progress + * @timeout: timeout info struct + * @dev: sc device struct + */ +void zxdh_check_cqp_progress(struct zxdh_cqp_timeout *timeout, + struct zxdh_sc_dev *dev) +{ + if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]) { + timeout->compl_cqp_cmds = dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]; + timeout->count = 0; + } else { + if (dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS] != + timeout->compl_cqp_cmds) + timeout->count++; + } +} + +/** + * zxdh_get_cqp_reg_info - get head and tail for cqp using registers + * @cqp: struct for cqp hw + * @val: cqp tail register value + * @tail: wqtail register value + * @error: cqp processing err + */ +static inline void zxdh_get_cqp_reg_info(struct zxdh_sc_cqp *cqp, u32 *val, + u32 *tail, u32 *error) +{ + *val = readl(cqp->dev->hw->hw_addr + C_RDMA_CQP_TAIL); + *tail = (u32)FIELD_GET(ZXDH_CQPTAIL_WQTAIL, *val); + *error = readl(cqp->dev->hw->hw_addr + C_RDMA_CQP_ERROR); +} + +/** + * zxdh_cqp_poll_registers - poll cqp registers + * @cqp: struct for cqp hw + * @tail: 
wqtail register value + * @count: how many times to try for completion + */ +static int zxdh_cqp_poll_registers(struct zxdh_sc_cqp *cqp, u32 tail, u32 count) +{ + u32 i = 0; + u32 newtail, error, val; + + while (i++ < count) { + zxdh_get_cqp_reg_info(cqp, &val, &newtail, &error); + if (error) { + error = readl(cqp->dev->hw->hw_addr + + C_RDMA_CQP_ERRCODE); + pr_err("CQP: CQPERRCODES error_code[x%08X]\n", error); + return -EIO; + } + if (newtail != tail) { + /* SUCCESS */ + ZXDH_RING_MOVE_TAIL(cqp->sq_ring); + cqp->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]++; + return 0; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + } + + return -ETIMEDOUT; +} + +/** + * zxdh_sc_find_reg_cq - find cq ctx index + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +static u32 zxdh_sc_find_reg_cq(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + u32 i; + + for (i = 0; i < ceq->reg_cq_size; i++) { + if (cq == ceq->reg_cq[i]) + return i; + } + + return ZXDH_INVALID_CQ_IDX; +} + +/** + * zxdh_sc_add_cq_ctx - add cq ctx tracking for ceq + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +int zxdh_sc_add_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + unsigned long flags; + + spin_lock_irqsave(&ceq->req_cq_lock, flags); + + if (ceq->reg_cq_size == ceq->elem_cnt) { + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + return -ENOSPC; + } + + ceq->reg_cq[ceq->reg_cq_size++] = cq; + + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + + return 0; +} + +/** + * zxdh_sc_remove_cq_ctx - remove cq ctx tracking for ceq + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +void zxdh_sc_remove_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + unsigned long flags; + u32 cq_ctx_idx; + + spin_lock_irqsave(&ceq->req_cq_lock, flags); + cq_ctx_idx = zxdh_sc_find_reg_cq(ceq, cq); + if (cq_ctx_idx == ZXDH_INVALID_CQ_IDX) + goto exit; + + ceq->reg_cq_size--; + if (cq_ctx_idx != ceq->reg_cq_size) + ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; + ceq->reg_cq[ceq->reg_cq_size] = NULL; + +exit: + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); +} + +/** + * zxdh_sc_cqp_init - Initialize buffers for a control Queue Pair + * @cqp: IWARP control queue pair pointer + * @info: IWARP control queue pair init info pointer + * + * Initializes the object and context buffers for a control Queue Pair. 
+ */ +int zxdh_sc_cqp_init(struct zxdh_sc_cqp *cqp, struct zxdh_cqp_init_info *info) +{ + u8 hw_sq_size; + + if (info->sq_size > ZXDH_CQP_SW_SQSIZE_2048 || + info->sq_size < ZXDH_CQP_SW_SQSIZE_4 || + ((info->sq_size & (info->sq_size - 1)))) + return -EINVAL; + + hw_sq_size = + zxdh_get_encoded_wqe_size(info->sq_size, ZXDH_QUEUE_TYPE_CQP); + cqp->size = sizeof(*cqp); + cqp->sq_size = info->sq_size; + cqp->hw_sq_size = hw_sq_size; + cqp->sq_base = info->sq; + cqp->sq_pa = info->sq_pa; + cqp->dev = info->dev; + cqp->struct_ver = info->struct_ver; + cqp->hw_maj_ver = info->hw_maj_ver; + cqp->hw_min_ver = info->hw_min_ver; + cqp->scratch_array = info->scratch_array; + cqp->polarity = 0; + cqp->en_datacenter_tcp = info->en_datacenter_tcp; + cqp->ena_vf_count = info->ena_vf_count; + cqp->hmc_profile = info->hmc_profile; + cqp->ceqs_per_vf = info->ceqs_per_vf; + cqp->disable_packed = info->disable_packed; + cqp->rocev2_rto_policy = info->rocev2_rto_policy; + cqp->protocol_used = info->protocol_used; + cqp->state_cfg = true; // CQP Create: true, CQP Destroy: false + memcpy(&cqp->dcqcn_params, &info->dcqcn_params, + sizeof(cqp->dcqcn_params)); + info->dev->cqp = cqp; + + ZXDH_RING_INIT(cqp->sq_ring, cqp->sq_size); + cqp->dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS] = 0; + cqp->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS] = 0; + /* for the cqp commands backlog. */ + INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); + + writel(ZXDH_CQPDB_INIT_VALUE, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_DB)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_BASE_HIGH)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_BASE_LOW)); + writel(0, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_AH_CACHE_ID)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_INDICATE_ID)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + return 0; +} + +/** + * zxdh_sc_cqp_create - create cqp during bringup + * @cqp: struct for cqp hw + * @maj_err: If error, major err number + * @min_err: If error, minor err number + */ +int zxdh_sc_cqp_create(struct zxdh_sc_cqp *cqp, u16 *maj_err, u16 *min_err) +{ + u32 temp; + u32 cnt = 0, val = 0, err_code; + int ret_code; + struct zxdh_pci_f *rf = + container_of(cqp->dev, struct zxdh_pci_f, sc_dev); + + spin_lock_init(&cqp->dev->cqp_lock); + + //reset CQP status + writel(0, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONFIG_DONE)); + mdelay(5); + + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + pr_info("%s reset cqp timeout!\n", __func__); + break; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (val & 0x01); + cnt = 0; + + // VF_PF_ID + temp = (u32)(FIELD_PREP(ZXDH_CQP_CREATE_EPID, + (rf->ep_id + ZXDH_HOST_EP0_ID)) | + FIELD_PREP(ZXDH_CQP_CREATE_VFID, rf->vf_id) | + FIELD_PREP(ZXDH_CQP_CREATE_PFID, rf->pf_id) | + FIELD_PREP(ZXDH_CQP_CREATE_VFUNC_ACTIVE, rf->ftype)); + writel(temp, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_PF_VF_ID(cqp->dev->vhca_id))); + + // CQP_Context_0 + temp = (u32)(FIELD_PREP(ZXDH_CQP_CREATE_STATE_CFG, cqp->state_cfg) | + FIELD_PREP(ZXDH_CQP_CREATE_SQSIZE, cqp->sq_size) | + FIELD_PREP(ZXDH_CQP_CREATE_QPC_OBJ_IDX, 11) | + FIELD_PREP(ZXDH_CQP_CREATE_QPC_INDICATE_IDX, 2) | + FIELD_PREP(ZXDH_CQP_CREATE_OBJ_IDX, 11) | + FIELD_PREP(ZXDH_CQP_CREATE_INDICATE_IDX, 2)); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_0)); + 
// CQP_Context_1 + writel(cqp->dev->base_qpn, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_1)); + // CQP_Context_2 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_HIGH, cqp->sq_pa); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_2)); + // CQP_Context_3 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_LOW, cqp->sq_pa); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_3)); + // CQP_Context_4 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_HIGH, (uintptr_t)cqp); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_4)); + // CQP_Context_5 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_LOW, (uintptr_t)cqp); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_5)); + + // CQP_CQ_NUM INIT + writel(ZXDH_CCQN_INIT_VALUE, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + wmb(); /* make sure WQE is populated before valid bit is set */ + // CQP_Config_Done + writel(1, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONFIG_DONE)); + +#ifdef ZXDH_DEBUG + writel(1, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_STATUS)); +#endif + + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + goto err; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (!val); + + if (FLD_RS_32(cqp->dev, val, ZXDH_CCQPSTATUS_CCQP_ERR)) { + ret_code = -EOPNOTSUPP; + goto err; + } + + cqp->process_config_pte_table = zxdh_sc_config_pte_table; + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + return 0; +err: + dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size, cqp->sdbuf.va, + cqp->sdbuf.pa); + err_code = readl( + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_ERRCODE)); + *min_err = (u16)FIELD_GET(ZXDH_CQPERRCODES_CQP_MINOR_CODE, err_code); + *maj_err = (u16)FIELD_GET(ZXDH_CQPERRCODES_CQP_MAJOR_CODE, err_code); + return ret_code; +} + +/** + * zxdh_sc_cqp_post_sq - post of cqp's sq + * @cqp: struct for cqp hw + */ +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp) +{ + u32 hdr; + u8 polarity = 0; + + polarity = ((ZXDH_RING_CURRENT_HEAD(cqp->sq_ring) == 0) ? 
+ !cqp->polarity : + cqp->polarity); + hdr = FIELD_PREP(ZXDH_CQPSQ_DBPOLARITY, polarity) | + FIELD_PREP(ZXDH_CQPSQ_DBRINGHEAD, + ZXDH_RING_CURRENT_HEAD(cqp->sq_ring)); + + dma_wmb(); + + writel(hdr, cqp->dev->cqp_db); +} +/** + * zxdh_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq + * and pass back index + * @cqp: CQP HW structure + * @scratch: private data for CQP WQE + * @wqe_idx: WQE index of CQP SQ + */ +__le64 *zxdh_sc_cqp_get_next_send_wqe_idx(struct zxdh_sc_cqp *cqp, u64 scratch, + u32 *wqe_idx) +{ + __le64 *wqe = NULL; + int ret_code; + + if (ZXDH_RING_FULL_ERR(cqp->sq_ring)) { + pr_err("WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", + cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size); + return NULL; + } + ZXDH_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + cqp->dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS]++; + if (!*wqe_idx) + cqp->polarity = !cqp->polarity; + wqe = cqp->sq_base[*wqe_idx].elem; + cqp->scratch_array[*wqe_idx] = scratch; + + memset(&wqe[0], 0, 24); + memset(&wqe[4], 0, 32); + + return wqe; +} + +/** + * zxdh_sc_cqp_destroy - destroy cqp during close + * @cqp: struct for cqp hw + * @free_hwcqp: true for regular cqp destroy; false for reset path + */ +int zxdh_sc_cqp_destroy(struct zxdh_sc_cqp *cqp, bool free_hwcqp) +{ + u32 cnt = 0, val; + int ret_code = 0; + + if (free_hwcqp) { + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_CONFIG_DONE)); + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + break; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (FLD_RS_32(cqp->dev, val, ZXDH_CCQPSTATUS_CCQP_DONE)); + } + return ret_code; +} + +/** + * zxdh_sc_ccq_arm - enable intr for control cq + * @ccq: ccq sc struct + */ +void zxdh_sc_ccq_arm(struct zxdh_sc_cq *ccq) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_seq_num; + u32 cqe_index; + u32 hdr; + + get_64bit_val(ccq->cq_uk.shadow_area, 0, &temp_val); + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, temp_val); + arm_seq_num++; + cqe_index = (u32)FIELD_GET(ZXDH_CQ_DBSA_CQEIDX, temp_val); + + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, 0) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cqe_index); + + set_64bit_val(ccq->cq_uk.shadow_area, 0, temp_val); + + hdr = FIELD_PREP(ZXDH_CQ_ARM_DBSA_VLD, 0) | + FIELD_PREP(ZXDH_CQ_ARM_CQ_ID, ccq->cq_uk.cq_id); + + dma_wmb(); /* make sure shadow area is updated before arming */ + + writel(hdr, ccq->dev->cq_arm_db); +} + +/** + * zxdh_sc_ccq_get_cqe_info - get ccq's cq entry + * @ccq: ccq sc struct + * @info: completion q entry to return + */ +int zxdh_sc_ccq_get_cqe_info(struct zxdh_sc_cq *ccq, + struct zxdh_ccq_cqe_info *info) +{ + u64 qp_ctx, temp, temp1, cq_shadow_temp; + __le64 *cqe; + struct zxdh_sc_cqp *cqp; + u32 wqe_idx; + u32 error; + u8 polarity; + u8 mailbox_cqe = 0; + int ret_code = 0; + + cqe = ZXDH_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); + get_64bit_val(cqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, temp); + if (polarity != ccq->cq_uk.polarity) + return -ENOENT; + mailbox_cqe = (u8)FIELD_GET(ZXDH_CQ_MAILBOXCQE, temp); + + get_64bit_val(cqe, 8, &qp_ctx); + //cqp = (struct zxdh_sc_cqp *)(unsigned long)qp_ctx; + cqp = ccq->dev->cqp; + info->error = (bool)FIELD_GET(ZXDH_CQ_ERROR, temp); + info->maj_err_code = 
ZXDH_CQPSQ_MAJ_NO_ERROR; + info->min_err_code = (u16)FIELD_GET(ZXDH_CQ_MINERR, temp); + if (info->error) { + info->maj_err_code = (u16)FIELD_GET(ZXDH_CQ_MAJERR, temp); + cqp = ccq->dev->cqp; + error = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_ERRCODE)); + pr_err("CQP: CQPERRCODES error_code[x%08X]\n", error); + } + + wqe_idx = (u32)FIELD_GET(ZXDH_CQ_WQEIDX, temp); + + if (info->error) + wqe_idx = ZXDH_RING_CURRENT_TAIL(cqp->sq_ring); + + info->scratch = cqp->scratch_array[wqe_idx]; + + get_64bit_val(cqe, 16, &temp1); + info->op_ret_val = (u32)FIELD_GET(ZXDH_CCQ_OPRETVAL, temp1); + get_64bit_val(cqp->sq_base[wqe_idx].elem, 0, &temp1); + info->op_code = (u8)FIELD_GET(ZXDH_CQPSQ_OPCODE, temp1); + info->cqp = cqp; + info->mailbox_cqe = mailbox_cqe; + + if (mailbox_cqe == 1) { + get_64bit_val(cqe, 24, &temp1); + info->addrbuf[0] = temp1; + get_64bit_val(cqe, 32, &temp1); + info->addrbuf[1] = temp1; + get_64bit_val(cqe, 40, &temp1); + info->addrbuf[2] = temp1; + get_64bit_val(cqe, 48, &temp1); + info->addrbuf[3] = temp1; + get_64bit_val(cqe, 56, &temp1); + info->addrbuf[4] = temp1; + } else if (info->op_code == ZXDH_CQP_OP_WQE_DMA_READ_USECQE) { + get_64bit_val(cqe, 24, &temp1); + info->addrbuf[0] = temp1; + get_64bit_val(cqe, 32, &temp1); + info->addrbuf[1] = temp1; + get_64bit_val(cqe, 40, &temp1); + info->addrbuf[2] = temp1; + get_64bit_val(cqe, 48, &temp1); + info->addrbuf[3] = temp1; + get_64bit_val(cqe, 56, &temp1); + info->addrbuf[4] = temp1; + } + + /* move the head for cq */ + ZXDH_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); + if (!ZXDH_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) + ccq->cq_uk.polarity ^= 1; + + /* update cq tail in cq shadow memory also */ + ZXDH_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); + get_64bit_val(ccq->cq_uk.shadow_area, 0, &cq_shadow_temp); + cq_shadow_temp &= ~ZXDH_CQ_DBSA_CQEIDX; + cq_shadow_temp |= + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, + ZXDH_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); + set_64bit_val(ccq->cq_uk.shadow_area, 0, cq_shadow_temp); + + dma_wmb(); /* make sure shadow area is updated before moving tail */ + if ((mailbox_cqe != 1)) { + ZXDH_RING_MOVE_TAIL(cqp->sq_ring); + ccq->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]++; + } + + return ret_code; +} + +/** + * zxdh_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ + * @cqp: struct for cqp hw + * @op_code: cqp opcode for completion + * @compl_info: completion q entry to return + */ +int zxdh_sc_poll_for_cqp_op_done(struct zxdh_sc_cqp *cqp, u8 op_code, + struct zxdh_ccq_cqe_info *compl_info) +{ + struct zxdh_ccq_cqe_info info = {}; + struct zxdh_sc_cq *ccq; + int ret_code = 0; + u32 cnt = 0; + u8 cqe_valid = false; + + ccq = cqp->dev->ccq; + while (1) { + if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count) + return -ETIMEDOUT; + + if (zxdh_sc_ccq_get_cqe_info(ccq, &info)) { + udelay(cqp->dev->hw_attrs.max_sleep_count); + continue; + } + if (info.error && info.op_code != ZXDH_CQP_OP_QUERY_MKEY) { + ret_code = -EIO; + break; + } + cqe_valid = true; + + /* make sure op code matches*/ + if (op_code == info.op_code) + break; + pr_err("WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n", + op_code, info.op_code); + } + + if (compl_info) + memcpy(compl_info, &info, sizeof(*compl_info)); + + if ((cqe_valid == true) && (cqp->dev->ceq_0_ok == true)) + zxdh_sc_ccq_arm(ccq); + + return ret_code; +} + +/** + * zxdh_sc_manage_hmc_pm_func_table - manage of function table + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @info: info for the manage 
function table operation + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_hmc_pm_func_table(struct zxdh_sc_cqp *cqp, + struct zxdh_hmc_fcn_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_MHMC_VFIDX, info->vf_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, + ZXDH_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) | + FIELD_PREP(ZXDH_CQPSQ_MHMC_FREEPMFN, info->free_fcn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE", + DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_commit_fpm_val_done - wait for cqp eqe completion + * for fpm commit + * @cqp: struct for cqp hw + */ +static int zxdh_sc_commit_fpm_val_done(struct zxdh_sc_cqp *cqp) +{ + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_WQE_DMA_WRITE_32, + NULL); +} + +/** + * zxdh_sc_ceq_init - initialize ceq + * @ceq: ceq sc structure + * @info: ceq initialization info + */ +int zxdh_sc_ceq_init(struct zxdh_sc_ceq *ceq, struct zxdh_ceq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size || + info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) + return -EINVAL; + + if (info->ceq_index > (info->dev->max_ceqs - 1)) + return -EINVAL; + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + ceq->size = sizeof(*ceq); + ceq->ceqe_base = (struct zxdh_ceqe *)info->ceqe_base; + ceq->ceq_id = info->ceq_id; + ceq->ceq_index = info->ceq_index; + ceq->dev = info->dev; + ceq->elem_cnt = info->elem_cnt; + ceq->log2_elem_size = info->log2_elem_size; + ceq->ceq_elem_pa = info->ceqe_pa; + ceq->virtual_map = info->virtual_map; + ceq->itr_no_expire = info->itr_no_expire; + ceq->reg_cq = info->reg_cq; + ceq->reg_cq_size = 0; + spin_lock_init(&ceq->req_cq_lock); + ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); + ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); + ceq->pbl_list = (ceq->virtual_map ? 
info->pbl_list : NULL); + ceq->tph_en = info->tph_en; + ceq->tph_val = info->tph_val; + ceq->msix_idx = info->msix_idx; + ceq->polarity = 1; + ZXDH_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); + ceq->dev->ceq[info->ceq_index] = ceq; + + return 0; +} + +/** + * zxdh_sc_ceq_create - create ceq wqe + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_ceq_create(struct zxdh_sc_ceq *ceq, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + + cqp = ceq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CEQC_PERIOD_L, 0) | + FIELD_PREP(ZXDH_CEQC_VHCA, ceq->dev->vhca_id) | + FIELD_PREP(ZXDH_CEQC_INTR_IDX, ceq->msix_idx) | + FIELD_PREP(ZXDH_CEQC_INT_TYPE, ZXDH_IRQ_TYPE_MSIX) | + FIELD_PREP(ZXDH_CEQC_CEQ_HEAD, 0) | + FIELD_PREP(ZXDH_CEQC_CEQE_VALID, ceq->polarity) | + FIELD_PREP(ZXDH_CEQC_LEAF_PBL_SIZE, ceq->pbl_chunk_size) | + // FIELD_PREP(ZXDH_CEQC_VIRTUALLY_MAPPED, ceq->virtual_map) | + FIELD_PREP(ZXDH_CEQC_CEQ_SIZE, ZXDH_CEQE_SIZE_16_BYTE) | + FIELD_PREP(ZXDH_CEQC_LOG_CEQ_NUM, ceq->log2_elem_size) | + FIELD_PREP(ZXDH_CEQC_CEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + + set_64bit_val(wqe, 8, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_ADDRESS, + ceq->virtual_map ? + ceq->first_pm_pbl_idx : + RS_64_1(ceq->ceq_elem_pa, 7)) | //右移7bit + FIELD_PREP(ZXDH_CEQC_PERIOD_H, 0); + dma_wmb(); + set_64bit_val(wqe, 16, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_MAX_CNT, IRMDA_CEQ_AGGREGATION_CNT_0) | + FIELD_PREP(ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG, 0); + dma_wmb(); + set_64bit_val(wqe, 24, hdr); + + hdr = FIELD_PREP(ZXDH_CQPSQ_CEQ_CEQID, ceq->ceq_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cceq_create_done - poll for control ceq wqe to complete + * @ceq: ceq sc structure + */ +static int zxdh_sc_cceq_create_done(struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_CREATE_CEQ, NULL); +} + +/** + * zxdh_sc_cceq_destroy_done - poll for destroy cceq to complete + * @ceq: ceq sc structure + */ +int zxdh_sc_cceq_destroy_done(struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cqp *cqp; + + if (ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, ceq->dev->ccq); + + cqp = ceq->dev->cqp; + + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_DESTROY_CEQ, NULL); +} + +/** + * zxdh_sc_cceq_create - create cceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + */ +int zxdh_sc_cceq_create(struct zxdh_sc_ceq *ceq, u64 scratch) +{ + int ret_code; + + if (ceq->reg_cq) { + ret_code = zxdh_sc_add_cq_ctx(ceq, ceq->dev->ccq); + if (ret_code) + return ret_code; + } + + ret_code = zxdh_sc_ceq_create(ceq, scratch, true); + if (!ret_code) + return zxdh_sc_cceq_create_done(ceq); + + return ret_code; +} + +/** + * zxdh_sc_ceq_destroy - destroy ceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ceq_destroy(struct zxdh_sc_ceq *ceq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + 
u64 hdr; + + cqp = ceq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CEQC_PERIOD_L, 0) | + FIELD_PREP(ZXDH_CEQC_VHCA, ceq->dev->vhca_id) | + FIELD_PREP(ZXDH_CEQC_INTR_IDX, ceq->msix_idx) | + FIELD_PREP(ZXDH_CEQC_INT_TYPE, ZXDH_IRQ_TYPE_PIN) | + FIELD_PREP(ZXDH_CEQC_CEQ_HEAD, 0) | + FIELD_PREP(ZXDH_CEQC_CEQE_VALID, ceq->polarity) | + FIELD_PREP(ZXDH_CEQC_LEAF_PBL_SIZE, ceq->pbl_chunk_size) | + // FIELD_PREP(ZXDH_CEQC_VIRTUALLY_MAPPED, ceq->virtual_map) | + FIELD_PREP(ZXDH_CEQC_CEQ_SIZE, ZXDH_CEQE_SIZE_64_BYTE) | + FIELD_PREP(ZXDH_CEQC_LOG_CEQ_NUM, ceq->log2_elem_size) | + FIELD_PREP(ZXDH_CEQC_CEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + + set_64bit_val(wqe, 8, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_ADDRESS, ceq->virtual_map ? + ceq->first_pm_pbl_idx : + ceq->ceq_elem_pa) | + FIELD_PREP(ZXDH_CEQC_PERIOD_H, 0); + dma_wmb(); + set_64bit_val(wqe, 16, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_MAX_CNT, IRMDA_CEQ_AGGREGATION_CNT_0) | + FIELD_PREP(ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG, 0); + dma_wmb(); + set_64bit_val(wqe, 24, hdr); + + hdr = FIELD_PREP(ZXDH_CQPSQ_CEQ_CEQID, ceq->ceq_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_process_ceq - process ceq + * @dev: sc device struct + * @ceq: ceq sc structure + * + * It is expected caller serializes this function with cleanup_ceqes() + * because these functions manipulate the same ceq + */ +void *zxdh_sc_process_ceq(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *ceq) +{ + u64 temp; + __le64 *ceqe; + struct zxdh_sc_cq *cq = NULL; + struct zxdh_sc_cq *temp_cq; + u8 polarity; + u32 cq_idx; + unsigned long flags; + + do { + cq_idx = 0; + ceqe = ZXDH_GET_CURRENT_CEQ_ELEM(ceq); + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CEQE_VALID, temp); + if (polarity != ceq->polarity) + return NULL; + + temp_cq = (struct zxdh_sc_cq *)(unsigned long)LS_64_1(temp, 1); + if (!temp_cq) { + cq_idx = ZXDH_INVALID_CQ_IDX; + ZXDH_RING_MOVE_TAIL(ceq->ceq_ring); + + if (!ZXDH_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + continue; + } + + cq = temp_cq; + if (ceq->reg_cq) { + spin_lock_irqsave(&ceq->req_cq_lock, flags); + cq_idx = zxdh_sc_find_reg_cq(ceq, cq); + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + } + + ZXDH_RING_MOVE_TAIL(ceq->ceq_ring); + if (!ZXDH_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + } while (cq_idx == ZXDH_INVALID_CQ_IDX); + + return cq; +} + +/** + * zxdh_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq + * @cq: cq for which the ceqes need to be cleaned up + * @ceq: ceq ptr + * + * The function is called after the cq is destroyed to cleanup + * its pending ceqe entries. It is expected caller serializes this + * function with process_ceq() in interrupt context. 
+ */ +void zxdh_sc_cleanup_ceqes(struct zxdh_sc_cq *cq, struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cq *next_cq; + u8 ceq_polarity = ceq->polarity; + __le64 *ceqe; + u8 polarity; + u64 temp; + int next; + u32 i; + + next = ZXDH_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); + + for (i = 1; i <= ZXDH_RING_SIZE(*ceq); i++) { + ceqe = ZXDH_GET_CEQ_ELEM_AT_POS(ceq, next); + + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CEQE_VALID, temp); + if (polarity != ceq_polarity) + return; + + next_cq = (struct zxdh_sc_cq *)(unsigned long)LS_64_1(temp, 1); + if (cq == next_cq) + set_64bit_val(ceqe, 0, temp & ZXDH_CEQE_VALID); + + next = ZXDH_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); + if (!next) + ceq_polarity ^= 1; + } +} + +/** + * zxdh_sc_aeq_init - initialize aeq + * @aeq: aeq structure ptr + * @info: aeq initialization info + */ +int zxdh_sc_aeq_init(struct zxdh_sc_aeq *aeq, struct zxdh_aeq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size || + info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size) + return -EINVAL; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + aeq->size = sizeof(*aeq); + aeq->polarity = 1; + aeq->get_polarity_flag = 0; + aeq->aeqe_base = (struct zxdh_sc_aeqe *)info->aeqe_base; + aeq->dev = info->dev; + aeq->elem_cnt = info->elem_cnt; + aeq->aeq_elem_pa = info->aeq_elem_pa; + ZXDH_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); + aeq->virtual_map = info->virtual_map; + aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); + aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); + aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); + aeq->msix_idx = info->msix_idx; + info->dev->aeq = aeq; + + return 0; +} + +/** + * zxdh_sc_aeq_create - create aeq + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_aeq_create(struct zxdh_sc_aeq *aeq, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = aeq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, 0); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? 
aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_cqp_aeq_create - create aeq + * @aeq: aeq structure ptr + */ +int zxdh_cqp_aeq_create(struct zxdh_sc_aeq *aeq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + u64 hdr; + u64 scratch = 0; + u32 tail = 0, val = 0, error = 0; + int ret_code; + + cqp = aeq->dev->cqp; + dev = aeq->dev; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, 0); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(cqp); + + ret_code = zxdh_cqp_poll_registers(cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + return 0; +} + +/** + * zxdh_sc_aeq_destroy - destroy aeq during close + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_aeq_destroy(struct zxdh_sc_aeq *aeq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + u64 hdr; + + dev = aeq->dev; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? 
aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_aeq_requestor_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + */ +static void zxdh_aeq_requestor_msg_cfg(struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_REQ_AXI_RSP_ERR: + case ZXDH_AE_REQ_WQE_FLUSH: + break; + default: + info->qp = true; + info->sq = true; + break; + } +} + +/** + * zxdh_ae_responder_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + */ +static void zxdh_aeq_responder_msg_cfg(struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_RSP_WQE_FLUSH: + break; + case ZXDH_AE_RSP_SRQ_WATER_SIG: + info->srq = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW: + info->cq = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW_QP: + info->qp = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_STATE: + info->cq = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP: + info->cq = true; + break; + case ZXDH_AE_RSP_SRQ_AXI_RSP_SIG: + info->srq = true; + break; + default: + info->qp = true; + info->rq = true; + break; + } +} + +/** + * zxdh_ae_src_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + *@ae_src: ae msg source + */ +static void zxdh_ae_src_msg_cfg(struct zxdh_aeqe_info *info, u8 ae_src) +{ + if (ae_src == ZXDH_AE_REQUESTER) { //requestor + zxdh_aeq_requestor_msg_cfg(info); + } else if (ae_src == ZXDH_AE_RESPONDER) { //responder + zxdh_aeq_responder_msg_cfg(info); + } else { + pr_err("aeq src msg cfg, bad ae_src!\n"); + } +} + +/** + * zxdh_sc_get_next_aeqe - get next aeq entry + * @aeq: aeq structure ptr + * @info: aeqe info to be returned + */ +int zxdh_sc_get_next_aeqe(struct zxdh_sc_aeq *aeq, struct zxdh_aeqe_info *info) +{ + u64 temp, temp1, compl_ctx; + __le64 *aeqe; + u16 wqe_idx; + u8 ae_src; + u8 polarity; + + aeqe = ZXDH_GET_CURRENT_AEQ_ELEM(aeq); + get_64bit_val(aeqe, 16, &compl_ctx); + get_64bit_val(aeqe, 0, &temp); + get_64bit_val(aeqe, 8, &temp1); + polarity = (u8)FIELD_GET(ZXDH_AEQE_VALID, temp); + info->ae_id = (u16)FIELD_GET(ZXDH_AEQE_AECODE, temp); + if ((aeq->get_polarity_flag == 0) && (info->ae_id)) { + aeq->polarity = polarity; + aeq->get_polarity_flag = 1; + } + + if (aeq->polarity != polarity) + return -ENOENT; + + if (info->ae_id == 0) + return -ENOENT; + + print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, + aeqe, 16, false); + + ae_src = (u8)FIELD_GET(ZXDH_AEQE_AESRC, temp); + wqe_idx = (u16)FIELD_GET(ZXDH_AEQE_WQDESCIDX, temp1); + info->qp_cq_id = (u32)FIELD_GET(ZXDH_AEQE_QPCQID, temp1); + info->iwarp_state = (u8)FIELD_GET(ZXDH_AEQE_IWSTATE, temp); + info->aeqe_overflow = (bool)FIELD_GET(ZXDH_AEQE_OVERFLOW, temp); + info->vhca_id = (u8)FIELD_GET(ZXDH_AEQE_VHCA_ID, temp); + info->compl_ctx = compl_ctx; + info->ae_src = ae_src; + zxdh_ae_src_msg_cfg(info, ae_src); + if (info->ae_id != 257) { + pr_info("%s ae_src:%d wqe_idx:%d qp_cq_id:%d ae_id:%d vhca_id:%d\n", + __func__, ae_src, wqe_idx, info->qp_cq_id, info->ae_id, + info->vhca_id); + } + + ZXDH_RING_MOVE_TAIL(aeq->aeq_ring); + if (!ZXDH_RING_CURRENT_TAIL(aeq->aeq_ring)) + aeq->polarity ^= 1; + + return 0; +} + +/** + * zxdh_sc_repost_aeq_tail - repost aeq valid idx + * 
@dev: sc device struct + * @idx: valid location + */ +int zxdh_sc_repost_aeq_tail(struct zxdh_sc_dev *dev, u32 idx) +{ + writel(idx, dev->aeq_tail_pointer); + return 0; +} + +int zxdh_sc_dma_read(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 src_path_index = 0, dest_path_index = 0; + + if (!cqp) + return -ENOMEM; + + src_path_index = zxdh_get_path_index(spath_index); + dest_path_index = zxdh_get_path_index(dpath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, dest_path_index); + set_64bit_val(wqe, 16, src_dest->dest); + set_64bit_val(wqe, 24, src_dest->src); + set_64bit_val(wqe, 32, src_dest->len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRCPATHINDEX, src_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_READ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_read_usecqe(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_dam_read_bycqe *readbuf, + struct zxdh_path_index *spath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 src_path_index = 0; + u8 i = 0; + + if (!cqp) + return -ENOMEM; + + if (readbuf->num > 5) + return -ENOMEM; + + src_path_index = zxdh_get_path_index(spath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, readbuf->valuetype); + for (i = 0; i < readbuf->num; i++) + set_64bit_val(wqe, 16 + i * 8, readbuf->addrbuf[i]); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRCPATHINDEX, src_path_index) | + FIELD_PREP(ZXDH_CQPSQ_DATABITWIDTH, readbuf->bitwidth) | + FIELD_PREP(ZXDH_CQPSQ_DATAINCQENUM, readbuf->num) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_READ_USECQE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_write64(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write64_date *dma_data, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 3) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + set_64bit_val(wqe, 40 + i * 8, dma_data->databuf[i]); + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_64) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_clear_nof_ioq(struct zxdh_sc_dev *dev, u64 size, u64 ioq_pa) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + if (!dev) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return 
-ENOMEM; + + dev->nof_clear_dpu_mem.size = NOF_IOQ_SQ_WQE_SIZE * NOF_IOQ_SQ_SIZE; + dev->nof_clear_dpu_mem.va = + dma_alloc_coherent(dev->hw->device, dev->nof_clear_dpu_mem.size, + &dev->nof_clear_dpu_mem.pa, GFP_KERNEL); + if (!dev->nof_clear_dpu_mem.va) + return -ENOMEM; + memset(dev->nof_clear_dpu_mem.va, 0, dev->nof_clear_dpu_mem.size); + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = dev->nof_clear_dpu_mem.pa; + cqp_info->in.u.dma_writeread.src_dest.len = dev->nof_clear_dpu_mem.size; + cqp_info->in.u.dma_writeread.src_dest.dest = ioq_pa; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_DPU_DDR; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + pr_info("clear nof ioq pa=%llx size=%d\n", ioq_pa, + dev->nof_clear_dpu_mem.size); + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +int zxdh_clear_dpuddr(struct zxdh_sc_dev *dev, u64 size, bool clear) +{ + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0, loop = 0, i = 0; + int ret_code = 0; + u64 scratch = 0; + u64 src_path_index = 0, dest_path_index = 0, remain_leg = 0; + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + struct zxdh_src_copy_dest src_dest = {}; + + if (!dev) + return -ENOMEM; + + if ((false == clear) || (dev->hmc_use_dpu_ddr == false)) + return 0; + + dev->clear_dpu_mem.size = ZXDH_HMC_DIRECT_BP_SIZE; + dev->clear_dpu_mem.va = + dma_alloc_coherent(dev->hw->device, dev->clear_dpu_mem.size, + &dev->clear_dpu_mem.pa, GFP_KERNEL); + if (!dev->clear_dpu_mem.va) + return -ENOMEM; + memset(dev->clear_dpu_mem.va, 0, dev->clear_dpu_mem.size); + + loop = size / ZXDH_HMC_DIRECT_BP_SIZE; + remain_leg = size % ZXDH_HMC_DIRECT_BP_SIZE; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_DPU_DDR; // L2D + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; // L2D + dpath_index.vhca_id = dev->vhca_id; + dest_path_index = zxdh_get_path_index(&dpath_index); + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = dev->vhca_id; + src_path_index = zxdh_get_path_index(&spath_index); + src_dest.src = dev->clear_dpu_mem.pa; + src_dest.len = dev->clear_dpu_mem.size; + + for (i = 0; i < loop; i++) { + src_dest.dest = dev->hmc_pf_manager_info.hmc_base + + i * ZXDH_HMC_DIRECT_BP_SIZE; + if (i == 0 || i == loop - 1) + pr_info("src_dest.src=%llx src_dest.dest=%llx src_dest.len=%d\n", + src_dest.src, src_dest.dest, src_dest.len); + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); + set_64bit_val(wqe, 24, 
src_dest.src); + set_64bit_val(wqe, 32, src_dest.len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers( + dev->cqp, tail, dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + } + + if (remain_leg != 0) { + src_dest.dest = dev->hmc_pf_manager_info.hmc_base + + i * ZXDH_HMC_DIRECT_BP_SIZE; + src_dest.len = remain_leg; + pr_info("src_dest.src=%llx src_dest.dest=%llx src_dest.len=%d\n", + src_dest.src, src_dest.dest, src_dest.len); + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); + set_64bit_val(wqe, 24, src_dest.src); + set_64bit_val(wqe, 32, src_dest.len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers( + dev->cqp, tail, dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + } + + return ret_code; +} + +int zxdh_sc_dma_write32(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 4) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + if (i == 0) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 1) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 2) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } else { // if (i == 3) + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_32) | + FIELD_PREP(ZXDH_CQPSQ_InterSourSel, dma_data->inter_sour_sel) | + FIELD_PREP(ZXDH_CQPSQ_NeedInter, dma_data->need_inter) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_write(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 src_path_index = 0, dest_path_index = 0; + + if (!cqp) + return -ENOMEM; + + src_path_index = zxdh_get_path_index(spath_index); + dest_path_index = 
zxdh_get_path_index(dpath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest->dest); + set_64bit_val(wqe, 24, src_dest->src); + set_64bit_val(wqe, 32, src_dest->len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_qpc(struct zxdh_sc_qp *qp, u64 qpc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = qp->dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_QPC_ID, qp->qp_ctx_num); + set_64bit_val(wqe, 8, qpc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_cqc(struct zxdh_sc_cq *cq, u64 cqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = cq->dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, cq->cq_uk.cq_id); + set_64bit_val(wqe, 8, cqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_ceqc(struct zxdh_sc_ceq *ceq, u64 ceqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = ceq->dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, ceq->ceq_id); + set_64bit_val(wqe, 8, ceqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_aeqc(struct zxdh_sc_aeq *aeq, u64 aeqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = aeq->dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, aeq->dev->vhca_id); + set_64bit_val(wqe, 8, aeqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +u32 zxdh_num_to_log(u32 size_num) +{ + u32 size_log = 0; + u32 temp = size_num; + + while (size_num > 1) { + size_num >>= 1; + size_log++; + } + if (temp != (1 << size_log)) + size_log += 1; + + return size_log; +} + +int zxdh_sc_mb_create(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_mailboxhead_data *mbhead_data, bool post_sq, + u32 dst_vf_id) +{ + __le64 *wqe; + u64 hdr; + struct zxdh_sc_dev *dev = NULL; + struct zxdh_pci_f *rf = NULL; + bool ftype = false; + + if (!cqp) + return -ENOMEM; + + dev = cqp->dev; + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + ftype = rf->ftype; // ftype==0 ->PF + + wqe = 
zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, mbhead_data->msg0); + set_64bit_val(wqe, 16, mbhead_data->msg1); + set_64bit_val(wqe, 24, mbhead_data->msg2); + set_64bit_val(wqe, 32, mbhead_data->msg3); + set_64bit_val(wqe, 40, mbhead_data->msg4); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DSTVFID, dst_vf_id) | + FIELD_PREP(ZXDH_CQPSQ_SRCPFVFID, + ((ftype == 0) ? rf->pf_id : rf->vf_id)) | + FIELD_PREP(ZXDH_CQPSQ_PFVALID, !ftype) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_SEND_MAILBOX) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_ccq_init - initialize control cq + * @cq: sc's cq ctruct + * @info: info for control cq initialization + */ +int zxdh_sc_ccq_init(struct zxdh_sc_cq *cq, struct zxdh_ccq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size || + info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) + return -EINVAL; + + if (info->ceq_index > (info->dev->max_ceqs - 1)) + return -EINVAL; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cq->cq_pa = info->cq_pa; + cq->cq_uk.cq_base = info->cq_base; + cq->shadow_area_pa = info->shadow_area_pa; + cq->cq_uk.shadow_area = info->shadow_area; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + cq->ceq_index = info->ceq_index; + cq->cq_uk.cq_size = info->num_elem; + cq->cq_uk.cq_log_size = zxdh_num_to_log(info->num_elem); + cq->cq_type = ZXDH_CQ_TYPE_CQP; + cq->ceqe_mask = info->ceqe_mask; + ZXDH_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); + cq->cq_uk.cq_id = info->cq_num; + cq->ceq_id_valid = info->ceq_id_valid; + cq->cq_uk.cqe_size = info->cqe_size; + cq->pbl_list = info->pbl_list; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->cq_uk.polarity = true; + cq->cq_max = info->cq_max; + cq->cq_period = info->cq_period; + cq->scqe_break_moderation_en = info->scqe_break_moderation_en; + cq->cq_st = info->cq_st; + cq->is_in_list_cnt = info->is_in_list_cnt; + + /* Only applicable to CQs other than CCQ so initialize to zero */ + cq->cq_uk.cqe_alloc_db = NULL; + + info->dev->ccq = cq; + writel(cq->cq_uk.cq_id, + (u32 __iomem *)(cq->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + return 0; +} + +/** + * zxdh_sc_ccq_create_done - poll cqp for ccq create + * @ccq: ccq sc struct + */ +static inline int zxdh_sc_ccq_create_done(struct zxdh_sc_cq *ccq) +{ + struct zxdh_sc_cqp *cqp; + + cqp = ccq->dev->cqp; + + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_CREATE_CQ, NULL); +} + +/** + * zxdh_sc_ccq_create - create control cq + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ccq_create(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq) +{ + int ret_code; + + ret_code = zxdh_sc_cq_create(ccq, scratch, post_sq); + if (ret_code) + return ret_code; + + if (post_sq) { + ret_code = zxdh_sc_ccq_create_done(ccq); + if (ret_code) + return ret_code; + } + + ccq->dev->cqp->process_config_pte_table = zxdh_cqp_config_pte_table_cmd; + + return 0; +} + +/** + * zxdh_sc_ccq_destroy - destroy ccq during close + * @ccq: ccq sc struct + * 
@scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ccq_destroy(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 temp; + u64 hdr; + int ret_code = 0; + u32 tail, val, error; + + cqp = ccq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + dma_wmb(); + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, ccq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VIRTMAP, ccq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, ccq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(ccq->shadow_area_pa, 6)); + temp = FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0), + ZXDH_CQPSQ_CQ_CEQID) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, ccq->cq_uk.cq_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + ccq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); + set_64bit_val(wqe, 48, + (ccq->virtual_map ? ccq->first_pm_pbl_idx : + RS_64_1(ccq->cq_pa, 8))); + set_64bit_val(wqe, 56, RS_64_1(ccq, 0)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(ccq->dev, ccq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (post_sq) { + zxdh_sc_cqp_post_sq(cqp); + ret_code = zxdh_cqp_poll_registers( + cqp, tail, cqp->dev->hw_attrs.max_done_count); + } + + return ret_code; +} + +/** + * zxdh_cqp_ring_full - check if cqp ring is full + * @cqp: struct for cqp hw + */ +static bool zxdh_cqp_ring_full(struct zxdh_sc_cqp *cqp) +{ + return ZXDH_RING_FULL_ERR(cqp->sq_ring); +} + +/** + * zxdh_sc_query_rdma_features - query RDMA features and FW ver + * @cqp: struct for cqp hw + * @buf: buffer to hold query info + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_query_rdma_features(struct zxdh_sc_cqp *cqp, + struct zxdh_dma_mem *buf, u64 scratch) +{ + __le64 *wqe; + u64 temp; + u32 tail, val, error; + int status; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = buf->pa; + set_64bit_val(wqe, 32, temp); + + temp = FIELD_PREP(ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID, + cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) | + FIELD_PREP(ZXDH_CQPSQ_UP_OP, ZXDH_CQP_OP_QUERY_RDMA_FEATURES); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(cqp); + status = zxdh_cqp_poll_registers(cqp, tail, + cqp->dev->hw_attrs.max_done_count); + if (error || status) + status = -EIO; + + return status; +} + +u64 zxdh_get_hmc_align_2M(u64 paaddr) +{ + u64 pa = paaddr; + + if (paaddr % 0x200000 == 0) + return pa; + + pa = pa + 0x200000; + pa = pa & (~GENMASK_ULL(20, 0)); + + return pa; +} + +u64 zxdh_get_hmc_align_512(u64 paaddr) +{ 
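+	/* Round pa up to the next 512-byte boundary; an address that is already 512-aligned is returned unchanged, otherwise the low 9 bits are cleared after stepping past the boundary. */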
+ u64 pa = paaddr; + + if (paaddr % 512 == 0) + return pa; + + pa = pa + 512; + pa = pa & (~GENMASK_ULL(8, 0)); + + return pa; +} + +u64 zxdh_get_hmc_align_4K(u64 paaddr) +{ + u64 pa = paaddr; + + if (paaddr % 4096 == 0) + return pa; + + pa = pa + 4096; + pa = pa & (~GENMASK_ULL(11, 0)); + + return pa; +} + +u16 zxdh_txwind_ddr_size(u8 num) // 每个qp配置的tx window的条目数,可配置值2~9 +{ + u8 i = 0; + u16 result = 1; + + if (num > 9 || num < 2) { + result = 4; + return result; + } + + for (i = 0; i < num; i++) + result = result * 2; + + return result; +} + +void zxdh_hmc_dpu_capability(struct zxdh_sc_dev *dev) +{ + u32 val = 0; + struct zxdh_hmc_obj_info *obj_info = NULL; + u8 txwindo_ddr_reg = 9; + + //txwindo_ddr_reg = readl(dev->hw->hw_addr+ TXWINDOW_DDR_SIZE); + + obj_info = dev->hmc_info->hmc_obj; + + obj_info[ZXDH_HMC_IW_QP].cnt = obj_info[ZXDH_HMC_IW_QP].max_cnt; + obj_info[ZXDH_HMC_IW_QP].size = 512; + + obj_info[ZXDH_HMC_IW_CQ].cnt = obj_info[ZXDH_HMC_IW_CQ].max_cnt; + obj_info[ZXDH_HMC_IW_CQ].size = 64; + + obj_info[ZXDH_HMC_IW_SRQ].cnt = obj_info[ZXDH_HMC_IW_SRQ].max_cnt; + obj_info[ZXDH_HMC_IW_SRQ].size = 64; + + obj_info[ZXDH_HMC_IW_MR].cnt = obj_info[ZXDH_HMC_IW_MR].max_cnt; + obj_info[ZXDH_HMC_IW_MR].size = 64; + + obj_info[ZXDH_HMC_IW_AH].cnt = obj_info[ZXDH_HMC_IW_AH].max_cnt; + obj_info[ZXDH_HMC_IW_AH].size = 64; + + obj_info[ZXDH_HMC_IW_IRD].cnt = obj_info[ZXDH_HMC_IW_IRD].max_cnt; + obj_info[ZXDH_HMC_IW_IRD].size = 64 * 2 * (dev->ird_size); + + obj_info[ZXDH_HMC_IW_TXWINDOW].cnt = + obj_info[ZXDH_HMC_IW_TXWINDOW].max_cnt; + obj_info[ZXDH_HMC_IW_TXWINDOW].size = + 64 * zxdh_txwind_ddr_size(txwindo_ddr_reg); + + obj_info[ZXDH_HMC_IW_PBLE_MR].cnt = + obj_info[ZXDH_HMC_IW_PBLE_MR].max_cnt; + obj_info[ZXDH_HMC_IW_PBLE_MR].size = 8; + + obj_info[ZXDH_HMC_IW_PBLE].cnt = obj_info[ZXDH_HMC_IW_PBLE].max_cnt; + obj_info[ZXDH_HMC_IW_PBLE].size = 8; + + val = obj_info[ZXDH_HMC_IW_MR].cnt; + + writel(val, (u32 __iomem *)(dev->hw->hw_addr + C_TX_MRTE_INDEX_CFG)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_ACK_PCI_MAX_MRTE_INDEX_PARA_CFG)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_PCI_MAX_MRTE_INDEX_RAM)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_LOCAL_MRTE_PARENT_PARA_CFG)); +} + +int zxdh_create_vf_pblehmc_entry(struct zxdh_sc_dev *dev) +{ + u32 sd_lmt, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info = NULL; + struct zxdh_hmc_obj_info *obj_info = NULL; + struct zxdh_virt_mem virt_mem = {}; + + hmc_info = dev->hmc_info; + obj_info = hmc_info->hmc_obj; + for (k = ZXDH_HMC_IW_PBLE; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + if ((fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) == 0) { + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + } else { + sd_lmt = (u32)((fpm_limit - 1) / + ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + } + + if (sd_lmt == 1) + hmc_entry_total++; + else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry 
buffer\n"); + return -ENOMEM; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + + return 0; +} + +int zxdh_sc_commit_hmc_register_val(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, + bool post_sq, u8 wait_type) +{ + __le64 *wqe; + u64 hdr; + u32 tail, val, error; + int ret_code = 0; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 4) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + if (i == 0) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 1) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 2) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } else { //if (i == 3) + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_32) | + FIELD_PREP(ZXDH_CQPSQ_InterSourSel, dma_data->inter_sour_sel) | + FIELD_PREP(ZXDH_CQPSQ_NeedInter, dma_data->need_inter) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (post_sq) { + zxdh_sc_cqp_post_sq(cqp); + if (wait_type == ZXDH_CQP_WAIT_POLL_REGS) + ret_code = zxdh_cqp_poll_registers( + cqp, tail, cqp->dev->hw_attrs.max_done_count); + else if (wait_type == ZXDH_CQP_WAIT_POLL_CQ) + ret_code = zxdh_sc_commit_fpm_val_done(cqp); + } + + return ret_code; +} + +u32 zxdh_hmc_register_config_comval(struct zxdh_sc_dev *dev, u32 rsrc_type) +{ + u32 tmp = 0, val = 0; + + if ((rsrc_type == ZXDH_HMC_IW_QP) || (rsrc_type == ZXDH_HMC_IW_CQ) || + (rsrc_type == ZXDH_HMC_IW_SRQ)) { + tmp = 0; // not use default 0 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else if ((rsrc_type == ZXDH_HMC_IW_IRD) && (dev->cache_id != 0)) { + tmp = 2; // ird cacheid is 2 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else if ((rsrc_type == ZXDH_HMC_IW_TXWINDOW) && + (dev->cache_id != 0)) { + tmp = 3; // tx_wind cacheid is 3 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else { + tmp = dev->cache_id; // cacheid + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } + + if ((rsrc_type == ZXDH_HMC_IW_QP) || (rsrc_type == ZXDH_HMC_IW_CQ) || + (rsrc_type == ZXDH_HMC_IW_SRQ)) { + if (dev->hmc_use_dpu_ddr) + tmp = ZXDH_INDICATE_DPU_DDR << 2; // indicateid + else + tmp = ZXDH_INDICATE_HOST_SMMU << 2; // indicateid + } else { + tmp = 0; // not used, Default is 0 + } + tmp &= GENMASK_ULL(3, 2); + val |= tmp; + + if (dev->hmc_use_dpu_ddr) + tmp = ZXDH_AXID_DPUDDR << 4; // AXID,DPU位1 + else + tmp = dev->hmc_epid << 4; + + tmp &= GENMASK_ULL(6, 4); // HOST is ep_id + val |= tmp; + + tmp = 0 << 7; // way_partition temp is 0 + tmp &= GENMASK_ULL(9, 7); + val |= tmp; + + tmp = 0 << 10; // rev is 0 + tmp &= GENMASK_ULL(31, 10); + val |= tmp; + + return val; +} + +u32 zxdh_hmc_register_config_cqpval(struct zxdh_sc_dev *dev, u32 max_cnt, + u32 rsrc_type) +{ + u32 tmp = 0, val = 0; + + 
if ((rsrc_type == ZXDH_HMC_IW_MR) || (rsrc_type == ZXDH_HMC_IW_AH)) { + tmp = dev->cache_id; // cacheid + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + + tmp = 0 << 2; // way_partition temp is 0 + tmp &= GENMASK_ULL(4, 2); + val |= tmp; + + tmp = max_cnt << 5; + tmp &= GENMASK_ULL(28, 5); // max index + val |= tmp; + } + return val; +} + +/** + * zxdh_cfg_fpm_val - configure HMC objects + * @dev: sc device struct + * @qp_count: desired qp count + */ +int zxdh_cfg_fpm_val(struct zxdh_sc_dev *dev, u32 qp_count) +{ + struct zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_info *hmc_info = NULL; + int ret_code = 0; + u32 sd_lmt = 0, hmc_entry_total = 0, i = 0, j = 0, mem_size = 0, + cnt = 0, k = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_obj_info *obj_info = NULL; + + hmc_info = dev->hmc_info; + zxdh_hmc_dpu_capability(dev); + + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register(dev, hmc_info->hmc_obj, k, + dev->vhca_id); + } + + obj_info = hmc_info->hmc_obj; + for (i = 0; i < ZXDH_HMC_IW_MAX; i++) { + switch (i) { + case ZXDH_HMC_IW_QP: + cnt = dev->hmc_pf_manager_info.total_qp_cnt; + break; + case ZXDH_HMC_IW_CQ: + cnt = dev->hmc_pf_manager_info.total_cq_cnt; + break; + case ZXDH_HMC_IW_SRQ: + cnt = dev->hmc_pf_manager_info.total_srq_cnt; + break; + case ZXDH_HMC_IW_AH: + cnt = dev->hmc_pf_manager_info.total_ah_cnt; + break; + case ZXDH_HMC_IW_MR: + cnt = dev->hmc_pf_manager_info.total_mrte_cnt; + break; + default: + cnt = obj_info[i].cnt; + break; + } + + fpm_limit = obj_info[i].size * cnt; + fpm_limit = ALIGN(fpm_limit, ZXDH_HMC_DIRECT_BP_SIZE); + + if (fpm_limit == 0) + continue; + + if (i == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (i == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + + if (sd_lmt == 1) { + hmc_entry_total++; + } else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + return ret_code; +} + +/** + * zxdh_exec_cqp_cmd - execute cqp cmd when wqe are available + * @dev: rdma device + * @pcmdinfo: cqp command info + */ +static int zxdh_exec_cqp_cmd(struct zxdh_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo) +{ + int status; + bool alloc = false; + + dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; + switch (pcmdinfo->cqp_cmd) { + case ZXDH_OP_CEQ_DESTROY: + status = zxdh_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, + pcmdinfo->in.u.ceq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AEQ_DESTROY: + status = zxdh_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, + pcmdinfo->in.u.aeq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CEQ_CREATE: + status = zxdh_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, + pcmdinfo->in.u.ceq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AEQ_CREATE: + status = zxdh_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, + pcmdinfo->in.u.aeq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_UPLOAD_CONTEXT: + status = zxdh_sc_qp_upload_context( + pcmdinfo->in.u.qp_upload_context.dev, + &pcmdinfo->in.u.qp_upload_context.info, + pcmdinfo->in.u.qp_upload_context.scratch, + pcmdinfo->post_sq); + break; + case 
ZXDH_OP_CQ_CREATE: + status = zxdh_sc_cq_create(pcmdinfo->in.u.cq_create.cq, + pcmdinfo->in.u.cq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_MODIFY: + status = zxdh_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq, + &pcmdinfo->in.u.cq_modify.info, + pcmdinfo->in.u.cq_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_MODIFY_MODERATION: + status = zxdh_sc_modify_cq_moderation( + pcmdinfo->in.u.cq_modify.cq, + pcmdinfo->in.u.cq_modify.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_DESTROY: + status = zxdh_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, + pcmdinfo->in.u.cq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_FLUSH_WQES: + status = zxdh_sc_qp_flush_wqes( + pcmdinfo->in.u.qp_flush_wqes.qp, + &pcmdinfo->in.u.qp_flush_wqes.info, + pcmdinfo->in.u.qp_flush_wqes.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_GEN_AE: + status = zxdh_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp, + &pcmdinfo->in.u.gen_ae.info, + pcmdinfo->in.u.gen_ae.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_PUSH_PAGE: + status = zxdh_sc_manage_push_page( + pcmdinfo->in.u.manage_push_page.cqp, + &pcmdinfo->in.u.manage_push_page.info, + pcmdinfo->in.u.manage_push_page.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE: + /* switch to calling through the call table */ + status = zxdh_sc_manage_hmc_pm_func_table( + pcmdinfo->in.u.manage_hmc_pm.dev->cqp, + &pcmdinfo->in.u.manage_hmc_pm.info, + pcmdinfo->in.u.manage_hmc_pm.scratch, true); + break; + case ZXDH_OP_SUSPEND: + status = zxdh_sc_suspend_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case ZXDH_OP_RESUME: + status = zxdh_sc_resume_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case ZXDH_OP_MANAGE_VF_PBLE_BP: + status = zxdh_manage_vf_pble_bp( + pcmdinfo->in.u.manage_vf_pble_bp.cqp, + &pcmdinfo->in.u.manage_vf_pble_bp.info, + pcmdinfo->in.u.manage_vf_pble_bp.scratch, true); + break; + case ZXDH_OP_STATS_ALLOCATE: + alloc = true; + fallthrough; + case ZXDH_OP_STATS_FREE: + status = zxdh_sc_manage_stats_inst( + pcmdinfo->in.u.stats_manage.cqp, + &pcmdinfo->in.u.stats_manage.info, alloc, + pcmdinfo->in.u.stats_manage.scratch); + break; + case ZXDH_OP_STATS_GATHER: + status = zxdh_sc_gather_stats( + pcmdinfo->in.u.stats_gather.cqp, + &pcmdinfo->in.u.stats_gather.info, + pcmdinfo->in.u.stats_gather.scratch); + break; + case ZXDH_OP_WS_MODIFY_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_MODIFY_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_WS_DELETE_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_DEL_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_WS_ADD_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_ADD_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_SET_UP_MAP: + status = zxdh_sc_set_up_map(pcmdinfo->in.u.up_map.cqp, + &pcmdinfo->in.u.up_map.info, + pcmdinfo->in.u.up_map.scratch); + break; + case ZXDH_OP_QUERY_RDMA_FEATURES: + status = zxdh_sc_query_rdma_features( + pcmdinfo->in.u.query_rdma.cqp, + &pcmdinfo->in.u.query_rdma.query_buff_mem, + pcmdinfo->in.u.query_rdma.scratch); + break; + case ZXDH_OP_DELETE_ARP_CACHE_ENTRY: + status = zxdh_sc_del_arp_cache_entry( + 
pcmdinfo->in.u.del_arp_cache_entry.cqp, + pcmdinfo->in.u.del_arp_cache_entry.scratch, + pcmdinfo->in.u.del_arp_cache_entry.arp_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_APBVT_ENTRY: + status = zxdh_sc_manage_apbvt_entry( + pcmdinfo->in.u.manage_apbvt_entry.cqp, + &pcmdinfo->in.u.manage_apbvt_entry.info, + pcmdinfo->in.u.manage_apbvt_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY: + status = zxdh_sc_manage_qhash_table_entry( + pcmdinfo->in.u.manage_qhash_table_entry.cqp, + &pcmdinfo->in.u.manage_qhash_table_entry.info, + pcmdinfo->in.u.manage_qhash_table_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_MODIFY: + status = zxdh_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp, + &pcmdinfo->in.u.qp_modify.info, + pcmdinfo->in.u.qp_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_CREATE: + status = zxdh_sc_qp_create(pcmdinfo->in.u.qp_create.qp, + pcmdinfo->in.u.qp_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_DESTROY: + status = zxdh_sc_qp_destroy( + pcmdinfo->in.u.qp_destroy.qp, + pcmdinfo->in.u.qp_destroy.scratch, + pcmdinfo->in.u.qp_destroy.ignore_mw_bnd, + pcmdinfo->post_sq); + break; + case ZXDH_OP_ALLOC_STAG: + status = zxdh_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev, + &pcmdinfo->in.u.alloc_stag.info, + pcmdinfo->in.u.alloc_stag.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MR_REG_NON_SHARED: + status = zxdh_sc_mr_reg_non_shared( + pcmdinfo->in.u.mr_reg_non_shared.dev, + &pcmdinfo->in.u.mr_reg_non_shared.info, + pcmdinfo->in.u.mr_reg_non_shared.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DEALLOC_STAG: + status = zxdh_sc_dealloc_stag( + pcmdinfo->in.u.dealloc_stag.dev, + &pcmdinfo->in.u.dealloc_stag.info, + pcmdinfo->in.u.dealloc_stag.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_MW_ALLOC: + status = zxdh_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev, + &pcmdinfo->in.u.mw_alloc.info, + pcmdinfo->in.u.mw_alloc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_ADD_ARP_CACHE_ENTRY: + status = zxdh_sc_add_arp_cache_entry( + pcmdinfo->in.u.add_arp_cache_entry.cqp, + &pcmdinfo->in.u.add_arp_cache_entry.info, + pcmdinfo->in.u.add_arp_cache_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AH_CREATE: + status = zxdh_sc_create_ah(pcmdinfo->in.u.ah_create.cqp, + &pcmdinfo->in.u.ah_create.info, + pcmdinfo->in.u.ah_create.scratch); + break; + case ZXDH_OP_AH_DESTROY: + status = zxdh_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp, + &pcmdinfo->in.u.ah_destroy.info, + pcmdinfo->in.u.ah_destroy.scratch); + break; + case ZXDH_OP_MC_CREATE: + status = zxdh_sc_create_mcast_grp( + pcmdinfo->in.u.mc_create.cqp, + pcmdinfo->in.u.mc_create.info, + pcmdinfo->in.u.mc_create.scratch); + break; + case ZXDH_OP_MC_DESTROY: + status = zxdh_sc_destroy_mcast_grp( + pcmdinfo->in.u.mc_destroy.cqp, + pcmdinfo->in.u.mc_destroy.info, + pcmdinfo->in.u.mc_destroy.scratch); + break; + case ZXDH_OP_MC_MODIFY: + status = zxdh_sc_modify_mcast_grp( + pcmdinfo->in.u.mc_modify.cqp, + pcmdinfo->in.u.mc_modify.info, + pcmdinfo->in.u.mc_modify.scratch); + break; + case ZXDH_OP_CONFIG_PTE_TAB: + case ZXDH_OP_CONFIG_PBLE_TAB: + case ZXDH_OP_DMA_WRITE: + status = zxdh_sc_dma_write( + pcmdinfo->in.u.dma_writeread.cqp, + pcmdinfo->in.u.dma_writeread.scratch, + &pcmdinfo->in.u.dma_writeread.src_dest, + &pcmdinfo->in.u.dma_writeread.src_path_index, + &pcmdinfo->in.u.dma_writeread.dest_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_PTE_TAB: + case ZXDH_OP_DMA_READ: + status = zxdh_sc_dma_read( + 
pcmdinfo->in.u.dma_writeread.cqp, + pcmdinfo->in.u.dma_writeread.scratch, + &pcmdinfo->in.u.dma_writeread.src_dest, + &pcmdinfo->in.u.dma_writeread.src_path_index, + &pcmdinfo->in.u.dma_writeread.dest_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CONFIG_MAILBOX: + status = zxdh_sc_mb_create(pcmdinfo->in.u.hmc_mb.cqp, + pcmdinfo->in.u.hmc_mb.scratch, + &pcmdinfo->in.u.hmc_mb.mbhead_data, + pcmdinfo->post_sq, + pcmdinfo->in.u.hmc_mb.dst_vf_id); + break; + case ZXDH_OP_DMA_READ_USE_CQE: + status = zxdh_sc_dma_read_usecqe( + pcmdinfo->in.u.dma_read_cqe.cqp, + pcmdinfo->in.u.dma_read_cqe.scratch, + &pcmdinfo->in.u.dma_read_cqe.dma_rcqe, + &pcmdinfo->in.u.dma_read_cqe.src_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DMA_WRITE32: + status = zxdh_sc_dma_write32( + pcmdinfo->in.u.dma_write32data.cqp, + pcmdinfo->in.u.dma_write32data.scratch, + &pcmdinfo->in.u.dma_write32data.dest_path_index, + &pcmdinfo->in.u.dma_write32data.dma_data, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DMA_WRITE64: + status = zxdh_sc_dma_write64( + pcmdinfo->in.u.dma_write64data.cqp, + pcmdinfo->in.u.dma_write64data.scratch, + &pcmdinfo->in.u.dma_write64data.dest_path_index, + &pcmdinfo->in.u.dma_write64data.dma_data, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_QPC: + status = zxdh_sc_query_qpc(pcmdinfo->in.u.query_qpc.qp, + pcmdinfo->in.u.query_qpc.qpc_buf_pa, + pcmdinfo->in.u.query_qpc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_CQC: + status = zxdh_sc_query_cqc(pcmdinfo->in.u.query_cqc.cq, + pcmdinfo->in.u.query_cqc.cqc_buf_pa, + pcmdinfo->in.u.query_cqc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_CEQC: + status = zxdh_sc_query_ceqc( + pcmdinfo->in.u.query_ceqc.ceq, + pcmdinfo->in.u.query_ceqc.ceqc_buf_pa, + pcmdinfo->in.u.query_ceqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_AEQC: + status = zxdh_sc_query_aeqc( + pcmdinfo->in.u.query_aeqc.aeq, + pcmdinfo->in.u.query_aeqc.aeqc_buf_pa, + pcmdinfo->in.u.query_aeqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_SRQC: + status = zxdh_sc_query_srqc( + pcmdinfo->in.u.query_srqc.srq, + pcmdinfo->in.u.query_srqc.srqc_buf_pa, + pcmdinfo->in.u.query_srqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_MODIFY: + status = zxdh_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq, + &pcmdinfo->in.u.srq_modify.info, + pcmdinfo->in.u.srq_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_CREATE: + status = zxdh_sc_srq_create(pcmdinfo->in.u.srq_create.srq, + &pcmdinfo->in.u.srq_create.info, + pcmdinfo->in.u.srq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_DESTROY: + status = zxdh_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq, + pcmdinfo->in.u.srq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_MKEY: + status = zxdh_sc_query_mkey(pcmdinfo->in.u.query_mkey.cqp, + pcmdinfo->in.u.query_mkey.mkeyindex, + pcmdinfo->in.u.query_mkey.scratch, + pcmdinfo->post_sq); + break; + default: + status = -EOPNOTSUPP; + break; + } + + return status; +} + +/** + * zxdh_process_cqp_cmd - process all cqp commands + * @dev: sc device struct + * @pcmdinfo: cqp command info + */ +int zxdh_process_cqp_cmd(struct zxdh_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo) +{ + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + if (list_empty(&dev->cqp_cmd_head) && !zxdh_cqp_ring_full(dev->cqp)) + status = zxdh_exec_cqp_cmd(dev, pcmdinfo); + else + list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); + 
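/* Commands queued on cqp_cmd_head are drained by zxdh_process_bh() once the CQP SQ has room again. */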
spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * zxdh_process_bh - called from tasklet for cqp list + * @dev: sc device struct + */ +int zxdh_process_bh(struct zxdh_sc_dev *dev) +{ + int status = 0; + struct cqp_cmds_info *pcmdinfo; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + while (!list_empty(&dev->cqp_cmd_head) && + !zxdh_cqp_ring_full(dev->cqp)) { + pcmdinfo = (struct cqp_cmds_info *)zxdh_remove_cqp_head(dev); + if (!pcmdinfo) + return -ENOMEM; + status = zxdh_exec_cqp_cmd(dev, pcmdinfo); + if (status) + break; + } + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +/** + * zxdh_set_irq_rate_limit- Configure interrupt rate limit + * @dev: pointer to the device structure + * @idx: vector index + * @interval: Time interval in 4 usec units. Zero for no limit. + */ +void zxdh_set_irq_rate_limit(struct zxdh_sc_dev *dev, u32 idx, u32 interval) +{ + u32 reg_val = 0; + + if (interval) { +#define ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL 59 /* 59 * 4 = 236 us */ + if (interval > ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL) + interval = ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL; + reg_val = interval & ZXDH_GLINT_RATE_INTERVAL; + reg_val |= FIELD_PREP(ZXDH_GLINT_RATE_INTRL_ENA, 1); + } + writel(reg_val, dev->hw_regs[ZXDH_GLINT_RATE] + idx); +} + +#endif +/** + * zxdh_cfg_aeq- Configure AEQ interrupt + * @dev: pointer to the device structure + * @irq_idx: vector index + */ +void zxdh_cfg_aeq(struct zxdh_sc_dev *dev, u32 irq_idx) +{ + struct zxdh_pci_f *rf; + u32 hdr = 0; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + hdr = FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VECTOR, irq_idx) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_TC, 0) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VF_ACTIVE, rf->ftype) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VF_ID, rf->vf_id) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_PF_ID, rf->pf_id); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_msix_data); + + hdr = FIELD_PREP(ZXDH_AEQ_MSIX_CONFIG_IRQ, 0) | + FIELD_PREP(ZXDH_AEQ_MSIX_CONFIG_EPID, rf->ep_id); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_msix_config); +} + +int zxdh_sc_config_pte_table(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest src_dest) +{ + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + u64 src_path_index = 0, dest_path_index = 0; + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + + if (!dev) + return -ENOMEM; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_L2D; // L2D + dpath_index.obj_id = ZXDH_L2D_OBJ_ID; // L2D + dpath_index.vhca_id = dev->vhca_id; + dest_path_index = zxdh_get_path_index(&dpath_index); + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = dev->vhca_id; + src_path_index = zxdh_get_path_index(&spath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); // L2D Address + set_64bit_val(wqe, 24, src_dest.src); // Physical_Buffer_Address + set_64bit_val(wqe, 32, src_dest.len); // PTE_Length + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, 
ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + return ret_code; +} + +u16 get_dev_rc_ws_offset(u16 vhca_id, u32 total_vhca) +{ + u16 ws_offset = 0; + + if (total_vhca <= 34) { + ws_offset = 24 * 8 * vhca_id + 1; + } else if (total_vhca <= 66) { + ws_offset = 15 * 8 * vhca_id + 1; + } else if (total_vhca <= 130) { + ws_offset = 15 * 4 * vhca_id + 1; + } else if (total_vhca <= 258) { + ws_offset = 15 * 2 * vhca_id + 1; + } + + return ws_offset; +} + +u16 zxdh_get_dev_ud_ws(u16 vhca_id, u32 total_vhca) +{ + u16 ud_8k_offset; + u16 ud_8k_index; + + ud_8k_offset = get_dev_rc_ws_offset(total_vhca, total_vhca); + ud_8k_index = ud_8k_offset + vhca_id; + + return ud_8k_index; +} + +/** + * zxdh_sc_dev_init - Initialize control part of device + * @ver: version + * @dev: Device pointer + * @info: Device init info + */ +void zxdh_sc_dev_init(enum zxdh_rdma_vers ver, struct zxdh_sc_dev *dev, + struct zxdh_device_init_info *info) +{ + INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */ + mutex_init(&dev->ws_mutex); + dev->privileged = info->privileged; + dev->num_vfs = info->max_vfs; + dev->cache_id = 1; + dev->ird_size = ICRDMA_MAX_IRD_SIZE; + + dev->hw = info->hw; + dev->hw->hw_addr = info->bar0; + dev->hmc_epid = (ZXDH_AXID_HOST_EP0 + dev->ep_id); + dev->ws_offset = get_dev_rc_ws_offset(dev->vhca_id, dev->total_vhca); + /* Setup the hardware limits, hmc may limit further */ + dev->hw_attrs.min_hw_qp_id = ZXDH_MIN_IW_QP_ID; + dev->hw_attrs.min_hw_aeq_size = ZXDH_MIN_AEQ_ENTRIES; + dev->hw_attrs.max_hw_aeq_size = ZXDH_MAX_AEQ_ENTRIES; + dev->hw_attrs.min_hw_ceq_size = ZXDH_MIN_CEQ_ENTRIES; + dev->hw_attrs.max_hw_ceq_size = ZXDH_MAX_CEQ_ENTRIES; + dev->hw_attrs.uk_attrs.min_hw_cq_size = ZXDH_MIN_CQ_SIZE; + dev->hw_attrs.uk_attrs.max_hw_cq_size = ZXDH_MAX_CQ_SIZE; + dev->hw_attrs.max_hw_outbound_msg_size = ZXDH_MAX_OUTBOUND_MSG_SIZE; + dev->hw_attrs.max_mr_size = ZXDH_MAX_MR_SIZE; + dev->hw_attrs.max_hw_inbound_msg_size = ZXDH_MAX_INBOUND_MSG_SIZE; + dev->hw_attrs.uk_attrs.max_hw_inline = ZXDH_MAX_INLINE_DATA_SIZE; + dev->hw_attrs.max_hw_wqes = ZXDH_MAX_WQ_ENTRIES; + dev->hw_attrs.max_qp_wr = ZXDH_MAX_QP_WRS(ZXDH_MAX_QUANTA_PER_WR); + dev->hw_attrs.max_srq_wr = ZXDH_MAX_SRQ_WRS; + + dev->hw_attrs.uk_attrs.max_hw_srq_wr = ZXDH_MAX_SRQ_WRS; + dev->hw_attrs.uk_attrs.max_hw_rq_quanta = ZXDH_QP_SW_MAX_RQ_QUANTA; + dev->hw_attrs.uk_attrs.max_hw_srq_quanta = ZXDH_QP_SW_MAX_SRQ_QUANTA; + dev->hw_attrs.uk_attrs.max_hw_wq_quanta = ZXDH_QP_SW_MAX_WQ_QUANTA; + dev->hw_attrs.max_hw_pds = ZXDH_MAX_PDS; + dev->hw_attrs.max_hw_ena_vf_count = ZXDH_MAX_PE_ENA_VF_COUNT; + + dev->hw_attrs.max_done_count = ZXDH_DONE_COUNT; + dev->hw_attrs.max_sleep_count = ZXDH_SLEEP_COUNT; + dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS; + + dev->hw_attrs.uk_attrs.hw_rev = (u8)ver; + + spin_lock_init(&dev->vf_dev_lock); + zxdh_init_hw(dev); +} + +u16 zxdh_get_tc_ws_offset(u32 total_vhca, u8 traffic_class, u16 *tc_ws_num) +{ + u16 tc_ws_offset = 0; + + if (total_vhca <= 34) { + *tc_ws_num = 24; + tc_ws_offset = 24 * traffic_class; + } else if (total_vhca <= 66) { + *tc_ws_num = 15; + 
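/* with up to 66 vHCAs, each traffic class owns 15 work-scheduler nodes */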
tc_ws_offset = 15 * traffic_class; + } else if (total_vhca <= 130) { + *tc_ws_num = 15; + traffic_class /= 2; + tc_ws_offset = 16 * traffic_class; + } else if (total_vhca <= 258) { + *tc_ws_num = 15; + traffic_class /= 4; + tc_ws_offset = 16 * traffic_class; + } + + return tc_ws_offset; +} + +u16 zxdh_get_ws_index(struct zxdh_sc_qp *qp, u32 dest_ip) +{ + u16 dev_ws_offset = qp->dev->ws_offset; + u16 tc_ws_offset, tc_ws_num; + u16 dip_ws_offset; + u16 ws_index; + + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_UD) { + return zxdh_get_dev_ud_ws(qp->dev->vhca_id, + qp->dev->total_vhca); + } + + tc_ws_offset = zxdh_get_tc_ws_offset(qp->dev->total_vhca, qp->user_pri, + &tc_ws_num); + dip_ws_offset = dest_ip % tc_ws_num; + ws_index = dev_ws_offset + tc_ws_offset + dip_ws_offset; + return ws_index; +} + +/** + * zxdh_init_destroy_aeq - destroy aeq + * @rf: RDMA PCI function + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +int zxdh_init_destroy_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, 0); + + set_64bit_val(wqe, 16, 0); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + return 0; +} + +/** + * zxdh_create_cqp_qp - create cqp qp + * @rf: RDMA PCI function + * + * Issue a create cqp qp request and + * create the resources associated with the cqp qp + * The function is called during driver load + */ +int zxdh_create_cqp_qp(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + struct zxdh_dma_mem *cqp_host_ctx; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + cqp_host_ctx = &rf->cqp_host_ctx; + + cqp_host_ctx->va = NULL; + cqp_host_ctx->size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + cqp_host_ctx->va = dma_alloc_coherent(dev->hw->device, + cqp_host_ctx->size, + &cqp_host_ctx->pa, GFP_KERNEL); + + if (!cqp_host_ctx->va) + return -ENOMEM; + + memset(cqp_host_ctx->va, 0, cqp_host_ctx->size); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) { + dma_free_coherent(dev->hw->device, cqp_host_ctx->size, + cqp_host_ctx->va, cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + return -ENOSPC; + } + + set_64bit_val(wqe, 8, cqp_host_ctx->pa); + set_64bit_val(wqe, 16, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 24, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 32, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 40, RDMAQPC_MASK_INIT); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set 
*/ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) { + dma_free_coherent(dev->hw->device, cqp_host_ctx->size, + cqp_host_ctx->va, cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + return ret_code; + } + return 0; +} + +/** + * zxdh_destroy_cqp_qp - destroy cqp qp + * @rf: RDMA PCI function + * + * Issue a destroy cqp qp request and + * free the resources associated with the cqp qp + * The function is called during driver unload + */ +int zxdh_destroy_cqp_qp(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + struct zxdh_dma_mem *cqp_host_ctx; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + cqp_host_ctx = &rf->cqp_host_ctx; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, cqp_host_ctx->pa); + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, RDMAQPC_TX_MASKH_QP_STATE); + set_64bit_val(wqe, 32, RDMAQPC_MASK_RESET); + set_64bit_val(wqe, 40, RDMAQPC_MASK_RESET); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + dma_free_coherent(dev->hw->device, cqp_host_ctx->size, cqp_host_ctx->va, + cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + + return 0; +} + +int zxdh_sc_query_mkey(struct zxdh_sc_cqp *cqp, u32 mkeyindex, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 tmp = 0; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + set_64bit_val(wqe, 24, FIELD_PREP(ZXDH_CQPSQ_QUERY_MKEY, mkeyindex)); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + get_64bit_val(wqe, 24, &tmp); + + return 0; +} + +int zxdh_sc_send_mailbox(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, u64 msg3, + u64 msg4) +{ + __le64 *wqe; + u64 hdr; + u64 scratch = 0; + bool ftype = false; + struct zxdh_pci_f *rf = NULL; + int ret_code = 0; + + if (!dev) + return -ENOMEM; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + ftype = rf->ftype; + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, opt); + set_64bit_val(wqe, 16, dev->vhca_id); + set_64bit_val(wqe, 24, msg2); + set_64bit_val(wqe, 32, msg3); + set_64bit_val(wqe, 40, msg4); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DSTVFID, rf->vf_id) | + FIELD_PREP(ZXDH_CQPSQ_SRCPFVFID, + ((ftype == 0) ? 
rf->pf_id : rf->vf_id)) | + FIELD_PREP(ZXDH_CQPSQ_PFVALID, !ftype) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_SEND_MAILBOX) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + //zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + //ret_code = zxdh_cqp_poll_registers(dev->cqp, tail,dev->hw_attrs.max_done_count); + return ret_code; +} + +/** + * zxdh_copy_ip_ntohl - copy IP address from network to host + * @dst: IP address in host order + * @src: IP address in network order (big endian) + */ +void zxdh_copy_ip_ntohl(u32 *dst, __be32 *src) +{ + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst = ntohl(*src); +} diff --git a/src/rdma/src/dbgfs.c b/src/rdma/src/dbgfs.c new file mode 100644 index 0000000000000000000000000000000000000000..959adea2ef10eb923dc09adf9c9013f8fe71be7a --- /dev/null +++ b/src/rdma/src/dbgfs.c @@ -0,0 +1,928 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "icrdma_hw.h" + +struct mutex zrdma_debugfs_mutex; +struct dentry *zrdma_debugfs_root; +EXPORT_SYMBOL(zrdma_debugfs_root); + +#define SET_32_REG_VAL(rf, reg, offset, var) \ + do { \ + u32 tmp = rd32((rf)->sc_dev.hw, (reg)) & ~(offset); \ + wr32((rf)->sc_dev.hw, (reg), tmp | FIELD_PREP(offset, var)); \ + } while (0) + +int read_np_cnp_dscp(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_DSCP); + *var = FIELD_GET(ZXDH_DCQCN_NP_CNP_DSCP, tmp); + return 0; +} + +int write_np_cnp_dscp(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_DSCP) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_DSCP, ZXDH_DCQCN_NP_CNP_DSCP, var); + return 0; +} + +int read_np_cnp_prio(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_PRIO); + *var = FIELD_GET(ZXDH_DCQCN_NP_CNP_PRIO, tmp); + return 0; +} + +int write_np_cnp_prio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_PRIO) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_PRIO, ZXDH_DCQCN_NP_CNP_PRIO, var); + return 0; +} + +int read_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_PRIO_MODE); + *var = FIELD_GET(ZXDH_DCQCN_NP_CNP_PRIO_MODE, tmp); + return 0; +} + +int write_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_PRIO_MODE) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_PRIO_MODE, + ZXDH_DCQCN_NP_CNP_PRIO_MODE, var); + return 0; +} + +int read_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp_x = 0; + u32 tmp_y = 0; + tmp_x = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X); + tmp_y = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y); + *var = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, tmp_x) * + FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y, tmp_y); + return 0; +} + +int write_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 var) +{ + u32 y = 0; + u32 y_ex = 0; + u16 x = 0; + if (var > RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS || + (var < RDMA_FLOW_MIN_NP_MIN_TIME_BETWEEN_CNPS)) { + return -EINVAL; + } + y = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y, + rd32(rf->sc_dev.hw, + RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y)); + y_ex = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX, + 
rd32(rf->sc_dev.hw, + RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX)); + if (y != RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y || + y_ex != RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y_EX) { + return -EPERM; + } + x = var / RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y; + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, + ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, x); + return 0; +} + +int read_prg_time_reset(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_TIME_RESET, var); +} + +int write_prg_time_reset(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_PRG_TIME_RESET || + var < RDMA_FLOW_MIN_PRG_TIME_RESET) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_TIME_RESET, var); +} + +int read_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_CLAMP_TGT_RAGE, var); +} + +int write_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_CLAMP_TGT_RAGE, var); +} + +int read_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + var); +} + +int write_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + var); +} + +int read_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_DCE_TCP_RTT, var); +} + +int write_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RP_DCE_TCP_RTT || + var < RDMA_FLOW_MIN_RP_DCE_TCP_RTT) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_DCE_TCP_RTT, var); +} + +int read_dce_tcp_g(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_DCQCN_DCE_TCP_G, + var); +} + +int write_dce_tcp_g(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_DCE_TCP_G) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_DCQCN_DCE_TCP_G, + var); +} + +int read_rpg_gd(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_DCQCN_RPG_GD, + var); +} + +int write_rpg_gd(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_GD) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_DCQCN_RPG_GD, + var); +} + +int read_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, var); +} + +int write_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_INITIAL_ALPHA_VALUE) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, var); +} + +int read_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_MIN_DEC_FAC, var); +} + +int write_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_MIN_DEC_FAC) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_MIN_DEC_FAC, var); +} + +int read_rpg_threshold(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, 
rf->mcode_type, + E_PARA_DCQCN_RPG_THRESHOLD, var); +} + +int write_rpg_threshold(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_THRESHOLD) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_THRESHOLD, var); +} + +int read_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_RATIO_INCREASE, var); +} + +int write_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_RATIO_INCREASE, var); +} + +int read_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_AI_RATIO, var); +} + +int write_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_AI_RATIO) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_AI_RATIO, var); +} + +int read_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_HAI_RATIO, var); +} + +int write_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_HAI_RATIO) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_DCQCN_RPG_HAI_RATIO, var); +} + +int read_rpg_byte_reset(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_BYTE_RESET); + *var = FIELD_GET(ZXDH_DCQCN_RPG_BYTE_RESET, tmp); + return 0; +} + +int write_rpg_byte_reset(struct zxdh_pci_f *rf, u32 var) +{ + if (var < RDMA_FLOW_BYTE_RESET_THRESHOLD) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_BYTE_RESET, ZXDH_DCQCN_RPG_BYTE_RESET, + var); + return 0; +} + +int read_rpg_ai_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_AI_RATE); + *var = FIELD_GET(ZXDH_DCQCN_RPG_AI_RATE, tmp); + return 0; +} + +int write_rpg_ai_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_CONTROL_RATE_1G) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_AI_RATE, ZXDH_DCQCN_RPG_AI_RATE, var); + return 0; +} + +int read_rpg_hai_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_HAI_RATE); + *var = FIELD_GET(ZXDH_DCQCN_RPG_HAI_RATE, tmp); + return 0; +} + +int write_rpg_hai_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_CONTROL_RATE_10G) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_HAI_RATE, ZXDH_DCQCN_RPG_HAI_RATE, + var); + return 0; +} + +int read_rpg_max_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_MAX_RATE); + *var = FIELD_GET(ZXDH_RPG_MAX_RATE, tmp); + return 0; +} + +int write_rpg_max_rate(struct zxdh_pci_f *rf, u32 var) +{ + int ret; + u32 tmp = 0; + if (var < RDMA_FLOW_CONTROL_RATE_10M || + var > RDMA_FLOW_CONTROL_RATE_200G) { + return -EINVAL; + } + ret = read_rpg_min_rate(rf, &tmp); + if (ret || tmp > var) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_RPG_MAX_RATE, ZXDH_RPG_MAX_RATE, var); + return 0; +} + +int read_rpg_min_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_MIN_RATE); + *var = FIELD_GET(ZXDH_RPG_MIN_RATE, tmp); + return 0; +} + +int write_rpg_min_rate(struct zxdh_pci_f *rf, u32 var) +{ + int ret; + u32 tmp = 0; + if (var < RDMA_FLOW_CONTROL_RATE_10M || + var > RDMA_FLOW_CONTROL_RATE_200G) { + return -EINVAL; + } + ret = 
read_rpg_max_rate(rf, &tmp); + if (ret || tmp < var) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_RPG_MIN_RATE, ZXDH_RPG_MIN_RATE, var); + return 0; +} + +parameter_t zrdma_dcqcn_params[] = { + { "np_cnp_dscp", ZRDMA_DBG_DCQCN_NP_CNP_DSCP, &read_np_cnp_dscp, + &write_np_cnp_dscp }, + { "np_cnp_prio", ZRDMA_DBG_DCQCN_NP_CNP_PRIO, &read_np_cnp_prio, + &write_np_cnp_prio }, + { "np_cnp_prio_mode", ZRDMA_DBG_DCQCN_NP_CNP_PRIO_MODE, + &read_np_cnp_prio_mode, &write_np_cnp_prio_mode }, + { "np_min_time_between_cnps", ZRDMA_DBG_DCQCN_NP_MIN_TIME_BETWEEN_CNPS, + &read_np_min_time_between_cnps, &write_np_min_time_between_cnps }, + { "prg_time_reset", ZRDMA_DBG_DCQCN_PRG_TIME_RESET, + &read_prg_time_reset, &write_prg_time_reset }, + { "clamp_tgt_rate", ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE, + &read_rpg_clamp_tgt_rate, &write_rpg_clamp_tgt_rate }, + { "clamp_tgt_rate_after_time_inc", + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC, + &read_rpg_clamp_tgt_rate_after_time_inc, + &write_rpg_clamp_tgt_rate_after_time_inc }, + { "dce_tcp_rtt", ZRDMA_DBG_DCQCN_RP_DCE_TCP_RTT, &read_rp_dce_tcp_rtt, + &write_rp_dce_tcp_rtt }, + { "dce_tcp_g", ZRDMA_DBG_DCQCN_DCE_TCP_G, &read_dce_tcp_g, + &write_dce_tcp_g }, + { "rpg_gd", ZRDMA_DBG_DCQCN_RPG_GD, &read_rpg_gd, &write_rpg_gd }, + { "initial_alpha_value", ZRDMA_DBG_DCQCN_RPG_INITIAL_ALPHA_VALUE, + &read_rpg_initial_alpha_value, &write_rpg_initial_alpha_value }, + { "min_dec_fac", ZRDMA_DBG_DCQCN_RPG_MIN_DEC_FAC, &read_rpg_min_dec_fac, + &write_rpg_min_dec_fac }, + { "rpg_threshold", ZRDMA_DBG_DCQCN_RPG_THRESHOLD, &read_rpg_threshold, + &write_rpg_threshold }, + { "rpg_ratio_increase", ZRDMA_DBG_DCQCN_RPG_RATIO_INCREASE, + &read_rpg_ratio_increase, &write_rpg_ratio_increase }, + { "rpg_ai_ratio", ZRDMA_DBG_DCQCN_RPG_AI_RATIO, &read_rpg_ai_ratio, + &write_rpg_ai_ratio }, + { "rpg_hai_ratio", ZRDMA_DBG_DCQCN_RPG_HAI_RATIO, &read_rpg_hai_ratio, + &write_rpg_hai_ratio }, + { "rpg_byte_reset", ZRDMA_DBG_DCQCN_RPG_BYTE_RESET, + &read_rpg_byte_reset, &write_rpg_byte_reset }, + { "rpg_ai_rate", ZRDMA_DBG_DCQCN_RPG_AI_RATE, &read_rpg_ai_rate, + &write_rpg_ai_rate }, + { "rpg_hai_rate", ZRDMA_DBG_DCQCN_RPG_HAI_RATE, &read_rpg_hai_rate, + &write_rpg_hai_rate }, + { "rpg_max_rate", ZRDMA_DBG_DCQCN_RPG_MAX_RATE, &read_rpg_max_rate, + &write_rpg_max_rate }, + { "rpg_min_rate", ZRDMA_DBG_DCQCN_RPG_MIN_RATE, &read_rpg_min_rate, + &write_rpg_min_rate }, +}; + +int read_alpha(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_ALPHA, var); +} + +int write_alpha(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_ALPHA_VALUE || var == 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_ALPHA, var); +} + +int read_tlow(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_TLOW, var); +} + +int write_tlow(struct zxdh_pci_f *rf, u32 var) +{ + u32 tmp = 0; + int ret; + if (var > RDMA_FLOW_MAX_TLOW_VALUE || var == 0) { + return -EINVAL; + } + ret = read_thigh(rf, &tmp); + if (ret || tmp < var) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_TLOW, var); +} + +int read_thigh(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_THIGH, var); +} + +int write_thigh(struct zxdh_pci_f *rf, u32 var) +{ + u32 tmp = 0; + int ret; + if (var > RDMA_FLOW_MAX_THIGH_VALUE || var == 0) { + return -EINVAL; + } + ret = read_tlow(rf, &tmp); + if (ret || tmp > var) { + return -EINVAL; + 
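		/* Descriptive note (not in the original patch): thigh may not be
		 * programmed below the currently configured tlow; write_tlow()
		 * enforces the mirror-image check against thigh. */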
} + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_THIGH, var); +} + +int read_ai_num(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_AI_NUM, + var); +} + +int write_ai_num(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_AI_NUM_VALUE || var == 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_AI_NUM, + var); +} + +int read_thred_gradient(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_RTT_THRED_GRADIENT, var); +} + +int write_thred_gradient(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_THRED_GRADIENT) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_RTT_THRED_GRADIENT, var); +} + +int read_hai_n(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_HAI_N, var); +} + +int write_hai_n(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_HAI_N_VALUE || var == 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_HAI_N, var); +} + +int read_ai_n(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_AI_N, var); +} + +int write_ai_n(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_AI_N_VALUE || var == 0) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_AI_N, var); +} + +int read_vf_delta(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_VF_DELTA); + *var = FIELD_GET(ZXDH_RTT_VF_DELTA, tmp); + return 0; +} + +int write_vf_delta(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_VF_DELTA_VALUE || var == 0) { + return -EINVAL; + } + SET_32_REG_VAL(rf, RDMA_RPG_VF_DELTA, ZXDH_RTT_VF_DELTA, var); + return 0; +} + +parameter_t zrdma_rtt_params[] = { + { "alpha", ZRDMA_DBG_RTT_ALPHA, &read_alpha, &write_alpha }, + { "tlow", ZRDMA_DBG_RTT_TLOW, &read_tlow, &write_tlow }, + { "thigh", ZRDMA_DBG_RTT_THIGH, &read_thigh, &write_thigh }, + { "ai_num", ZRDMA_DBG_RTT_AI_NUM, &read_ai_num, &write_ai_num }, + { "thred_gradient", ZRDMA_DBG_RTT_THRED_GRADIENT, &read_thred_gradient, + &write_thred_gradient }, + { "hai_n", ZRDMA_DBG_RTT_HAI_N, &read_hai_n, &write_hai_n }, + { "ai_n", ZRDMA_DBG_RTT_AI_N, &read_ai_n, &write_ai_n }, + { "rpg_max_rate", ZRDMA_DBG_RTT_RPG_MAX_RATE, &read_rpg_max_rate, + &write_rpg_max_rate }, + { "rpg_min_rate", ZRDMA_DBG_RTT_RPG_MIN_RATE, &read_rpg_min_rate, + &write_rpg_min_rate }, + { "delta", ZRDMA_DBG_RTT_VF_DELTA, &read_vf_delta, &write_vf_delta }, +}; + +int zrdma_ib_write_rtt_params(struct zxdh_pci_f *rf, int offset, u32 var) +{ + int ret; + if (offset >= ZRDMA_DBG_RTT_MAX || offset < 0) { + return -EINVAL; + } + ret = zrdma_rtt_params[offset].wfunc(rf, var); + return ret; +} + +int zrdma_ib_read_rtt_params(struct zxdh_pci_f *rf, int offset, u32 *var) +{ + int ret; + if (offset >= ZRDMA_DBG_RTT_MAX || offset < 0) { + return -EINVAL; + } + ret = zrdma_rtt_params[offset].rfunc(rf, var); + return ret; +} + +int zrdma_ib_write_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 var) +{ + int ret; + if (offset >= ZRDMA_DBG_DCQCN_MAX || offset < 0) { + return -EINVAL; + } + ret = zrdma_dcqcn_params[offset].wfunc(rf, var); + return ret; +} + +int zrdma_ib_read_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 *var) +{ + int ret; + if (offset >= ZRDMA_DBG_DCQCN_MAX || offset < 0) { + return -EINVAL; + } + ret = zrdma_dcqcn_params[offset].rfunc(rf, var); 
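	/* Descriptive note (not in the original patch): offset indexes
	 * zrdma_dcqcn_params[]; each entry supplies either a register-backed
	 * (rd32/wr32) or a microcode-backed (zxdh_mp_dtcm_para_*) accessor pair. */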
+ return ret; +} + +static ssize_t check_write_param(const char __user *buf, size_t count, u32 *var) +{ + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + if (count > sizeof(lbuf)) + return -EINVAL; + if (copy_from_user(lbuf, buf, count)) + return -EFAULT; + lbuf[sizeof(lbuf) - 1] = '\0'; + if (kstrtou32(lbuf, 0, var)) + return -EINVAL; + return 0; +} + +static ssize_t dcqcn_write_param(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) + +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + int ret; + u32 var = 0; + ret = check_write_param(buf, count, &var); + if (ret) { + return ret; + } + ret = zrdma_ib_write_dcqcn_params(param->dev, offset, var); + return ret ? ret : count; +} + +static ssize_t dcqcn_read_param(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + u32 var = 0; + int ret; + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + + ret = zrdma_ib_read_dcqcn_params(param->dev, offset, &var); + if (ret) + return ret; + + ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var); + if (ret < 0) + return ret; + + return simple_read_from_buffer(buf, count, pos, lbuf, ret); +} + +static ssize_t rtt_write_param(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) + +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + int ret; + u32 var = 0; + ret = check_write_param(buf, count, &var); + if (ret) { + return ret; + } + ret = zrdma_ib_write_rtt_params(param->dev, offset, var); + return ret ? ret : count; +} + +static ssize_t rtt_read_param(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + u32 var = 0; + int ret; + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + + ret = zrdma_ib_read_rtt_params(param->dev, offset, &var); + if (ret) + return ret; + + ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var); + if (ret < 0) + return ret; + + return simple_read_from_buffer(buf, count, pos, lbuf, ret); +} + +static const struct file_operations dbg_dcqcn_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dcqcn_write_param, + .read = dcqcn_read_param, +}; + +static const struct file_operations dbg_rtt_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = rtt_write_param, + .read = rtt_read_param, +}; + +void zrdma_cleanup_debugfs_entry(struct zxdh_pci_f *rf) +{ + struct dentry *dentry = NULL; + char pci_board_bdf[64] = { 0 }; + + if (!rf) { + pr_info("zrdma_cleanup_debugfs rf is null\n"); + return; + } + + if (!zrdma_debugfs_root || !rf->debugfs_entry.board_root || + !rf->debugfs_entry.vhca_root) + return; + mutex_lock(&zrdma_debugfs_mutex); + get_pci_board_bdf(pci_board_bdf, rf); + dentry = debugfs_lookup(pci_board_bdf, zrdma_debugfs_root); + if (dentry) { + debugfs_remove_recursive(dentry); + rf->debugfs_entry.board_root = NULL; + } else { + rf->debugfs_entry.board_root = NULL; + } + mutex_unlock(&zrdma_debugfs_mutex); + debugfs_remove_recursive(rf->debugfs_entry.vhca_root); + rf->debugfs_entry.vhca_root = NULL; + if (rf->debugfs_entry.board_params.base) { + kfree(rf->debugfs_entry.board_params.base); + rf->debugfs_entry.board_params.base = NULL; + } + if (rf->debugfs_entry.vhca_params.base) { + kfree(rf->debugfs_entry.vhca_params.base); + rf->debugfs_entry.vhca_params.base = NULL; + } +} + +int create_debugfs_file_vhca_dcqcn(struct zxdh_pci_f *rf) +{ + int i; + int offset; 
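	/* Descriptive note (not in the original patch): the per-vhca debugfs
	 * entries cover only the tail of zrdma_dcqcn_params[], starting at
	 * ZRDMA_DBG_DCQCN_RPG_BYTE_RESET (rpg_byte_reset .. rpg_min_rate); the
	 * first ZRDMA_BOARD_DCQCN_CC_MAX entries are created once per board in
	 * create_debugfs_file_board_dcqcn(). */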
+ struct zrdma_dbg_vhca_dcqcn_params *dbg_cc_params; + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.vhca_params.vhca_dcqcn_params = dbg_cc_params; + for (i = 0, offset = ZRDMA_DBG_DCQCN_RPG_BYTE_RESET; + i < ZRDMA_VHCA_DCQCN_CC_MAX; i++, offset++) { + dbg_cc_params->params[i].offset = offset; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_dcqcn_params[offset].name, 0600, + rf->debugfs_entry.vhca_dcqcn_root, + &dbg_cc_params->params[i], &dbg_dcqcn_fops); + } + return 0; +} + +int create_debugfs_file_board_dcqcn(struct zxdh_pci_f *rf) +{ + int i; + struct zrdma_dbg_board_dcqcn_params *dbg_cc_params; + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.board_params.board_dcqcn_params = dbg_cc_params; + for (i = 0; i < ZRDMA_BOARD_DCQCN_CC_MAX; i++) { + dbg_cc_params->params[i].offset = i; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_dcqcn_params[i].name, 0600, + rf->debugfs_entry.board_dcqcn_root, + &dbg_cc_params->params[i], &dbg_dcqcn_fops); + } + return 0; +} + +int create_debugfs_file_vhca_rtt(struct zxdh_pci_f *rf) +{ + int i = 0; + int offset = 0; + struct zrdma_dbg_vhca_rtt_params *dbg_cc_params; + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.vhca_params.vhca_rtt_params = dbg_cc_params; + for (i = 0, offset = ZRDMA_DBG_RTT_RPG_MAX_RATE; + i < ZRDMA_VHCA_RTT_CC_MAX; i++, offset++) { + dbg_cc_params->params[i].offset = offset; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_rtt_params[offset].name, 0600, + rf->debugfs_entry.vhca_rtt_root, + &dbg_cc_params->params[i], &dbg_rtt_fops); + } + return 0; +} + +int create_debugfs_file_board_rtt(struct zxdh_pci_f *rf) +{ + int i = 0; + struct zrdma_dbg_board_rtt_params *dbg_cc_params; + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.board_params.board_rtt_params = dbg_cc_params; + for (i = 0; i < ZRDMA_BOARD_RTT_CC_MAX; i++) { + dbg_cc_params->params[i].offset = i; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_rtt_params[i].name, 0600, + rf->debugfs_entry.board_rtt_root, + &dbg_cc_params->params[i], &dbg_rtt_fops); + } + return 0; +} + +void create_debugfs_dcqcn_entry(const char *pci_bdf, struct zxdh_pci_f *rf) +{ + int ret; + mutex_lock(&zrdma_debugfs_mutex); + if (!rf->debugfs_entry.board_root) { + struct dentry *dentry = + debugfs_lookup(pci_bdf, zrdma_debugfs_root); + if (!dentry) { + rf->debugfs_entry.board_root = + debugfs_create_dir(pci_bdf, zrdma_debugfs_root); + rf->debugfs_entry.board_dcqcn_root = debugfs_create_dir( + ZRDMA_DEBUGFS_DCQCN_DIR, + rf->debugfs_entry.board_root); + ret = create_debugfs_file_board_dcqcn(rf); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + } else { + rf->debugfs_entry.board_root = dentry; + } + } + mutex_unlock(&zrdma_debugfs_mutex); + rf->debugfs_entry.vhca_root = debugfs_create_dir( + dev_name(&rf->pcidev->dev), zrdma_debugfs_root); + rf->debugfs_entry.vhca_dcqcn_root = debugfs_create_dir( + ZRDMA_DEBUGFS_DCQCN_DIR, rf->debugfs_entry.vhca_root); + ret = create_debugfs_file_vhca_dcqcn(rf); + if (ret) { + goto err; + } + return; +err: + zrdma_cleanup_debugfs_entry(rf); + return; +} + +void create_debugfs_rtt_entry(const char *pci_bdf, struct zxdh_pci_f *rf) +{ + int ret; + mutex_lock(&zrdma_debugfs_mutex); + if 
(!rf->debugfs_entry.board_root) { + struct dentry *dentry = + debugfs_lookup(pci_bdf, zrdma_debugfs_root); + if (!dentry) { + rf->debugfs_entry.board_root = + debugfs_create_dir(pci_bdf, zrdma_debugfs_root); + rf->debugfs_entry.board_rtt_root = debugfs_create_dir( + ZRDMA_DEBUGFS_RTT_DIR, + rf->debugfs_entry.board_root); + ret = create_debugfs_file_board_rtt(rf); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + } else { + rf->debugfs_entry.board_root = dentry; + } + } + mutex_unlock(&zrdma_debugfs_mutex); + rf->debugfs_entry.vhca_root = debugfs_create_dir( + dev_name(&rf->pcidev->dev), zrdma_debugfs_root); + rf->debugfs_entry.vhca_rtt_root = debugfs_create_dir( + ZRDMA_DEBUGFS_RTT_DIR, rf->debugfs_entry.vhca_root); + ret = create_debugfs_file_vhca_rtt(rf); + if (ret) { + goto err; + } + return; +err: + zrdma_cleanup_debugfs_entry(rf); + return; +} + +void create_debugfs_entry(struct zxdh_pci_f *rf) +{ + char pci_board_bdf[64] = { 0 }; + if (!zrdma_debugfs_root) { + pr_info("create_debugfs_entry debugfs zrdma_debugfs_root is null\n"); + return; + } + get_pci_board_bdf(pci_board_bdf, rf); + switch (rf->mcode_type) { + case MCODE_TYPE_DCQCN: + create_debugfs_dcqcn_entry(pci_board_bdf, rf); + break; + case MCODE_TYPE_RTT: + create_debugfs_rtt_entry(pci_board_bdf, rf); + break; + default: + break; + } +} + +void zrdma_register_debugfs(void) +{ + zrdma_debugfs_root = debugfs_create_dir("zrdma", NULL); + mutex_init(&zrdma_debugfs_mutex); +} + +void zrdma_unregister_debugfs(void) +{ + debugfs_remove(zrdma_debugfs_root); +} diff --git a/src/rdma/src/dbgfs.h b/src/rdma/src/dbgfs.h new file mode 100644 index 0000000000000000000000000000000000000000..fa69e4e0db13782bcf69483e794415875d000805 --- /dev/null +++ b/src/rdma/src/dbgfs.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_DEBUGFS_H +#define ZXDH_DEBUGFS_H +#include +#include "type.h" + +#define ZRDMA_DEBUGFS_MAX_BUF_LEN 35 + +#define ZRDMA_BOARD_DCQCN_CC_MAX 16 +#define ZRDMA_VHCA_DCQCN_CC_MAX 5 +#define ZRDMA_BOARD_RTT_CC_MAX 7 +#define ZRDMA_VHCA_RTT_CC_MAX 3 + +#define ZRDMA_DEBUGFS_DCQCN_DIR "dcqcn" +#define ZRDMA_DEBUGFS_RTT_DIR "rtt" + +enum zrdma_dbg_dcqcn_types { + ZRDMA_DBG_DCQCN_NP_CNP_DSCP = 0, + ZRDMA_DBG_DCQCN_NP_CNP_PRIO, + ZRDMA_DBG_DCQCN_NP_CNP_PRIO_MODE, + ZRDMA_DBG_DCQCN_NP_MIN_TIME_BETWEEN_CNPS, + ZRDMA_DBG_DCQCN_PRG_TIME_RESET, + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE, + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC, + ZRDMA_DBG_DCQCN_RP_DCE_TCP_RTT, + ZRDMA_DBG_DCQCN_DCE_TCP_G, + ZRDMA_DBG_DCQCN_RPG_GD, + ZRDMA_DBG_DCQCN_RPG_INITIAL_ALPHA_VALUE, + ZRDMA_DBG_DCQCN_RPG_MIN_DEC_FAC, + ZRDMA_DBG_DCQCN_RPG_THRESHOLD, + ZRDMA_DBG_DCQCN_RPG_RATIO_INCREASE, + ZRDMA_DBG_DCQCN_RPG_AI_RATIO, + ZRDMA_DBG_DCQCN_RPG_HAI_RATIO, + ZRDMA_DBG_DCQCN_RPG_BYTE_RESET, + ZRDMA_DBG_DCQCN_RPG_AI_RATE, + ZRDMA_DBG_DCQCN_RPG_HAI_RATE, + ZRDMA_DBG_DCQCN_RPG_MAX_RATE, + ZRDMA_DBG_DCQCN_RPG_MIN_RATE, + ZRDMA_DBG_DCQCN_MAX, +}; + +enum zrdma_dbg_rtt_types { + ZRDMA_DBG_RTT_ALPHA = 0, + ZRDMA_DBG_RTT_TLOW, + ZRDMA_DBG_RTT_THIGH, + ZRDMA_DBG_RTT_AI_NUM, + ZRDMA_DBG_RTT_THRED_GRADIENT, + ZRDMA_DBG_RTT_HAI_N, + ZRDMA_DBG_RTT_AI_N, + ZRDMA_DBG_RTT_RPG_MAX_RATE, + ZRDMA_DBG_RTT_RPG_MIN_RATE, + ZRDMA_DBG_RTT_VF_DELTA, + ZRDMA_DBG_RTT_MAX, +}; + +typedef struct { + const char *name; + uint8_t types; + int (*rfunc)(struct zxdh_pci_f *, u32 *); + int (*wfunc)(struct zxdh_pci_f *, u32); +} parameter_t; + +struct zrdma_dbg_param { + int offset; + struct 
zxdh_pci_f *dev; +}; + +struct zrdma_dbg_board_dcqcn_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_BOARD_DCQCN_CC_MAX]; +}; + +struct zrdma_dbg_vhca_dcqcn_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_VHCA_DCQCN_CC_MAX]; +}; + +struct zrdma_dbg_board_rtt_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_BOARD_RTT_CC_MAX]; +}; + +struct zrdma_dbg_vhca_rtt_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_VHCA_RTT_CC_MAX]; +}; + +struct zrdma_debugfs_entries { + struct dentry *board_root; + struct dentry *vhca_root; + struct dentry *board_dcqcn_root; + struct dentry *board_rtt_root; + struct dentry *vhca_dcqcn_root; + struct dentry *vhca_rtt_root; + union { + void *base; + struct zrdma_dbg_board_dcqcn_params *board_dcqcn_params; + struct zrdma_dbg_board_rtt_params *board_rtt_params; + } board_params; + union { + void *base; + struct zrdma_dbg_vhca_dcqcn_params *vhca_dcqcn_params; + struct zrdma_dbg_vhca_rtt_params *vhca_rtt_params; + } vhca_params; +}; + +void create_debugfs_entry(struct zxdh_pci_f *rf); +void zrdma_register_debugfs(void); +void zrdma_unregister_debugfs(void); +void zrdma_cleanup_debugfs_entry(struct zxdh_pci_f *rf); +int zrdma_ib_write_rtt_params(struct zxdh_pci_f *rf, int offset, u32 var); +int zrdma_ib_read_rtt_params(struct zxdh_pci_f *rf, int offset, u32 *var); +int zrdma_ib_write_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 var); +int zrdma_ib_read_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 *var); +int create_debugfs_file_vhca_dcqcn(struct zxdh_pci_f *rf); +int create_debugfs_file_board_dcqcn(struct zxdh_pci_f *rf); +int create_debugfs_file_vhca_rtt(struct zxdh_pci_f *rf); +int create_debugfs_file_board_rtt(struct zxdh_pci_f *rf); +void create_debugfs_dcqcn_entry(const char *pci_bdf, struct zxdh_pci_f *rf); +void create_debugfs_rtt_entry(const char *pci_bdf, struct zxdh_pci_f *rf); + +int read_np_cnp_dscp(struct zxdh_pci_f *rf, u32 *var); +int read_np_cnp_prio(struct zxdh_pci_f *rf, u32 *var); +int read_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 *var); +int read_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 *var); +int read_prg_time_reset(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 *var); +int read_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 *var); +int read_dce_tcp_g(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_gd(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_threshold(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_byte_reset(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ai_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_hai_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_max_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_min_rate(struct zxdh_pci_f *rf, u32 *var); +int read_alpha(struct zxdh_pci_f *rf, u32 *var); +int read_tlow(struct zxdh_pci_f *rf, u32 *var); +int read_thigh(struct zxdh_pci_f *rf, u32 *var); +int read_ai_num(struct zxdh_pci_f *rf, u32 *var); +int read_thred_gradient(struct zxdh_pci_f *rf, u32 *var); +int read_hai_n(struct zxdh_pci_f *rf, u32 *var); +int read_ai_n(struct zxdh_pci_f *rf, u32 *var); 
+int read_vf_delta(struct zxdh_pci_f *rf, u32 *var); + +int write_np_cnp_dscp(struct zxdh_pci_f *rf, u32 var); +int write_np_cnp_prio(struct zxdh_pci_f *rf, u32 var); +int write_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 var); +int write_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 var); +int write_prg_time_reset(struct zxdh_pci_f *rf, u32 var); +int write_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 var); +int write_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 var); +int write_dce_tcp_g(struct zxdh_pci_f *rf, u32 var); +int write_rpg_gd(struct zxdh_pci_f *rf, u32 var); +int write_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 var); +int write_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 var); +int write_rpg_threshold(struct zxdh_pci_f *rf, u32 var); +int write_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 var); +int write_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 var); +int write_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 var); +int write_rpg_byte_reset(struct zxdh_pci_f *rf, u32 var); +int write_rpg_ai_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_hai_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_max_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_min_rate(struct zxdh_pci_f *rf, u32 var); +int write_alpha(struct zxdh_pci_f *rf, u32 var); +int write_tlow(struct zxdh_pci_f *rf, u32 var); +int write_thigh(struct zxdh_pci_f *rf, u32 var); +int write_ai_num(struct zxdh_pci_f *rf, u32 var); +int write_thred_gradient(struct zxdh_pci_f *rf, u32 var); +int write_hai_n(struct zxdh_pci_f *rf, u32 var); +int write_ai_n(struct zxdh_pci_f *rf, u32 var); +int write_vf_delta(struct zxdh_pci_f *rf, u32 var); + +#endif \ No newline at end of file diff --git a/src/rdma/src/defs.h b/src/rdma/src/defs.h new file mode 100644 index 0000000000000000000000000000000000000000..ec7e04e38d77eed175743f6a8ba924ad24832409 --- /dev/null +++ b/src/rdma/src/defs.h @@ -0,0 +1,2824 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_DEFS_H +#define ZXDH_DEFS_H + +#define ZXDH_FIRST_USER_QP_ID 3 + +#define ECN_CODE_PT_MASK 3 +#define ECN_CODE_PT_VAL 2 + +#define ZXDH_PUSH_OFFSET (8 * 1024 * 1024) +#define ZXDH_PF_FIRST_PUSH_PAGE_INDEX 16 +#define ZXDH_PF_BAR_RSVD (60 * 1024) +#define ZXDH_VF_PUSH_OFFSET ((8 + 64) * 1024) +#define ZXDH_VF_FIRST_PUSH_PAGE_INDEX 2 +#define ZXDH_VF_BAR_RSVD 4096 + +#define ZXDH_AE_REQUESTER 0x0 +#define ZXDH_AE_RESPONDER 0x1 + +#define ZXDH_RISCV_IDX 1023 +#define ZXDH_MAX_EP_NUM 5 +#define ZXDH_CQE_ERR_MAX 0xFFFF +#define ZXDH_AEQ_RETRY_LIMIT 5 + +#define ZXDH_IRD_HW_SIZE_4 0 +#define ZXDH_IRD_HW_SIZE_16 1 +#define ZXDH_IRD_HW_SIZE_64 2 +#define ZXDH_IRD_HW_SIZE_128 3 +#define ZXDH_IRD_HW_SIZE_256 4 + +#define ZXDH_RAM_REPEAT_READ_CNT 5 +#define ZXDH_RAM_DATA_REPEAT_READ_CNT 10 +#define ZXDH_RAM_DELAY_MS 30 + +#define ZXDH_RAM_WIDTH_LEN_UNIT_1 1 +#define ZXDH_RAM_WIDTH_LEN_UNIT_2 2 +#define ZXDH_RAM_WIDTH_LEN_UNIT_3 3 +#define ZXDH_RAM_WIDTH_LEN_UNIT_4 4 + +#define ZXDH_RAM_WIDTH_32_BIT 32 +#define ZXDH_RAM_WIDTH_64_BIT 64 +#define ZXDH_RAM_WIDTH_96_BIT 96 +#define ZXDH_RAM_WIDTH_128_BIT 128 +#define ZXDH_RAM_WIDTH_160_BIT 160 +#define ZXDH_RAM_WIDTH_192_BIT 192 +#define ZXDH_RAM_WIDTH_224_BIT 224 +#define ZXDH_RAM_WIDTH_256_BIT 256 +#define ZXDH_RAM_WIDTH_288_BIT 288 +#define ZXDH_RAM_WIDTH_320_BIT 320 +#define ZXDH_RAM_WIDTH_352_BIT 352 +#define ZXDH_RAM_WIDTH_384_BIT 384 +#define ZXDH_RAM_WIDTH_416_BIT 416 +#define 
ZXDH_RAM_WIDTH_448_BIT 448 +#define ZXDH_RAM_WIDTH_480_BIT 480 + +#define ZXDH_RAM_32_BIT_IDX_0 0 +#define ZXDH_RAM_32_BIT_IDX_1 1 +#define ZXDH_RAM_32_BIT_IDX_2 2 +#define ZXDH_RAM_32_BIT_IDX_3 3 +#define ZXDH_RAM_32_BIT_IDX_4 4 +#define ZXDH_RAM_32_BIT_IDX_5 5 +#define ZXDH_RAM_32_BIT_IDX_6 6 +#define ZXDH_RAM_32_BIT_IDX_7 7 +#define ZXDH_RAM_32_BIT_IDX_8 8 +#define ZXDH_RAM_32_BIT_IDX_9 9 +#define ZXDH_RAM_32_BIT_IDX_10 10 +#define ZXDH_RAM_32_BIT_IDX_11 11 +#define ZXDH_RAM_32_BIT_IDX_12 12 +#define ZXDH_RAM_32_BIT_IDX_13 13 +#define ZXDH_RAM_32_BIT_IDX_14 14 + +#define ZXDH_32_BIT_MASK_0_3 0x07UL +#define ZXDH_32_BIT_MASK_0_7 0xFFUL +#define ZXDH_32_BIT_MASK_0_15 0xFFFFUL +#define ZXDH_32_BIT_MASK_8_15 0xFF00UL +#define ZXDH_32_BIT_MASK_8_24 0xFFFF00UL +#define ZXDH_32_BIT_MASK_16_23 0xFF0000UL +#define ZXDH_32_BIT_MASK_24_31 0xFF000000UL +#define ZXDH_32_BIT_MASK_16_31 0xFFFF0000UL + +#define IRMDA_BIT_WIDTH_8 8 +#define IRMDA_BIT_WIDTH_11 11 +#define IRMDA_BIT_WIDTH_12 12 +#define IRMDA_BIT_WIDTH_16 16 +#define IRMDA_BIT_WIDTH_24 24 +#define IRMDA_BIT_WIDTH_32 32 + +#define ZXDH_RAM_H11 0x11 +#define ZXDH_RAM_H12 0x12 +#define ZXDH_RAM_H13 0x13 +#define ZXDH_RAM_H14 0x14 +#define ZXDH_RAM_H15 0x15 +#define ZXDH_RAM_H25 0x25 +#define ZXDH_RAM_H26 0x26 +#define ZXDH_RAM_H28 0x28 +#define ZXDH_RAM_H29 0x29 +#define ZXDH_RAM_H61 0x61 +#define ZXDH_RAM_H62 0x62 +#define ZXDH_RAM_H63 0x63 +#define ZXDH_RAM_H64 0x64 +#define ZXDH_RAM_H104 0x104 +#define ZXDH_RAM_H105 0x105 +#define ZXDH_RAM_H106 0x106 +#define ZXDH_RAM_H19D 0x19D + +#define RDMARX_MAX_MSG_SIZE 0x80000000 + +enum zxdh_protocol_used { + ZXDH_ANY_PROTOCOL = 0, + ZXDH_IWARP_PROTOCOL_ONLY = 1, + ZXDH_ROCE_PROTOCOL_ONLY = 2, +}; + +#define ZXDH_QPC_PF_REQ_ENA_S 30 +#define ZXDH_QPC_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_QPC_PF_REQ_BASEQPN_S 10 +#define ZXDH_QPC_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_QPC_PF_REQ_VHCAID_S 0 +#define ZXDH_QPC_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_CQ_PF_REQ_ENA_S 30 +#define ZXDH_CQ_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_CQ_PF_REQ_BASEQPN_S 10 +#define ZXDH_CQ_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_CQ_PF_REQ_VHCAID_S 0 +#define ZXDH_CQ_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_SRQ_PF_REQ_ENA_S 30 +#define ZXDH_SRQ_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_SRQ_PF_REQ_BASEQPN_S 10 +#define ZXDH_SRQ_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_SRQ_PF_REQ_VHCAID_S 0 +#define ZXDH_SRQ_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_QP_STATE_INVALID 0 +#define ZXDH_QP_STATE_IDLE 1 +#define ZXDH_QP_STATE_RTS 2 +#define ZXDH_QP_STATE_CLOSING 3 +#define ZXDH_QP_STATE_SQD 3 +#define ZXDH_QP_STATE_RTR 4 +#define ZXDH_QP_STATE_TERMINATE 5 +#define ZXDH_QP_STATE_ERROR 6 + +//DPU QP state +#define ZXDH_QPS_RESET 0 +#define ZXDH_QPS_INIT 1 +#define ZXDH_QPS_RTR 2 +#define ZXDH_QPS_RTS 3 +#define ZXDH_QPS_SQE 4 +#define ZXDH_QPS_SQD 5 +#define ZXDH_QPS_ERR 6 +#define ZXDH_QPS_RSV 7 + +#define ZXDH_MAX_USER_PRIORITY 8 +#define ZXDH_DSCP_NUM_VAL 64 +#define IEEE_8021QAZ_MAX_TCS 8 +#define ZXDH_MAX_STATS_COUNT_GEN1 12 +#define ZXDH_MAX_STATS_COUNT 128 +#define ZXDH_FIRST_NON_PF_STAT 4 + +#define ZXDH_MIN_MTU_IPV4 576 +#define ZXDH_MIN_MTU_IPV6 1280 +#define ZXDH_MTU_TO_MSS_IPV4 40 +#define ZXDH_MTU_TO_MSS_IPV6 60 +#define ZXDH_DEFAULT_MTU 1500 + +#define ZXDH_INDICATE_ID_HOST 2 + +#define Q2_FPSN_OFFSET 64 +#define TERM_DDP_LEN_TAGGED 14 +#define TERM_DDP_LEN_UNTAGGED 18 +#define TERM_RDMA_LEN 28 +#define RDMA_OPCODE_M 0x0f +#define RDMA_READ_REQ_OPCODE 1 +#define Q2_BAD_FRAME_OFFSET 72 
+#define CQE_MAJOR_DRV 0x8000 + +#define ZXDH_TERM_SENT 1 +#define ZXDH_TERM_RCVD 2 +#define ZXDH_TERM_DONE 4 +#define ZXDH_MAC_HLEN 14 + +#define ZXDH_CQP_WAIT_POLL_REGS 1 +#define ZXDH_CQP_WAIT_POLL_CQ 2 +#define ZXDH_CQP_WAIT_EVENT 3 + +#define ZXDH_AE_SOURCE_RSVD 0x0 +#define ZXDH_AE_SOURCE_RQ 0x1 +#define ZXDH_AE_SOURCE_RQ_0011 0x3 + +#define ZXDH_AE_SOURCE_CQ 0x2 +#define ZXDH_AE_SOURCE_CQ_0110 0x6 +#define ZXDH_AE_SOURCE_CQ_1010 0xa +#define ZXDH_AE_SOURCE_CQ_1110 0xe + +#define ZXDH_AE_SOURCE_SQ 0x5 +#define ZXDH_AE_SOURCE_SQ_0111 0x7 + +#define ZXDH_AE_SOURCE_IN_WR 0x9 +#define ZXDH_AE_SOURCE_IN_RR 0xb +#define ZXDH_AE_SOURCE_OUT_RR 0xd +#define ZXDH_AE_SOURCE_OUT_RR_1111 0xf + +#define ZXDH_TCP_STATE_NON_EXISTENT 0 +#define ZXDH_TCP_STATE_CLOSED 1 +#define ZXDH_TCP_STATE_LISTEN 2 +#define ZXDH_STATE_SYN_SEND 3 +#define ZXDH_TCP_STATE_SYN_RECEIVED 4 +#define ZXDH_TCP_STATE_ESTABLISHED 5 +#define ZXDH_TCP_STATE_CLOSE_WAIT 6 +#define ZXDH_TCP_STATE_FIN_WAIT_1 7 +#define ZXDH_TCP_STATE_CLOSING 8 +#define ZXDH_TCP_STATE_LAST_ACK 9 +#define ZXDH_TCP_STATE_FIN_WAIT_2 10 +#define ZXDH_TCP_STATE_TIME_WAIT 11 +#define ZXDH_TCP_STATE_RESERVED_1 12 +#define ZXDH_TCP_STATE_RESERVED_2 13 +#define ZXDH_TCP_STATE_RESERVED_3 14 +#define ZXDH_TCP_STATE_RESERVED_4 15 + +#define ZXDH_CQP_SW_SQSIZE_4 4 +#define ZXDH_CQP_SW_SQSIZE_2048 2048 + +#define ZXDH_CQ_TYPE_IO 1 +#define ZXDH_CQ_TYPE_ILQ 2 +#define ZXDH_CQ_TYPE_IEQ 3 +#define ZXDH_CQ_TYPE_CQP 4 + +#define ZXDH_DONE_COUNT 1000 +#define ZXDH_SLEEP_COUNT 10 + +#define ZXDH_UPDATE_SD_BUFF_SIZE 128 +#define ZXDH_FEATURE_BUF_SIZE (8 * ZXDH_MAX_FEATURES) + +#define ZXDH_MAX_QUANTA_PER_WR 16 +#define ZXDH_MAX_SQ_WQES_PER_PAGE 128 +#define ZXDH_MAX_SQ_DEPTH 32768 + +#define ZXDH_MAX_SQ_FRAG 31 +#define ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM 210 + +#define INLINE_DATASIZE_7BYTES 7 +#define INLINE_DATASIZE_24BYTES 24 + +#define INLINE_DATA_OFFSET_7BYTES 7 +#define WQE_OFFSET_7BYTES 7 +#define WQE_OFFSET_8BYTES 8 +#define WQE_OFFSET_24BYTES 24 + +#define ZXDH_CQPDB_INIT_VALUE 0x800 +#define ZXDH_CCQN_INIT_VALUE 0x3 + +#define ZXDH_QP_SW_MAX_WQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_SQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_RQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_SRQ_QUANTA 32768 +#define ZXDH_MAX_QP_WRS(max_quanta_per_wr) \ + ((ZXDH_QP_SW_MAX_WQ_QUANTA - ZXDH_SQ_RSVD) / (max_quanta_per_wr)) +#define ZXDH_MAX_SRQ_WRS 32768 + +#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0 +#define IRDMAQP_TERM_SEND_TERM_ONLY 1 +#define IRDMAQP_TERM_SEND_FIN_ONLY 2 +#define IRDMAQP_TERM_DONOT_SEND_TERM_OR_FIN 3 + +#define ZXDH_QP_TYPE_IWARP 3 +#define ZXDH_QP_TYPE_UDA 4 +#define ZXDH_QP_TYPE_CQP 0 +#define ZXDH_QP_TYPE_ROCE_RC 1 +#define ZXDH_QP_TYPE_ROCE_UD 2 + +#define ZXDH_QP_SERVICE_TYPE_RC 0 +#define ZXDH_QP_SERVICE_TYPE_UD 2 + +#define ZXDH_QP_UD_WS_IDX_START 8097 +#define ZXDH_QP_UD_TOS 0x62 +#define ZXDH_QP_UD_QUEUE_TC 3 + +#define ZXDH_QP_MODIFY_NVMEOF_FLR (1 << 21) +#define ZXDH_QP_MODIFY_NVMEOF_IOQ (1 << 20) +#define ZXDH_QP_MODIFY_NVMEOF_TGT (1 << 19) +#define ZXDH_QP_MODIFY_NVMEOF_QID (1 << 18) +#define ZXDH_QP_NVMEOF_IOQ_MASK_S 17 +#define ZXDH_QP_NVMEOF_IOQ_MASK BIT_ULL(17) +#define ZXDH_QP_NVMEOF_TGT_MASK_S 16 +#define ZXDH_QP_NVMEOF_TGT_MASK BIT_ULL(16) +#define ZXDH_QP_NVMEOF_QID_MASK_S 0 +#define ZXDH_QP_NVMEOF_QID_MASK GENMASK_ULL(15, 0) + +#define ZXDH_RC_WS_OFFSET 1 + +#define ZXDH_HW_PAGE_SIZE 4096 +#define ZXDH_HW_PAGE_SHIFT 12 +#define ZXDH_CQE_QTYPE_RQ 0 +#define ZXDH_CQE_QTYPE_SQ 1 + +#define ZXDH_QP_SW_MIN_WQSIZE 64u /* in WRs*/ +#define ZXDH_QP_WQE_MIN_SIZE 
32 +#define ZXDH_QP_SQ_WQE_MIN_SIZE 32 +#define ZXDH_QP_RQ_WQE_MIN_SIZE 16 +#define ZXDH_QP_WQE_MAX_SIZE 256 +#define ZXDH_QP_WQE_MIN_QUANTA 1 +#define ZXDH_MAX_RQ_WQE_SHIFT_GEN1 2 +#define ZXDH_MAX_RQ_WQE_SHIFT_GEN2 3 +#define ZXDH_SRQ_FRAG_BYTESIZE 16 +#define ZXDH_QP_FRAG_BYTESIZE 16 +#define ZXDH_SQ_WQE_BYTESIZE 32 +#define ZXDH_SRQ_WQE_MIN_SIZE 16 + +#define ZXDH_SQ_RSVD 258 +#define ZXDH_RQ_RSVD 1 +#define ZXDH_SRQ_RSVD 1 + +#define ZXDH_FEATURE_RTS_AE 1ULL +#define ZXDH_FEATURE_CQ_RESIZE 2ULL +#define ZXDH_FEATURE_64_BYTE_CQE 128ULL +#define IRDMAQP_OP_RDMA_WRITE 0x00 +#define IRDMAQP_OP_RDMA_READ 0x01 +#define IRDMAQP_OP_RDMA_SEND 0x03 +#define IRDMAQP_OP_RDMA_SEND_INV 0x04 +#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05 +#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06 +#define IRDMAQP_OP_BIND_MW 0x08 +#define IRDMAQP_OP_FAST_REGISTER 0x09 +#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a +#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b +#define IRDMAQP_OP_NOP 0x0c +#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d +#define IRDMAQP_OP_GEN_RTS_AE 0x30 + +#define IRDMAQPC_HW_SQ_TAIL_INIT 0x8000 + +#define ZXDH_SOC_TXRXCQP_IND_ACC_DPU_INTERNAL 0x0 +#define ZXDH_SOC_TXRXCQP_IND_ACC_RSV 0x1 +#define ZXDH_SOC_TXRXCQP_IND_ACC_HOST_NOT_THROUGH_SMMU 0x2 +#define ZXDH_SOC_TXRXCQP_IND_ACC_HOST_THROUGH_SMMU 0x3 + +#define ZXDH_SOC_TXRXCQP_AXID_DEST_L2D 0x0 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_DPU_DDR 0x1 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP5 0x2 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP6 0x3 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP7 0x4 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP8 0x5 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP9 0x6 + +#define ZXDH_SOC_TXRXCQP_CACHE_ID_0 0x0 +#define ZXDH_SOC_TXRXCQP_CACHE_ID_1 0x1 + +#define ZXDH_SOC_RDMAIO_IND_ACC_DPU_INTERNAL 0x0 +#define ZXDH_SOC_RDMAIO_IND_ACC_RSV 0x1 +#define ZXDH_SOC_RDMAIO_IND_ACC_HOST_NOT_THROUGH_SMMU 0x2 +#define ZXDH_SOC_RDMAIO_IND_ACC_HOST_THROUGH_SMMU 0x3 + +enum zxdh_cqp_op_type { + ZXDH_OP_CEQ_DESTROY = 1, + ZXDH_OP_AEQ_DESTROY = 2, + ZXDH_OP_DELETE_ARP_CACHE_ENTRY = 3, + ZXDH_OP_MANAGE_APBVT_ENTRY = 4, + ZXDH_OP_CEQ_CREATE = 5, + ZXDH_OP_AEQ_CREATE = 6, + ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY = 7, + ZXDH_OP_QP_MODIFY = 8, + ZXDH_OP_QP_UPLOAD_CONTEXT = 9, + ZXDH_OP_CQ_CREATE = 10, + ZXDH_OP_CQ_DESTROY = 11, + ZXDH_OP_QP_CREATE = 12, + ZXDH_OP_QP_DESTROY = 13, + ZXDH_OP_ALLOC_STAG = 14, + ZXDH_OP_MR_REG_NON_SHARED = 15, + ZXDH_OP_DEALLOC_STAG = 16, + ZXDH_OP_MW_ALLOC = 17, + ZXDH_OP_QP_FLUSH_WQES = 18, + ZXDH_OP_ADD_ARP_CACHE_ENTRY = 19, + ZXDH_OP_MANAGE_PUSH_PAGE = 20, + ZXDH_OP_UPDATE_PE_SDS = 21, + ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE = 22, + ZXDH_OP_SUSPEND = 23, + ZXDH_OP_RESUME = 24, + ZXDH_OP_MANAGE_VF_PBLE_BP = 25, + ZXDH_OP_QUERY_FPM_VAL = 26, + ZXDH_OP_COMMIT_FPM_VAL = 27, + ZXDH_OP_REQ_CMDS = 28, + ZXDH_OP_CMPL_CMDS = 29, + ZXDH_OP_AH_CREATE = 30, + ZXDH_OP_AH_MODIFY = 31, + ZXDH_OP_AH_DESTROY = 32, + ZXDH_OP_MC_CREATE = 33, + ZXDH_OP_MC_DESTROY = 34, + ZXDH_OP_MC_MODIFY = 35, + ZXDH_OP_STATS_ALLOCATE = 36, + ZXDH_OP_STATS_FREE = 37, + ZXDH_OP_STATS_GATHER = 38, + ZXDH_OP_WS_ADD_NODE = 39, + ZXDH_OP_WS_MODIFY_NODE = 40, + ZXDH_OP_WS_DELETE_NODE = 41, + ZXDH_OP_WS_FAILOVER_START = 42, + ZXDH_OP_WS_FAILOVER_COMPLETE = 43, + ZXDH_OP_SET_UP_MAP = 44, + ZXDH_OP_GEN_AE = 45, + ZXDH_OP_QUERY_RDMA_FEATURES = 46, + ZXDH_OP_ADD_LOCAL_MAC_ENTRY = 48, + ZXDH_OP_DELETE_LOCAL_MAC_ENTRY = 49, + ZXDH_OP_CQ_MODIFY = 50, + ZXDH_OP_CONFIG_PTE_TAB = 51, + ZXDH_OP_QUERY_PTE_TAB = 52, + ZXDH_OP_CONFIG_PBLE_TAB = 53, + ZXDH_OP_CONFIG_MAILBOX = 54, + ZXDH_OP_DMA_WRITE = 55, + 
ZXDH_OP_DMA_WRITE32 = 56, + ZXDH_OP_DMA_WRITE64 = 57, + ZXDH_OP_DMA_READ = 58, + ZXDH_OP_DMA_READ_USE_CQE = 59, + ZXDH_OP_QUERY_QPC = 60, + ZXDH_OP_QUERY_CQC = 61, + ZXDH_OP_QUERY_SRQC = 62, + ZXDH_OP_QUERY_CEQC = 63, + ZXDH_OP_QUERY_AEQC = 64, + ZXDH_OP_SRQ_CREATE = 65, + ZXDH_OP_SRQ_DESTROY = 66, + ZXDH_OP_SRQ_MODIFY = 67, + ZXDH_OP_QUERY_MKEY = 68, + ZXDH_OP_CQ_MODIFY_MODERATION = 69, + ZXDH_OP_QP_MODIFY_UDP_SPORT = 70, + /* Must be last entry*/ + ZXDH_MAX_CQP_OPS = 71, +}; + +/* CQP SQ WQES */ +#define ZXDH_CQP_OP_NOP 0 +#define ZXDH_CQP_OP_CREATE_QP 0x01 +#define ZXDH_CQP_OP_MODIFY_QP 0x02 +#define ZXDH_CQP_OP_DESTROY_QP 0x03 +#define ZXDH_CQP_OP_QUERY_QP 0x04 +#define ZXDH_CQP_OP_CREATE_CQ 0x05 +#define ZXDH_CQP_OP_MODIFY_CQ 0x06 +#define ZXDH_CQP_OP_DESTROY_CQ 0x07 +#define ZXDH_CQP_OP_QUERY_CQ 0x08 +#define ZXDH_CQP_OP_CREATE_CEQ 0x09 +#define ZXDH_CQP_OP_DESTROY_CEQ 0x0b +#define ZXDH_CQP_OP_QUERY_CEQ 0x0c +#define ZXDH_CQP_OP_CREATE_AEQ 0x0d +#define ZXDH_CQP_OP_DESTROY_AEQ 0x0f +#define ZXDH_CQP_OP_QUERY_AEQ 0x10 +#define ZXDH_CQP_OP_ALLOC_MKEY 0x12 +#define ZXDH_CQP_OP_DEALLOC_MKEY 0x13 +#define ZXDH_CQP_OP_REG_MR 0x14 +#define ZXDH_CQP_OP_QUERY_MKEY 0x16 +#define ZXDH_CQP_OP_CREATE_AH 0x17 +#define ZXDH_CQP_OP_MODIFY_AH 0x18 +#define ZXDH_CQP_OP_DESTROY_AH 0x19 +#define ZXDH_CQP_OP_QUERY_BASE_REG 0x1b +#define ZXDH_CQP_OP_COMMIT_BASE_REG 0x1c +#define ZXDH_CQP_OP_FLUSH_WQES 0x1d +#define ZXDH_CQP_OP_SEND_MAILBOX 0x1e +#define ZXDH_CQP_OP_UPLOAD_QPC 0x1f +#define ZXDH_CQP_OP_CREATE_MCAST_GRP 0x20 +#define ZXDH_CQP_OP_MODIFY_MCAST_GRP 0x21 +#define ZXDH_CQP_OP_DESTROY_MCAST_GRP 0x22 +#define ZXDH_CQP_OP_CREATE_SRQ 0x24 +#define ZXDH_CQP_OP_MODIFY_SRQ 0x25 +#define ZXDH_CQP_OP_DESTROY_SRQ 0x26 +#define ZXDH_CQP_OP_QUERY_SRQ 0x27 +#define ZXDH_CQP_OP_WQE_DMA_WRITE 0x28 +#define ZXDH_CQP_OP_WQE_DMA_WRITE_32 0x29 +#define ZXDH_CQP_OP_WQE_DMA_WRITE_64 0x2a +#define ZXDH_CQP_OP_WQE_DMA_READ 0x2b +#define ZXDH_CQP_OP_WQE_DMA_READ_USECQE 0x2c +#define ZXDH_CQP_OP_SEND_MAILBOX 0x1e + +/*DELETED CQP SQ WQES*/ +#define ZXDH_CQP_OP_MANAGE_LOC_MAC_TABLE 0 +#define ZXDH_CQP_OP_MANAGE_ARP 0 +#define ZXDH_CQP_OP_MANAGE_VF_PBLE_BP 0 +#define ZXDH_CQP_OP_MANAGE_PUSH_PAGES 0 +#define ZXDH_CQP_OP_QUERY_RDMA_FEATURES 0 +#define ZXDH_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0 +#define ZXDH_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0 +#define ZXDH_CQP_OP_UPDATE_PE_SDS 0 +#define ZXDH_CQP_OP_GEN_AE 0 +#define ZXDH_CQP_OP_MANAGE_APBVT 0 +#define ZXDH_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0 +#define ZXDH_CQP_OP_SUSPEND_QP 0 +#define ZXDH_CQP_OP_RESUME_QP 0 +#define ZXDH_CQP_OP_SHMC_PAGES_ALLOCATED 0 +#define ZXDH_CQP_OP_WORK_SCHED_NODE 0 +#define ZXDH_CQP_OP_MANAGE_STATS 0 +#define ZXDH_CQP_OP_GATHER_STATS 0 +#define ZXDH_CQP_OP_UP_MAP 0 + +/* Async Events codes */ +#define ZXDH_AE_AMP_UNALLOCATED_STAG 0x0102 +#define ZXDH_AE_AMP_INVALID_STAG 0x0103 +#define ZXDH_AE_AMP_BAD_QP 0x0104 +#define ZXDH_AE_AMP_BAD_PD 0x0105 +#define ZXDH_AE_AMP_BAD_STAG_KEY 0x0106 +#define ZXDH_AE_AMP_BAD_STAG_INDEX 0x0107 +#define ZXDH_AE_AMP_BOUNDS_VIOLATION 0x0108 +#define ZXDH_AE_AMP_RIGHTS_VIOLATION 0x0109 +#define ZXDH_AE_AMP_TO_WRAP 0x010a +#define ZXDH_AE_AMP_FASTREG_VALID_STAG 0x010c +#define ZXDH_AE_AMP_FASTREG_MW_STAG 0x010d +#define ZXDH_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e +#define ZXDH_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 +#define ZXDH_AE_AMP_INVALIDATE_SHARED 0x0111 +#define ZXDH_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 +#define ZXDH_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 +#define ZXDH_AE_AMP_MWBIND_VALID_STAG 
0x0114 +#define ZXDH_AE_AMP_MWBIND_OF_MR_STAG 0x0115 +#define ZXDH_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 +#define ZXDH_AE_AMP_MWBIND_TO_MW_STAG 0x0117 +#define ZXDH_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 +#define ZXDH_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 +#define ZXDH_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a +#define ZXDH_AE_AMP_MWBIND_BIND_DISABLED 0x011b +#define ZXDH_AE_PRIV_OPERATION_DENIED 0x011c +#define ZXDH_AE_AMP_INVALIDATE_TYPE1_MW 0x011d +#define ZXDH_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e +#define ZXDH_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f +#define ZXDH_AE_AMP_MWBIND_WRONG_TYPE 0x0120 +#define ZXDH_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121 +#define ZXDH_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132 +#define ZXDH_AE_UDA_XMIT_BAD_PD 0x0133 +#define ZXDH_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134 +#define ZXDH_AE_UDA_L4LEN_INVALID 0x0135 +#define ZXDH_AE_BAD_CLOSE 0x0201 +#define ZXDH_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202 +#define ZXDH_AE_CQ_OPERATION_ERROR 0x0203 +#define ZXDH_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205 +#define ZXDH_AE_STAG_ZERO_INVALID 0x0206 +#define ZXDH_AE_IB_RREQ_AND_Q1_FULL 0x0207 +#define ZXDH_AE_IB_INVALID_REQUEST 0x0208 +#define ZXDH_AE_WQE_UNEXPECTED_OPCODE 0x020a +#define ZXDH_AE_WQE_INVALID_PARAMETER 0x020b +#define ZXDH_AE_WQE_INVALID_FRAG_DATA 0x020c +#define ZXDH_AE_IB_REMOTE_ACCESS_ERROR 0x020d +#define ZXDH_AE_IB_REMOTE_OP_ERROR 0x020e +#define ZXDH_AE_WQE_LSMM_TOO_LONG 0x0220 +#define ZXDH_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 +#define ZXDH_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 +#define ZXDH_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 +#define ZXDH_AE_DDP_UBE_INVALID_MO 0x0305 +#define ZXDH_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306 +#define ZXDH_AE_DDP_UBE_INVALID_QN 0x0307 +#define ZXDH_AE_DDP_NO_L_BIT 0x0308 +#define ZXDH_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311 +#define ZXDH_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312 +#define ZXDH_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313 +#define ZXDH_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314 +#define ZXDH_AE_ROCE_RSP_LENGTH_ERROR 0x0316 +#define ZXDH_AE_ROCE_EMPTY_MCG 0x0380 +#define ZXDH_AE_ROCE_BAD_MC_IP_ADDR 0x0381 +#define ZXDH_AE_ROCE_BAD_MC_QPID 0x0382 +#define ZXDH_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383 +#define ZXDH_AE_INVALID_ARP_ENTRY 0x0401 +#define ZXDH_AE_INVALID_TCP_OPTION_RCVD 0x0402 +#define ZXDH_AE_STALE_ARP_ENTRY 0x0403 +#define ZXDH_AE_INVALID_AH_ENTRY 0x0406 +#define ZXDH_AE_LLP_CLOSE_COMPLETE 0x0501 +#define ZXDH_AE_LLP_CONNECTION_RESET 0x0502 +#define ZXDH_AE_LLP_FIN_RECEIVED 0x0503 +#define ZXDH_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504 +#define ZXDH_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505 +#define ZXDH_AE_LLP_SEGMENT_TOO_SMALL 0x0507 +#define ZXDH_AE_LLP_SYN_RECEIVED 0x0508 +#define ZXDH_AE_LLP_TERMINATE_RECEIVED 0x0509 +#define ZXDH_AE_LLP_TOO_MANY_RETRIES 0x050a +#define ZXDH_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b +#define ZXDH_AE_LLP_DOUBT_REACHABILITY 0x050c +#define ZXDH_AE_LLP_CONNECTION_ESTABLISHED 0x050e +#define ZXDH_AE_RESOURCE_EXHAUSTION 0x0520 +#define ZXDH_AE_RESET_SENT 0x0601 +#define ZXDH_AE_TERMINATE_SENT 0x0602 +#define ZXDH_AE_RESET_NOT_SENT 0x0603 +#define ZXDH_AE_LCE_QP_CATASTROPHIC 0x0700 +#define ZXDH_AE_LCE_FUNCTION_CATASTROPHIC 0x0701 +#define ZXDH_AE_LCE_CQ_CATASTROPHIC 0x0702 +#define ZXDH_AE_QP_SUSPEND_COMPLETE 0x0900 + +/* Async Events codes request*/ +#define ZXDH_AE_REQ_AXI_RSP_ERR 0x01 + +#define ZXDH_AE_REQ_WQE_FLUSH 0x101 +#define ZXDH_AE_REQ_PSN_OPCODE_ERR 0x110 + +#define ZXDH_AE_REQ_WR_ORD_ERR 0x220 +#define ZXDH_AE_REQ_WR_INV_OPCODE 0x221 +#define 
ZXDH_AE_REQ_WR_CQP_QP_STATE 0x222 +#define ZXDH_AE_REQ_WR_LEN_ERR 0x223 +#define ZXDH_AE_REQ_WR_INLINE_LEN_ERR 0x224 +#define ZXDH_AE_REQ_WR_AH_VALID_ERR 0x225 +#define ZXDH_AE_REQ_WR_UD_PD_IDX_ERR 0x226 +#define IRMDA_AE_REQ_WR_QP_STATE_ERR 0x227 +#define ZXDH_AE_REQ_WR_SERVER_TYPE_MISMATCH_OPCODE 0x228 +#define ZXDH_AE_REQ_WR_UD_PAYLOAD_OUT_OF_PMTU 0x229 +#define ZXDH_AE_REQ_WR_PRE_READ_MOD_WQE_LEN_ZERO 0x22a +#define ZXDH_AE_REQ_WR_ADDL_SGE_NOT_READ_BACK 0x22b +#define ZXDH_AE_REQ_WR_IMM_OPCODE_MISMATCH_FLAG 0x22c +#define ZXDH_AE_REQ_HAD_SEND_MSG_OUT_OF_RANGE 0x22d +#define ZXDH_AE_REQ_WR_WQE_ZERO_LEN_SGE 0x99f + +#define ZXDH_AE_REQ_NVME_IDX_ERR 0x330 +#define ZXDH_AE_REQ_NVME_NOF_QID_ERR 0x331 +#define ZXDH_AE_REQ_NVME_PD_IDX_ERR 0x332 +#define ZXDH_AE_REQ_NVME_LEN_ERR 0x333 +#define ZXDH_AE_REQ_NVME_KEY_ERR 0x334 +#define ZXDH_AE_REQ_NVME_ACC_ERR 0x335 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_IDX_ERR 0x336 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_NOF_QID_ERR 0x337 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_PD_IDX_ERR 0x338 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_LEN_ERR 0x339 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_KEY_ERR 0x33a +#define ZXDH_AE_REQ_NVME_TX_ROUTE_ACC_ERR 0x33b + +#define ZXDH_AE_REQ_MW_MR_MISMATCH_OPCODE 0x450 +#define ZXDH_AE_REQ_MW_INV_LKEY_ERR 0x451 +#define ZXDH_AE_REQ_MW_INV_TYPE_ERR 0x452 +#define ZXDH_AE_REQ_MW_INV_STATE_INV 0x453 +#define ZXDH_AE_REQ_MW_INV_PD_IDX_ERR 0x454 +#define ZXDH_AE_REQ_MW_INV_SHARE_MEM_ERR 0x455 +#define ZXDH_AE_REQ_MW_INV_PARENT_STATE_INV 0x456 +#define ZXDH_AE_REQ_MW_INV_MW_NUM_ZERO 0x458 +#define ZXDH_AE_REQ_MW_INV_MW_STAG_31_8_ZERO 0x459 +#define ZXDH_AE_REQ_MW_INV_QP_NUM_ERR 0x45A +#define ZXDH_AE_REQ_MR_INV_INV_LKEY_ERR 0x45B +#define ZXDH_AE_REQ_MR_INV_MW_NUM_ZERO 0x45C +#define ZXDH_AE_REQ_MR_INV_STATE_ERR 0x45D +#define ZXDH_AE_REQ_MR_INV_EN_ERR 0x45E +#define ZXDH_AE_REQ_MR_INV_SHARE_MEM_ERR 0x45F +#define ZXDH_AE_REQ_MR_INV_PD_IDX_ERR 0x460 +#define ZXDH_AE_REQ_MR_INV_MW_STAG_31_8_ZERO 0x461 +#define ZXDH_AE_REQ_MWBIND_WRITE_ACC_ERR 0x462 +#define ZXDH_AE_REQ_MWBIND_VA_BIND_ERR 0x463 +#define ZXDH_AE_REQ_MWBIND_PD_IDX_ERR 0x464 +#define ZXDH_AE_REQ_MWBIND_MRTE_STATE_TYPE_ERR 0x465 +#define ZXDH_AE_REQ_MWBIND_VA_LEN_ERR 0x466 +#define ZXDH_AE_REQ_MWBIND_TYPE_VA_ERR 0x467 +#define ZXDH_AE_REQ_MWBIND_TYPE_IDX_ERR 0x468 +#define ZXDH_AE_REQ_MWBIND_MRTE_MR_ERR 0x469 +#define ZXDH_AE_REQ_MWBIND_TYPE2_LEN_ERR 0x46A +#define ZXDH_AE_REQ_MWBIND_MRTE_STATE_ERR 0x46B +#define ZXDH_AE_REQ_MWBIND_QPC_EN_ERR 0x46C +#define ZXDH_AE_REQ_MWBIND_PARENT_MR_ERR 0x46D +#define ZXDH_AE_REQ_MWBIND_ACC_BIT4_ERR 0x46E +#define ZXDH_AE_REQ_MWBIND_MW_STAG_ERR 0x470 +#define ZXDH_AE_REQ_MWBIND_IDX_OUT_RANGE 0x471 +#define ZXDH_AE_REQ_MR_FASTREG_ACC_ERR 0x472 +#define ZXDH_AE_REQ_MR_FASTREG_PD_IDX_ERR 0x473 +#define ZXDH_AE_REQ_MR_FASTREG_MRTE_STATE_ERR 0x474 +#define ZXDH_AE_REQ_MR_FASTREG_MR_IS_NOT_1 0x475 +#define ZXDH_AE_REQ_MR_FASTREG_QPC_EN_ERR 0x476 +#define ZXDH_AE_REQ_MR_FASTREG_STAG_LEN_ERR 0x477 +#define ZXDH_AE_REQ_MR_FASTREG_SHARE_MR_ERR 0x478 +#define ZXDH_AE_REQ_MR_FASTREG_MW_STAG_ERR 0x479 +#define ZXDH_AE_REQ_MR_FASTREG_IDX_OUT_RANGE 0x47A +#define ZXDH_AE_REQ_MR_FASTREG_MR_EN_ERR 0x47B +#define ZXDH_AE_REQ_MW_BIND_PD_IDX_ERR 0x47C + +#define ZXDH_AE_REQ_MRTE_STATE_FREE 0x590 +#define ZXDH_AE_REQ_MRTE_STATE_INVALID 0x591 +#define ZXDH_AE_REQ_MRTE_MW_QP_ID_ERR 0x592 +#define ZXDH_AE_REQ_MRTE_PD_IDX_ERR 0x593 +#define ZXDH_AE_REQ_MRTE_KEY_ERR 0x594 +#define ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_ERR 0x595 +#define ZXDH_AE_REQ_MRTE_VIRT_ADDR_AND_LEN_ERR 0x596 +#define 
ZXDH_AE_REQ_MRTE_ACC_ERR 0x597 +#define ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_RSV_ERR 0x598 + +#define ZXDH_AE_REQ_LOC_LEN_READ_REP_ERR 0x6c0 + +#define ZXDH_AE_REQ_REM_INV_OPCODE 0x7d0 +#define ZXDH_AE_REQ_REM_INV_RKEY 0x7d1 +#define ZXDH_AE_REQ_REM_OPERATIONAL_ERR 0x7d2 +#define ZXDH_AE_REQ_RETURN_NAK 0x7d3 + +#define ZXDH_AE_REQ_RETRY_EXC_PSN_OUT_RANGE 0x8f1 +#define ZXDH_AE_REQ_RETRY_EXC_ACK_PSN_OUT_RANGE 0x8f2 +#define ZXDH_AE_REQ_RETRY_EXC_LOC_ACK_OUT_RANGE 0x8f3 +#define ZXDH_AE_REQ_RETRY_EXC_RNR_NAK_OUT_RANGE 0x8f4 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_GET_ENTRY_ERR 0x8f5 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_MSN_FALLBACK 0x8f6 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_MSN_LITTLE 0x8f7 +#define ZXDH_AE_REQ_PSN_LESS_THAN_START_PSN 0x8fe +#define ZXDH_AE_REQ_LOG_SQ_SIZE_MISMATCH_WQE_POINTER 0x8e0 +#define ZXDH_AE_REQ_OFED_INVALID_SQ_OPCODE 0x8e1 +#define ZXDH_AE_REQ_NVME_INVALID_SQ_OPCODE 0x8e2 + +#define ZXDH_AE_REQ_WQE_MRTE_STATE_FREE 0x990 +#define ZXDH_AE_REQ_WQE_MRTE_STATE_INV 0x991 +#define ZXDH_AE_REQ_WQE_MRTE_MW_QP_ID_ERR 0x992 +#define ZXDH_AE_REQ_WQE_MRTE_PD_IDX_ERR 0x993 +#define ZXDH_AE_REQ_WQE_MRTE_KEY_ERR 0x994 +#define ZXDH_AE_REQ_WQE_MRTE_STAG_IDX_ERR 0x995 +#define ZXDH_AE_REQ_WQE_MRTE_VIRT_ADDR_AND_LEN_CHK_ERR 0x996 +#define ZXDH_AE_REQ_WQE_MRTE_ACC_ERR 0x997 +#define ZXDH_AE_REQ_WQE_MRTE_RSV_LKEY_EN_ERR 0x998 + +/* Async Events codes respond */ +#define ZXDH_AE_RSP_WQE_FLUSH 0x12 + +#define ZXDH_AE_RSP_PRIFIELD_CHK_INV_OPCODE 0x50 +#define ZXDH_AE_RSP_PRIFIELD_CHK_OUT_OF_ORDER 0x51 +#define ZXDH_AE_RSP_PRIFIELD_CHK_LEN_ERR 0x52 +//qp context,qp err +#define ZXDH_AE_RSP_SRQ_CHK_SRQ_STA_ERR 0x53 +#define ZXDH_AE_RSP_WQE_CHK_FORMAT_ERR 0x54 +#define ZXDH_AE_RSP_WQE_CHK_LEN_ERR 0x55 + +//srq context +#define ZXDH_AE_RSP_SRQ_WATER_SIG 0x80 + +#define ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW 0x76 +#define ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW_QP 0x78 +#define ZXDH_AE_RSP_PKT_TYPE_CQ_STATE 0x7A +#define ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP 0x7B +#define ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP_QP 0x7C +#define ZXDH_AE_RSP_PKT_TYPE_NOF_IOQ_ERR 0x70 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_PD_IDX_ERR 0x71 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_LEN_ERR 0x72 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_RKEY_ERR 0x73 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_ACC_ERR 0x74 +//qp context, srq err +#define ZXDH_AE_RSP_SRQ_AXI_RSP_SIG 0xB1 +#define ZXDH_AE_RSP_PKT_TYPE_IRD_OVERFLOW_ERR 0x77 + +#define ZXDH_AE_RSP_PKT_TYPE_MR_DISTRIBUTE_ERR 0x90 +#define ZXDH_AE_RSP_PKT_TYPE_MR_INV_ERR 0x91 +#define ZXDH_AE_RSP_PKT_TYPE_MR_QP_CHK_ERR 0x92 +#define ZXDH_AE_RSP_PKT_TYPE_MR_PD_CHK_ERR 0x93 +#define ZXDH_AE_RSP_PKT_TYPE_MR_KEY_CHK_ERR 0x94 +#define ZXDH_AE_RSP_PKT_TYPE_MR_STAG_IDX_ERR 0x95 +#define ZXDH_AE_RSP_PKT_TYPE_MR_BOUNDARY_ERR 0x96 +#define ZXDH_AE_RSP_PKT_TYPE_MR_ACC_ERR 0x97 +#define ZXDH_AE_RSP_PKT_TYPE_MR_STAG0_ERR 0x98 +#define ZXDH_AE_RSP_PKT_TYPE_MW_STATE_ERR 0x99 +#define ZXDH_AE_RSP_PKT_TYPE_MW_PD_ERR 0x9A +#define ZXDH_AE_RSP_PKT_TYPE_MW_KEY_ERR 0x9B +#define ZXDH_AE_RSP_PKT_TYPE_MW_TYPE2B_QPN_ERR 0x9C +#define ZXDH_AE_RSP_PKT_TYPE_MW_KEY_IDX_ERR 0x9D +#define ZXDH_AE_RSP_PKT_TYPE_MW_SHARE_MR 0x9E +#define ZXDH_AE_RSP_PKT_TYPE_MW_TYPE_ERR 0x9F +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_PD_ERR 0xA0 +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_KEY_ERR 0xA1 +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_ACC_ERR 0xA2 +#define ZXDH_AE_RSP_CHK_ERR_SHARE_MR 0xA4 +#define ZXDH_AE_RSP_MW_NUM_ERR 0xA5 +#define ZXDH_AE_RSP_INV_EN_ERR 0xA6 +#define ZXDH_AE_RSP_QP_AXI_RSP_ERR 0xB0 + +#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits)) 
+#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits)) +#define LS_32_1(val, bits) ((u32)((val) << (bits))) +#define RS_32_1(val, bits) ((u32)((val) >> (bits))) + +#define FLD_LS_64(dev, val, field) \ + (((u64)(val) << (dev)->hw_shifts[field##_S]) & \ + (dev)->hw_masks[field##_M]) +#define FLD_RS_64(dev, val, field) \ + ((u64)((val) & (dev)->hw_masks[field##_M]) >> \ + (dev)->hw_shifts[field##_S]) +#define FLD_LS_32(dev, val, field) \ + (((val) << (dev)->hw_shifts[field##_S]) & (dev)->hw_masks[field##_M]) +#define FLD_RS_32(dev, val, field) \ + ((u64)((val) & (dev)->hw_masks[field##_M]) >> \ + (dev)->hw_shifts[field##_S]) + +#define ZXDH_MAX_STATS_16 0xffffULL +#define ZXDH_MAX_STATS_24 0xffffffULL +#define ZXDH_MAX_STATS_32 0xffffffffULL +#define ZXDH_MAX_STATS_48 0xffffffffffffULL +#define ZXDH_MAX_STATS_56 0xffffffffffffffULL +#define ZXDH_MAX_STATS_64 0xffffffffffffffffULL + +#define ZXDH_VCHNL_RESP_DEFAULT_SIZE (sizeof(struct zxdh_virtchnl_resp_buf)) + +#define ZXDH_MAX_CQ_READ_THRESH 0x3FFFF +#define ZXDH_CQPSQ_QHASH_VLANID_S 32 +#define ZXDH_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32) +#define ZXDH_CQPSQ_QHASH_QPN_S 32 +#define ZXDH_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32) +#define ZXDH_CQPSQ_QHASH_QS_HANDLE_S 0 +#define ZXDH_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_QHASH_SRC_PORT_S 16 +#define ZXDH_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_QHASH_DEST_PORT_S 0 +#define ZXDH_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_QHASH_ADDR0_S 32 +#define ZXDH_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_QHASH_ADDR1_S 0 +#define ZXDH_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QHASH_ADDR2_S 32 +#define ZXDH_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_QHASH_ADDR3_S 0 +#define ZXDH_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QHASH_WQEVALID_S 63 +#define ZXDH_CQPSQ_QHASH_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_QHASH_OPCODE_S 32 +#define ZXDH_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QHASH_MANAGE_S 61 +#define ZXDH_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61) +#define ZXDH_CQPSQ_QHASH_IPV4VALID_S 60 +#define ZXDH_CQPSQ_QHASH_IPV4VALID BIT_ULL(60) +#define ZXDH_CQPSQ_QHASH_VLANVALID_S 59 +#define ZXDH_CQPSQ_QHASH_VLANVALID BIT_ULL(59) +#define ZXDH_CQPSQ_QHASH_ENTRYTYPE_S 42 +#define ZXDH_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42) +#define ZXDH_CQPSQ_STATS_WQEVALID_S 63 +#define ZXDH_CQPSQ_STATS_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_STATS_ALLOC_INST_S 62 +#define ZXDH_CQPSQ_STATS_ALLOC_INST BIT_ULL(62) +#define ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX_S 60 +#define ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60) +#define ZXDH_CQPSQ_STATS_USE_INST_S 61 +#define ZXDH_CQPSQ_STATS_USE_INST BIT_ULL(61) +#define ZXDH_CQPSQ_STATS_OP_S 32 +#define ZXDH_CQPSQ_STATS_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_STATS_INST_INDEX_S 0 +#define ZXDH_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0) +#define ZXDH_CQPSQ_STATS_HMC_FCN_INDEX_S 0 +#define ZXDH_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_WS_WQEVALID_S 63 +#define ZXDH_CQPSQ_WS_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_WS_NODEOP_S 52 +#define ZXDH_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52) + +#define ZXDH_CQPSQ_WS_ENABLENODE_S 62 +#define ZXDH_CQPSQ_WS_ENABLENODE BIT_ULL(62) +#define ZXDH_CQPSQ_WS_NODETYPE_S 61 +#define ZXDH_CQPSQ_WS_NODETYPE BIT_ULL(61) +#define ZXDH_CQPSQ_WS_PRIOTYPE_S 59 +#define ZXDH_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59) +#define ZXDH_CQPSQ_WS_TC_S 56 +#define ZXDH_CQPSQ_WS_TC GENMASK_ULL(58, 56) +#define 
ZXDH_CQPSQ_WS_VMVFTYPE_S 54 +#define ZXDH_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54) +#define ZXDH_CQPSQ_WS_VMVFNUM_S 42 +#define ZXDH_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42) +#define ZXDH_CQPSQ_WS_OP_S 32 +#define ZXDH_CQPSQ_WS_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_WS_PARENTID_S 16 +#define ZXDH_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16) +#define ZXDH_CQPSQ_WS_NODEID_S 0 +#define ZXDH_CQPSQ_WS_NODEID GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_WS_VSI_S 48 +#define ZXDH_CQPSQ_WS_VSI GENMASK_ULL(57, 48) +#define ZXDH_CQPSQ_WS_WEIGHT_S 32 +#define ZXDH_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32) + +#define ZXDH_CQPSQ_UP_WQEVALID_S 63 +#define ZXDH_CQPSQ_UP_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_UP_USEVLAN_S 62 +#define ZXDH_CQPSQ_UP_USEVLAN BIT_ULL(62) +#define ZXDH_CQPSQ_UP_USEOVERRIDE_S 61 +#define ZXDH_CQPSQ_UP_USEOVERRIDE BIT_ULL(61) +#define ZXDH_CQPSQ_UP_OP_S 32 +#define ZXDH_CQPSQ_UP_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_UP_HMCFCNIDX_S 0 +#define ZXDH_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_UP_CNPOVERRIDE_S 32 +#define ZXDH_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID_S 63 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN_S 0 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_OP_S 32 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED_S 32 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION_S 16 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION_S 0 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0) +#define ZXDH_CQPHC_SQSIZE_S 8 +#define ZXDH_CQPHC_SQSIZE GENMASK_ULL(11, 8) +#define ZXDH_CQPHC_DISABLE_PFPDUS_S 1 +#define ZXDH_CQPHC_DISABLE_PFPDUS BIT_ULL(1) +#define ZXDH_CQPHC_ROCEV2_RTO_POLICY_S 2 +#define ZXDH_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2) +#define ZXDH_CQPHC_PROTOCOL_USED_S 3 +#define ZXDH_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3) +#define ZXDH_CQPHC_MIN_RATE_S 48 +#define ZXDH_CQPHC_MIN_RATE GENMASK_ULL(51, 48) +#define ZXDH_CQPHC_MIN_DEC_FACTOR_S 56 +#define ZXDH_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56) +#define ZXDH_CQPHC_DCQCN_T_S 0 +#define ZXDH_CQPHC_DCQCN_T GENMASK_ULL(15, 0) +#define ZXDH_CQPHC_HAI_FACTOR_S 32 +#define ZXDH_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32) +#define ZXDH_CQPHC_RAI_FACTOR_S 48 +#define ZXDH_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48) +#define ZXDH_CQPHC_DCQCN_B_S 0 +#define ZXDH_CQPHC_DCQCN_B GENMASK_ULL(24, 0) +#define ZXDH_CQPHC_DCQCN_F_S 25 +#define ZXDH_CQPHC_DCQCN_F GENMASK_ULL(27, 25) +#define ZXDH_CQPHC_CC_CFG_VALID_S 31 +#define ZXDH_CQPHC_CC_CFG_VALID BIT_ULL(31) +#define ZXDH_CQPHC_RREDUCE_MPERIOD_S 32 +#define ZXDH_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32) +#define ZXDH_CQPHC_HW_MINVER_S 0 +#define ZXDH_CQPHC_HW_MINVER GENMASK_ULL(15, 0) + +#define ZXDH_CQPHC_HW_MAJVER_GEN_1 0 +#define ZXDH_CQPHC_HW_MAJVER_GEN_2 1 +#define ZXDH_CQPHC_HW_MAJVER_GEN_3 2 +#define ZXDH_CQPHC_HW_MAJVER_S 16 +#define ZXDH_CQPHC_HW_MAJVER GENMASK_ULL(31, 16) +#define ZXDH_CQPHC_CEQPERVF_S 32 +#define ZXDH_CQPHC_CEQPERVF GENMASK_ULL(39, 32) + +#define ZXDH_CQPHC_EN_REM_ENDPOINT_TRK_S 3 +#define ZXDH_CQPHC_EN_REM_ENDPOINT_TRK BIT_ULL(3) + +#define ZXDH_CQPHC_ENABLED_VFS_S 32 +#define ZXDH_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32) + +#define ZXDH_CQPHC_HMC_PROFILE_S 0 
+#define ZXDH_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0) +#define ZXDH_CQPHC_SVER_S 24 +#define ZXDH_CQPHC_SVER GENMASK_ULL(31, 24) +#define ZXDH_CQPHC_SQBASE_S 9 +#define ZXDH_CQPHC_SQBASE GENMASK_ULL(63, 9) + +#define ZXDH_CQPHC_QPCTX_S 0 +#define ZXDH_CQPHC_QPCTX GENMASK_ULL(63, 0) +#define ZXDH_QP_DBSA_HW_SQ_TAIL_S 0 +#define ZXDH_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(17, 0) +#define ZXDH_CQ_DBSA_CQEIDX_S 0 +#define ZXDH_CQ_DBSA_CQEIDX GENMASK_ULL(22, 0) +#define ZXDH_CQ_DBSA_SW_CQ_SELECT_S 23 +#define ZXDH_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(28, 23) +#define ZXDH_CQ_DBSA_ARM_NEXT_S 31 +#define ZXDH_CQ_DBSA_ARM_NEXT BIT_ULL(31) +// #define ZXDH_CQ_DBSA_ARM_NEXT_SE_S 15 +// #define ZXDH_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15) +#define ZXDH_CQ_DBSA_ARM_SEQ_NUM_S 29 +#define ZXDH_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(30, 29) + +/* RDMA TX DDR Access REG Masks */ +#define ZXDH_TX_CACHE_ID_S 0 +#define ZXDH_TX_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_TX_INDICATE_ID_S 2 +#define ZXDH_TX_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_TX_AXI_ID_S 4 +#define ZXDH_TX_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_TX_WAY_PARTITION_S 7 +#define ZXDH_TX_WAY_PARTITION GENMASK_ULL(9, 7) + +/* RDMA RX REG Masks */ +#define ZXDH_CQ_CQE_AXI_ID_S 4 +#define ZXDH_CQ_CQE_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_CQ_CQE_INDICATE_ID_S 2 +#define ZXDH_CQ_CQE_INDICATE_ID GENMASK_ULL(3, 2) + +#define ZXDH_CQ_ARM_DBSA_ARM_SEQ_NUM_S 0 +#define IZXDH_CQ_ARM_DBSA_ARM_SEQ_NUM GENMASK_ULL(1, 0) +#define ZXDH_CQ_ARM_DBSA_ARM_NXT_S 2 +#define ZXDH_CQ_ARM_DBSA_ARM_NXT BIT_ULL(2) +#define ZXDH_CQ_ARM_CQ_ID_S 10 +#define ZXDH_CQ_ARM_CQ_ID GENMASK_ULL(29, 10) +#define ZXDH_CQ_ARM_DBSA_VLD_S 30 +#define ZXDH_CQ_ARM_DBSA_VLD BIT_ULL(30) + +/* RDMA RX DDR Access REG Masks */ +#define ZXDH_RX_CACHE_ID_S 0 +#define ZXDH_RX_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_RX_INDICATE_ID_S 2 +#define ZXDH_RX_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_RX_AXI_ID_S 4 +#define ZXDH_RX_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_RX_WAY_PARTITION_S 7 +#define ZXDH_RX_WAY_PARTITION GENMASK_ULL(9, 7) + +/* RDMA IO REG Masks */ +#define ZXDH_IOTABLE2_SID_S 0 +#define ZXDH_IOTABLE2_SID GENMASK_ULL(5, 0) + +#define ZXDH_IOTABLE4_EPID_S 11 +#define ZXDH_IOTABLE4_EPID GENMASK_ULL(14, 11) +#define ZXDH_IOTABLE4_VFID_S 3 +#define ZXDH_IOTABLE4_VFID GENMASK_ULL(10, 3) +#define ZXDH_IOTABLE4_PFID_S 0 +#define ZXDH_IOTABLE4_PFID GENMASK_ULL(2, 0) + +#define ZXDH_IOTABLE7_PFID_S 2 +#define ZXDH_IOTABLE7_PFID GENMASK_ULL(4, 2) +#define ZXDH_IOTABLE7_EPID_S 5 +#define ZXDH_IOTABLE7_EPID GENMASK_ULL(8, 5) + +/* CQP Create Masks */ +#define ZXDH_CQP_CREATE_EPID_S 12 +#define ZXDH_CQP_CREATE_EPID GENMASK_ULL(15, 12) +#define ZXDH_CQP_CREATE_VFID_S 4 +#define ZXDH_CQP_CREATE_VFID GENMASK_ULL(11, 4) +#define ZXDH_CQP_CREATE_PFID_S 1 +#define ZXDH_CQP_CREATE_PFID GENMASK_ULL(3, 1) +#define ZXDH_CQP_CREATE_VFUNC_ACTIVE_S 0 +#define ZXDH_CQP_CREATE_VFUNC_ACTIVE BIT_ULL(0) + +#define ZXDH_CQP_CREATE_STATE_CFG_S 31 +#define ZXDH_CQP_CREATE_STATE_CFG BIT_ULL(31) +#define ZXDH_CQP_CREATE_SQSIZE_S 16 +#define ZXDH_CQP_CREATE_SQSIZE GENMASK_ULL(27, 16) +#define ZXDH_CQP_CREATE_QPC_OBJ_IDX_S 10 +#define ZXDH_CQP_CREATE_QPC_OBJ_IDX GENMASK_ULL(14, 10) +#define ZXDH_CQP_CREATE_QPC_INDICATE_IDX_S 8 +#define ZXDH_CQP_CREATE_QPC_INDICATE_IDX GENMASK_ULL(9, 8) +#define ZXDH_CQP_CREATE_OBJ_IDX_S 2 +#define ZXDH_CQP_CREATE_OBJ_IDX GENMASK_ULL(6, 2) +#define ZXDH_CQP_CREATE_INDICATE_IDX_S 0 +#define ZXDH_CQP_CREATE_INDICATE_IDX GENMASK_ULL(1, 0) + +#define ZXDH_CQP_CREATE_ENA_PFVF_NUM_S 8 +#define ZXDH_CQP_CREATE_ENA_PFVF_NUM 
GENMASK_ULL(15, 8) +#define ZXDH_CQP_CREATE_CEQPERVF_S 0 +#define ZXDH_CQP_CREATE_CEQPERVF GENMASK_ULL(7, 0) + +/* CQP and iWARP Completion Queue */ +#define ZXDH_CQ_QPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQ_QPCTX ZXDH_CQPHC_QPCTX + +#define ZXDH_CCQ_OPRETVAL_S 0 +#define ZXDH_CCQ_OPRETVAL GENMASK_ULL(33, 0) +#define ZXDH_SRC_VHCA_ID_S 0 +#define ZXDH_SRC_VHCA_ID GENMASK_ULL(9, 0) +#define ZXDH_SRC_PFVF_ID_S 10 +#define ZXDH_SRC_PFVF_ID GENMASK_ULL(17, 10) +#define ZXDH_SRC_DSTVHCA_ID_S 18 +#define ZXDH_SRC_DSTVHCA_ID GENMASK_ULL(27, 18) + +#define ZXDH_CQ_MINERR_S 7 +#define ZXDH_CQ_MINERR GENMASK_ULL(22, 7) +#define ZXDH_CQ_MAJERR_S 23 +#define ZXDH_CQ_MAJERR GENMASK_ULL(38, 23) +#define ZXDH_CQ_WQEIDX_S 40 +#define ZXDH_CQ_WQEIDX GENMASK_ULL(54, 40) +#define ZXDH_CQ_EXTCQE_S 50 +#define ZXDH_CQ_EXTCQE BIT_ULL(50) +#define ZXDH_OOO_CMPL_S 54 +#define ZXDH_OOO_CMPL BIT_ULL(54) +#define ZXDH_CQ_ERROR_S 39 +#define ZXDH_CQ_ERROR BIT_ULL(39) +#define ZXDH_CQ_SQ_S 4 +#define ZXDH_CQ_SQ BIT_ULL(4) + +#define ZXDH_CQ_TYPE_S 0 +#define ZXDH_CQ_TYPE GENMASK_ULL(1, 0) + +#define ZXDH_CQ_VALID_S 5 +#define ZXDH_CQ_VALID BIT_ULL(5) +#define ZXDH_CQ_IMMVALID_S 0 +#define ZXDH_CQ_IMMVALID BIT_ULL(0) +#define ZXDH_CQ_UDSMACVALID_S 26 +#define ZXDH_CQ_UDSMACVALID BIT_ULL(26) +#define ZXDH_CQ_UDVLANVALID_S 27 +#define ZXDH_CQ_UDVLANVALID BIT_ULL(27) +#define ZXDH_CQ_UDSMAC_S 0 +#define ZXDH_CQ_UDSMAC GENMASK_ULL(47, 0) +#define ZXDH_CQ_UDVLAN_S 48 +#define ZXDH_CQ_UDVLAN GENMASK_ULL(63, 48) +#define ZXDH_CQ_IMMDATA_S 0 +#define ZXDH_CQ_IMMDATA GENMASK_ULL(31, 0) +#define IRDMACQ_PAYLDLEN_S 32 +#define IRDMACQ_PAYLDLEN GENMASK_ULL(63, 32) +#define ZXDH_CQ_MAILBOXCQE_S 3 +#define ZXDH_CQ_MAILBOXCQE BIT_ULL(3) + +#define ZXDH_CQ_IMMDATALOW32_S 0 +#define ZXDH_CQ_IMMDATALOW32 GENMASK_ULL(31, 0) +#define ZXDH_CQ_IMMDATAUP32_S 32 +#define ZXDH_CQ_IMMDATAUP32 GENMASK_ULL(63, 32) +#define IRDMACQ_TCPSEQNUMRTT_S 32 +#define IRDMACQ_TCPSEQNUMRTT GENMASK_ULL(63, 32) +#define IRDMACQ_INVSTAG_S 11 +#define IRDMACQ_INVSTAG GENMASK_ULL(42, 11) +#define IRDMACQ_QPID_S 44 +#define IRDMACQ_QPID GENMASK_ULL(63, 44) + +#define IRDMACQ_UDSRCQPN_S 1 +#define IRDMACQ_UDSRCQPN GENMASK_ULL(24, 1) +#define IRDMACQ_PSHDROP_S 51 +#define IRDMACQ_PSHDROP BIT_ULL(51) +#define IRDMACQ_STAG_S 43 +#define IRDMACQ_STAG BIT_ULL(43) +#define IRDMACQ_IPV4_S 25 +#define IRDMACQ_IPV4 BIT_ULL(25) +#define IRDMACQ_SOEVENT_S 6 +#define IRDMACQ_SOEVENT BIT_ULL(6) +#define IRDMACQ_OP_S 58 +#define IRDMACQ_OP GENMASK_ULL(63, 58) + +#define ZXDH_CEQE_CQCTX_S 0 +#define ZXDH_CEQE_CQCTX GENMASK_ULL(62, 0) +#define ZXDH_CEQE_VALID_S 63 +#define ZXDH_CEQE_VALID BIT_ULL(63) + +/* AEQE format */ +#define ZXDH_AEQE_COMPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_AEQE_COMPCTX ZXDH_CQPHC_QPCTX +#define ZXDH_AEQE_QPCQID_S 0 +#define ZXDH_AEQE_QPCQID GENMASK_ULL(20, 0) +#define ZXDH_AEQE_WQDESCIDX_S 37 +#define ZXDH_AEQE_WQDESCIDX GENMASK_ULL(37, 21) +#define ZXDH_AEQE_OVERFLOW_S 62 +#define ZXDH_AEQE_OVERFLOW BIT_ULL(62) +#define ZXDH_AEQE_AECODE_S 50 +#define ZXDH_AEQE_AECODE GENMASK_ULL(61, 50) +#define ZXDH_AEQE_AESRC_S 45 +#define ZXDH_AEQE_AESRC BIT_ULL(45) +#define ZXDH_AEQE_VHCA_ID_S 35 +#define ZXDH_AEQE_VHCA_ID GENMASK_ULL(44, 35) +#define ZXDH_AEQE_IWSTATE_S 46 +#define ZXDH_AEQE_IWSTATE GENMASK_ULL(49, 46) +#define ZXDH_AEQE_VALID_S 63 +#define ZXDH_AEQE_VALID BIT_ULL(63) +#define ZXDH_AEQE_MINOR_ERROR_S 50 +#define ZXDH_AEQE_MINOR_ERROR GENMASK_ULL(57, 50) +#define ZXDH_AEQE_MAJOR_ERROR_S 58 +#define ZXDH_AEQE_MAJOR_ERROR GENMASK_ULL(61, 58) + 
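[Editor's note, not part of the patch] The paired *_S shift constants and GENMASK_ULL()/BIT_ULL() masks above (for example the AEQE fields) follow the usual Linux bitfield idiom. Below is a minimal sketch of how such pairs are typically consumed when decoding a 64-bit AEQE word; the helper names are assumptions for illustration only and assume the ZXDH_AEQE_* definitions above are in scope.

/* Illustrative sketch only -- not part of the patch; helper names are assumed. */
#include <linux/bitfield.h>
#include <linux/types.h>

/* FIELD_GET() derives the shift from the GENMASK_ULL() mask itself, so the
 * companion ZXDH_AEQE_QPCQID_S constant is not needed in this form. */
static inline u32 zrdma_example_aeqe_qpcqid(u64 qword)
{
	return FIELD_GET(ZXDH_AEQE_QPCQID, qword);
}

/* Equivalent open-coded form using the explicit *_S shift constant. */
static inline u16 zrdma_example_aeqe_aecode(u64 qword)
{
	return (u16)((qword & ZXDH_AEQE_AECODE) >> ZXDH_AEQE_AECODE_S);
}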
+#define ZXDH_UDA_QPSQ_NEXT_HDR_S 16 +#define ZXDH_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16) +#define ZXDH_UDA_QPSQ_OPCODE_S 32 +#define ZXDH_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_UDA_QPSQ_L4LEN_S 42 +#define ZXDH_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42) +#define ZXDH_GEN1_UDA_QPSQ_L4LEN_S 24 +#define ZXDH_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24) +#define ZXDH_UDA_QPSQ_AHIDX_S 0 +#define ZXDH_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0) +#define ZXDH_UDA_QPSQ_VALID_S 63 +#define ZXDH_UDA_QPSQ_VALID BIT_ULL(63) +#define ZXDH_UDA_QPSQ_SIGCOMPL_S 62 +#define ZXDH_UDA_QPSQ_SIGCOMPL BIT_ULL(62) +#define ZXDH_UDA_QPSQ_MACLEN_S 56 +#define ZXDH_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56) +#define ZXDH_UDA_QPSQ_IPLEN_S 48 +#define ZXDH_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48) +#define ZXDH_UDA_QPSQ_L4T_S 30 +#define ZXDH_UDA_QPSQ_L4T GENMASK_ULL(31, 30) +#define ZXDH_UDA_QPSQ_IIPT_S 28 +#define ZXDH_UDA_QPSQ_IIPT GENMASK_ULL(29, 28) +#define ZXDH_UDA_PAYLOADLEN_S 0 +#define ZXDH_UDA_PAYLOADLEN GENMASK_ULL(13, 0) +#define ZXDH_UDA_HDRLEN_S 16 +#define ZXDH_UDA_HDRLEN GENMASK_ULL(24, 16) +#define ZXDH_VLAN_TAG_VALID_S 50 +#define ZXDH_VLAN_TAG_VALID BIT_ULL(50) +#define ZXDH_UDA_L3PROTO_S 0 +#define ZXDH_UDA_L3PROTO GENMASK_ULL(1, 0) +#define ZXDH_UDA_L4PROTO_S 16 +#define ZXDH_UDA_L4PROTO GENMASK_ULL(17, 16) +#define ZXDH_UDA_QPSQ_DOLOOPBACK_S 44 +#define ZXDH_UDA_QPSQ_DOLOOPBACK BIT_ULL(44) +#define ZXDH_CQPSQ_BUFSIZE_S 0 +#define ZXDH_CQPSQ_BUFSIZE GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_OPCODE_S 58 +#define ZXDH_CQPSQ_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_CQPSQ_WQEVALID_S 57 +#define ZXDH_CQPSQ_WQEVALID BIT_ULL(57) +#define ZXDH_CQPSQ_TPHVAL_S 0 +#define ZXDH_CQPSQ_TPHVAL GENMASK_ULL(7, 0) + +// DMA OP +#define ZXDH_CQPSQ_DESTPATHINDEX_S 0 +#define ZXDH_CQPSQ_DESTPATHINDEX GENMASK_ULL(39, 0) + +#define ZXDH_CQPSQ_SRCPATHINDEX_S 0 +#define ZXDH_CQPSQ_SRCPATHINDEX GENMASK_ULL(39, 0) + +#define ZXDH_CQPSQ_InterSourSel_S 41 +#define ZXDH_CQPSQ_InterSourSel GENMASK_ULL(45, 41) + +#define ZXDH_CQPSQ_NeedInter_S 40 +#define ZXDH_CQPSQ_NeedInter BIT_ULL(40) + +#define ZXDH_CQPSQ_DATAINWQENUM_S 54 +#define ZXDH_CQPSQ_DATAINWQENUM GENMASK_ULL(56, 54) + +#define ZXDH_CQPSQ_DATAHIGH_S 32 +#define ZXDH_CQPSQ_DATAHIGH GENMASK_ULL(63, 32) + +#define ZXDH_CQPSQ_DATALOW_S 31 +#define ZXDH_CQPSQ_DATALOW GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_DATABITWIDTH_S 53 +#define ZXDH_CQPSQ_DATABITWIDTH BIT_ULL(53) + +#define ZXDH_CQPSQ_INTERSOURCESEL_S 41 +#define ZXDH_CQPSQ_INTERSOURCESEL GENMASK_ULL(45, 41) + +#define ZXDH_CQPSQ_NEEDINTER_S 40 +#define ZXDH_CQPSQ_NEEDINTER BIT_ULL(40) + +#define ZXDH_CQPSQ_DATAINCQENUM_S 54 +#define ZXDH_CQPSQ_DATAINCQENUM GENMASK_ULL(56, 54) +// DMA OP + +// MB +#define ZXDH_CQPSQ_PFVALID_S 56 +#define ZXDH_CQPSQ_PFVALID BIT_ULL(56) + +#define ZXDH_CQPSQ_SRCPFVFID_S 8 +#define ZXDH_CQPSQ_SRCPFVFID GENMASK_ULL(15, 8) + +#define ZXDH_CQPSQ_DSTVFID_S 0 +#define ZXDH_CQPSQ_DSTVFID GENMASK_ULL(7, 0) +// MB + +#define ZXDH_CQPSQ_DBPOLARITY_S 11 +#define ZXDH_CQPSQ_DBPOLARITY BIT_ULL(11) +#define ZXDH_CQPSQ_DBRINGHEAD_S 0 +#define ZXDH_CQPSQ_DBRINGHEAD GENMASK_ULL(10, 0) + +#define ZXDH_CQPSQ_VSIIDX_S 8 +#define ZXDH_CQPSQ_VSIIDX GENMASK_ULL(17, 8) +#define ZXDH_CQPSQ_TPHEN_S 60 +#define ZXDH_CQPSQ_TPHEN BIT_ULL(60) + +#define ZXDH_CQPSQ_PBUFADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_PBUFADDR ZXDH_CQPHC_QPCTX + +/* Create/Modify/Destroy QP */ + +#define ZXDH_CQPSQ_QP_NEWMSS_S 32 +#define ZXDH_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32) +#define ZXDH_CQPSQ_QP_TERMLEN_S 48 +#define ZXDH_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 
48) + +#define ZXDH_CQPSQ_QP_QPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_QP_QPCTX ZXDH_CQPHC_QPCTX + +#define ZXDH_CQPSQ_QP_QPID_S 0 +#define ZXDH_CQPSQ_QP_QPID_M (0xFFFFFFUL) + +#define ZXDH_CQPSQ_QP_OP_S 32 +#define ZXDH_CQPSQ_QP_OP_M IRDMACQ_OP_M +#define ZXDH_CQPSQ_QP_ORDVALID_S 42 +#define ZXDH_CQPSQ_QP_ORDVALID BIT_ULL(42) +#define ZXDH_CQPSQ_QP_TOECTXVALID_S 43 +#define ZXDH_CQPSQ_QP_TOECTXVALID BIT_ULL(43) +#define ZXDH_CQPSQ_QP_CACHEDVARVALID_S 44 +#define ZXDH_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44) +#define ZXDH_CQPSQ_QP_VQ_S 45 +#define ZXDH_CQPSQ_QP_VQ BIT_ULL(45) +#define ZXDH_CQPSQ_QP_FORCELOOPBACK_S 46 +#define ZXDH_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46) +#define ZXDH_CQPSQ_QP_CQNUMVALID_S 47 +#define ZXDH_CQPSQ_QP_CQNUMVALID BIT_ULL(47) +#define ZXDH_CQPSQ_QP_QPTYPE_S 48 +#define ZXDH_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48) +#define ZXDH_CQPSQ_QP_MACVALID_S 51 +#define ZXDH_CQPSQ_QP_MACVALID BIT_ULL(51) +#define ZXDH_CQPSQ_QP_MSSCHANGE_S 52 +#define ZXDH_CQPSQ_QP_MSSCHANGE BIT_ULL(52) + +#define ZXDH_CQPSQ_QP_IGNOREMWBOUND_S 54 +#define ZXDH_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54) +#define ZXDH_CQPSQ_QP_REMOVEHASHENTRY_S 55 +#define ZXDH_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55) +#define ZXDH_CQPSQ_QP_TERMACT_S 56 +#define ZXDH_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56) +#define ZXDH_CQPSQ_QP_RESETCON_S 58 +#define ZXDH_CQPSQ_QP_RESETCON BIT_ULL(58) +#define ZXDH_CQPSQ_QP_ARPTABIDXVALID_S 59 +#define ZXDH_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59) +#define ZXDH_CQPSQ_QP_NEXTIWSTATE_S 60 +#define ZXDH_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60) + +#define ZXDH_CQPSQ_QP_ID_S 0 +#define ZXDH_CQPSQ_QP_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_QP_CONTEXT_ID_S 20 +#define ZXDH_CQPSQ_QP_CONTEXT_ID GENMASK_ULL(39, 20) + +#define ZXDH_CQPSQ_QP_DBSHADOWADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_QP_DBSHADOWADDR ZXDH_CQPHC_QPCTX + +#define ZXDH_CQPSQ_CQ_OP_S 32 +#define ZXDH_CQPSQ_CQ_OP GENMASK_ULL(37, 32) + +#define ZXDH_CQPSQ_CQ_CEQIDVALID_S 54 +#define ZXDH_CQPSQ_CQ_CEQIDVALID BIT_ULL(54) +#define ZXDH_CQPSQ_CQ_CQRESIZE_S 53 +#define ZXDH_CQPSQ_CQ_CQRESIZE BIT_ULL(53) +#define ZXDH_CQPSQ_CQ_CQADDRVALID_S 52 +#define ZXDH_CQPSQ_CQ_CQADDRVALID BIT_ULL(52) + +#define ZXDH_CQPSQ_CQ_CQSTATE_S 60 +#define ZXDH_CQPSQ_CQ_CQSTATE GENMASK_ULL(63, 60) +#define ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG_S 59 +#define ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG BIT_ULL(59) +#define ZXDH_CQPSQ_CQ_CQESIZE_S 58 +#define ZXDH_CQPSQ_CQ_CQESIZE BIT_ULL(58) +#define ZXDH_CQPSQ_CQ_VIRTMAP_S 57 +#define ZXDH_CQPSQ_CQ_VIRTMAP BIT_ULL(57) +#define ZXDH_CQPSQ_CQ_LPBLSIZE_S 55 +#define ZXDH_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(56, 55) +#define ZXDH_CQPSQ_CQ_ENCEQEMASK_S 54 +#define ZXDH_CQPSQ_CQ_ENCEQEMASK BIT_ULL(54) +#define ZXDH_CQPSQ_CQ_DEBUG_SET_S 44 +#define ZXDH_CQPSQ_CQ_DEBUG_SET GENMASK_ULL(53, 44) +#define ZXDH_CQPSQ_CQ_VHCAID_S 34 +#define ZXDH_CQPSQ_CQ_VHCAID GENMASK_ULL(43, 34) +#define ZXDH_CQPSQ_CQ_CQMAX_S 18 +#define ZXDH_CQPSQ_CQ_CQMAX GENMASK_ULL(33, 18) +#define ZXDH_CQPSQ_CQ_CQPERIOD_S 7 +#define ZXDH_CQPSQ_CQ_CQPERIOD GENMASK_ULL(17, 7) +#define ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN_S 6 +#define ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN BIT_ULL(6) + +#define ZXDH_CQPSQ_CQ_CEQ_ID_S 48 +#define ZXDH_CQPSQ_CQ_CEQ_ID GENMASK_ULL(59, 48) +#define ZXDH_CQPSQ_CQ_ST_S 46 +#define ZXDH_CQPSQ_CQ_ST GENMASK_ULL(47, 46) +#define ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT_S 33 +#define ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT GENMASK_ULL(45, 33) +#define ZXDH_CQPSQ_CQ_CQSIZE_S 24 +#define ZXDH_CQPSQ_CQ_CQSIZE GENMASK_ULL(28, 24) +#define ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD_S 0 
+#define ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(15, 0) +#define ZXDH_CQPSQ_CQ_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) +#define ZXDH_CQPSQ_CQ_CQC_SET_MASK_S 0 +#define ZXDH_CQPSQ_CQ_CQC_SET_MASK GENMASK_ULL(63, 0) +#define ZXDH_CQPSQ_CQ_MODIFY_SIZE_S 20 +#define ZXDH_CQPSQ_CQ_MODIFY_SIZE BIT_ULL(20) + +/* Allocate/Register/Register Shared/Deallocate Stag */ +#define ZXDH_CQPSQ_STAG_VA_FBO_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_STAG_VA_FBO ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_STAG_STAGLEN_S 0 +#define ZXDH_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0) +#define ZXDH_CQPSQ_STAG_KEY_S 0 +#define ZXDH_CQPSQ_STAG_KEY GENMASK_ULL(7, 0) +#define ZXDH_CQPSQ_STAG_IDX_S 8 +#define ZXDH_CQPSQ_STAG_IDX GENMASK_ULL(31, 8) +#define ZXDH_CQPSQ_STAG_PARENTSTAGIDX_S 32 +#define ZXDH_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32) +#define ZXDH_CQPSQ_STAG_MR_S 31 +#define ZXDH_CQPSQ_STAG_MR BIT_ULL(31) +#define ZXDH_CQPSQ_STAG_MWTYPE_S 30 +#define ZXDH_CQPSQ_STAG_MWTYPE BIT_ULL(30) +#define ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_S 29 +#define ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(29) +#define ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN_S 28 +#define ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN BIT_ULL(28) +#define ZXDH_CQPSQ_STAG_MR_INVALID_EN_S 27 +#define ZXDH_CQPSQ_STAG_MR_INVALID_EN BIT_ULL(27) +#define ZXDH_CQPSQ_STAG_MR_FORCE_DEL_S 26 +#define ZXDH_CQPSQ_STAG_MR_FORCE_DEL BIT_ULL(26) +#define ZXDH_CQPSQ_STAG_MR_PDID_HIG_S 56 +#define ZXDH_CQPSQ_STAG_MR_PDID_HIG GENMASK_ULL(57, 56) +#define ZXDH_CQPSQ_STAG_MR_PDID_LOW_S 46 +#define ZXDH_CQPSQ_STAG_MR_PDID_LOW GENMASK_ULL(63, 46) +#define ZXDH_CQPSQ_QUERY_MKEY_S 8 +#define ZXDH_CQPSQ_QUERY_MKEY GENMASK_ULL(31, 8) + +#define ZXDH_CQPSQ_STAG_LPBLSIZE_S 32 +#define ZXDH_CQPSQ_STAG_LPBLSIZE GENMASK_ULL(33, 32) +#define ZXDH_CQPSQ_STAG_HPAGESIZE_S 34 +#define ZXDH_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(38, 34) +#define ZXDH_CQPSQ_STAG_ARIGHTS_S 39 +#define ZXDH_CQPSQ_STAG_ARIGHTS GENMASK_ULL(43, 39) +// #define ZXDH_CQPSQ_STAG_REMACCENABLED_S 53 +// #define ZXDH_CQPSQ_STAG_REMACCENABLED BIT_ULL(53) +#define ZXDH_CQPSQ_STAG_VABASEDTO_S 45 +#define ZXDH_CQPSQ_STAG_VABASEDTO BIT_ULL(45) +#define ZXDH_CQPSQ_STAG_USEHMCFNIDX_S 56 +#define ZXDH_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(56) +#define ZXDH_CQPSQ_STAG_FCN_INDEX_S 46 +#define ZXDH_CQPSQ_STAG_FCN_INDEX GENMASK_ULL(55, 46) +// #define ZXDH_CQPSQ_STAG_USEPFRID_S 61 +// #define ZXDH_CQPSQ_STAG_USEPFRID BIT_ULL(61) +#define ZXDH_CQPSQ_STAG_SHARED_S 44 +#define ZXDH_CQPSQ_STAG_SHARED BIT_ULL(44) + +#define ZXDH_CQPSQ_STAG_PBA_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_STAG_PBA ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_STAG_HMCFNIDX_S 0 +#define ZXDH_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0) + +#define ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +#define ZXDH_CQPSQ_QUERYSTAG_IDX_S ZXDH_CQPSQ_STAG_IDX_S +#define ZXDH_CQPSQ_QUERYSTAG_IDX ZXDH_CQPSQ_STAG_IDX +#define ZXDH_CQPSQ_MLM_TABLEIDX_S 0 +#define ZXDH_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_MLM_FREEENTRY_S 62 +#define ZXDH_CQPSQ_MLM_FREEENTRY BIT_ULL(62) +#define ZXDH_CQPSQ_MLM_IGNORE_REF_CNT_S 61 +#define ZXDH_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61) +#define ZXDH_CQPSQ_MLM_MAC0_S 0 +#define ZXDH_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0) +#define ZXDH_CQPSQ_MLM_MAC1_S 8 +#define ZXDH_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8) +#define ZXDH_CQPSQ_MLM_MAC2_S 16 +#define ZXDH_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16) +#define ZXDH_CQPSQ_MLM_MAC3_S 24 +#define ZXDH_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24) +#define 
ZXDH_CQPSQ_MLM_MAC4_S 32 +#define ZXDH_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32) +#define ZXDH_CQPSQ_MLM_MAC5_S 40 +#define ZXDH_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40) +#define ZXDH_CQPSQ_MAT_REACHMAX_S 0 +#define ZXDH_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_MAT_MACADDR_S 0 +#define ZXDH_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0) +#define ZXDH_CQPSQ_MAT_ARPENTRYIDX_S 0 +#define ZXDH_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0) +#define ZXDH_CQPSQ_MAT_ENTRYVALID_S 42 +#define ZXDH_CQPSQ_MAT_ENTRYVALID BIT_ULL(42) +#define ZXDH_CQPSQ_MAT_PERMANENT_S 43 +#define ZXDH_CQPSQ_MAT_PERMANENT BIT_ULL(43) +#define ZXDH_CQPSQ_MAT_QUERY_S 44 +#define ZXDH_CQPSQ_MAT_QUERY BIT_ULL(44) +#define ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT_S 0 +#define ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MVPBP_FIRST_PD_INX_S 16 +#define ZXDH_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16) +#define ZXDH_CQPSQ_MVPBP_SD_INX_S 32 +#define ZXDH_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32) +#define ZXDH_CQPSQ_MVPBP_INV_PD_ENT_S 62 +#define ZXDH_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62) +#define ZXDH_CQPSQ_MVPBP_PD_PLPBA_S 3 +#define ZXDH_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3) + +/* Manage Push Page - MPP */ +#define ZXDH_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff +#define ZXDH_INVALID_PUSH_PAGE_INDEX 0xffffffff + +#define ZXDH_CQPSQ_MPP_QS_HANDLE_S 0 +#define ZXDH_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MPP_PPIDX_S 0 +#define ZXDH_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MPP_PPTYPE_S 60 +#define ZXDH_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60) + +#define ZXDH_CQPSQ_MPP_FREE_PAGE_S 62 +#define ZXDH_CQPSQ_MPP_FREE_PAGE BIT_ULL(62) + +/* Upload Context - UCTX */ +#define ZXDH_CQPSQ_UCTX_QPCTXADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_UCTX_QPCTXADDR ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_UCTX_QPID_S 0 +#define ZXDH_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_UCTX_QPTYPE_S 48 +#define ZXDH_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48) + +#define ZXDH_CQPSQ_UCTX_RAWFORMAT_S 61 +#define ZXDH_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61) +#define ZXDH_CQPSQ_UCTX_FREEZEQP_S 62 +#define ZXDH_CQPSQ_UCTX_FREEZEQP BIT_ULL(62) + +#define ZXDH_CQPSQ_MHMC_VFIDX_S 0 +#define ZXDH_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_MHMC_FREEPMFN_S 62 +#define ZXDH_CQPSQ_MHMC_FREEPMFN BIT_ULL(62) + +#define ZXDH_CQPSQ_SHMCRP_HMC_PROFILE_S 0 +#define ZXDH_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0) +#define ZXDH_CQPSQ_SHMCRP_VFNUM_S 32 +#define ZXDH_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_CEQ_CEQSIZE_S 0 +#define ZXDH_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0) +#define ZXDH_CQPSQ_CEQ_CEQID_S 0 +#define ZXDH_CQPSQ_CEQ_CEQID GENMASK_ULL(11, 0) + +#define ZXDH_CQPSQ_CEQ_LPBLSIZE_S ZXDH_CQPSQ_CQ_LPBLSIZE_S +#define ZXDH_CQPSQ_CEQ_LPBLSIZE_M ZXDH_CQPSQ_CQ_LPBLSIZE_M +#define ZXDH_CQPSQ_CEQ_LPBLSIZE ZXDH_CQPSQ_CQ_LPBLSIZE +#define ZXDH_CQPSQ_CEQ_VMAP_S 47 +#define ZXDH_CQPSQ_CEQ_VMAP BIT_ULL(47) +#define ZXDH_CQPSQ_CEQ_ITRNOEXPIRE_S 46 +#define ZXDH_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46) +#define ZXDH_CQPSQ_CEQ_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +/*CEQC format*/ +#define ZXDH_CEQC_PERIOD_L_S 0 +#define ZXDH_CEQC_PERIOD_L GENMASK_ULL(2, 0) +#define ZXDH_CEQC_VHCA_S 3 +#define ZXDH_CEQC_VHCA GENMASK_ULL(12, 3) +#define ZXDH_CEQC_INTR_IDX_S 13 +#define ZXDH_CEQC_INTR_IDX GENMASK_ULL(30, 13) +#define ZXDH_CEQC_INT_TYPE_S 31 +#define ZXDH_CEQC_INT_TYPE BIT_ULL(31) +#define ZXDH_CEQC_CEQ_HEAD_S 32 +#define ZXDH_CEQC_CEQ_HEAD GENMASK_ULL(52, 32) +#define ZXDH_CEQC_CEQE_VALID_S 53 +#define 
ZXDH_CEQC_CEQE_VALID BIT_ULL(53) +#define ZXDH_CEQC_LEAF_PBL_SIZE_S 54 +#define ZXDH_CEQC_LEAF_PBL_SIZE GENMASK_ULL(55, 54) +#define ZXDH_CEQC_CEQ_SIZE_S 56 +#define ZXDH_CEQC_CEQ_SIZE GENMASK_ULL(57, 56) +#define ZXDH_CEQC_LOG_CEQ_NUM_S 58 +#define ZXDH_CEQC_LOG_CEQ_NUM GENMASK_ULL(62, 58) +#define ZXDH_CEQC_CEQ_STATE_S 63 +#define ZXDH_CEQC_CEQ_STATE BIT_ULL(63) + +#define ZXDH_CEQC_CEQ_ADDRESS_S 0 +#define ZXDH_CEQC_CEQ_ADDRESS GENMASK_ULL(56, 0) +#define ZXDH_CEQC_PERIOD_H_S 57 +#define ZXDH_CEQC_PERIOD_H GENMASK_ULL(63, 57) + +#define ZXDH_CEQC_CEQ_MAX_CNT_S 0 +#define ZXDH_CEQC_CEQ_MAX_CNT GENMASK_ULL(15, 0) +#define ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG_S 16 +#define ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG BIT_ULL(16) + +#define ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID_S 2 +#define ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID_S 4 +#define ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID GENMASK_ULL(6, 4) + +#define ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID_S 0 +#define ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_CEQ_PBLE_AXI_INFO_AXI_ID_S 4 +#define ZXDH_CEQ_PBLEE_AXI_INFO_AXI_ID GENMASK_ULL(6, 4) + +#define ZXDH_CQPSQ_AEQ_AEQECNT_S 0 +#define ZXDH_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0) + +#define ZXDH_CQPSQ_AEQ_LPBLSIZE_S ZXDH_CQPSQ_CQ_LPBLSIZE_S +#define ZXDH_CQPSQ_AEQ_LPBLSIZE_M ZXDH_CQPSQ_CQ_LPBLSIZE_M +#define ZXDH_CQPSQ_AEQ_LPBLSIZE ZXDH_CQPSQ_CQ_LPBLSIZE +#define ZXDH_CQPSQ_AEQ_VMAP_S 47 +#define ZXDH_CQPSQ_AEQ_VMAP BIT_ULL(47) +#define ZXDH_CQPSQ_AEQ_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +#define ZXDH_CEQ_ARM_VHCA_ID_S 0 +#define ZXDH_CEQ_ARM_VHCA_ID GENMASK_ULL(9, 0) +#define ZXDH_CEQ_ARM_CEQ_ID_S 10 +#define ZXDH_CEQ_ARM_CEQ_ID GENMASK_ULL(21, 10) + +#define ZXDH_CEQ_INT_PCIE_DBI_EN_S 0 +#define ZXDH_CEQ_INT_PCIE_DBI_EN BIT_ULL(0) +#define ZXDH_CEQ_INT_EP_ID_S 1 +#define ZXDH_CEQ_INT_EP_ID GENMASK_ULL(3, 1) +#define ZXDH_CEQ_INT_PF_NUM_S 4 +#define ZXDH_CEQ_INT_PF_NUM GENMASK_ULL(8, 4) +#define ZXDH_CEQ_INT_VF_NUM_S 9 +#define ZXDH_CEQ_INT_VF_NUM GENMASK_ULL(16, 9) +#define ZXDH_CEQ_INT_VF_ACTIVE_S 17 +#define ZXDH_CEQ_INT_VF_ACTIVE BIT_ULL(17) + +/*AEQC format*/ +#define ZXDH_AEQC_INTR_IDX_S 0 +#define ZXDH_AEQC_INTR_IDX GENMASK_ULL(11, 0) +#define ZXDH_AEQC_AEQ_HEAD_S 13 +#define ZXDH_AEQC_AEQ_HEAD GENMASK_ULL(34, 13) +#define ZXDH_AEQC_LEAF_PBL_SIZE_S 35 +#define ZXDH_AEQC_LEAF_PBL_SIZE GENMASK_ULL(36, 35) +#define ZXDH_AEQC_VIRTUALLY_MAPPED_S 37 +#define ZXDH_AEQC_VIRTUALLY_MAPPED BIT_ULL(37) +#define ZXDH_AEQC_AEQ_SIZE_S 38 +#define ZXDH_AEQC_AEQ_SIZE GENMASK_ULL(59, 38) +#define ZXDH_AEQC_AEQ_STATE_S 60 +#define ZXDH_AEQC_AEQ_STATE GENMASK_ULL(63, 60) +#define ZXDH_AEQC_AEQ_ADDRESS_S 0 +#define ZXDH_AEQC_AEQ_ADDRESS GENMASK_ULL(63, 0) + +#define ZXDH_AEQ_MSIX_DATA_VECTOR_S 0 +#define ZXDH_AEQ_MSIX_DATA_VECTOR GENMASK_ULL(10, 0) +#define ZXDH_AEQ_MSIX_DATA_TC_S 12 +#define ZXDH_AEQ_MSIX_DATA_TC GENMASK_ULL(14, 12) +#define ZXDH_AEQ_MSIX_DATA_VF_ACTIVE_S 15 +#define ZXDH_AEQ_MSIX_DATA_VF_ACTIVE BIT_ULL(15) +#define ZXDH_AEQ_MSIX_DATA_VF_ID_S 16 +#define ZXDH_AEQ_MSIX_DATA_VF_ID GENMASK_ULL(23, 16) +#define ZXDH_AEQ_MSIX_DATA_PF_ID_S 24 +#define ZXDH_AEQ_MSIX_DATA_PF_ID GENMASK_ULL(28, 24) + +#define ZXDH_AEQ_MSIX_CONFIG_IRQ_S 0 +#define ZXDH_AEQ_MSIX_CONFIG_IRQ GENMASK_ULL(2, 0) +#define ZXDH_AEQ_MSIX_CONFIG_EPID_S 3 +#define ZXDH_AEQ_MSIX_CONFIG_EPID GENMASK_ULL(7, 3) + +#define ZXDH_AEQ_CACHE_ID_S 0 +#define ZXDH_AEQ_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_AEQ_AXI_ID_S 2 +#define ZXDH_AEQ_AXI_ID GENMASK_ULL(4, 2) 
+#define ZXDH_AEQ_WAY_PARTITION_S 5 +#define ZXDH_AEQ_WAY_PATITION GENMASK_ULL(7, 5) + +#define ZXDH_AEQ_INDICIATE_ID_S 0 +#define ZXDH_AEQ_INDICIATE_ID GENMASK_ULL(1, 0) + +#define ZXDH_COMMIT_FPM_QPCNT_S 0 +#define ZXDH_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0) + +#define ZXDH_COMMIT_FPM_BASE_S 32 +#define ZXDH_CQPSQ_CFPM_HMCFNID_S 0 +#define ZXDH_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0) + +#define ZXDH_CQPSQ_FWQE_AECODE_S 0 +#define ZXDH_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_FWQE_AESOURCE_S 32 +#define ZXDH_CQPSQ_FWQE_AESOURCE GENMASK_ULL(35, 32) +#define ZXDH_CQPSQ_FWQE_RQMNERR_S 0 +#define ZXDH_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_FWQE_RQMJERR_S 16 +#define ZXDH_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_FWQE_SQMNERR_S 32 +#define ZXDH_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32) +#define ZXDH_CQPSQ_FWQE_SQMJERR_S 48 +#define ZXDH_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48) +#define ZXDH_CQPSQ_FWQE_QPID_S 0 +#define ZXDH_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_FWQE_GENERATE_AE_S 53 +#define ZXDH_CQPSQ_FWQE_GENERATE_AE BIT_ULL(53) +#define ZXDH_CQPSQ_FWQE_USERFLCODE_S 54 +#define ZXDH_CQPSQ_FWQE_USERFLCODE BIT_ULL(54) +#define ZXDH_CQPSQ_FWQE_FLUSHSQ_S 55 +#define ZXDH_CQPSQ_FWQE_FLUSHSQ BIT_ULL(55) +#define ZXDH_CQPSQ_FWQE_FLUSHRQ_S 56 +#define ZXDH_CQPSQ_FWQE_FLUSHRQ BIT_ULL(56) +#define ZXDH_CQPSQ_MAPT_PORT_S 0 +#define ZXDH_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_MAPT_ADDPORT_S 62 +#define ZXDH_CQPSQ_MAPT_ADDPORT BIT_ULL(62) +#define ZXDH_CQPSQ_UPESD_SDCMD_S 0 +#define ZXDH_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_UPESD_SDDATALOW_S 0 +#define ZXDH_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_UPESD_SDDATAHI_S 32 +#define ZXDH_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_UPESD_HMCFNID_S 0 +#define ZXDH_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_UPESD_ENTRY_VALID_S 63 +#define ZXDH_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63) + +#define ZXDH_CQPSQ_UPESD_BM_PF 0 +#define ZXDH_CQPSQ_UPESD_BM_CP_LM 1 +#define ZXDH_CQPSQ_UPESD_BM_AXF 2 +#define ZXDH_CQPSQ_UPESD_BM_LM 4 +#define ZXDH_CQPSQ_UPESD_BM_S 32 +#define ZXDH_CQPSQ_UPESD_BM GENMASK_ULL(34, 32) +#define ZXDH_CQPSQ_UPESD_ENTRY_COUNT_S 0 +#define ZXDH_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0) +#define ZXDH_CQPSQ_UPESD_SKIP_ENTRY_S 7 +#define ZXDH_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7) + +/* Suspend QP */ +#define ZXDH_CQPSQ_SUSPENDQP_QPID_S 0 +#define ZXDH_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_RESUMEQP_QSHANDLE_S 0 +#define ZXDH_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0) + +/* Query hw Context OP */ +#define ZXDH_CQPSQ_QUERY_QPC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_QPC_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_QUERY_CQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_CQC_ID GENMASK_ULL(20, 0) +#define ZXDH_CQPSQ_QUERY_CEQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_CEQC_ID GENMASK_ULL(10, 0) +#define ZXDH_CQPSQ_QUERY_AEQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_AEQC_ID GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_QUERY_SRQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_SRQC_ID GENMASK_ULL(19, 0) + +#define ZXDH_CQPSQ_RESUMEQP_QPID_S ZXDH_CQPSQ_SUSPENDQP_QPID_S +#define ZXDH_CQPSQ_RESUMEQP_QPID_M ZXDH_CQPSQ_SUSPENDQP_QPID_M +#define ZXDH_CQPSQ_RESUMEQP_QPID ZXDH_CQPSQ_SUSPENDQP_QPID + +#define ZXDH_CQPSQ_MIN_STAG_INVALID 0x0001 +#define ZXDH_CQPSQ_MIN_SUSPEND_PND 0x0005 + +#define ZXDH_CQPSQ_MAJ_NO_ERROR 0x0000 +#define ZXDH_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000 +#define ZXDH_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001 +#define ZXDH_CQPSQ_MAJ_ERROR 0xFFFF + +//NVME OF IOQ 
SQ +#define NOF_IOQ_SQ_WQE_SIZE 32 +#define NOF_IOQ_SQ_SIZE 512 +#define NOF_IOQ_SQ_LOG_SIZE 9 + +//QPC_FIELD_MASK +#define RDMAQPC_MASK_INIT 0xFFFFFFFFFFFFFFFFUL +#define RDMAQPC_MASK_RESET 0xFFFFFFFFFFFFFFFFUL +#define RDMAQPC_TX_MASKL_DESTROY 0x146800UL +#define RDMAQPC_RX_MASKL_DESTROY 0x81C0000000UL + +#define RDMAQPC_TX_MASKL_RETRY_CNT (0x1UL << 1) +#define RDMAQPC_TX_MASKL_CUR_RETRY_CNT (0x1UL << 2) +#define RDMAQPC_TX_MASKL_READ_RETRY_FLAG (0x1UL << 3) +#define RDMAQPC_TX_MASKL_LAST_ACK_PSN (0x1UL << 4) +#define RDMAQPC_TX_MASKL_LSN (0x1UL << 6) +#define RDMAQPC_TX_MASKL_ACK_CREDITS (0x1UL << 7) +#define RDMAQPC_TX_MASKL_RNR_RETRY_FLAG (0x1UL << 8) +#define RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD (0x1UL << 9) +#define RDMAQPC_TX_MASKL_RNR_RETRY_TIME (0x1UL << 10) +#define RDMAQPC_TX_MASKL_PSN_MAX (0x1UL << 16) +#define RDMAQPC_TX_MASKL_PSN_NXT (0x1UL << 17) +#define RDMAQPC_TX_MASKL_LOCAL_ACK_TIMEOUT (0x1UL << 21) +#define RDMAQPC_TX_MASKL_RETRY_FLAG (0x1UL << 22) +#define RDMAQPC_TX_MASKL_HW_SQ_TAIL_UNA (0x1UL << 23) +#define RDMAQPC_TX_MASKL_LAST_ACK_WQE_OFFSET (0x1UL << 24) +#define RDMAQPC_TX_MASKL_SQ_VMAP (0x1UL << 26) +#define RDMAQPC_TX_MASKL_SQ_LPBL_SIZE (0x1UL << 27) +#define RDMAQPC_TX_MASKL_IPV4 (0x1UL << 29) +#define RDMAQPC_TX_MASKL_INSERT_VLANTAG (0x1UL << 32) +#define RDMAQPC_TX_MASKL_VLANTAG (0x1UL << 33) +#define RDMAQPC_TX_MASKL_PD_ID (0x1UL << 34) +#define RDMAQPC_TX_MASKL_SQ_PA (0x1UL << 38) +#define RDMAQPC_TX_MASKL_DEST_IP_LOW (0x1UL << 39) +#define RDMAQPC_TX_MASKL_DEST_IP_HIGH (0x1UL << 40) +#define RDMAQPC_TX_MASKL_SRC_PORT (0x1UL << 41) +#define RDMAQPC_TX_MASKL_FLOWLABLE (0x1UL << 43) +#define RDMAQPC_TX_MASKL_TTL (0x1UL << 44) +#define RDMAQPC_TX_MASKL_QKEY (0x1UL << 46) +#define RDMAQPC_TX_MASKL_DEST_QPN (0x1UL << 47) +#define RDMAQPC_TX_MASKL_ORD_SIZE (0x1UL << 48) +#define RDMAQPC_TX_MASKL_PKEY (0x1UL << 49) +#define RDMAQPC_TX_MASKL_DEST_MAC (0x1UL << 50) +#define RDMAQPC_TX_MASKL_LOCAL_IP_LOW (0x1UL << 52) +#define RDMAQPC_TX_MASKL_LOCAL_IP_HIGH (0x1UL << 53) +#define RDMAQPC_TX_MASKL_PMTU (0x1UL << 55) +#define RDMAQPC_TX_MASKL_ACK_TIMEOUT (0x1UL << 56) +#define RDMAQPC_TX_MASKL_LOG_SQ_SIZE (0x1UL << 57) +#define RDMAQPC_TX_MASKL_NVMEOF_QID (0x1UL << 59) +#define RDMAQPC_TX_MASKL_NVMEOF_TGT (0x1UL << 60) +#define RDMAQPC_TX_MASKL_NVMEOF_IOQ (0x1UL << 61) +#define RDMAQPC_TX_MASKL_GQP_ID (0x1UL << 62) + +#define RDMAQPC_TX_MASKH_QUEUE_TC (0x1UL << 0) +#define RDMAQPC_TX_MASKH_TOS (0x1UL << 5) +#define RDMAQPC_TX_MASKH_WS_IDX (0x1UL << 7) +#define RDMAQPC_TX_MASKH_QP_STATE (0x1UL << 8) +#define RDMAQPC_TX_MASKH_RNR_RETRY_CNT (0x1UL << 26) +#define RDMAQPC_TX_MASKH_RNR_CUR_RETRY_CNT (0x1UL << 27) + +#define RDMAQPC_TX_MASKH_ERR_FLAG (0x1UL << 13) +#define RDMAQPC_TX_MASKH_ACK_ERR_FLAG (0x1UL << 14) +#define RDMAQPC_TX_MASKH_RDWQE_PYLD_LENGTH (0x1UL << 16) + +#define RDMAQPC_TX_MASKH_RD_MSG_LOSS_ERR_FLAG (0x1UL << 19) +#define RDMAQPC_TX_MASKH_PKTCHK_RD_MSG_LOSS_ERR_CNT (0x1UL << 20) +#define RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_CNT (0x1UL << 21) +#define RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_FLAG (0x1UL << 22) +#define RDMAQPC_TX_MASKH_RECV_ERR_FLAG (0x1UL << 23) +#define RDMAQPC_TX_MASKH_RECV_READ_FLAG (0x1UL << 24) +#define RDMAQPC_TX_MASKH_RETRY_CQE_SQ_OPCODE (0x1UL << 25) +#define RDMAQPC_TX_MASKH_PACKAGE_ERR_FLAG (0x1UL << 17) + +#define RDMAQPC_RX_MASKL_QKEY (0x1UL << 3) +#define RDMAQPC_RX_MASKL_EPSN (0x1UL << 6) +#define RDMAQPC_RX_MASKL_ACK_CREDITS (0x1UL << 7) +#define RDMAQPC_RX_MASKL_RNR_TIMER (0x1UL << 22) +#define RDMAQPC_RX_MASKL_LOCAL_IP 
(0x1UL << 30) +#define RDMAQPC_RX_MASKL_DEST_MAC (0x1UL << 32) +#define RDMAQPC_RX_MASKL_NVMEOF_IOQ (0x1UL << 33) +#define RDMAQPC_RX_MASKL_INSERT_VLANTAG (0x1UL << 34) +#define RDMAQPC_RX_MASKL_PMTU (0x1UL << 35) +#define RDMAQPC_RX_MASKL_IPV4 (0x1UL << 37) +#define RDMAQPC_RX_MASKL_PD_ID (0x1UL << 38) +#define RDMAQPC_RX_MASKL_QP_STATE (0x1UL << 39) +#define RDMAQPC_RX_MASKL_DEST_QPN (0x1UL << 40) +#define RDMAQPC_RX_MASKL_FLOWLABLE (0x1UL << 41) +#define RDMAQPC_RX_MASKL_TTL (0x1UL << 42) +#define RDMAQPC_RX_MASKL_TOS (0x1UL << 43) +#define RDMAQPC_RX_MASKL_VLANTAG (0x1UL << 44) +#define RDMAQPC_RX_MASKL_NVMEOF_QID (0x1UL << 45) +#define RDMAQPC_RX_MASKL_NVMEOF_TGT (0x1UL << 45) +#define RDMAQPC_RX_MASKL_HDR_LEN (0x1UL << 47) +#define RDMAQPC_RX_MASKL_PKEY (0x1UL << 48) +#define RDMAQPC_RX_MASKL_SRC_PORT (0x1UL << 49) +#define RDMAQPC_RX_MASKL_IRD_SIZE (0x1UL << 52) +#define RDMAQPC_RX_MASKL_WRITE_EN (0x1UL << 55) +#define RDMAQPC_RX_MASKL_READ_EN (0x1UL << 56) +#define RDMAQPC_RX_MASKL_GQP_ID (0x1UL << 62) +#define RDMAQPC_RX_MASKL_WS_IDX (0x1UL << 63) + +#define RDMAQPC_RX_MASKH_DEST_IP (0x1UL << 4) +#define RDMAQPC_RX_MASKH_QUEUE_TC (0x1UL << 10) + +//QPC_TX stucture of DPU +#define RDMAQPC_TX_RETRY_CNT_S 8 +#define RDMAQPC_TX_RETRY_CNT GENMASK_ULL(10, 8) +#define RDMAQPC_TX_CUR_RETRY_CNT_S 11 +#define RDMAQPC_TX_CUR_RETRY_CNT GENMASK_ULL(13, 11) +#define RDMAQPC_TX_LAST_ACK_PSN_S 15 +#define RDMAQPC_TX_LAST_ACK_PSN GENMASK_ULL(38, 15) +#define RDMAQPC_TX_LSN_LOW1_S 63 +#define RDMAQPC_TX_LSN_LOW1 BIT_ULL(63) + +#define RDMAQPC_TX_LSN_HIGH23_S 0 +#define RDMAQPC_TX_LSN_HIGH23 GENMASK_ULL(22, 0) +#define RDMAQPC_TX_ACKCREDITS_S 23 +#define RDMAQPC_TX_ACKCREDITS GENMASK_ULL(27, 23) +#define RDMAQPC_TX_RNR_RETRY_THRESHOLD_S 29 +#define RDMAQPC_TX_RNR_RETRY_THRESHOLD GENMASK_ULL(33, 29) + +#define RDMAQPC_TX_SSN_S 44 +#define RDMAQPC_TX_SSN GENMASK_ULL(63, 44) + +#define RDMAQPC_TX_PSN_MAX_S 5 +#define RDMAQPC_TX_PSN_MAX GENMASK_ULL(28, 5) +#define RDMAQPC_TX_PSN_NEXT_S 29 +#define RDMAQPC_TX_PSN_NEXT GENMASK_ULL(52, 29) + +#define RDMAQPC_TX_HW_SQ_TAIL_HIGH_S 0 +#define RDMAQPC_TX_HW_SQ_TAIL_HIGH GENMASK_ULL(6, 0) +#define RDMAQPC_TX_LOCAL_ACK_TIMEOUT_S 57 +#define RDMAQPC_TX_LOCAL_ACK_TIMEOUT GENMASK_ULL(61, 57) + +#define RDMAQPC_TX_RNR_RETRY_CNT_S 6 +#define RDMAQPC_TX_RNR_RETRY_CNT GENMASK_ULL(8, 6) +#define RDMAQPC_TX_RNR_CUR_RETRY_CNT_S 9 +#define RDMAQPC_TX_RNR_CUR_RETRY_CNT GENMASK_ULL(11, 9) + +#define RDMAQPC_TX_RETRY_FLAG BIT_ULL(62) +#define RDMAQPC_TX_RNR_RETRY_FLAG BIT_ULL(28) +#define RDMAQPC_TX_READ_RETRY_FLAG BIT_ULL(14) +#define RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG GENMASK_ULL(5, 0) +#define RDMAQPC_TX_ERR_FLAG BIT_ULL(47) +#define RDMAQPC_TX_ACK_ERR_FLAG BIT_ULL(48) +#define RDMAQPC_TX_PACKAGE_ERR_FLAG BIT_ULL(27) +#define RDMAQPC_TX_RECV_ERR_FLAG GENMASK_ULL(47, 46) +#define RDMAQPC_TX_WIN_RADDR GENMASK_ULL(38, 30) +#define RDMAQPC_TX_RNR_RETRY_TIME_L GENMASK_ULL(63, 34) +#define RDMAQPC_TX_RNR_RETRY_TIME_H GENMASK(1, 0) +#define RDMAQPC_TX_LAST_ACK_WQE_OFFSET GENMASK_ULL(46, 16) +#define RDMAQPC_TX_HW_SQ_TAIL_UNA GENMASK_ULL(15, 0) +#define RDMAQPC_TX_RDWQE_PYLD_LENGTH_H GENMASK_ULL(26, 0) +#define RDMAQPC_TX_RDWQE_PYLD_LENGTH_L GENMASK_ULL(63, 59) +#define RDMAQPC_TX_RECV_READ_FLAG BIT_ULL(48) +#define RDMAQPC_TX_RECV_ERR_FLAG GENMASK_ULL(47, 46) +#define RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_FLAG BIT_ULL(45) +#define RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_CNT GENMASK_ULL(44, 43) +#define RDMAQPC_TX_RD_MSG_LOSS_ERR_FLAG BIT_ULL(40) +#define RDMAQPC_TX_PKTCHK_RD_MSG_LOSS_ERR_CNT 
GENMASK_ULL(42, 41) + +#define RDMAQPC_TX_HW_SQ_TAIL_UNA_S 0 +#define RDMAQPC_TX_HW_SQ_TAIL_UNA GENMASK_ULL(15, 0) + +#define RDMAQPC_TX_SERVICE_TYPE_S 0 +#define RDMAQPC_TX_SERVICE_TYPE GENMASK_ULL(2, 0) +#define RDMAQPC_TX_SQ_VMAP_S 3 +#define RDMAQPC_TX_SQ_VMAP BIT_ULL(3) +#define RDMAQPC_TX_SQ_LPBL_SIZE_S 4 +#define RDMAQPC_TX_SQ_LPBL_SIZE GENMASK_ULL(5, 4) +#define RDMAQPC_TX_IS_QP1_S 6 +#define RDMAQPC_TX_IS_QP1 BIT_ULL(6) +#define RDMAQPC_TX_IPV4_S 7 +#define RDMAQPC_TX_IPV4 BIT_ULL(7) +#define RDMAQPC_TX_FAST_REG_EN_S 8 +#define RDMAQPC_TX_FAST_REG_EN BIT_ULL(8) +#define RDMAQPC_TX_BIND_EN_S 9 +#define RDMAQPC_TX_BIND_EN BIT_ULL(9) +#define RDMAQPC_TX_INSERT_VLANTAG_S 10 +#define RDMAQPC_TX_INSERT_VLANTAG BIT_ULL(10) +#define RDMAQPC_TX_VLANTAG_S 11 +#define RDMAQPC_TX_VLANTAG GENMASK_ULL(26, 11) +#define RDMAQPC_TX_PD_INDEX_S 27 +#define RDMAQPC_TX_PD_INDEX GENMASK_ULL(50, 27) +#define RDMAQPC_TX_RSV_LKEY_EN_S 51 +#define RDMAQPC_TX_RSV_LKEY_EN BIT_ULL(51) +#define RDMAQPC_TX_ECN_EN_S 63 +#define RDMAQPC_TX_ECN_EN BIT_ULL(63) + +#define RDMAQPC_TX_DEST_IPADDR3_S 0 +#define RDMAQPC_TX_DEST_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_IPADDR2_S 32 +#define RDMAQPC_TX_DEST_IPADDR2 GENMASK_ULL(63, 32) +#define RDMAQPC_TX_DEST_IPADDR1_S 0 +#define RDMAQPC_TX_DEST_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_IPADDR0_S 32 +#define RDMAQPC_TX_DEST_IPADDR0 GENMASK_ULL(63, 32) + +#define RDMAQPC_TX_SRC_PORTNUM_S 0 +#define RDMAQPC_TX_SRC_PORTNUM GENMASK_ULL(15, 0) +#define RDMAQPC_TX_DEST_PORTNUM_S 16 +#define RDMAQPC_TX_DEST_PORTNUM GENMASK_ULL(31, 16) +#define RDMAQPC_TX_FLOWLABEL_S 32 +#define RDMAQPC_TX_FLOWLABEL GENMASK_ULL(51, 32) +#define RDMAQPC_TX_TTL_S 52 +#define RDMAQPC_TX_TTL GENMASK_ULL(59, 52) +#define RDMAQPC_TX_ROCE_TVER_S 60 +#define RDMAQPC_TX_ROCE_TVER GENMASK_ULL(63, 60) + +#define RDMAQPC_TX_QKEY_S 0 +#define RDMAQPC_TX_QKEY GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_QP_S 32 +#define RDMAQPC_TX_DEST_QP GENMASK_ULL(55, 32) +#define RDMAQPC_TX_ORD_SIZE_S 56 +#define RDMAQPC_TX_ORD_SIZE GENMASK_ULL(63, 56) + +#define RDMAQPC_TX_PKEY_S 0 +#define RDMAQPC_TX_PKEY GENMASK_ULL(15, 0) +#define RDMAQPC_TX_DEST_MAC_S 16 +#define RDMAQPC_TX_DEST_MAC GENMASK_ULL(63, 16) + +#define RDMAQPC_TX_LOCAL_IPADDR3_S 0 +#define RDMAQPC_TX_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_LOCAL_IPADDR2_S 32 +#define RDMAQPC_TX_LOCAL_IPADDR2 GENMASK_ULL(63, 32) +#define RDMAQPC_TX_LOCAL_IPADDR1_S 0 +#define RDMAQPC_TX_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_LOCAL_IPADDR0_S 32 +#define RDMAQPC_TX_LOCAL_IPADDR0 GENMASK_ULL(63, 32) + +#define RDMAQPC_TX_SRC_MAC_S 0 +#define RDMAQPC_TX_SRC_MAC GENMASK_ULL(47, 0) +#define RDMAQPC_TX_PMTU_S 48 +#define RDMAQPC_TX_PMTU GENMASK_ULL(50, 48) +#define RDMAQPC_TX_ACK_TIMEOUT_S 51 +#define RDMAQPC_TX_ACK_TIMEOUT GENMASK_ULL(55, 51) +#define RDMAQPC_TX_LOG_SQSIZE_S 56 +#define RDMAQPC_TX_LOG_SQSIZE GENMASK_ULL(59, 56) + +#define RDMAQPC_TX_CQN_S 0 +#define RDMAQPC_TX_CQN GENMASK_ULL(20, 0) +#define RDMAQPC_TX_NVMEOF_QID_S 21 +#define RDMAQPC_TX_NVMEOF_QID GENMASK_ULL(30, 21) +#define RDMAQPC_TX_IS_NVMEOF_TGT_S 31 +#define RDMAQPC_TX_IS_NVMEOF_TGT BIT_ULL(31) +#define RDMAQPC_TX_IS_NVMEOF_IOQ_S 32 +#define RDMAQPC_TX_IS_NVMEOF_IOQ BIT_ULL(32) +#define RDMAQPC_TX_DCQCN_ID_S 33 +#define RDMAQPC_TX_DCQCN_ID GENMASK_ULL(43, 33) +#define RDMAQPC_TX_DCQCN_EN_S 49 +#define RDMAQPC_TX_DCQCN_EN BIT_ULL(49) +#define RDMAQPC_TX_QUEUE_TC_S 50 +#define RDMAQPC_TX_QUEUE_TC GENMASK_ULL(52, 50) + +#define RDMAQPC_TX_QPN_S 0 +#define 
RDMAQPC_TX_QPN GENMASK_ULL(19, 0) +#define RDMAQPC_TX_TOS_S 50 +#define RDMAQPC_TX_TOS GENMASK_ULL(57, 50) +#define RDMAQPC_TX_VHCA_ID_LOW6_S 58 +#define RDMAQPC_TX_VHCA_ID_LOW6 GENMASK_ULL(63, 58) + +#define RDMAQPC_TX_VHCA_ID_HIGH4_S 0 +#define RDMAQPC_TX_VHCA_ID_HIGH4 GENMASK_ULL(3, 0) +#define RDMAQPC_TX_QP_FLOW_SET_S 4 +#define RDMAQPC_TX_QP_FLOW_SET GENMASK_ULL(16, 4) +#define RDMAQPC_TX_QPSTATE_S 17 +#define RDMAQPC_TX_QPSTATE GENMASK_ULL(19, 17) +#define RDMAQPC_TX_DEBUG_SET_S 20 +#define RDMAQPC_TX_DEBUG_SET GENMASK_ULL(29, 20) +#define RDMAQPC_TX_QP_GROUP_NUM_S 20 +#define RDMAQPC_TX_QP_GROUP_NUM GENMASK_ULL(30, 20) + +//QPC_RX stucture of DPU +#define RDMAQPC_RX_LAST_OPCODE_S 56 +#define RDMAQPC_RX_LAST_OPCODE GENMASK_ULL(63, 56) + +#define RDMAQPC_RX_EPSN_S 40 +#define RDMAQPC_RX_EPSN GENMASK_ULL(63, 40) + +#define RDMAQPC_RX_IRD_RXNUM_S 46 +#define RDMAQPC_RX_IRD_RXNUM GENMASK_ULL(54, 46) + +#define RDMAQPC_RX_LOCAL_IPADDR3_S 0 +#define RDMAQPC_RX_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_LOCAL_IPADDR2_S 32 +#define RDMAQPC_RX_LOCAL_IPADDR2 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_SRC_MAC_HIGH16_S 0 +#define RDMAQPC_RX_SRC_MAC_HIGH16 GENMASK_ULL(15, 0) +#define RDMAQPC_RX_DEST_MAC_S 16 +#define RDMAQPC_RX_DEST_MAC GENMASK_ULL(63, 16) + +#define RDMAQPC_RX_IS_NVMEOF_IOQ_S 0 +#define RDMAQPC_RX_IS_NVMEOF_IOQ BIT_ULL(0) +#define RDMAQPC_RX_INSERT_VLANTAG_S 1 +#define RDMAQPC_RX_INSERT_VLANTAG BIT_ULL(1) +#define RDMAQPC_RX_PMTU_S 2 +#define RDMAQPC_RX_PMTU GENMASK_ULL(4, 2) +#define RDMAQPC_RX_SERVICE_TYPE_S 5 +#define RDMAQPC_RX_SERVICE_TYPE GENMASK_ULL(7, 5) +#define RDMAQPC_RX_IPV4_S 8 +#define RDMAQPC_RX_IPV4 BIT_ULL(8) +#define RDMAQPC_RX_PD_INDEX_S 9 +#define RDMAQPC_RX_PD_INDEX GENMASK_ULL(28, 9) +#define RDMAQPC_RX_QPSTATE_S 29 +#define RDMAQPC_RX_QPSTATE GENMASK_ULL(31, 29) +#define RDMAQPC_RX_SRC_MAC_LOW32_S 32 +#define RDMAQPC_RX_SRC_MAC_LOW32 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_DEST_QP_HIGH12_S 0 +#define RDMAQPC_RX_DEST_QP_HIGH12 GENMASK_ULL(11, 0) +#define RDMAQPC_RX_FLOWLABEL_S 12 +#define RDMAQPC_RX_FLOWLABEL GENMASK_ULL(31, 12) +#define RDMAQPC_RX_TTL_S 32 +#define RDMAQPC_RX_TTL GENMASK_ULL(39, 32) +#define RDMAQPC_RX_TOS_S 40 +#define RDMAQPC_RX_TOS GENMASK_ULL(47, 40) +#define RDMAQPC_RX_VLANTAG_S 48 +#define RDMAQPC_RX_VLANTAG GENMASK_ULL(63, 48) + +#define RDMAQPC_RX_SRQN_S 0 +#define RDMAQPC_RX_SRQN GENMASK_ULL(18, 0) +#define RDMAQPC_RX_NVMEOF_QID_S 0 +#define RDMAQPC_RX_NVMEOF_QID GENMASK_ULL(9, 0) +#define RDMAQPC_RX_IS_NVMEOF_TGT_S 10 +#define RDMAQPC_RX_IS_NVMEOF_TGT BIT_ULL(10) + +#define RDMAQPC_RX_HDR_LEN_S 0 +#define RDMAQPC_RX_HDR_LEN GENMASK_ULL(9, 0) +#define RDMAQPC_RX_PKEY_S 32 +#define RDMAQPC_RX_PKEY GENMASK_ULL(47, 32) +#define RDMAQPC_RX_SRC_PORTNUM_S 48 +#define RDMAQPC_RX_SRC_PORTNUM GENMASK_ULL(63, 48) + +#define RDMAQPC_RX_WQE_SIGN_EN_S 1 +#define RDMAQPC_RX_WQE_SIGN_EN BIT_ULL(1) +#define RDMAQPC_RX_RQ_VMAP_S 2 +#define RDMAQPC_RX_RQ_VMAP BIT_ULL(2) +#define RDMAQPC_RX_IRD_SIZE_S 3 +#define RDMAQPC_RX_IRD_SIZE GENMASK_ULL(6, 3) +#define RDMAQPC_RX_LOG_RQSIZE_S 7 +#define RDMAQPC_RX_LOG_RQSIZE GENMASK_ULL(10, 7) +#define RDMAQPC_RX_SEND_EN_S 11 +#define RDMAQPC_RX_SEND_EN BIT_ULL(11) +#define RDMAQPC_RX_WRITE_EN_S 12 +#define RDMAQPC_RX_WRITE_EN BIT_ULL(12) +#define RDMAQPC_RX_READ_EN_S 13 +#define RDMAQPC_RX_READ_EN BIT_ULL(13) +#define RDMAQPC_RX_LOG_RQE_SIZE_S 14 +#define RDMAQPC_RX_LOG_RQE_SIZE GENMASK_ULL(16, 14) +#define RDMAQPC_RX_USE_SRQ_S 17 +#define RDMAQPC_RX_USE_SRQ BIT_ULL(17) +#define RDMAQPC_RX_CQN_S 18 
+#define RDMAQPC_RX_CQN GENMASK_ULL(38, 18) +#define RDMAQPC_RX_DEST_QP_LOW12_S 39 +#define RDMAQPC_RX_DEST_QP_LOW12 GENMASK_ULL(50, 39) +#define RDMAQPC_RX_RQ_LPBL_SIZE_S 51 +#define RDMAQPC_RX_RQ_LPBL_SIZE GENMASK_ULL(52, 51) +#define RDMAQPC_RX_RSV_LKEY_EN_S 53 +#define RDMAQPC_RX_RSV_LKEY_EN BIT_ULL(53) +#define RDMAQPC_RX_RNR_TIMER_S 58 +#define RDMAQPC_RX_RNR_TIMER GENMASK_ULL(62, 58) +#define RDMAQPC_RX_ACK_CREDITS_S 63 +#define RDMAQPC_RX_ACK_CREDITS BIT_ULL(63) + +#define RDMAQPC_RX_QP_GROUP_NUM_S 0 +#define RDMAQPC_RX_QP_GROUP_NUM GENMASK_ULL(10, 0) +#define RDMAQPC_RX_QP_FLOW_SET_S 11 +#define RDMAQPC_RX_QP_FLOW_SET GENMASK_ULL(23, 11) +#define RDMAQPC_RX_DEBUG_SET_S 40 +#define RDMAQPC_RX_DEBUG_SET GENMASK_ULL(49, 40) +#define RDMAQPC_RX_VHCA_ID_S 50 +#define RDMAQPC_RX_VHCA_ID GENMASK_ULL(59, 50) +#define RDMAQPC_RX_QUEUE_TC_S 60 +#define RDMAQPC_RX_QUEUE_TC GENMASK_ULL(62, 60) + +#define RDMAQPC_RX_DEST_IPADDR1_S 0 +#define RDMAQPC_RX_DEST_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_DEST_IPADDR0_S 32 +#define RDMAQPC_RX_DEST_IPADDR0 GENMASK_ULL(63, 32) +#define RDMAQPC_RX_DEST_IPADDR3_S 0 +#define RDMAQPC_RX_DEST_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_DEST_IPADDR2_S 32 +#define RDMAQPC_RX_DEST_IPADDR2 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_LOCAL_IPADDR1_S 0 +#define RDMAQPC_RX_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_LOCAL_IPADDR0_S 32 +#define RDMAQPC_RX_LOCAL_IPADDR0 GENMASK_ULL(63, 32) + +//QPC stucture +#define IRDMAQPC_DDP_VER_S 0 +#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0) +#define IRDMAQPC_IBRDENABLE_S 2 +#define IRDMAQPC_IBRDENABLE BIT_ULL(2) +#define IRDMAQPC_IPV4_S 3 +#define IRDMAQPC_IPV4 BIT_ULL(3) +#define IRDMAQPC_NONAGLE_S 4 +#define IRDMAQPC_NONAGLE BIT_ULL(4) +#define IRDMAQPC_INSERTVLANTAG_S 5 +#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5) +#define IRDMAQPC_ISQP1_S 6 +#define IRDMAQPC_ISQP1 BIT_ULL(6) +#define IRDMAQPC_TIMESTAMP_S 7 +#define IRDMAQPC_TIMESTAMP BIT_ULL(7) +#define IRDMAQPC_RQWQESIZE_S 8 +#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8) +#define IRDMAQPC_INSERTL2TAG2_S 11 +#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11) +#define IRDMAQPC_LIMIT_S 12 +#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12) + +#define IRDMAQPC_ECN_EN_S 14 +#define IRDMAQPC_ECN_EN BIT_ULL(14) +#define IRDMAQPC_DROPOOOSEG_S 15 +#define IRDMAQPC_DROPOOOSEG BIT_ULL(15) +#define IRDMAQPC_DUPACK_THRESH_S 16 +#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16) +#define IRDMAQPC_ERR_RQ_IDX_VALID_S 19 +#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19) +#define IRDMAQPC_DIS_VLAN_CHECKS_S 19 +#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19) +#define IRDMAQPC_DC_TCP_EN_S 25 +#define IRDMAQPC_DC_TCP_EN BIT_ULL(25) +#define IRDMAQPC_RCVTPHEN_S 28 +#define IRDMAQPC_RCVTPHEN BIT_ULL(28) +#define IRDMAQPC_XMITTPHEN_S 29 +#define IRDMAQPC_XMITTPHEN BIT_ULL(29) +#define IRDMAQPC_RQTPHEN_S 30 +#define IRDMAQPC_RQTPHEN BIT_ULL(30) +#define IRDMAQPC_SQTPHEN_S 31 +#define IRDMAQPC_SQTPHEN BIT_ULL(31) +#define IRDMAQPC_PPIDX_S 32 +#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32) +#define IRDMAQPC_PMENA_S 47 +#define IRDMAQPC_PMENA BIT_ULL(47) +#define IRDMAQPC_RDMAP_VER_S 62 +#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62) +#define IRDMAQPC_ROCE_TVER_S 60 +#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60) + +#define IRDMAQPC_SQADDR_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_SQADDR ZXDH_CQPHC_QPCTX + +#define IRDMAQPC_RQADDR_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_RQADDR ZXDH_CQPHC_QPCTX +#define IRDMAQPC_TTL_S 0 +#define IRDMAQPC_TTL GENMASK_ULL(7, 0) +#define IRDMAQPC_RQSIZE_S 8 +#define 
IRDMAQPC_RQSIZE GENMASK_ULL(11, 8) +#define IRDMAQPC_SQSIZE_S 12 +#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12) +#define IRDMAQPC_GEN1_SRCMACADDRIDX_S 16 +#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16) +#define IRDMAQPC_AVOIDSTRETCHACK_S 23 +#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23) +#define IRDMAQPC_TOS_S 24 +#define IRDMAQPC_TOS GENMASK_ULL(31, 24) +#define IRDMAQPC_SRCPORTNUM_S 32 +#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32) +#define IRDMAQPC_DESTPORTNUM_S 48 +#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48) +#define IRDMAQPC_DESTIPADDR0_S 32 +#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTIPADDR1_S 0 +#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0) +#define IRDMAQPC_DESTIPADDR2_S 32 +#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTIPADDR3_S 0 +#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDMSS_S 16 +#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16) +#define IRDMAQPC_SYN_RST_HANDLING_S 30 +#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30) +#define IRDMAQPC_VLANTAG_S 32 +#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32) +#define IRDMAQPC_ARPIDX_S 48 +#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48) +#define IRDMAQPC_FLOWLABEL_S 0 +#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0) +#define IRDMAQPC_WSCALE_S 20 +#define IRDMAQPC_WSCALE BIT_ULL(20) +#define IRDMAQPC_KEEPALIVE_S 21 +#define IRDMAQPC_KEEPALIVE BIT_ULL(21) +#define IRDMAQPC_IGNORE_TCP_OPT_S 22 +#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22) +#define IRDMAQPC_IGNORE_TCP_UNS_OPT_S 23 +#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23) +#define IRDMAQPC_TCPSTATE_S 28 +#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28) +#define IRDMAQPC_RCVSCALE_S 32 +#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32) +#define IRDMAQPC_SNDSCALE_S 40 +#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40) +#define IRDMAQPC_PDIDX_S 48 +#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48) +#define IRDMAQPC_PDIDXHI_S 20 +#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20) +#define IRDMAQPC_PKEY_S 32 +#define IRDMAQPC_PKEY GENMASK_ULL(47, 32) +#define IRDMAQPC_ACKCREDITS_S 20 +#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20) +#define IRDMAQPC_QKEY_S 32 +#define IRDMAQPC_QKEY GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTQP_S 0 +#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0) +#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES_S 16 +#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16) +#define IRDMAQPC_KEEPALIVE_INTERVAL_S 24 +#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24) +#define IRDMAQPC_TIMESTAMP_RECENT_S 0 +#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0) +#define IRDMAQPC_TIMESTAMP_AGE_S 32 +#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32) +#define IRDMAQPC_SNDNXT_S 0 +#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0) +#define IRDMAQPC_ISN_S 32 +#define IRDMAQPC_ISN GENMASK_ULL(55, 32) +#define IRDMAQPC_PSNNXT_S 0 +#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0) +#define IRDMAQPC_LSN_S 32 +#define IRDMAQPC_LSN GENMASK_ULL(55, 32) +#define IRDMAQPC_SNDWND_S 32 +#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32) +#define IRDMAQPC_RCVNXT_S 0 +#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0) +#define IRDMAQPC_EPSN_S 0 +#define IRDMAQPC_EPSN GENMASK_ULL(23, 0) +#define IRDMAQPC_RCVWND_S 32 +#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32) +#define IRDMAQPC_SNDMAX_S 0 +#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDUNA_S 32 +#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32) +#define IRDMAQPC_PSNMAX_S 0 +#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0) +#define IRDMAQPC_PSNUNA_S 32 +#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32) +#define IRDMAQPC_SRTT_S 0 
+#define IRDMAQPC_SRTT GENMASK_ULL(31, 0) +#define IRDMAQPC_RTTVAR_S 32 +#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32) +#define IRDMAQPC_SSTHRESH_S 0 +#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0) +#define IRDMAQPC_CWND_S 32 +#define IRDMAQPC_CWND GENMASK_ULL(63, 32) +#define IRDMAQPC_CWNDROCE_S 32 +#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32) +#define IRDMAQPC_SNDWL1_S 0 +#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDWL2_S 32 +#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32) +#define IRDMAQPC_ERR_RQ_IDX_S 32 +#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32) +#define IRDMAQPC_RTOMIN_S 57 +#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57) +#define IRDMAQPC_MAXSNDWND_S 0 +#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0) +#define IRDMAQPC_REXMIT_THRESH_S 48 +#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48) +#define IRDMAQPC_RNRNAK_THRESH_S 54 +#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54) +#define IRDMAQPC_TXCQNUM_S 0 +#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0) +#define IRDMAQPC_RXCQNUM_S 32 +#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32) +#define IRDMAQPC_STAT_INDEX_S 0 +#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0) +#define IRDMAQPC_Q2ADDR_S 8 +#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8) +#define IRDMAQPC_LASTBYTESENT_S 0 +#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0) +#define IRDMAQPC_MACADDRESS_S 16 +#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16) +#define IRDMAQPC_ORDSIZE_S 0 +#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0) + +#define IRDMAQPC_IRDSIZE_S 16 +#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16) + +#define IRDMAQPC_UDPRIVCQENABLE_S 19 +#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19) +#define IRDMAQPC_WRRDRSPOK_S 20 +#define IRDMAQPC_WRRDRSPOK BIT_ULL(20) +#define IRDMAQPC_RDOK_S 21 +#define IRDMAQPC_RDOK BIT_ULL(21) +#define IRDMAQPC_SNDMARKERS_S 22 +#define IRDMAQPC_SNDMARKERS BIT_ULL(22) +#define IRDMAQPC_DCQCNENABLE_S 22 +#define IRDMAQPC_DCQCNENABLE BIT_ULL(22) +#define IRDMAQPC_FW_CC_ENABLE_S 28 +#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28) +#define IRDMAQPC_RCVNOICRC_S 31 +#define IRDMAQPC_RCVNOICRC BIT_ULL(31) +#define IRDMAQPC_BINDEN_S 23 +#define IRDMAQPC_BINDEN BIT_ULL(23) +#define IRDMAQPC_FASTREGEN_S 24 +#define IRDMAQPC_FASTREGEN BIT_ULL(24) +#define IRDMAQPC_PRIVEN_S 25 +#define IRDMAQPC_PRIVEN BIT_ULL(25) +#define IRDMAQPC_TIMELYENABLE_S 27 +#define IRDMAQPC_TIMELYENABLE BIT_ULL(27) +#define IRDMAQPC_THIGH_S 52 +#define IRDMAQPC_THIGH GENMASK_ULL(63, 52) +#define IRDMAQPC_TLOW_S 32 +#define IRDMAQPC_TLOW GENMASK_ULL(39, 32) +#define IRDMAQPC_REMENDPOINTIDX_S 0 +#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0) +#define IRDMAQPC_USESTATSINSTANCE_S 26 +#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26) +#define IRDMAQPC_IWARPMODE_S 28 +#define IRDMAQPC_IWARPMODE BIT_ULL(28) +#define IRDMAQPC_RCVMARKERS_S 29 +#define IRDMAQPC_RCVMARKERS BIT_ULL(29) +#define IRDMAQPC_ALIGNHDRS_S 30 +#define IRDMAQPC_ALIGNHDRS BIT_ULL(30) +#define IRDMAQPC_RCVNOMPACRC_S 31 +#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31) +#define IRDMAQPC_RCVMARKOFFSET_S 32 +#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32) +#define IRDMAQPC_SNDMARKOFFSET_S 48 +#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48) + +#define IRDMAQPC_QPCOMPCTX_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_QPCOMPCTX ZXDH_CQPHC_QPCTX +#define IRDMAQPC_SQTPHVAL_S 0 +#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0) +#define IRDMAQPC_RQTPHVAL_S 8 +#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8) +#define IRDMAQPC_QSHANDLE_S 16 +#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16) +#define IRDMAQPC_EXCEPTION_LAN_QUEUE_S 32 +#define IRDMAQPC_EXCEPTION_LAN_QUEUE 
GENMASK_ULL(43, 32) +#define IRDMAQPC_LOCAL_IPADDR3_S 0 +#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define IRDMAQPC_LOCAL_IPADDR2_S 32 +#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32) +#define IRDMAQPC_LOCAL_IPADDR1_S 0 +#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define IRDMAQPC_LOCAL_IPADDR0_S 32 +#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32) +#define ZXDH_FW_VER_MINOR_S 0 +#define ZXDH_FW_VER_MINOR GENMASK_ULL(15, 0) +#define ZXDH_FW_VER_MAJOR_S 16 +#define ZXDH_FW_VER_MAJOR GENMASK_ULL(31, 16) +#define ZXDH_FEATURE_INFO_S 0 +#define ZXDH_FEATURE_INFO GENMASK_ULL(47, 0) +#define ZXDH_FEATURE_CNT_S 32 +#define ZXDH_FEATURE_CNT GENMASK_ULL(47, 32) +#define ZXDH_FEATURE_TYPE_S 48 +#define ZXDH_FEATURE_TYPE GENMASK_ULL(63, 48) +#define ZXDH_RSVD_S 41 +#define ZXDH_RSVD GENMASK_ULL(55, 41) + +#define IRDMAQPSQ_OPCODE_S 57 +#define IRDMAQPSQ_OPCODE GENMASK_ULL(62, 57) +#define IRDMAQPSQ_COPY_HOST_PBL_S 43 +#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43) +#define IRDMAQPSQ_ADDFRAGCNT_S 32 +#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(39, 32) +#define IRDMAQPSQ_UD_ADDFRAGCNT_S 29 +#define IRDMAQPSQ_UD_ADDFRAGCNT GENMASK_ULL(36, 29) +#define IRDMAQPSQ_PUSHWQE_S 56 +#define IRDMAQPSQ_PUSHWQE BIT_ULL(56) +#define IRDMAQPSQ_STREAMMODE_S 58 +#define IRDMAQPSQ_STREAMMODE BIT_ULL(58) +#define IRDMAQPSQ_WAITFORRCVPDU_S 59 +#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59) +#define IRDMAQPSQ_READFENCE_S 54 +#define IRDMAQPSQ_READFENCE BIT_ULL(54) +#define IRDMAQPSQ_LOCALFENCE_S 55 +#define IRDMAQPSQ_LOCALFENCE BIT_ULL(55) +#define IRDMAQPSQ_UDPHEADER_S 61 +#define IRDMAQPSQ_UDPHEADER BIT_ULL(61) +#define IRDMAQPSQ_L4LEN_S 42 +#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42) +#define IRDMAQPSQ_SIGCOMPL_S 56 +#define IRDMAQPSQ_SIGCOMPL BIT_ULL(56) +#define IRDMAQPSQ_SOLICITED_S 53 +#define IRDMAQPSQ_SOLICITED BIT_ULL(53) +#define IRDMAQPSQ_VALID_S 63 +#define IRDMAQPSQ_VALID BIT_ULL(63) + +#define IRDMAQPSQ_FRAG_TO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_FRAG_TO ZXDH_CQPHC_QPCTX +#define IRDMAQPSQ_FRAG_VALID_S 63 +#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63) +#define IRDMAQPSQ_FIRST_FRAG_VALID_S 0 +#define IRDMAQPSQ_FIRST_FRAG_VALID BIT_ULL(0) +#define IRDMAQPSQ_FIRST_FRAG_LEN_S 1 +#define IRDMAQPSQ_FIRST_FRAG_LEN GENMASK_ULL(31, 1) +#define IRDMAQPSQ_FIRST_FRAG_STAG_S 32 +#define IRDMAQPSQ_FIRST_FRAG_STAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_FRAG_LEN_S 32 +#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32) +#define IRDMAQPSQ_FRAG_STAG_S 0 +#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0) +#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0 +#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0) +#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32 +#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_REMSTAGINV_S 0 +#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0) +#define IRDMAQPSQ_DESTQKEY_S 0 +#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0) +#define IRDMAQPSQ_DESTQPN_S 32 +#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32) +#define IRDMAQPSQ_AHID_S 0 +#define IRDMAQPSQ_AHID GENMASK_ULL(18, 0) +#define IRDMAQPSQ_INLINEDATAFLAG_S 63 +#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(63) +#define IRDMAQPSQ_UD_INLINEDATAFLAG_S 50 +#define IRDMAQPSQ_UD_INLINEDATAFLAG BIT_ULL(50) +#define IRDMAQPSQ_WRITE_INLINEDATAFLAG_S 48 +#define IRDMAQPSQ_WRITE_INLINEDATAFLAG BIT_ULL(48) + +#define ZXDH_INLINE_VALID_S 7 +#define IRDMAQPSQ_INLINE_VALID_S 63 +#define IRDMAQPSQ_INLINE_VALID BIT_ULL(63) +#define IRDMAQPSQ_INLINEDATALEN_S 50 +#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(62, 55) +#define IRDMAQPSQ_UD_INLINEDATALEN_S 42 +#define 
IRDMAQPSQ_UD_INLINEDATALEN GENMASK_ULL(49, 42) +#define IRDMAQPSQ_WRITE_INLINEDATALEN_S 40 +#define IRDMAQPSQ_WRITE_INLINEDATALEN GENMASK_ULL(47, 40) +#define IRDMAQPSQ_IMMDATAFLAG_S 52 +#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(52) +#define IRDMAQPSQ_REPORTRTT_S 46 +#define IRDMAQPSQ_REPORTRTT BIT_ULL(46) + +#define IRDMAQPSQ_IMMDATA_VALID_S 63 +#define IRDMAQPSQ_IMMDATA_VALID BIT_ULL(63) +#define IRDMAQPSQ_IMMDATA_S 0 +#define IRDMAQPSQ_IMMDATA GENMASK_ULL(31, 0) +#define IRDMAQPSQ_REMSTAG_S 0 +#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_REMTO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_REMTO ZXDH_CQPHC_QPCTX + +#define IRDMAQPSQ_STAGRIGHTS_S 47 +#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(51, 47) +#define IRDMAQPSQ_VABASEDTO_S 53 +#define IRDMAQPSQ_VABASEDTO BIT_ULL(53) +#define IRDMAQPSQ_MEMWINDOWTYPE_S 52 +#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(52) + +#define IRDMAQPSQ_MWLEN_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_MWLEN ZXDH_CQPHC_QPCTX +#define IRDMAQPSQ_PARENTMRSTAG_S 32 +#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_MWSTAG_S 0 +#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_BASEVA_TO_FBO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_BASEVA_TO_FBO ZXDH_CQPHC_QPCTX + +#define IRDMAQPSQ_LOCSTAG_S 0 +#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_STAGKEY_S 0 +#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0) +#define IRDMAQPSQ_STAGINDEX_S 8 +#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8) +#define IRDMAQPSQ_COPYHOSTPBLS_S 43 +#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43) +#define IRDMAQPSQ_LPBLSIZE_S 40 +#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(41, 40) +#define IRDMAQPSQ_HPAGESIZE_S 43 +#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(46, 43) +#define IRDMAQPSQ_STAGLEN_S 0 +#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0) +#define IRDMAQPSQ_FIRSTPMPBLIDXLO_S 46 +#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(61, 46) +#define IRDMAQPSQ_FIRSTPMPBLIDXHI_S 52 +#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(63, 52) +#define IRDMAQPSQ_PBLADDR_S 51 +#define IRDMAQPSQ_PBLADDR GENMASK_ULL(51, 0) + +//QP RQ WQE common fields +#define IRDMAQPRQ_SIGNATURE_S 16 +#define IRDMAQPRQ_SIGNATURE GENMASK_ULL(31, 16) + +#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S +#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT + +#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S +#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID + +#define IRDMAQPRQ_COMPLCTX_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPRQ_COMPLCTX ZXDH_CQPHC_QPCTX + +#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S +#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN + +#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S +#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG + +#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S +#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO + +#define IRDMAQPSRQ_RSV GENMASK_ULL(63, 40) +#define IRDMAQPSRQ_VALID_SGE_NUM GENMASK_ULL(39, 32) +#define IRDMAQPSRQ_SIGNATURE GENMASK_ULL(31, 24) +#define IRDMAQPSRQ_NEXT_WQE_INDEX GENMASK_ULL(15, 0) +#define IRDMAQPSRQ_START_PADDING BIT_ULL(63) +#define IRDMAQPSRQ_FRAG_LEN GENMASK_ULL(62, 32) +#define IRDMAQPSRQ_FRAG_STAG GENMASK_ULL(31, 0) + +//QP RQ DBSA fields +#define IRDMAQPDBSA_RQ_POLARITY_S 15 +#define IRDMAQPDBSA_RQ_POLARITY BIT_ULL(15) +#define IRDMAQPDBSA_RQ_SW_HEAD_S 0 +#define IRDMAQPDBSA_RQ_SW_HEAD GENMASK_ULL(14, 0) + +#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26) +#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27) +#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28) + +#define ZXDH_QUERY_FPM_MAX_QPS_S 0 +#define ZXDH_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0) +#define ZXDH_QUERY_FPM_MAX_CQS_S 0 
+#define ZXDH_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0) +#define ZXDH_QUERY_FPM_FIRST_PE_SD_INDEX_S 0 +#define ZXDH_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0) +#define ZXDH_QUERY_FPM_MAX_PE_SDS_S 32 +#define ZXDH_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32) + +#define ZXDH_QUERY_FPM_MAX_CEQS_S 0 +#define ZXDH_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0) +#define ZXDH_QUERY_FPM_XFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_Q1BLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_HTMULTIPLIER_S 16 +#define ZXDH_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16) +#define ZXDH_QUERY_FPM_TIMERBUCKET_S 32 +#define ZXDH_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32) +#define ZXDH_QUERY_FPM_RRFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_RRFFLBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_OOISCFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_SHMC_PAGE_ALLOCATED_HMC_FN_ID_S 0 +#define ZXDH_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(5, 0) + +#define IRDMATX_WIN_START_PSN GENMASK_ULL(23, 0) + +//qpc table index +#define ZXDH_QPC_RETY_COUNT_BYTE_OFFSET 0x00 +#define ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET 0x00 +#define ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET 0x00 +#define ZXDH_QPC_READ_RETRY_FALG_BYTE_OFFSET 0x00 +#define ZXDH_QPC_RNR_RETRY_FALG_BYTE_OFFSET 0x08 +#define ZXDH_QPC_SEND_PSN_BYTE_OFFSET 0x18 +#define ZXDH_QPC_RETRY_FALG_BYTE_OFFSET 0x20 +#define ZXDH_QPC_ACK_ERR_FLAG_BYTE_OFFSET 0x28 +#define ZXDH_QPC_ERR_FLAG_BYTE_OFFSET 0x28 +#define ZXDH_QPC_PACKAGE_ERR_FLAG_BYTE_OFFSET 0x30 +#define ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET 0x38 +#define ZXDH_QPC_SEND_EPSN_BYTE_OFFSET 0x110 +#define ZXDH_QPC_RECV_ERR_FLAG_BYTE_OFFSET 0x30 +#define ZXDH_QPC_TX_WIN_RADDR_BYTE_OFFSET 0x30 +#define ZXDH_QPC_RNR_RETRY_TIME_L_BYTE_OFFSET 0x08 +#define ZXDH_QPC_RNR_RETRY_TIME_H_BYTE_OFFSET 0x10 +#define ZXDH_QPC_RNR_RETRY_THRESHOLD_BYTE_OFFSET 0x08 + +#define ZXDH_TX_WIN_START_PSN_BYTE_OFFSET 0x00 + +//Flow Control Algorithms +/*DCQCN*/ +#define ZXDH_DCQCN_NP_CNP_DSCP GENMASK_ULL(7, 2) +#define ZXDH_DCQCN_NP_CNP_PRIO GENMASK_ULL(2, 0) +#define ZXDH_DCQCN_NP_CNP_PRIO_MODE BIT_ULL(0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X GENMASK_ULL(15, 0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y GENMASK_ULL(7, 0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX GENMASK_ULL(8, 0) +#define ZXDH_DCQCN_PRG_TIME_RESET GENMASK_ULL(31, 16) +#define ZXDH_DCQCN_RPG_CLAMP_TGT_RATE BIT_ULL(0) +#define ZXDH_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC BIT_ULL(0) +#define ZXDH_DCQCN_RP_DCE_TCP_RTT GENMASK_ULL(31, 16) +#define ZXDH_DCQCN_DCE_TCP_G GENMASK_ULL(31, 16) +#define ZXDH_DCQCN_RPG_GD GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_INITIAL_ALPHA_VALUE GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_MIN_DEC_FAC GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_THRESHOLD GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_RATIO_INCREASE BIT_ULL(0) +#define ZXDH_DCQCN_RPG_AI_RATIO GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_HAI_RATIO GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_BYTE_RESET GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_AI_RATE GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_HAI_RATE GENMASK_ULL(31, 0) +#define ZXDH_RPG_MAX_RATE GENMASK_ULL(31, 0) +#define ZXDH_RPG_MIN_RATE GENMASK_ULL(31, 0) +/*RTT*/ +#define ZXDH_RTT_VF_DELTA GENMASK_ULL(31, 0) + +#define ZXDH_GET_CURRENT_AEQ_ELEM(_aeq) \ + 
((_aeq)->aeqe_base[ZXDH_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf) + +#define ZXDH_GET_CURRENT_CEQ_ELEM(_ceq) \ + ((_ceq)->ceqe_base[ZXDH_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf) + +#define ZXDH_GET_CEQ_ELEM_AT_POS(_ceq, _pos) ((_ceq)->ceqe_base[_pos].buf) + +#define ZXDH_RING_GET_NEXT_TAIL(_ring, _idx) \ + (((_ring).tail + (_idx)) % (_ring).size) + +#define ZXDH_GET_CURRENT_CQ_ELEM(_cq) \ + ((_cq)->cq_base[ZXDH_RING_CURRENT_HEAD((_cq)->cq_ring)].buf) +#define ZXDH_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \ + (((struct zxdh_extended_cqe \ + *)((_cq)->cq_base))[ZXDH_RING_CURRENT_HEAD((_cq)->cq_ring)] \ + .buf) + +#define ZXDH_RING_INIT(_ring, _size) \ + { \ + (_ring).head = 0; \ + (_ring).tail = 0; \ + (_ring).size = (_size); \ + } +#define ZXDH_RING_SIZE(_ring) ((_ring).size) +#define ZXDH_RING_CURRENT_HEAD(_ring) ((_ring).head) +#define ZXDH_RING_CURRENT_TAIL(_ring) ((_ring).tail) + +#define ZXDH_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!ZXDH_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < size) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_SQ_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!ZXDH_SQ_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < \ + (size - 256)) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \ + (_ring).head = ((_ring).head + (_count)) % (_ring).size + +#define ZXDH_RING_MOVE_TAIL(_ring) \ + (_ring).tail = ((_ring).tail + 1) % (_ring).size + +#define ZXDH_RING_MOVE_HEAD_NOCHECK(_ring) \ + (_ring).head = ((_ring).head + 1) % (_ring).size + +#define ZXDH_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \ + (_ring).tail = ((_ring).tail + (_count)) % (_ring).size + +#define ZXDH_RING_SET_TAIL(_ring, _pos) (_ring).tail = (_pos) % (_ring).size + +#define ZXDH_RING_FULL_ERR(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 1))) + +#define ZXDH_ERR_RING_FULL2(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 2))) + +#define ZXDH_ERR_RING_FULL3(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 3))) + +#define ZXDH_SQ_RING_FULL_ERR(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 257))) + +#define ZXDH_ERR_SQ_RING_FULL2(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 258))) +#define ZXDH_ERR_SQ_RING_FULL3(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 259))) +#define ZXDH_RING_MORE_WORK(_ring) ((ZXDH_RING_USED_QUANTA(_ring) != 0)) + +#define ZXDH_RING_USED_QUANTA(_ring) \ + ((((_ring).head + (_ring).size - (_ring).tail) % (_ring).size)) + +#define ZXDH_RING_FREE_QUANTA(_ring) \ + (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 1)) + +#define ZXDH_SQ_RING_FREE_QUANTA(_ring) \ + (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 257)) + +#define ZXDH_ATOMIC_RING_MOVE_HEAD(_ring, index, 
_retcode) \ + { \ + index = ZXDH_RING_CURRENT_HEAD(_ring); \ + ZXDH_RING_MOVE_HEAD(_ring, _retcode); \ + } + +#define ZXDH_GET_QPC_ITEM(type, qp_ctx, index, name) \ + ({ \ + type ___t; \ + u64 temp; \ + get_64bit_val(qp_ctx, index, &temp); \ + ___t = (type)FIELD_GET(name, temp); \ + ___t; \ + }) + +enum zxdh_qp_wqe_size { + ZXDH_WQE_SIZE_32 = 32, + ZXDH_WQE_SIZE_64 = 64, + ZXDH_WQE_SIZE_96 = 96, + ZXDH_WQE_SIZE_128 = 128, + ZXDH_WQE_SIZE_256 = 256, +}; + +enum zxdh_cq_wqe_size { + ZXDH_CQE_SIZE_64 = 0, + ZXDH_CQE_SIZE_128 = 1, + ZXDH_CQE_SIZE_RESV = 2, +}; + +enum zxdh_cqc_set_field_mask { + ZXDH_CQC_SET_LPBLE_SIZE = 1 << 5, + ZXDH_CQC_SET_CQ_STATE = 1 << 8, + ZXDH_CQC_SET_CQ_SIZE = 1 << 11, + ZXDH_CQC_SET_CQ_ADDR = 1 << 13, +}; + +enum zxdh_cqc_set_cq_moderation { + ZXDH_CQC_SET_CQ_COUNT_AND_PERIOD = 1 << 1, +}; + +#define ZXDH_CQC_SET_FIELD_ALL 0xffff +#define ZXDH_CQC_SET_FIELD_RESIZE \ + (ZXDH_CQC_SET_LPBLE_SIZE | ZXDH_CQC_SET_CQ_SIZE | ZXDH_CQC_SET_CQ_ADDR) +#define ZXDH_CQC_SET_FIELD_MODIFY ZXDH_CQC_SET_CQ_COUNT_AND_PERIOD + +enum zxdh_ws_node_op { + ZXDH_ADD_NODE = 0, + ZXDH_MODIFY_NODE, + ZXDH_DEL_NODE, +}; + +enum { + ZXDH_Q_ALIGNMENT_M = (128 - 1), + ZXDH_AEQ_ALIGNMENT_M = (256 - 1), + ZXDH_Q2_ALIGNMENT_M = (256 - 1), + ZXDH_CEQ_ALIGNMENT_M = (256 - 1), + ZXDH_CQ0_ALIGNMENT_M = (256 - 1), + ZXDH_HOST_CTX_ALIGNMENT_M = (4 - 1), + ZXDH_SHADOWAREA_M = (128 - 1), + ZXDH_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1), + ZXDH_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1), +}; + +enum zxdh_alignment { + ZXDH_CQP_ALIGNMENT = 0x200, + ZXDH_AEQ_ALIGNMENT = 0x100, + ZXDH_CEQ_ALIGNMENT = 0x100, + ZXDH_CQ0_ALIGNMENT = 0x100, + ZXDH_SD_BUF_ALIGNMENT = 0x80, + ZXDH_FEATURE_BUF_ALIGNMENT = 0x10, + ZXDH_QPC_ALIGNMENT = 0x20, + ZXDH_CQC_ALIGNMENT = 0x20, + ZXDH_CEQC_ALIGNMENT = 0x20, + ZXDH_AEQC_ALIGNMENT = 0x20, + ZXDH_SRQC_ALIGNMENT = 0x20, +}; + +enum icrdma_protocol_used { + ICRDMA_ANY_PROTOCOL = 0, + ICRDMA_IWARP_PROTOCOL_ONLY = 1, + ICRDMA_ROCE_PROTOCOL_ONLY = 2, +}; + +/** + * set_64bit_val - set 64 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val) +{ + wqe_words[byte_index >> 3] = cpu_to_le64(val); +} + +/** + * set_32bit_val - set 32 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val) +{ + wqe_words[byte_index >> 2] = cpu_to_le32(val); +} + +/** + * set_16bit_val - set 16 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_16bit_val(__le16 *wqe_words, u32 byte_index, u16 val) +{ + wqe_words[byte_index >> 1] = cpu_to_le16(val); +} + +/** + * get_64bit_val - read 64 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to read from + * @val: read value + **/ +static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val) +{ + *val = le64_to_cpu(wqe_words[byte_index >> 3]); +} + +/** + * get_32bit_val - read 32 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to reaad from + * @val: return 32 bit value + **/ +static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val) +{ + *val = le32_to_cpu(wqe_words[byte_index >> 2]); +} +#endif /* ZXDH_DEFS_H */ diff --git a/src/rdma/src/distro_ver.h b/src/rdma/src/distro_ver.h new file mode 100644 index 
0000000000000000000000000000000000000000..fd98f66c71f71a8f4af2a341d48dca7ba85ebc2d --- /dev/null +++ b/src/rdma/src/distro_ver.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef DISTRO_VER_H +#define DISTRO_VER_H + +#if defined(RHEL_RELEASE_CODE) +#if (RHEL_RELEASE_VERSION(8, 5) == RHEL_RELEASE_CODE) +#define RHEL_8_5 +#endif + +#if (RHEL_RELEASE_VERSION(8, 4) == RHEL_RELEASE_CODE) +#define RHEL_8_4 +#endif + +#if (RHEL_RELEASE_VERSION(8, 3) == RHEL_RELEASE_CODE) +#define RHEL_8_3 +#endif + +#if (RHEL_RELEASE_VERSION(7, 9) == RHEL_RELEASE_CODE) +#define RHEL_7_9 +#endif + +#if (RHEL_RELEASE_VERSION(8, 2) == RHEL_RELEASE_CODE) +#define RHEL_8_2 +#endif + +#if (RHEL_RELEASE_VERSION(8, 1) == RHEL_RELEASE_CODE) +#define RHEL_8_1 +#endif + +#if (RHEL_RELEASE_VERSION(7, 8) == RHEL_RELEASE_CODE) +#define RHEL_7_8 +#endif + +#if (RHEL_RELEASE_VERSION(7, 7) == RHEL_RELEASE_CODE) +#define RHEL_7_7 +#endif + +#if (RHEL_RELEASE_VERSION(8, 0) == RHEL_RELEASE_CODE) +#define RHEL_8_0 +#endif + +#if (RHEL_RELEASE_VERSION(7, 6) == RHEL_RELEASE_CODE) +#define RHEL_7_6 +#endif + +#if (RHEL_RELEASE_VERSION(7, 5) == RHEL_RELEASE_CODE) +#define RHEL_7_5 +#endif + +#if (RHEL_RELEASE_VERSION(7, 4) == RHEL_RELEASE_CODE) +#define RHEL_7_4 +#endif + +#if (RHEL_RELEASE_VERSION(7, 2) == RHEL_RELEASE_CODE) +#define RHEL_7_2 +#endif + +#endif /* RHEL_RELEASE_CODE */ + +#ifdef CONFIG_SUSE_KERNEL +#ifndef SLE_VERSION +#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c) +#endif +#define SLE_LOCALVERSION(a, b, c) KERNEL_VERSION(a, b, c) + +#if (KERNEL_VERSION(4, 12, 14) == LINUX_VERSION_CODE && \ + (KERNEL_VERSION(94, 41, 0) == SLE_LOCALVERSION_CODE || \ + (KERNEL_VERSION(95, 0, 0) <= SLE_LOCALVERSION_CODE && \ + KERNEL_VERSION(96, 0, 0) > SLE_LOCALVERSION_CODE))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. */ +#define SLE_VERSION_CODE SLE_VERSION(12, 4, 0) +#define SLES_12_SP_4 +#elif (KERNEL_VERSION(4, 12, 14) == LINUX_VERSION_CODE && \ + KERNEL_VERSION(25, 23, 0) <= SLE_LOCALVERSION_CODE) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15, 1, 0) +#define SLES_15_SP_1 +#endif + +#if (KERNEL_VERSION(4, 12, 14) == LINUX_VERSION_CODE && \ + (KERNEL_VERSION(23, 0, 0) == SLE_LOCALVERSION_CODE || \ + KERNEL_VERSION(2, 0, 0) == SLE_LOCALVERSION_CODE || \ + KERNEL_VERSION(136, 0, 0) == SLE_LOCALVERSION_CODE || \ + (KERNEL_VERSION(25, 0, 0) <= SLE_LOCALVERSION_CODE && \ + KERNEL_VERSION(25, 23, 0) > SLE_LOCALVERSION_CODE))) +#define SLE_VERSION_CODE SLE_VERSION(15, 0, 0) +#define SLES_15 +#endif + +#if KERNEL_VERSION(5, 3, 18) <= LINUX_VERSION_CODE +#if KERNEL_VERSION(46, 0, 0) <= SLE_LOCALVERSION_CODE +#define SLES_15_SP_3 +#else +#define SLE_VERSION_CODE SLE_VERSION(15, 2, 0) +#define SLES_15_SP_2 +#endif +#endif + +#if ((KERNEL_VERSION(4, 4, 73) == LINUX_VERSION_CODE || \ + KERNEL_VERSION(4, 4, 82) == LINUX_VERSION_CODE || \ + KERNEL_VERSION(4, 4, 92) == LINUX_VERSION_CODE) || \ + (KERNEL_VERSION(4, 4, 103) == LINUX_VERSION_CODE && \ + (KERNEL_VERSION(6, 33, 0) == SLE_LOCALVERSION_CODE || \ + KERNEL_VERSION(6, 38, 0) == SLE_LOCALVERSION_CODE)) || \ + (KERNEL_VERSION(4, 4, 114) <= LINUX_VERSION_CODE && \ + KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE && \ + KERNEL_VERSION(94, 0, 0) <= SLE_LOCALVERSION_CODE && \ + KERNEL_VERSION(95, 0, 0) > SLE_LOCALVERSION_CODE)) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. 
+ * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0) +#define SLES_12_SP_3 +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ + +#endif /* CONFIG_SUSE_KERENL */ + +#ifdef UTS_UBUNTU_RELEASE_ABI +#define UBUNTU_VERSION_CODE \ + (((~0xFF & LINUX_VERSION_CODE) << 8) + UTS_UBUNTU_RELEASE_ABI) + +#define UBUNTU_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, 0) << 8) + (d)) + +#if (UBUNTU_VERSION(5, 15, 0, 94) <= UBUNTU_VERSION_CODE) +#define UBUNTU_220404 +#elif (UBUNTU_VERSION(5, 13, 0, 28) <= UBUNTU_VERSION_CODE) +#define UBUNTU_200404 +#elif (UBUNTU_VERSION(5, 11, 0, 27) <= UBUNTU_VERSION_CODE) +#define UBUNTU_200403 +#elif (UBUNTU_VERSION(5, 8, 0, 48) <= UBUNTU_VERSION_CODE) +#define UBUNTU_200402 +#elif (UBUNTU_VERSION(5, 4, 0, 26) <= UBUNTU_VERSION_CODE) +#define UBUNTU_2004 +#else +#define UBUNTU_1804 +#endif +#endif /* UTS_UBUNTU_RELEASE_ABI */ + +#endif /* DISTRO_VER_H */ diff --git a/src/rdma/src/hmc.c b/src/rdma/src/hmc.c new file mode 100644 index 0000000000000000000000000000000000000000..55aac3e21162c970a397dceace7aba1b28cdd80b --- /dev/null +++ b/src/rdma/src/hmc.c @@ -0,0 +1,877 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" + +extern enum zxdh_hmc_rsrc_type iw_hmc_obj_types[ZXDH_HMC_IW_TXWINDOW + 1]; + +/** + * zxdh_sc_create_hmc_obj - allocate backing store for hmc objects + * @dev: pointer to the device structure + * @info: pointer to zxdh_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. 
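+ * Return: 0 on success, -ENOMEM if a backing page cannot be allocated.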
+ */ +int zxdh_sc_create_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_create_obj_info *info) +{ + struct zxdh_hmc_sd_entry *sd_entry; + u32 sd_lmt = 0; + u32 i = 0, cnt = 0; + u64 fpm_addr = 0, fpm_limit = 0; + struct zxdh_hw *hw = dev->hw; + struct zxdh_dma_mem dma_mem = {}; + u64 alloc_len = 0; + struct stPteRequest ptMmuMmapCfg = {}; + + memset(&ptMmuMmapCfg, 0, sizeof(ptMmuMmapCfg)); + + fpm_addr = info->hmc_info->hmc_obj[info->rsrc_type].base; + switch (info->rsrc_type) { + case ZXDH_HMC_IW_QP: + cnt = dev->hmc_pf_manager_info.total_qp_cnt; + break; + case ZXDH_HMC_IW_CQ: + cnt = dev->hmc_pf_manager_info.total_cq_cnt; + break; + case ZXDH_HMC_IW_SRQ: + cnt = dev->hmc_pf_manager_info.total_srq_cnt; + break; + case ZXDH_HMC_IW_AH: + cnt = dev->hmc_pf_manager_info.total_ah_cnt; + break; + case ZXDH_HMC_IW_MR: + cnt = dev->hmc_pf_manager_info.total_mrte_cnt; + break; + default: + cnt = info->hmc_info->hmc_obj[info->rsrc_type].cnt; + break; + } + fpm_limit = info->hmc_info->hmc_obj[info->rsrc_type].size * cnt; + fpm_limit = ALIGN(fpm_limit, ZXDH_HMC_DIRECT_BP_SIZE); + + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + + for (i = 0; i < sd_lmt - 1; i++) { // 满足2M空间 + sd_entry = &info->hmc_info->sd_table.sd_entry[info->add_sd_cnt]; + + alloc_len = ZXDH_HMC_DIRECT_BP_SIZE; //按实际2M分配空间 + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + + if (!dma_mem.va) + return -ENOMEM; + + memset(dma_mem.va, 0, alloc_len); + // ********************调用SMMU接口*********************** + ptMmuMmapCfg.uddPhyAddr = dma_mem.pa; + ptMmuMmapCfg.uddVirAddr = fpm_addr; + ptMmuMmapCfg.uddSize = alloc_len; + ptMmuMmapCfg.udStreamid = dev->hmc_fn_id; + // bspSmmuSetPTE(&ptMmuMmapCfg,dev); // for Crash + ptMmuMmapCfg.udRWFlag = 0x03; + zxdh_smmu_set_pte(&ptMmuMmapCfg, dev); + pr_info("smmu sid:%d pte iova:0x%llx pa:0x%llx\n", + dev->hmc_fn_id, fpm_addr, dma_mem.pa); + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + sd_entry->u.bp.sd_pd_index = info->add_sd_cnt; + info->hmc_info->sd_indexes[info->add_sd_cnt] = + (u16)info->add_sd_cnt; + sd_entry->valid = true; + fpm_addr = fpm_addr + alloc_len; + info->add_sd_cnt++; + } + + return 0; +} + +static int zxdh_pf2vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_vfdev *vf_dev, u32 rsrc_type) +{ + struct zxdh_hmc_sd_entry *sd_entry = NULL; + struct zxdh_dma_mem dma_mem = {}; + u32 pble_hmc_comm_index = 0, pages = 0; + u32 unallocated_pble = 0; + u64 alloc_len = 0, size = 0; + u64 next_fpm_addr = 0, fpm_base_addr = 0; + u32 pd_idx = 0, rel_pd_idx = 0; + struct zxdh_hmc_info *hmc_info = &vf_dev->hmc_info; + + struct stPteRequest ptMmuMmapCfg = {}; + + if (rsrc_type == ZXDH_HMC_IW_PBLE) { + pble_hmc_comm_index = hmc_info->pble_hmc_index; + unallocated_pble = vf_dev->pbleq_unallocated_pble; + fpm_base_addr = vf_dev->pbleq_fpm_base_addr; + next_fpm_addr = vf_dev->pbleq_next_fpm_addr; + } else if (rsrc_type == ZXDH_HMC_IW_PBLE_MR) { + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + unallocated_pble = vf_dev->pblemr_unallocated_pble; + fpm_base_addr = vf_dev->pblemr_fpm_base_addr; + next_fpm_addr = vf_dev->pblemr_next_fpm_addr; + } + + if (unallocated_pble < PBLE_PER_PAGE) + return -ENOMEM; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + pd_idx = (u32)((next_fpm_addr - fpm_base_addr) / + ZXDH_HMC_PAGED_BP_SIZE); //4096 + rel_pd_idx = (pd_idx % ZXDH_HMC_PD_CNT_IN_SD); // 512 + pages = (rel_pd_idx) ? 
(ZXDH_HMC_PD_CNT_IN_SD - rel_pd_idx) : + ZXDH_HMC_PD_CNT_IN_SD; + + pages = min(pages, + unallocated_pble >> PBLE_512_SHIFT); // PBLE_512_SHIFT==9 + + if (!sd_entry->valid) { + alloc_len = pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(dev->hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + ptMmuMmapCfg.uddPhyAddr = dma_mem.pa; + ptMmuMmapCfg.uddVirAddr = next_fpm_addr; + ptMmuMmapCfg.uddSize = alloc_len; + ptMmuMmapCfg.udStreamid = dev->hmc_fn_id; // 这个SID后续需要修改 + ptMmuMmapCfg.udRWFlag = 0x03; + // 这里调用SMMU接口 + zxdh_smmu_set_pte(&ptMmuMmapCfg, dev); + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + hmc_info->sd_table.use_cnt = pble_hmc_comm_index; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + + sd_entry->valid = true; + size = pages << HMC_PAGED_BP_SHIFT; + if (rsrc_type == ZXDH_HMC_IW_PBLE) { + vf_dev->pbleq_next_fpm_addr += size; + vf_dev->pbleq_unallocated_pble -= (u32)(size >> 3); + } else { + vf_dev->pblemr_next_fpm_addr += size; + vf_dev->pblemr_unallocated_pble -= (u32)(size >> 3); + } + + return 0; +} + +int zxdh_vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages) +{ + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem dma_mem = {}; + struct zxdh_pci_f *rf; + u64 alloc_len; + u32 pble_hmc_comm_index = 0, cnt = 0; + struct zxdh_hw *hw = pble_rsrc->dev->hw; + u64 pa = 0; + void *va = NULL; + struct zxdh_dma_write32_date dma_data = {}; + struct zxdh_src_copy_dest src_dest = {}; + int status; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + if (pble_rsrc->pble_type == PBLE_QUEUE) + pble_hmc_comm_index = hmc_info->pble_hmc_index; + else + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + + if (!sd_entry->valid) { + if (!dev->hmc_use_dpu_ddr) { + va = kmalloc(8, GFP_KERNEL); + if (!va) + return -ENOMEM; + memset((void *)va, 0, 8); + pa = __pa(va); + dma_data.num = 1; + dma_data.addrbuf[0] = + C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE( + dev->vhca_id); + dma_data.databuf[0] = 0; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + + if (pble_rsrc->pble_type == PBLE_QUEUE) { + status = zxdh_sc_send_mailbox_cmd( + dev, ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE, 0, + 0, 0, rf->vf_id); + } else { + status = zxdh_sc_send_mailbox_cmd( + dev, ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE, + 0, 0, 0, rf->vf_id); + } + + if (status) { + kfree(va); + return status; + } + + src_dest.src = dma_data.addrbuf[0]; + src_dest.dest = pa; + src_dest.len = 0x08; + do { + zxdh_cqp_rdma_readreg_cmd(dev, &src_dest); + if (cnt++ > dev->hw_attrs.max_done_count) + return 0; + udelay(dev->hw_attrs.max_sleep_count * 2); + } while (!(*(u64 *)va)); + kfree(va); + } + + alloc_len = (u64)pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + if (pble_rsrc->pble_type == PBLE_QUEUE) + hmc_info->pble_hmc_index++; + else + hmc_info->pble_mr_hmc_index++; + + hmc_info->sd_table.use_cnt++; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + return 0; +} + 
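/*
 * Illustrative sketch (not part of the driver): the page accounting that
 * zxdh_pf2vf_add_pble_hmc_obj() above performs, pulled out as a standalone
 * helper. It relies on the driver's constants ZXDH_HMC_PAGED_BP_SIZE
 * (4 KB paged backing pages) and ZXDH_HMC_PD_CNT_IN_SD (512 page
 * descriptors per 2 MB segment descriptor), plus the 8-byte PBLE size
 * implied by the "size >> 3" update. The helper name is hypothetical;
 * only the arithmetic mirrors the code above.
 */
static u32 pble_pages_to_alloc_sketch(u64 fpm_base_addr, u64 next_fpm_addr,
				      u32 unallocated_pble)
{
	/* Index of the next free 4 KB page inside the FPM window... */
	u32 pd_idx = (u32)((next_fpm_addr - fpm_base_addr) /
			   ZXDH_HMC_PAGED_BP_SIZE);
	/* ...and its position inside the current 2 MB segment descriptor. */
	u32 rel_pd_idx = pd_idx % ZXDH_HMC_PD_CNT_IN_SD;
	/* Take at most the page descriptors left in this segment... */
	u32 pages = rel_pd_idx ? (ZXDH_HMC_PD_CNT_IN_SD - rel_pd_idx) :
				 ZXDH_HMC_PD_CNT_IN_SD;

	/* ...bounded by the whole pages of 8-byte PBLEs still unallocated
	 * (4096 / 8 = 512 PBLEs per page, i.e. unallocated_pble >> 9).
	 * The caller then advances next_fpm_addr by pages * 4 KB and
	 * subtracts (pages * 4 KB) / 8 from unallocated_pble.
	 */
	return min(pages, unallocated_pble / (ZXDH_HMC_PAGED_BP_SIZE / 8));
}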
+int zxdh_add_pble_hmc_obj(struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages) +{ + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem dma_mem = {}; + + struct stPteRequest ptMmuMmapCfg = {}; + + u64 alloc_len; + u32 pble_hmc_comm_index; + struct zxdh_hw *hw = pble_rsrc->dev->hw; + + memset(&ptMmuMmapCfg, 0, sizeof(ptMmuMmapCfg)); + + if (pble_rsrc->pble_type == PBLE_QUEUE) + pble_hmc_comm_index = hmc_info->pble_hmc_index; + else + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + + if (!sd_entry->valid) { + alloc_len = (u64)pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + + memset(dma_mem.va, 0, dma_mem.size); + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + if (false == pble_rsrc->dev->hmc_use_dpu_ddr) { // is HOST DDR + memset(&dma_mem, 0, sizeof(struct zxdh_dma_mem)); + dma_mem.size = + ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = + dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + memset(dma_mem.va, 0, dma_mem.size); + + memcpy(&sd_entry->u.bp.addr_hardware, &dma_mem, + sizeof(sd_entry->u.bp.addr_hardware)); + + ptMmuMmapCfg.uddPhyAddr = dma_mem.pa; + ptMmuMmapCfg.uddVirAddr = pble_rsrc->next_fpm_addr; + ptMmuMmapCfg.uddSize = alloc_len; + ptMmuMmapCfg.udStreamid = + pble_rsrc->dev->hmc_fn_id; // 这个SID后续需要修改 + ptMmuMmapCfg.udRWFlag = 0x03; + // 这里调用SMMU接口 + zxdh_smmu_set_pte(&ptMmuMmapCfg, pble_rsrc->dev); + pr_info("smmu sid:%d pte iova:0x%llx pa:0x%llx\n", + pble_rsrc->dev->hmc_fn_id, + pble_rsrc->next_fpm_addr, dma_mem.pa); + } + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + if (pble_rsrc->pble_type == PBLE_QUEUE) + hmc_info->pble_hmc_index++; + else + hmc_info->pble_mr_hmc_index++; + + hmc_info->sd_table.use_cnt++; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + + return 0; +} + +/** + * zxdh_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + */ +int zxdh_prep_remove_sd_bp(struct zxdh_hmc_info *hmc_info, u32 idx) +{ + struct zxdh_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + hmc_info->sd_table.use_cnt--; + sd_entry->valid = false; + + return 0; +} + +/** + * zxdh_get_next_vf_idx - return the next vf_idx available + * @dev: pointer to RDMA dev structure + */ +static u16 zxdh_get_next_vf_idx(struct zxdh_sc_dev *dev) +{ + u16 vf_idx; + + for (vf_idx = 0; vf_idx < dev->num_vfs; vf_idx++) { + if (!dev->vf_dev[vf_idx]) + break; + } + + return vf_idx < dev->num_vfs ? 
vf_idx : ZXDH_VCHNL_INVALID_VF_IDX; +} + +static int zxdh_get_vf_hmc_baseinfo(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *hmc_obj, + u16 iw_vf_idx, u16 vf_id) +{ + u16 i = 0; + + for (i = 0; i < ZXDH_HMC_IW_MAX; i++) { + if ((i == ZXDH_HMC_IW_IRD) || (i == ZXDH_HMC_IW_TXWINDOW)) { + hmc_obj[i].max_cnt = dev->hmc_pf_manager_info.vf_qp_cnt; + hmc_obj[i].cnt = dev->hmc_pf_manager_info.vf_qp_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + (dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } else if (i == ZXDH_HMC_IW_PBLE_MR) { + hmc_obj[i].max_cnt = + dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[i].cnt = dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + (dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } else if (i == ZXDH_HMC_IW_PBLE) { + hmc_obj[i].max_cnt = + dev->hmc_pf_manager_info.vf_pblequeue_cnt; + hmc_obj[i].cnt = + dev->hmc_pf_manager_info.vf_pblequeue_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + (dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } + pr_info("vf%d hmc_obj[%d].base = 0x%llx\n", vf_id, i, + hmc_obj[i].base); + } + + return 0; +} + +struct zxdh_vfdev *zxdh_pf_get_vf_hmc_res(struct zxdh_sc_dev *dev, u16 vf_id) +{ + struct zxdh_virt_mem virt_mem; + struct zxdh_vfdev *vf_dev; + u16 iw_vf_idx = 0; + + iw_vf_idx = zxdh_get_next_vf_idx(dev); + if (iw_vf_idx == ZXDH_VCHNL_INVALID_VF_IDX || + iw_vf_idx >= ZXDH_MAX_PE_ENA_VF_COUNT) + return NULL; + + virt_mem.size = sizeof(struct zxdh_vfdev) + + sizeof(struct zxdh_hmc_obj_info) * ZXDH_HMC_IW_MAX; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + + if (!virt_mem.va) { + pr_err("VIRT: VF%u Unable to allocate a VF device structure.\n", + vf_id); + return NULL; + } + + vf_dev = virt_mem.va; + vf_dev->pf_dev = dev; + vf_dev->vf_id = vf_id; + vf_dev->iw_vf_idx = iw_vf_idx; + vf_dev->pf_hmc_initialized = false; + vf_dev->hmc_info.hmc_obj = (struct zxdh_hmc_obj_info *)(&vf_dev[1]); + zxdh_get_vf_hmc_baseinfo(dev, vf_dev->hmc_info.hmc_obj, iw_vf_idx, + vf_id); + + refcount_set(&vf_dev->refcnt, 1); + dev->vf_dev[iw_vf_idx] = vf_dev; + refcount_inc(&vf_dev->refcnt); + + return vf_dev; +} + +int zxdh_pf_recv_mb(struct zxdh_sc_dev *dev, struct zxdh_ccq_cqe_info *info) +{ + int resp_code = 0, i = 0; + u16 srcvhcaid = 0, mb_vfid = 0; + u8 opt = 0xff; // 避免与mailbox消息类型宏定义同值. 
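+ /* 0xff is deliberately outside the defined mailbox message-type values, so an uninitialized 'opt' can never match a real opcode. */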
+ struct zxdh_vfdev *vf_dev = NULL; + struct zxdh_hmc_create_obj_info obj_info = {}; + struct zxdh_dma_write32_date dma_data = {}; + + if (info->mailbox_cqe != 1) + return -EINVAL; + + opt = (u8)info->addrbuf + [0]; //info 被memreset后结构全部为0,故避免opt默认为0情况,ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN修改为1 + + mb_vfid = FIELD_GET(ZXDH_SRC_PFVF_ID, info->op_ret_val); + srcvhcaid = FIELD_GET(ZXDH_SRC_VHCA_ID, info->op_ret_val); + + vf_dev = zxdh_find_vf_dev(dev, mb_vfid); + + switch (opt) { + case ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN: // fix is 1, + if (!vf_dev) { + vf_dev = zxdh_pf_get_vf_hmc_res(dev, mb_vfid); + if (!vf_dev) { + resp_code = -ENODEV; + break; + } + + vf_dev->vhca_id = srcvhcaid; + obj_info.hmc_info = &vf_dev->hmc_info; + obj_info.add_sd_cnt = 0; + zxdh_vfhmc_enter(dev, obj_info.hmc_info); + + vf_dev->hmc_info.pble_hmc_index = + vf_dev->hmc_info.hmc_first_entry_pble; + vf_dev->pbleq_unallocated_pble = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + vf_dev->pbleq_fpm_base_addr = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE] + .base; + vf_dev->pbleq_next_fpm_addr = + vf_dev->pbleq_fpm_base_addr; + pr_info("vf%d pble_hmc_index=%d pbleq_fpm_base_addr=%llx\n", + mb_vfid, vf_dev->hmc_info.pble_hmc_index, + vf_dev->pbleq_fpm_base_addr); + + vf_dev->hmc_info.pble_mr_hmc_index = + vf_dev->hmc_info.hmc_first_entry_pble_mr; + vf_dev->pblemr_unallocated_pble = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR] + .cnt; + vf_dev->pblemr_fpm_base_addr = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR] + .base; + vf_dev->pblemr_next_fpm_addr = + vf_dev->pblemr_fpm_base_addr; + pr_info("vf%d pble_mr_hmc_index=%d pbleq_fpm_base_addr=%llx\n", + mb_vfid, vf_dev->hmc_info.pble_mr_hmc_index, + vf_dev->pblemr_fpm_base_addr); + + for (i = ZXDH_HMC_IW_IRD; i < ZXDH_HMC_IW_TXWINDOW + 1; + i++) { + zxdh_create_vf_hmc_objs(dev, &vf_dev->hmc_info, + i, &obj_info); + } + dma_data.num = 1; + dma_data.addrbuf[0] = + C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE( + vf_dev->vhca_id); + dma_data.databuf[0] = 1; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + } + + break; + case ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE: + case ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE: + if (!vf_dev) + return -ENODEV; + if (!dev->hmc_use_dpu_ddr) { + if (opt == ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE) + i = ZXDH_HMC_IW_PBLE; /* code */ + else + i = ZXDH_HMC_IW_PBLE_MR; + + if (vf_dev->hmc_info.hmc_obj[i].cnt) { + resp_code = zxdh_pf2vf_add_pble_hmc_obj( + dev, vf_dev, i); + if (i == ZXDH_HMC_IW_PBLE) + vf_dev->hmc_info.pble_hmc_index++; + else + vf_dev->hmc_info.pble_mr_hmc_index++; + } + + dma_data.num = 1; + dma_data.addrbuf[0] = + C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE( + vf_dev->vhca_id); + dma_data.databuf[0] = 1; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + } + break; + default: + break; + } + + return resp_code; +} + +int zxdh_create_vf_hmc_objs(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, u8 type, + struct zxdh_hmc_create_obj_info *obj_info) +{ + int status = 0; + + if (hmc_info->hmc_obj[type].cnt) { + obj_info->rsrc_type = type; + obj_info->count = hmc_info->hmc_obj[obj_info->rsrc_type].cnt; + status = zxdh_sc_create_hmc_obj(dev, obj_info); + if (status) { + zxdh_del_hmc_objects(dev, hmc_info); + pr_err("ERR: create obj type %d status = %d\n", + iw_hmc_obj_types[obj_info->rsrc_type], status); + } + } + + return status; +} + +int zxdh_vfhmc_enter(struct zxdh_sc_dev *dev, struct zxdh_hmc_info *vf_hmc_info) +{ + u32 sd_lmt = 0, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, + cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info = NULL; + struct 
zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_obj_info *obj_info = NULL; + + hmc_info = vf_hmc_info; + obj_info = hmc_info->hmc_obj; + + for (k = ZXDH_HMC_IW_IRD; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + if ((fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) == 0) { + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + } else { + sd_lmt = (u32)((fpm_limit - 1) / + ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + } + + if (sd_lmt == 1) + hmc_entry_total++; + else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + return 0; +} + +int zxdh_sc_write_hmc_register(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *obj_info, + u32 rsrc_type, u16 vhca_id) +{ + u32 base_low = 0, base_high = 0, val = 0; + u64 base = 0; + struct zxdh_sc_cqp *cqp = dev->cqp; + + if (dev->cache_id > 3) { + pr_info("cache id is error!!!\n"); + return -EACCES; + } + + if ((rsrc_type == ZXDH_HMC_IW_QP) || (rsrc_type == ZXDH_HMC_IW_CQ) || + (rsrc_type == ZXDH_HMC_IW_SRQ) || (rsrc_type == ZXDH_HMC_IW_IRD) || + (rsrc_type == ZXDH_HMC_IW_TXWINDOW)) { + base = dev->hmc_info->hmc_obj[rsrc_type].base; + } else { + base = obj_info[rsrc_type].base; + } + + base = base / 512; + base_low = (u32)(base & 0x00000000ffffffff); + base_high = (u32)((base & 0xffffffff00000000) >> 32); + + switch (rsrc_type) { + case ZXDH_HMC_IW_PBLE_MR: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_TX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RX2)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_INDICATE)); + } + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_BASE_HIGH)); + pr_info("vhca:%d PBLE_MR base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_PBLE: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_TX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_TX2)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + RDMATX_DB_PBLE_ID_CFG)); + + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX2)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX3)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX4)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX5)); + if 
(dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_INDICATE)); + } + writel(base_low, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_BASE_HIGH)); + pr_info("vhca:%d PBLE_Q base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_MR: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_TX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_TX3)); + + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_RX2)); + + val = zxdh_hmc_register_config_cqpval( + dev, obj_info[rsrc_type].max_cnt, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_CQP)); + + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_INDICATE)); + } + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_BASE_HIGH)); + pr_info("vhca:%d MR base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_AH: + + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_AH_TX)); + + val = zxdh_hmc_register_config_cqpval( + dev, obj_info[rsrc_type].max_cnt, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_AH_CQP)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_BASE_HIGH)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_INDICATE)); + } + pr_info("vhca:%d AH base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_IRD: + + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX2)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX3)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_BASE_HIGH)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_INDICATE)); + } + pr_info("vhca:%d IRD base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_TXWINDOW: + + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_TX)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + 
C_HMC_TX_WINDOW_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_INDICATE)); + } + writel(base_low, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_BASE_HIGH)); + pr_info("vhca:%d TXWINDOW base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_QP: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_QPC_RX)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_RX_BASE_HIGH)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_QPC_TX)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_TX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_TX_BASE_HIGH)); + pr_info("vhca:%d QP base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_SRQ: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_SRQC_RX)); + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_SRQC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_SRQC_RX_BASE_HIGH)); + pr_info("vhca:%d SRQ base:0x%llx", vhca_id, base * 512); + break; + case ZXDH_HMC_IW_CQ: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_CQC_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_CQC_RX2)); + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_CQC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_CQC_RX_BASE_HIGH)); + pr_info("vhca:%d CQ base:0x%llx", vhca_id, base * 512); + break; + default: + break; + } + return 0; +} diff --git a/src/rdma/src/hmc.h b/src/rdma/src/hmc.h new file mode 100644 index 0000000000000000000000000000000000000000..a6a1ce98f7c276406c67dfa9c282b5e1cd4a9802 --- /dev/null +++ b/src/rdma/src/hmc.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_HMC_H +#define ZXDH_HMC_H + +#include "defs.h" +#include "pble.h" +#include "./smmu/kernel/adk_mmu600.h" + +#define ZXDH_HMC_MAX_BP_COUNT 512 +#define ZXDH_MAX_SD_ENTRIES 11 +#define ZXDH_HW_DBG_HMC_INVALID_BP_MARK 0xca +#define ZXDH_HMC_INFO_SIGNATURE 0x484d5347 +#define ZXDH_HMC_PD_CNT_IN_SD 512 +#define ZXDH_HMC_DIRECT_BP_SIZE 0x200000 +#define ZXDH_HMC_MAX_SD_COUNT 8192 +#define ZXDH_HMC_PAGED_BP_SIZE 4096 +#define ZXDH_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define ZXDH_FIRST_VF_FPM_ID 8 +#define FPM_MULTIPLIER 1024 + +#define ZXDH_MIN_GLOBAL_CQPN 2 +#define ZXDH_MAX_GLOBAL_CQPN 1025 +#define ZXDH_MIN_GLOBAL_QPN (1 + ZXDH_MAX_GLOBAL_CQPN) +#define ZXDH_MIN_GLOBAL_CQN 1 +#define ZXDH_MIN_GLOBAL_SRQN 1 +#define ZXDH_MIN_GLOBAL_CEQN 1 + +//修改为结构体 +// #define ZXDH_HMC_CNT_DEBUG 64 +// #define ZXDH_HMC_1K 1024 +// #define ZXDH_HMC_1M (1024*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG) +// #define ZXDH_HMC_HOST_QPC_MAX_QUANTITY (ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG) +// #define ZXDH_HMC_HOST_CQC_MAX_QUANTITY 2*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_SRQC_MAX_QUANTITY 512*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_MRTE_MAX_QUANTITY 16*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_AH_MAX_QUANTITY 
512*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY 2 //There is no multicast scenario. The number can be set to a small value. +// #define ZXDH_HMC_HOST_MGC_MAX_QUANTITY 8*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_PBLEMR_MAX_QUANTITY 512*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_PBLEOTHER_MAX_QUANTITY 512*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_CEQC_MAX_QUANTITY 4 + +// smmu使用参数 +#define ZXDH_HMC_CNT_DEBUG 64 +#define ZXDH_HMC_1K 1024 +#define ZXDH_HMC_1M (1024 * ZXDH_HMC_1K / ZXDH_HMC_CNT_DEBUG) +#define ZXDH_HMC_HOST_QPC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_CQC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_SRQC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_MRTE_MAX_QUANTITY (1024 * 32) +#define ZXDH_HMC_HOST_AH_MAX_QUANTITY (1024 * 32) +// #define ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY 600 +// #define ZXDH_HMC_HOST_MGC_MAX_QUANTITY (8192) +#define ZXDH_HMC_HOST_PBLEMR_MAX_QUANTITY (1024 * 1024 * 4) +#define ZXDH_HMC_HOST_PBLEOTHER_MAX_QUANTITY (1024 * 1024 * 4) +#define ZXDH_HMC_HOST_CEQC_MAX_QUANTITY 4 + +enum zxdh_hmc_rsrc_type { + ZXDH_HMC_IW_QP = 0, + ZXDH_HMC_IW_CQ = 1, + ZXDH_HMC_IW_SRQ = 2, + ZXDH_HMC_IW_AH = 3, + ZXDH_HMC_IW_MR = 4, + ZXDH_HMC_IW_IRD = 5, + ZXDH_HMC_IW_TXWINDOW = 6, + ZXDH_HMC_IW_PBLE = 7, + ZXDH_HMC_IW_PBLE_MR = 8, + ZXDH_HMC_IW_MAX, /* Must be last entry */ +}; + +enum zxdh_indicate_id { + ZXDH_INDICATE_L2D = 0, + ZXDH_INDICATE_DPU_DDR = ZXDH_INDICATE_L2D, // 外挂 + ZXDH_INDICATE_REGISTER = ZXDH_INDICATE_L2D, + ZXDH_INDICATE_RESERVED = 1, + ZXDH_INDICATE_HOST_NOSMMU = 2, + ZXDH_INDICATE_HOST_SMMU = 3, +}; + +enum zxdh_axid_type { + ZXDH_AXID_L2D = 0, + ZXDH_AXID_DPUDDR = 1, + ZXDH_AXID_HOST_EP0 = 2, + ZXDH_AXID_HOST_EP1 = 3, + ZXDH_AXID_HOST_EP2 = 4, + ZXDH_AXID_HOST_EP3 = 5, + ZXDH_AXID_HOST_EP4 = 6, +}; + +enum zxdh_interface_type { + ZXDH_INTERFACE_CACHE = 0, + ZXDH_INTERFACE_NOTCACHE = 1, +}; + +enum zxdh_object_id { + ZXDH_PBLE_MR_OBJ_ID = 0, + ZXDH_PBLE_QUEUE_OBJ_ID = 1, + ZXDH_MR_OBJ_ID = 2, + ZXDH_AH_OBJ_ID = 3, + ZXDH_IRD_OBJ_ID = 4, + ZXDH_TX_WINDOW_OBJ_ID = 5, + ZXDH_SRQC_OBJ_ID = 6, + ZXDH_CQC_OBJ_ID = 7, + ZXDH_MG_PAYLOAD_OBJ_ID = 8, + ZXDH_MG_OBJ_ID = 9, + ZXDH_RW_PAYLOAD = 10, + ZXDH_SQ = 11, + ZXDH_SQ_SHADOW_AREA = 12, + ZXDH_RQ = 13, + ZXDH_RQ_SHADOW_AREA = 14, + ZXDH_SRQP = 15, + ZXDH_SRQ = 16, + ZXDH_SRQ_SHADOW_AREA = 17, + ZXDH_CQ = 18, + ZXDH_CQ_SHADOW_AREA = 19, + ZXDH_CEQ = 20, + ZXDH_AEQ = 21, + ZXDH_MG_QPN = 22, + ZXDH_QPC_OBJ_ID = 29, + ZXDH_DMA_OBJ_ID = 30, + ZXDH_L2D_OBJ_ID = 31, + ZXDH_REG_OBJ_ID = ZXDH_L2D_OBJ_ID, +}; + +enum zxdh_sd_entry_type { + ZXDH_SD_TYPE_INVALID = 0, + ZXDH_SD_TYPE_PAGED = 1, + ZXDH_SD_TYPE_DIRECT = 2, +}; + +enum zxdh_mb_opt_type { + ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN = 1, + ZTE_ZXDH_OP_ADD_HMC_OBJ_RANGE = 2, + ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE = 3, + ZTE_ZXDH_OP_GET_TYPEONE_HMC_CNT = 6, + ZTE_ZXDH_OP_REPLY_TYPEONE_HMC_CNT = 7, + ZTE_ZXDH_OP_GET_PBLE_HMC_BASEINFO = 8, + ZTE_ZXDH_OP_REPLY_PBLE_HMC_BASEINFO = 9, + ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE = 10, + ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE = 11, +}; + +enum function_type { + FUNCTION_TYPE_VF = 0, + FUNCTION_TYPE_PF = 1, +}; + +struct zxdh_hmc_obj_manage { + u64 hmc_base; + u32 total_qp_cnt; + u32 total_cq_cnt; + u32 total_srq_cnt; + u32 total_mrte_cnt; + u32 total_ah_cnt; + u32 pf_pblemr_cnt; + u32 pf_pblequeue_cnt; + u32 vf_qp_cnt; + u32 vf_pblemr_cnt; + u32 vf_pblequeue_cnt; +}; + +struct zxdh_hmc_obj_info { + u64 base; + u32 max_cnt; + u32 cnt; + u64 size; + u8 type; +}; + +struct 
zxdh_vf_hmc_obj_info { + struct zxdh_hmc_obj_info hmc_objinfo[ZXDH_HMC_IW_MAX]; + u16 vf_id; + u8 valid : 1; +}; + +struct zxdh_hmc_bp { + enum zxdh_sd_entry_type entry_type; + struct zxdh_dma_mem addr; + struct zxdh_dma_mem addr_hardware; // for hardware + u32 sd_pd_index; + u32 use_cnt; +}; + +struct zxdh_hmc_pd_entry { + struct zxdh_hmc_bp bp; + u32 sd_index; + u8 rsrc_pg : 1; + u8 valid : 1; +}; + +struct zxdh_hmc_pd_table { + struct zxdh_dma_mem pd_page_addr; + struct zxdh_hmc_pd_entry *pd_entry; + struct zxdh_virt_mem pd_entry_virt_mem; + u32 use_cnt; + u32 sd_index; +}; + +struct zxdh_hmc_sd_entry { + enum zxdh_sd_entry_type entry_type; + bool valid; + union { + struct zxdh_hmc_pd_table pd_table; + struct zxdh_hmc_bp bp; + } u; +}; + +struct zxdh_hmc_sd_table { + struct zxdh_virt_mem addr; + u32 sd_cnt; + u32 use_cnt; + struct zxdh_hmc_sd_entry *sd_entry; +}; + +struct zxdh_hmc_info { + u32 signature; + u8 hmc_fn_id; + u16 first_sd_index; + u32 pble_hmc_index; + u32 pble_mr_hmc_index; + u32 hmc_entry_total; + u32 hmc_first_entry_pble; + u32 hmc_first_entry_pble_mr; + struct zxdh_hmc_obj_info *hmc_obj; + struct zxdh_virt_mem hmc_obj_virt_mem; + struct zxdh_hmc_sd_table sd_table; + u16 sd_indexes[ZXDH_HMC_MAX_SD_COUNT]; +}; + +struct zxdh_update_sd_entry { + u64 cmd; + u64 data; +}; + +struct zxdh_update_sds_info { + u32 cnt; + u8 hmc_fn_id; + struct zxdh_update_sd_entry entry[ZXDH_MAX_SD_ENTRIES]; +}; + +struct zxdh_ccq_cqe_info; +struct zxdh_hmc_fcn_info { + u32 vf_id; + u8 free_fcn; +}; + +struct zxdh_hmc_create_obj_info { + struct zxdh_hmc_info *hmc_info; + struct zxdh_virt_mem add_sd_virt_mem; + u32 rsrc_type; + u32 count; + u32 add_sd_cnt; + enum zxdh_sd_entry_type entry_type; + bool privileged; +}; + +struct zxdh_hmc_del_obj_info { + struct zxdh_hmc_info *hmc_info; + struct zxdh_virt_mem del_sd_virt_mem; + u32 rsrc_type; + u32 count; + u32 del_sd_cnt; + bool privileged; +}; + +int zxdh_sc_create_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_create_obj_info *info); +int zxdh_add_pble_hmc_obj(struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages); +int zxdh_vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages); + +int zxdh_prep_remove_sd_bp(struct zxdh_hmc_info *hmc_info, u32 idx); + +int zxdh_pf_recv_mb(struct zxdh_sc_dev *dev, struct zxdh_ccq_cqe_info *info); + +struct zxdh_vfdev *zxdh_pf_get_vf_hmc_res(struct zxdh_sc_dev *dev, u16 vf_id); +int zxdh_sc_write_hmc_register(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *obj_info, + u32 rsrc_type, u16 vhca_id); +int zxdh_vfhmc_enter(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *vf_hmc_info); + +int zxdh_create_vf_hmc_objs(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, u8 type, + struct zxdh_hmc_create_obj_info *obj_info); +#endif /* ZXDH_HMC_H */ diff --git a/src/rdma/src/hw.c b/src/rdma/src/hw.c new file mode 100644 index 0000000000000000000000000000000000000000..e7c9b5226b92ae566de50c5dd8b98658dc93188f --- /dev/null +++ b/src/rdma/src/hw.c @@ -0,0 +1,2320 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "user.h" +#include "icrdma_hw.h" +#include "hmc.h" + +/* types of hmc objects */ +enum zxdh_hmc_rsrc_type iw_hmc_obj_types[ZXDH_HMC_IW_TXWINDOW + 1] = { + ZXDH_HMC_IW_QP, ZXDH_HMC_IW_CQ, ZXDH_HMC_IW_SRQ, ZXDH_HMC_IW_AH, + ZXDH_HMC_IW_MR, ZXDH_HMC_IW_IRD, ZXDH_HMC_IW_TXWINDOW, +}; + +/** + * zxdh_iwarp_ce_handler - 
handle iwarp completions + * @iwcq: iwarp cq receiving event + */ +static void zxdh_iwarp_ce_handler(struct zxdh_sc_cq *iwcq) +{ + struct zxdh_cq *cq = iwcq->back_cq; + + if (cq != NULL) { + if (!cq->user_mode) + cq->armed = false; + if (cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); + } +} + +static void zxdh_ceq_ena_intr(struct zxdh_sc_dev *dev, u32 ceq_id); + +/** + * zxdh_process_ceq - handle ceq for completions + * @rf: RDMA PCI function + * @ceq: ceq having cq for completion + */ +static void zxdh_process_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *ceq) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_sc_ceq *sc_ceq; + struct zxdh_sc_cq *cq; + unsigned long flags; + + sc_ceq = &ceq->sc_ceq; + do { + spin_lock_irqsave(&ceq->ce_lock, flags); + cq = zxdh_sc_process_ceq(dev, sc_ceq); + if (!cq) { + spin_unlock_irqrestore(&ceq->ce_lock, flags); + break; + } + if (cq->cq_type == ZXDH_CQ_TYPE_IO) + zxdh_iwarp_ce_handler(cq); + spin_unlock_irqrestore(&ceq->ce_lock, flags); + + if (cq->cq_type == ZXDH_CQ_TYPE_CQP) { + rf->sc_dev.ceq_interrupt = true; + queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); + } + } while (1); +} + +static void zxdh_set_flush_fields_requester(struct zxdh_sc_qp *qp, + struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_REQ_NVME_IDX_ERR: + case ZXDH_AE_REQ_NVME_PD_IDX_ERR: + case ZXDH_AE_REQ_NVME_KEY_ERR: + case ZXDH_AE_REQ_NVME_ACC_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_IDX_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_PD_IDX_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_KEY_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_ACC_ERR: + case ZXDH_AE_REQ_MW_INV_LKEY_ERR: + case ZXDH_AE_REQ_MW_INV_TYPE_ERR: + case ZXDH_AE_REQ_MW_INV_STATE_INV: + case ZXDH_AE_REQ_MW_INV_PD_IDX_ERR: + case ZXDH_AE_REQ_MW_INV_SHARE_MEM_ERR: + case ZXDH_AE_REQ_MW_INV_PARENT_STATE_INV: + case ZXDH_AE_REQ_MW_INV_MW_NUM_ZERO: + case ZXDH_AE_REQ_MW_INV_MW_STAG_31_8_ZERO: + case ZXDH_AE_REQ_MW_INV_QP_NUM_ERR: + case ZXDH_AE_REQ_MR_INV_INV_LKEY_ERR: + case ZXDH_AE_REQ_MR_INV_MW_NUM_ZERO: + case ZXDH_AE_REQ_MR_INV_STATE_ERR: + case ZXDH_AE_REQ_MR_INV_EN_ERR: + case ZXDH_AE_REQ_MR_INV_SHARE_MEM_ERR: + case ZXDH_AE_REQ_MR_INV_PD_IDX_ERR: + case ZXDH_AE_REQ_MR_INV_MW_STAG_31_8_ZERO: + case ZXDH_AE_REQ_MWBIND_WRITE_ACC_ERR: + case ZXDH_AE_REQ_MWBIND_VA_BIND_ERR: + case ZXDH_AE_REQ_MWBIND_PD_IDX_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_STATE_TYPE_ERR: + case ZXDH_AE_REQ_MWBIND_VA_LEN_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE_VA_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE_IDX_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_MR_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE2_LEN_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_STATE_ERR: + case ZXDH_AE_REQ_MWBIND_QPC_EN_ERR: + case ZXDH_AE_REQ_MWBIND_PARENT_MR_ERR: + case ZXDH_AE_REQ_MWBIND_ACC_BIT4_ERR: + case ZXDH_AE_REQ_MWBIND_MW_STAG_ERR: + case ZXDH_AE_REQ_MWBIND_IDX_OUT_RANGE: + case ZXDH_AE_REQ_MR_FASTREG_ACC_ERR: + case ZXDH_AE_REQ_MR_FASTREG_PD_IDX_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MRTE_STATE_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MR_IS_NOT_1: + case ZXDH_AE_REQ_MR_FASTREG_QPC_EN_ERR: + case ZXDH_AE_REQ_MR_FASTREG_STAG_LEN_ERR: + case ZXDH_AE_REQ_MR_FASTREG_SHARE_MR_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MW_STAG_ERR: + case ZXDH_AE_REQ_MR_FASTREG_IDX_OUT_RANGE: + case ZXDH_AE_REQ_MR_FASTREG_MR_EN_ERR: + case ZXDH_AE_REQ_MW_BIND_PD_IDX_ERR: + case ZXDH_AE_REQ_MRTE_STATE_FREE: + case ZXDH_AE_REQ_MRTE_STATE_INVALID: + case ZXDH_AE_REQ_MRTE_MW_QP_ID_ERR: + case ZXDH_AE_REQ_MRTE_PD_IDX_ERR: + case ZXDH_AE_REQ_MRTE_KEY_ERR: + case ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_ERR: + case 
ZXDH_AE_REQ_MRTE_VIRT_ADDR_AND_LEN_ERR: + case ZXDH_AE_REQ_MRTE_ACC_ERR: + case ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_RSV_ERR: + case ZXDH_AE_REQ_REM_INV_RKEY: + case ZXDH_AE_REQ_WQE_MRTE_STATE_FREE: + case ZXDH_AE_REQ_WQE_MRTE_STATE_INV: + case ZXDH_AE_REQ_WQE_MRTE_MW_QP_ID_ERR: + case ZXDH_AE_REQ_WQE_MRTE_PD_IDX_ERR: + case ZXDH_AE_REQ_WQE_MRTE_KEY_ERR: + case ZXDH_AE_REQ_WQE_MRTE_STAG_IDX_ERR: + case ZXDH_AE_REQ_WQE_MRTE_VIRT_ADDR_AND_LEN_CHK_ERR: + case ZXDH_AE_REQ_WQE_MRTE_ACC_ERR: + case ZXDH_AE_REQ_WQE_MRTE_RSV_LKEY_EN_ERR: + qp->event_type = ZXDH_QP_EVENT_ACCESS_ERR; + break; + case ZXDH_AE_REQ_REM_INV_OPCODE: + case ZXDH_AE_REQ_OFED_INVALID_SQ_OPCODE: + case ZXDH_AE_REQ_NVME_INVALID_SQ_OPCODE: + qp->event_type = ZXDH_QP_EVENT_REQ_ERR; + break; + default: + qp->event_type = ZXDH_QP_EVENT_CATASTROPHIC; + break; + } +} + +static void zxdh_set_flush_fields_responder(struct zxdh_sc_qp *qp, + struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_RSP_PRIFIELD_CHK_INV_OPCODE: + qp->event_type = ZXDH_QP_EVENT_REQ_ERR; + break; + case ZXDH_AE_RSP_PKT_TYPE_NOF_PD_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_NOF_RKEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_NOF_ACC_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_DISTRIBUTE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_INV_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_QP_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_PD_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_KEY_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_STAG_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_BOUNDARY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_ACC_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_STAG0_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_STATE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_PD_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_KEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_TYPE2B_QPN_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_KEY_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_SHARE_MR: + case ZXDH_AE_RSP_PKT_TYPE_MW_TYPE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_PD_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_KEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_ACC_ERR: + case ZXDH_AE_RSP_CHK_ERR_SHARE_MR: + case ZXDH_AE_RSP_MW_NUM_ERR: + case ZXDH_AE_RSP_INV_EN_ERR: + qp->event_type = ZXDH_QP_EVENT_ACCESS_ERR; + break; + default: + qp->event_type = ZXDH_QP_EVENT_CATASTROPHIC; + break; + } +} + +/** + * zxdh_process_aeq - handle aeq events + * @rf: RDMA PCI function + */ +static void zxdh_process_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + struct zxdh_sc_aeq *sc_aeq = &aeq->sc_aeq; + struct zxdh_aeqe_info aeinfo; + struct zxdh_aeqe_info *info = &aeinfo; + int ret; + struct zxdh_qp *iwqp = NULL; + struct zxdh_cq *iwcq = NULL; + struct zxdh_srq *iwsrq = NULL; + struct zxdh_sc_qp *qp = NULL; + unsigned long flags; + struct ib_event ibevent; + + u32 aeqcnt = 0; + + if (!sc_aeq->size) + return; + + do { + memset(info, 0, sizeof(*info)); + ret = zxdh_sc_get_next_aeqe(sc_aeq, info); + if (ret) + break; + + aeqcnt++; + zxdh_dbg( + dev, + "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n", + info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, + info->iwarp_state, info->ae_src); + + if (info->qp) { + spin_lock_irqsave(&rf->qptable_lock, flags); + if (info->qp_cq_id < dev->base_qpn) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("qp information is valid,qpn < base_qpn, qpn:%d\n", + info->qp_cq_id); + continue; + } + iwqp = rf->qp_table[info->qp_cq_id - dev->base_qpn]; + if (!iwqp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + zxdh_dbg(dev, + "AEQ: qp_id %d is already freed\n", + 
info->qp_cq_id); + continue; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, flags); + qp = &iwqp->sc_qp; + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_iwarp_state = info->iwarp_state; + iwqp->last_aeq = info->ae_id; + spin_unlock_irqrestore(&iwqp->lock, flags); + } else { + if (info->ae_id == ZXDH_AE_REQ_WQE_FLUSH) + continue; + else if (info->ae_id == ZXDH_AE_RSP_WQE_FLUSH) + continue; + } + switch (info->ae_id) { + case ZXDH_AE_RSP_SRQ_WATER_SIG: + spin_lock_irqsave(&rf->srqtable_lock, flags); + if (info->qp_cq_id < dev->base_srqn) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + pr_err("aeq srq water limit event,srqn < base_srqn, srqn:%d\n", + info->qp_cq_id); + continue; + } + iwsrq = rf->srq_table[info->qp_cq_id - dev->base_srqn]; + if (!iwsrq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_srq_add_ref(&iwsrq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + if (iwsrq->ibsrq.event_handler) { + ibevent.device = iwsrq->ibsrq.device; + ibevent.event = IB_EVENT_SRQ_LIMIT_REACHED; + ibevent.element.srq = &iwsrq->ibsrq; + iwsrq->ibsrq.event_handler( + &ibevent, iwsrq->ibsrq.srq_context); + } + zxdh_srq_rem_ref(&iwsrq->ibsrq); + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW: + case ZXDH_AE_RSP_PKT_TYPE_CQ_STATE: + case ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP: + dev_err(idev_to_dev(dev), + "Processing CQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_id); + spin_lock_irqsave(&rf->cqtable_lock, flags); + if (info->qp_cq_id < dev->base_cqn) { + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + pr_err("aeq cq err, cqn < base_cqn cqn:%d\n", + info->qp_cq_id); + continue; + } + iwcq = rf->cq_table[info->qp_cq_id - dev->base_cqn]; + if (!iwcq) { + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: cq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_cq_add_ref(&iwcq->ibcq); + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + if (iwcq->ibcq.event_handler) { + ibevent.device = iwcq->ibcq.device; + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &iwcq->ibcq; + iwcq->ibcq.event_handler(&ibevent, + iwcq->ibcq.cq_context); + } + zxdh_cq_rem_ref(&iwcq->ibcq); + break; + case ZXDH_AE_RSP_SRQ_AXI_RSP_SIG: + + spin_lock_irqsave(&rf->qptable_lock, flags); + if (info->qp_cq_id < dev->base_qpn) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("aeq srq axi err, qpn < base_qpn qpn:%d\n", + info->qp_cq_id); + continue; + } + iwqp = rf->qp_table[info->qp_cq_id - dev->base_qpn]; + if (!iwqp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + zxdh_dbg(dev, + "AEQ: qp_id %d is already freed\n", + info->qp_cq_id); + continue; + } + spin_unlock_irqrestore(&rf->qptable_lock, flags); + + if (iwqp->is_srq == false) { + pr_err("aeq srq axi err, qp is not bound to srq\n"); + continue; + } + iwsrq = iwqp->iwsrq; + + spin_lock_irqsave(&rf->srqtable_lock, flags); + if (!iwsrq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_srq_add_ref(&iwsrq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + if (iwsrq->ibsrq.event_handler) { + ibevent.device = iwsrq->ibsrq.device; + ibevent.event = IB_EVENT_SRQ_ERR; + ibevent.element.srq = &iwsrq->ibsrq; + iwsrq->ibsrq.event_handler( + &ibevent, iwsrq->ibsrq.srq_context); + } + zxdh_srq_rem_ref(&iwsrq->ibsrq); + break; + 
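+ /* AEs 0x8f3/0x8f5 below report retry-limit errors; they are handled by dedicated helpers and do not go through the flush/disconnect path used by the default branch. */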
case ZXDH_AE_REQ_RETRY_EXC_LOC_ACK_OUT_RANGE: + // 0x8f3: retry limit exceeded, local ACK out of range + if (iwqp) + zxdh_aeq_process_retry_err(iwqp); + + break; + case ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_GET_ENTRY_ERR: + // 0x8f5: retry limit exceeded, tx window get-entry error + if (iwqp) + zxdh_aeq_process_entry_err(iwqp); + + break; + default: + if (qp == NULL) + break; + + if (info->ae_src == ZXDH_AE_REQUESTER) { //requestor + zxdh_set_flush_fields_requester(qp, info); + } else if (info->ae_src == + ZXDH_AE_RESPONDER) { //responder + zxdh_set_flush_fields_responder(qp, info); + } else { + pr_err("bad ae_src, ae_src:%d\n", info->ae_src); + break; + } + if (iwqp) + zxdh_aeq_qp_disconn(iwqp); + + break; + } + + if (info->qp) + zxdh_qp_rem_ref(&iwqp->ibqp); + } while (1); + + if (aeqcnt) + zxdh_sc_repost_aeq_tail(dev, sc_aeq->aeq_ring.tail); +} + +/** + * zxdh_ceq_ena_intr - set up device interrupts + * @dev: hardware control device structure + * @ceq_id: ceq of the interrupt to be enabled + */ +static void zxdh_ceq_ena_intr(struct zxdh_sc_dev *dev, u32 ceq_id) +{ + dev->irq_ops->zxdh_ceq_en_irq(dev, ceq_id); +} + +/** + * zxdh_aeq_ena_intr - set up device interrupts + * @dev: hardware control device structure + * @enable: aeq of the interrupt to be enabled + */ +static void zxdh_aeq_ena_intr(struct zxdh_sc_dev *dev, bool enable) +{ + dev->irq_ops->zxdh_aeq_en_irq(dev, enable); +} + +/** + * zxdh_dpc - tasklet for aeq and ceq 0 + * @t: tasklet_struct ptr + */ +static void zxdh_dpc(struct tasklet_struct *t) +{ + struct zxdh_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); + + zxdh_process_aeq(rf); + zxdh_aeq_ena_intr(&rf->sc_dev, true); +} + +/** + * zxdh_ceq_dpc - dpc handler for CEQ + * @t: tasklet_struct ptr + */ +static void zxdh_ceq_dpc(struct tasklet_struct *t) +{ + struct zxdh_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet); + struct zxdh_pci_f *rf = iwceq->rf; + + zxdh_process_ceq(rf, iwceq); + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); +} + +/** + * zxdh_save_msix_info - copy msix vector information to iwarp device + * @rf: RDMA PCI function + * + * Allocate iwdev msix table and copy the msix info to the table + * Return 0 if successful, otherwise return error + */ +static int zxdh_save_msix_info(struct zxdh_pci_f *rf) +{ + struct zxdh_qvlist_info *iw_qvlist; + struct zxdh_qv_info *iw_qvinfo; +#ifdef MSIX_DEBUG + struct msix_entry *pmsix; +#else + u32 vector; + u16 entry; +#endif + u32 ceq_idx; + u32 i; + u32 size; + u32 online_cpus_num; + + if (!rf->msix_count) + return -EINVAL; + + size = sizeof(struct zxdh_msix_vector) * rf->msix_count; + size += sizeof(struct zxdh_qvlist_info); + size += sizeof(struct zxdh_qv_info) * rf->msix_count - 1; + rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); + if (!rf->iw_msixtbl) + return -ENOMEM; + + rf->iw_qvlist = + (struct zxdh_qvlist_info *)(&rf->iw_msixtbl[rf->msix_count]); + iw_qvlist = rf->iw_qvlist; + iw_qvinfo = iw_qvlist->qv_info; + iw_qvlist->num_vectors = rf->msix_count; + online_cpus_num = num_online_cpus(); +#ifdef MSIX_DEBUG + pmsix = rf->msix_entries; +#else + entry = rf->msix_entries->entry; +#endif + +#ifdef MSIX_SUPPORT + for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { +#ifdef MSIX_DEBUG + rf->iw_msixtbl[i].idx = pmsix->entry; + rf->iw_msixtbl[i].irq = pmsix->vector; +#else + rf->iw_msixtbl[i].idx = entry + i; + vector = pci_irq_vector(rf->pcidev, (entry + i)); + rf->iw_msixtbl[i].irq = vector; +#endif + if (rf->msix_count <= (online_cpus_num + 1)) + rf->iw_msixtbl[i].cpu_affinity = ceq_idx; + else + rf->iw_msixtbl[i].cpu_affinity = + (ceq_idx % online_cpus_num); + if (!i) { + 
iw_qvinfo->aeq_idx = 0; + iw_qvinfo->ceq_idx = ZXDH_Q_INVALID_IDX; + } else { + iw_qvinfo->aeq_idx = ZXDH_Q_INVALID_IDX; + iw_qvinfo->ceq_idx = ceq_idx++; + } + iw_qvinfo->itr_idx = ZXDH_IDX_NOITR; + iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; +#ifdef MSIX_DEBUG + pmsix++; +#endif + } +#endif + return 0; +} + +/** + * zxdh_aeq_handler - interrupt handler for aeq + * @irq: Interrupt request number + * @data: RDMA PCI function + */ +static irqreturn_t zxdh_aeq_handler(int irq, void *data) +{ + struct zxdh_pci_f *rf = data; + + tasklet_schedule(&rf->dpc_tasklet); + + return IRQ_HANDLED; +} + +/** + * zxdh_ceq_handler - interrupt handler for ceq + * @irq: interrupt request number + * @data: ceq pointer + */ +static irqreturn_t zxdh_ceq_handler(int irq, void *data) +{ + struct zxdh_ceq *iwceq = data; + + if (iwceq->irq != irq) + dev_err(idev_to_dev(&iwceq->rf->sc_dev), + "expected irq = %d received irq = %d\n", iwceq->irq, + irq); + tasklet_schedule(&iwceq->dpc_tasklet); + + return IRQ_HANDLED; +} + +/** + * zxdh_destroy_irq - destroy device interrupts + * @msix_vec: msix vector to disable irq + * @dev_id: parameter to pass to free_irq (used during irq setup) + * + * The function is called when destroying aeq/ceq + */ +static void zxdh_destroy_irq(struct zxdh_msix_vector *msix_vec, void *dev_id) +{ + irq_set_affinity_hint(msix_vec->irq, NULL); + free_irq(msix_vec->irq, dev_id); +} + +/** + * zxdh_destroy_cqp - destroy control qp + * @rf: RDMA PCI function + * @free_hwcqp: 1 if hw cqp should be freed + * + * Issue destroy cqp request and + * free the resources associated with the cqp + */ +static void zxdh_destroy_cqp(struct zxdh_pci_f *rf, bool free_hwcqp) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp *cqp = &rf->cqp; + int status = 0; + + if (rf->cqp_cmpl_wq) + destroy_workqueue(rf->cqp_cmpl_wq); + status = zxdh_sc_cqp_destroy(dev->cqp, free_hwcqp); + if (status) + zxdh_dbg(dev, "ERR: Destroy CQP failed %d\n", status); + + zxdh_cleanup_pending_cqp_op(rf); + dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, + cqp->sq.pa); + cqp->sq.va = NULL; + kfree(cqp->scratch_array); + cqp->scratch_array = NULL; + kfree(cqp->cqp_requests); + cqp->cqp_requests = NULL; +} + +static void zxdh_destroy_virt_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_aeq *aeq = &rf->aeq; + u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); + dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; + + zxdh_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); + zxdh_free_pble(rf->pble_rsrc, &aeq->palloc); + vfree(aeq->mem.va); +} + +/** + * zxdh_destroy_aeq - destroy aeq + * @rf: RDMA PCI function + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +static void zxdh_destroy_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + int status = -EBUSY; +#ifdef MSIX_SUPPORT + zxdh_destroy_irq(rf->iw_msixtbl, rf); +#endif + aeq->sc_aeq.size = 0; + status = zxdh_cqp_aeq_cmd(dev, &aeq->sc_aeq, ZXDH_OP_AEQ_DESTROY); + if (status) + zxdh_dbg(dev, "ERR: Destroy AEQ failed %d\n", status); + + if (aeq->virtual_map) + zxdh_destroy_virt_aeq(rf); + else { + dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } +} + +/** + * zxdh_destroy_ceq - destroy ceq + * @rf: RDMA PCI function + * @iwceq: ceq to be destroyed + * + * Issue a destroy ceq request and + * free the resources associated with the ceq + */ +static void 
zxdh_destroy_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + + if (rf->reset) + goto exit; + + status = zxdh_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1); + if (status) { + zxdh_dbg(dev, "ERR: CEQ destroy command failed %d\n", status); + goto exit; + } + + status = zxdh_sc_cceq_destroy_done(&iwceq->sc_ceq); + if (status) + zxdh_dbg(dev, "ERR: CEQ destroy completion failed %d\n", + status); +exit: + dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va, + iwceq->mem.pa); + iwceq->mem.va = NULL; +} + +/** + * zxdh_del_ceq_0 - destroy ceq 0 + * @rf: RDMA PCI function + * + * Disable the ceq 0 interrupt and destroy the ceq 0 + */ +static void zxdh_del_ceq_0(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq = rf->ceqlist; + struct zxdh_msix_vector *msix_vec; + + msix_vec = &rf->iw_msixtbl[1]; + +#ifdef MSIX_SUPPORT + zxdh_destroy_irq(msix_vec, iwceq); +#endif + zxdh_destroy_ceq(rf, iwceq); + rf->sc_dev.ceq_valid = false; + rf->ceqs_count = 0; +} + +/** + * zxdh_del_ceqs - destroy all ceq's except CEQ 0 + * @rf: RDMA PCI function + * + * Go through all of the device ceq's, except 0, and for each + * ceq disable the ceq interrupt and destroy the ceq + */ +static void zxdh_del_ceqs(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq = &rf->ceqlist[1]; + + struct zxdh_msix_vector *msix_vec; + u32 i = 0; + + msix_vec = &rf->iw_msixtbl[2]; + for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { +#ifdef MSIX_SUPPORT + zxdh_destroy_irq(msix_vec, iwceq); +#endif + zxdh_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, + ZXDH_OP_CEQ_DESTROY); + dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, + iwceq->mem.va, iwceq->mem.pa); + iwceq->mem.va = NULL; + } + + rf->ceqs_count = 1; +} + +/** + * zxdh_destroy_ccq - destroy control cq + * @rf: RDMA PCI function + * + * Issue destroy ccq request and + * free the resources associated with the ccq + */ +static void zxdh_destroy_ccq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_ccq *ccq = &rf->ccq; + int status = 0; + + if (!rf->reset) + status = zxdh_sc_ccq_destroy(dev->ccq, 0, true); + if (status) + zxdh_dbg(dev, "ERR: CCQ destroy failed %d\n", status); + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va, + ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + dma_free_coherent(dev->hw->device, ccq->shadow_area.size, + ccq->shadow_area.va, ccq->shadow_area.pa); + ccq->shadow_area.va = NULL; + zxdh_free_rsrc(rf, rf->allocated_cqs, + ccq->sc_cq.cq_uk.cq_id - dev->base_cqn); +} + +/** + * zxdh_create_hmc_obj_type - create hmc object of a given type + * @dev: hardware control device structure + * @info: information for the hmc object to create + */ +static int zxdh_create_hmc_obj_type(struct zxdh_sc_dev *dev, + struct zxdh_hmc_create_obj_info *info) +{ + return zxdh_sc_create_hmc_obj(dev, info); +} + +void zxdh_del_hmc_objects(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info) +{ + unsigned int i, sd_idx; + u32 del_sd_cnt = 0; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem *mem = NULL; + struct zxdh_dma_mem *mem_harware = NULL; + + for (i = 0; i < hmc_info->hmc_entry_total; i++) { + if (!hmc_info->sd_table.sd_entry[i].valid) + continue; + zxdh_prep_remove_sd_bp(hmc_info, i); + hmc_info->sd_indexes[del_sd_cnt] = (u16)i; + del_sd_cnt++; + } + + for (i = 0; i < del_sd_cnt; i++) { + sd_idx = hmc_info->sd_indexes[i]; + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + mem = &sd_entry->u.bp.addr; + if (!mem || !mem->va) + pr_err("HMC: 
error cqp sd mem\n"); + else { + dma_free_coherent(dev->hw->device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + } + + mem_harware = &sd_entry->u.bp.addr_hardware; + if (mem_harware && mem_harware->va) { + dma_free_coherent(dev->hw->device, mem_harware->size, + mem_harware->va, mem_harware->pa); + mem_harware->va = NULL; + } + } +} + +/** + * zxdh_create_hmc_objs - create all hmc objects for the device + * @rf: RDMA PCI function + * @privileged: permission to create HMC objects + * + * Create the device hmc objects and allocate hmc pages + * Return 0 if successful, otherwise clean up and return error + */ +static int zxdh_create_hmc_objs(struct zxdh_pci_f *rf, bool privileged) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_hmc_create_obj_info info = {}; + int i, status = 0; + + info.hmc_info = dev->hmc_info; + info.privileged = privileged; + info.add_sd_cnt = 0; + + for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { + if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) { + info.rsrc_type = iw_hmc_obj_types[i]; + info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; + status = zxdh_create_hmc_obj_type(dev, &info); + if (status) { + zxdh_del_hmc_objects(&rf->sc_dev, + rf->sc_dev.hmc_info); + zxdh_dbg( + dev, + "ERR: create obj type %d status = %d\n", + iw_hmc_obj_types[i], status); + break; + } + } + } + + return status; +} + +static int zxdh_create_hmcobjs_dpuddr(struct zxdh_pci_f *rf) +{ + u32 sd_lmt, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_obj_info *obj_info; + + hmc_info = dev->hmc_info; + + zxdh_hmc_dpu_capability(dev); + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register(dev, hmc_info->hmc_obj, k, + dev->vhca_id); + } + + obj_info = hmc_info->hmc_obj; + for (k = ZXDH_HMC_IW_PBLE; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + sd_lmt = (u32)((fpm_limit - 1) / ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + + if (sd_lmt == 1) { + hmc_entry_total++; + } else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + zxdh_dbg( + dev, + "HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + + return 0; +} + +/** + * zxdh_create_cqp - create control qp + * @rf: RDMA PCI function + * + * Return 0, if the cqp and all the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_cqp(struct zxdh_pci_f *rf) +{ + u32 sqsize = ZXDH_CQP_SW_SQSIZE_2048; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp_init_info cqp_init_info = {}; + struct zxdh_cqp *cqp = + &rf->cqp; // this struct will be transferred to CQE. 
+ u16 maj_err, min_err; + int i, status; + + cqp->cqp_requests = + kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); + if (!cqp->cqp_requests) + return -ENOMEM; + + cqp->scratch_array = + kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); + if (!cqp->scratch_array) { + status = -ENOMEM; + goto err_scratch; + } + + dev->cqp = &cqp->sc_cqp; + dev->cqp->dev = dev; + cqp->sq.size = ALIGN(sizeof(struct zxdh_cqp_sq_wqe) * sqsize, + ZXDH_CQP_ALIGNMENT); + cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size, + &cqp->sq.pa, GFP_KERNEL); + if (!cqp->sq.va) { + status = -ENOMEM; + goto err_sq; + } + + // populate the cqp init info + cqp_init_info.dev = dev; + cqp_init_info.sq_size = sqsize; + cqp_init_info.sq = cqp->sq.va; + cqp_init_info.sq_pa = cqp->sq.pa; + if (dev->privileged) { + cqp_init_info.hmc_profile = rf->rsrc_profile; + cqp_init_info.ena_vf_count = rf->max_rdma_vfs; + } + cqp_init_info.scratch_array = cqp->scratch_array; + cqp_init_info.protocol_used = rf->protocol_used; + memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params, + sizeof(cqp_init_info.dcqcn_params)); + + cqp_init_info.hw_maj_ver = ZXDH_CQPHC_HW_MAJVER_GEN_2; + status = zxdh_sc_cqp_init(dev->cqp, &cqp_init_info); + if (status) { + pr_err("ERR: cqp init status %d\n", status); + goto err_ctx; + } + + spin_lock_init(&cqp->req_lock); + spin_lock_init(&cqp->compl_lock); + + status = zxdh_sc_cqp_create(dev->cqp, &maj_err, &min_err); + if (status) { + zxdh_dbg( + dev, + "ERR: cqp create failed - status %d maj_err %d min_err %d\n", + status, maj_err, min_err); + goto err_create; + } + + INIT_LIST_HEAD(&cqp->cqp_avail_reqs); + INIT_LIST_HEAD(&cqp->cqp_pending_reqs); + + /* init the waitqueue of the cqp_requests and add them to the list */ + for (i = 0; i < sqsize; i++) { + init_waitqueue_head(&cqp->cqp_requests[i].waitq); + list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); + } + init_waitqueue_head(&cqp->remove_wq); + return 0; + +err_create: +err_ctx: + dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, + cqp->sq.pa); + cqp->sq.va = NULL; +err_sq: + kfree(cqp->scratch_array); + cqp->scratch_array = NULL; +err_scratch: + kfree(cqp->cqp_requests); + cqp->cqp_requests = NULL; + + return status; +} + +/** + * zxdh_create_ccq - create control cq + * @rf: RDMA PCI function + * + * Return 0, if the ccq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_ccq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_ccq_init_info info = {}; + struct zxdh_ccq *ccq = &rf->ccq; + u32 cq_num = 0; + int status; + + status = zxdh_alloc_rsrc( + rf, rf->allocated_cqs, rf->max_cq, &cq_num, + &rf->next_cq); /* cq_num is the allocated cq_id. 
*/ + if (status) + return status; + cq_num += dev->base_cqn; + info.cq_num = cq_num; + dev->ccq = &ccq->sc_cq; + dev->ccq->dev = dev; + info.dev = dev; + ccq->shadow_area.size = sizeof(struct zxdh_cq_shadow_area); + ccq->mem_cq.size = ALIGN(sizeof(struct zxdh_cqe) * IW_CCQ_SIZE, + ZXDH_CQ0_ALIGNMENT); + ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size, + &ccq->mem_cq.pa, GFP_KERNEL); + if (!ccq->mem_cq.va) + return -ENOMEM; + + ccq->shadow_area.va = + dma_alloc_coherent(dev->hw->device, ccq->shadow_area.size, + &ccq->shadow_area.pa, GFP_KERNEL); + if (!ccq->shadow_area.va) { + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, + ccq->mem_cq.va, ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + zxdh_free_rsrc(rf, rf->allocated_cqs, cq_num - dev->base_cqn); + return -ENOMEM; + } + + ccq->sc_cq.back_cq = ccq; + /* populate the ccq init info */ + info.cq_base = ccq->mem_cq.va; + info.cq_pa = ccq->mem_cq.pa; + info.num_elem = IW_CCQ_SIZE; + info.shadow_area = ccq->shadow_area.va; + info.shadow_area_pa = ccq->shadow_area.pa; + info.ceqe_mask = false; + info.ceq_id_valid = true; + info.ceq_id = dev->base_ceqn; + info.ceq_index = 0; + info.shadow_read_threshold = 16; + info.cqe_size = ZXDH_CQE_SIZE_64; + info.cq_max = 0; + info.cq_period = 0; + info.scqe_break_moderation_en = false; + info.cq_st = 0; + info.is_in_list_cnt = 0; + + status = zxdh_sc_ccq_init(dev->ccq, &info); + if (status) + goto exit; + + status = zxdh_sc_ccq_create(dev->ccq, 0, true); +exit: + if (status) { + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, + ccq->mem_cq.va, ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + dma_free_coherent(dev->hw->device, ccq->shadow_area.size, + ccq->shadow_area.va, ccq->shadow_area.pa); + ccq->shadow_area.va = NULL; + zxdh_free_rsrc(rf, rf->allocated_cqs, cq_num - dev->base_cqn); + } + + return status; +} + +/** + * zxdh_cfg_ceq_vector - set up the msix interrupt vector for + * ceq + * @rf: RDMA PCI function + * @iwceq: ceq associated with the vector + * @ceq_id: the id number of the iwceq + * @msix_vec: interrupt vector information + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static int zxdh_cfg_ceq_vector(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq, + u32 ceq_id, struct zxdh_msix_vector *msix_vec) +{ +#ifndef MSIX_SUPPORT + return 0; +#endif + int status; + + tasklet_setup(&iwceq->dpc_tasklet, zxdh_ceq_dpc); + status = request_irq(msix_vec->irq, zxdh_ceq_handler, 0, "CEQ", iwceq); + cpumask_clear(&msix_vec->mask); + cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); + irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); + if (status) { + pr_err("ERR: ceq irq config fail\n"); + return status; + } + + msix_vec->ceq_id = ceq_id; + return 0; +} + +/** + * zxdh_cfg_aeq_vector - set up the msix vector for aeq + * @rf: RDMA PCI function + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static int zxdh_cfg_aeq_vector(struct zxdh_pci_f *rf) +{ +#ifndef MSIX_SUPPORT + return 0; +#endif + struct zxdh_msix_vector *msix_vec = rf->iw_msixtbl; + u32 ret = 0; + + tasklet_setup(&rf->dpc_tasklet, zxdh_dpc); + ret = request_irq(msix_vec->irq, zxdh_aeq_handler, 0, "AEQ", rf); + if (ret) { + pr_err("ERR: aeq irq config fail\n"); + return -EINVAL; + } + rf->sc_dev.irq_ops->zxdh_cfg_aeq(&rf->sc_dev, msix_vec->idx); + return 0; +} + +/** + * zxdh_create_ceq - create completion event queue + * @rf: RDMA PCI function + * @iwceq: pointer to the ceq 
resources to be created + * @ceq_id: the id number of the iwceq + * + * Return 0, if the ceq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq, + u32 ceq_id) +{ + int status; + struct zxdh_ceq_init_info info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u64 scratch; + u32 ceq_size; + u32 log2_ceq_size; + + info.ceq_id = ceq_id; + info.ceq_index = ceq_id - dev->base_ceqn; + iwceq->rf = rf; + ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt, + dev->hw_attrs.max_hw_ceq_size); + ceq_size = roundup_pow_of_two(ceq_size); + log2_ceq_size = order_base_2(ceq_size); + + iwceq->mem.size = + ALIGN(sizeof(struct zxdh_ceqe) * ceq_size, ZXDH_CEQ_ALIGNMENT); + iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size, + &iwceq->mem.pa, GFP_KERNEL); + if (!iwceq->mem.va) + return -ENOMEM; + + info.ceqe_base = iwceq->mem.va; + info.ceqe_pa = iwceq->mem.pa; + info.elem_cnt = ceq_size; + info.log2_elem_size = log2_ceq_size; + info.msix_idx = iwceq->msix_idx; + iwceq->sc_ceq.ceq_id = ceq_id; + info.dev = dev; + scratch = (uintptr_t)&rf->cqp.sc_cqp; + status = zxdh_sc_ceq_init(&iwceq->sc_ceq, &info); + + if (!status) { + if (dev->ceq_valid) + status = zxdh_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, + ZXDH_OP_CEQ_CREATE); + else + status = zxdh_sc_cceq_create(&iwceq->sc_ceq, scratch); + } + + if (status) { + dma_free_coherent(dev->hw->device, iwceq->mem.size, + iwceq->mem.va, iwceq->mem.pa); + iwceq->mem.va = NULL; + } + + return status; +} + +/** + * zxdh_setup_ceq_0 - create CEQ 0 and it's interrupt resource + * @rf: RDMA PCI function + * + * Allocate a list for all device completion event queues + * Create the ceq 0 and configure it's msix interrupt vector + * Return 0, if successfully set up, otherwise return error + */ +static int zxdh_setup_ceq_0(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq; + struct zxdh_msix_vector *msix_vec; + int status = 0; + u32 num_ceqs; + + num_ceqs = min(rf->msix_count, rf->sc_dev.max_ceqs); + rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); + if (!rf->ceqlist) { + status = -ENOMEM; + goto exit; + } + + iwceq = &rf->ceqlist[0]; + //0 is aeq, 1~xx is ceq + msix_vec = &rf->iw_msixtbl[1]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = zxdh_create_ceq(rf, iwceq, rf->sc_dev.base_ceqn); + if (status) { + pr_err("ERR: create ceq status = %d\n", status); + goto exit; + } + + spin_lock_init(&iwceq->ce_lock); + status = zxdh_cfg_ceq_vector(rf, iwceq, rf->sc_dev.base_ceqn, msix_vec); + if (status) { + zxdh_destroy_ceq(rf, iwceq); + goto exit; + } + + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); + rf->ceqs_count++; + +exit: + if (status && !rf->ceqs_count) { + kfree(rf->ceqlist); + rf->ceqlist = NULL; + return status; + } + rf->sc_dev.ceq_valid = true; + + return 0; +} + +/** + * zxdh_setup_ceqs - manage the device ceq's and their interrupt resources + * @rf: RDMA PCI function + * + * Allocate a list for all device completion event queues + * Create the ceq's and configure their msix interrupt vectors + * Return 0, if ceqs are successfully set up, otherwise return error + */ +static int zxdh_setup_ceqs(struct zxdh_pci_f *rf) +{ + u32 i; + u32 ceq_id; + u32 ceq_id_offset; + struct zxdh_ceq *iwceq; + struct zxdh_msix_vector *msix_vec; + int status; + u32 num_ceqs; + + num_ceqs = min(rf->msix_count, rf->sc_dev.max_ceqs); + i = 2; + for (ceq_id_offset = 1; ceq_id_offset < num_ceqs; + i++, 
ceq_id_offset++) { + iwceq = &rf->ceqlist[ceq_id_offset]; + ceq_id = rf->sc_dev.base_ceqn + ceq_id_offset; + msix_vec = &rf->iw_msixtbl[i]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = zxdh_create_ceq(rf, iwceq, ceq_id); + if (status) { + pr_err("ERR: create ceq status = %d\n", status); + goto del_ceqs; + } + spin_lock_init(&iwceq->ce_lock); + status = zxdh_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); + if (status) { + zxdh_destroy_ceq(rf, iwceq); + goto del_ceqs; + } + + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); + rf->ceqs_count++; + } + + return 0; + +del_ceqs: + zxdh_del_ceqs(rf); + + return status; +} + +#if 0 +static int zxdh_create_virt_aeq(struct zxdh_pci_f *rf, u32 size) +{ + struct zxdh_aeq *aeq = &rf->aeq; + dma_addr_t *pg_arr; + u32 pg_cnt; + int status; + + if (rf->rdma_ver < ZXDH_GEN_2) + return -EOPNOTSUPP; + + aeq->mem.size = sizeof(struct zxdh_sc_aeqe) * size; + aeq->mem.va = vzalloc(aeq->mem.size); + + if (!aeq->mem.va) + return -ENOMEM; + + pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); + status = zxdh_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); + if (status) { + vfree(aeq->mem.va); + return status; + } + + pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; + status = zxdh_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); + if (status) { + zxdh_free_pble(rf->pble_rsrc, &aeq->palloc); + vfree(aeq->mem.va); + return status; + } + + return 0; +} +#endif + +/** + * zxdh_create_aeq - create async event queue + * @rf: RDMA PCI function + * + * Return 0, if the aeq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_aeq_init_info info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + struct zxdh_hmc_info *hmc_info = rf->sc_dev.hmc_info; + u32 aeq_size; + u8 multiplier = (rf->protocol_used == ZXDH_IWARP_PROTOCOL_ONLY) ? 2 : 1; + int status; + + aeq_size = multiplier * hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt + + hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt + + hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt; + aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); + + aeq->mem.size = ALIGN(sizeof(struct zxdh_sc_aeqe) * aeq_size, + ZXDH_AEQ_ALIGNMENT); + aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size, + &aeq->mem.pa, + GFP_KERNEL | __GFP_NOWARN); + + if (aeq->mem.va) + goto skip_virt_aeq; + + pr_err("aeq_size out of range, failed to apply for physical memory!\n"); + return -ENOMEM; + +#if 0 + /* physically mapped aeq failed. 
setup virtual aeq */ + status = zxdh_create_virt_aeq(rf, aeq_size); + if (status) + return status; + + info.virtual_map = true; + aeq->virtual_map = info.virtual_map; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = aeq->palloc.level1.idx; +#endif + +skip_virt_aeq: + info.aeqe_base = aeq->mem.va; + info.aeq_elem_pa = aeq->mem.pa; + info.elem_cnt = aeq_size; + info.dev = dev; + info.msix_idx = rf->iw_msixtbl->idx; + status = zxdh_sc_aeq_init(&aeq->sc_aeq, &info); + if (status) + goto err; + + status = zxdh_cqp_aeq_create(&aeq->sc_aeq); + if (status) + goto err; + + return 0; + +err: + if (aeq->virtual_map) + zxdh_destroy_virt_aeq(rf); + else { + dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } + return status; +} + +/** + * zxdh_setup_aeq - set up the device aeq + * @rf: RDMA PCI function + * + * Create the aeq and configure its msix interrupt vector + * Return 0 if successful, otherwise return error + */ +static int zxdh_setup_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + + status = zxdh_create_aeq(rf); + if (status) + return status; + status = zxdh_cfg_aeq_vector(rf); + if (status) { + zxdh_init_destroy_aeq(rf); + return status; + } + zxdh_aeq_ena_intr(dev, true); + return 0; +} + +/** + * zxdh_hmc_setup - create hmc objects for the device + * @rf: RDMA PCI function + * + * Set up the device private memory space for the number and size of + * the hmc objects and create the objects + * Return 0 if successful, otherwise return error + */ +static int zxdh_hmc_setup(struct zxdh_pci_f *rf) +{ + int status; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 qpcnt = 0; + + status = zxdh_cfg_fpm_val(dev, qpcnt); + if (status) + return status; + + status = zxdh_create_hmc_objs(rf, true); + + return status; +} + +/** + * zxdh_del_init_mem - deallocate memory resources + * @rf: RDMA PCI function + */ +static void zxdh_del_init_mem(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + + kfree(dev->hmc_info->sd_table.sd_entry); + dev->hmc_info->sd_table.sd_entry = NULL; + vfree(rf->mem_rsrc); + rf->mem_rsrc = NULL; + + kfree(rf->ceqlist); + rf->ceqlist = NULL; + kfree(rf->iw_msixtbl); + rf->iw_msixtbl = NULL; + kfree(rf->hmc_info_mem); + rf->hmc_info_mem = NULL; +} + +/** + * zxdh_initialize_dev - initialize device + * @rf: RDMA PCI function + * + * Allocate memory for the hmc objects and initialize iwdev + * Return 0 if successful, otherwise clean up the resources + * and return error + */ +static int zxdh_initialize_dev(struct zxdh_pci_f *rf) +{ + struct zxdh_device_init_info info = {}; + + info.bar0 = rf->hw.hw_addr; + info.privileged = !rf->ftype; + info.max_vfs = rf->max_rdma_vfs; + info.hw = &rf->hw; + zxdh_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); + + return 0; +} + +/** + * zxdh_rt_deinit_hw - clean up the zrdma device resources + * @iwdev: zrdma device + * + * remove the mac ip entry and ipv4/ipv6 addresses, destroy the + * device queues and free the pble and the hmc objects + */ +void zxdh_rt_deinit_hw(struct zxdh_device *iwdev) +{ + switch (iwdev->init_state) { + case AEQ_CREATED: + case PBLE_CHUNK_MEM: + case CEQS_CREATED: + default: + dev_warn(idev_to_dev(&iwdev->rf->sc_dev), + "bad init_state = %d\n", iwdev->init_state); + break; + } + + if (iwdev->cleanup_wq) + destroy_workqueue(iwdev->cleanup_wq); +} + +static int zxdh_setup_init_state(struct zxdh_pci_f *rf) +{ + int status; + + status = zxdh_save_msix_info(rf); + if (status) + return status; + rf->hw.device = 
&rf->pcidev->dev; + + mutex_init(&rf->sc_dev.vchnl_mutex); + status = zxdh_initialize_dev(rf); + if (status) + goto clean_msixtbl; + + return 0; + +clean_msixtbl: + kfree(rf->iw_msixtbl); + rf->iw_msixtbl = NULL; + return status; +} + +/** + * zxdh_get_used_rsrc - determine resources used internally + * @iwdev: zrdma device + * + * Called at the end of open to get all internal allocations + */ +static void zxdh_get_used_rsrc(struct zxdh_device *iwdev) +{ + iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, + iwdev->rf->max_pd, 0); + iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, + iwdev->rf->max_qp, 0); + iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, + iwdev->rf->max_cq, 0); + iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, + iwdev->rf->max_mr, 0); + iwdev->rf->used_srqs = find_next_zero_bit(iwdev->rf->allocated_srqs, + iwdev->rf->max_srq, 0); +} + +void zxdh_ctrl_deinit_hw(struct zxdh_pci_f *rf) +{ + enum init_completion_state state = rf->init_state; + + rf->init_state = INVALID_STATE; + if (rf->rsrc_created) { + zxdh_destroy_aeq(rf); + zxdh_destroy_pble_prm(rf->pble_rsrc); + zxdh_destroy_pble_prm(rf->pble_mr_rsrc); + zxdh_del_ceqs(rf); + rf->rsrc_created = false; + } + + switch (state) { + case CEQ0_CREATED: + zxdh_del_ceq_0(rf); + fallthrough; + case CCQ_CREATED: + zxdh_destroy_ccq(rf); + fallthrough; + case HW_RSRC_INITIALIZED: + case HMC_OBJS_CREATED: + zxdh_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info); + fallthrough; + case CQP_QP_CREATED: + zxdh_destroy_cqp_qp(rf); + fallthrough; + case SMMU_PAGETABLE_INITIALIZED: + if (!rf->ftype) { + zxdh_smmu_pagetable_exit(&rf->sc_dev); + } + fallthrough; + case CQP_CREATED: + zxdh_destroy_cqp(rf, !rf->reset); + // if(rf->ftype) // VF + // zxdh_sc_send_mailbox_cmd(&rf->sc_dev,ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE,0,0,0,rf->vf_id); + fallthrough; + case INITIAL_STATE: + zxdh_del_init_mem(rf); + break; + case INVALID_STATE: + default: + pr_warn("bad init_state = %d\n", rf->init_state); + break; + } +} + +/** + * zxdh_rt_init_hw - Initializes runtime portion of HW + * @iwdev: zrdma device + * + * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup zrdma + * device resource objects. 
+ */ +int zxdh_rt_init_hw(struct zxdh_device *iwdev) +{ + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + + zxdh_sc_dev_qplist_init(dev); + do { + if (!rf->rsrc_created) { + status = zxdh_setup_ceqs(rf); + if (status) + break; + + iwdev->init_state = CEQS_CREATED; + + rf->pble_rsrc->fpm_base_addr = + rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE] + .base; + rf->sc_dev.hmc_info->pble_hmc_index = + rf->sc_dev.hmc_info->hmc_first_entry_pble; + status = zxdh_hmc_init_pble(&rf->sc_dev, rf->pble_rsrc, + PBLE_QUEUE); + if (status) { + zxdh_del_ceqs(rf); + break; + } + pr_info("vhca:%d init pble_q iova:0x%llx\n", + dev->vhca_id, rf->pble_rsrc->fpm_base_addr); + rf->pble_mr_rsrc->fpm_base_addr = + rf->sc_dev.hmc_info + ->hmc_obj[ZXDH_HMC_IW_PBLE_MR] + .base; + rf->sc_dev.hmc_info->pble_mr_hmc_index = + rf->sc_dev.hmc_info->hmc_first_entry_pble_mr; + status = zxdh_hmc_init_pble(&rf->sc_dev, + rf->pble_mr_rsrc, PBLE_MR); + pr_info("vhca:%d init pble_mr iova:0x%llx status=%d\n", + dev->vhca_id, rf->pble_mr_rsrc->fpm_base_addr, + status); + + iwdev->init_state = PBLE_CHUNK_MEM; + rf->rsrc_created = true; + } + + iwdev->device_cap_flags = + IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW | + IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_BAD_QKEY_CNTR | + IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_N_NOTIFY_CQ; + + iwdev->cleanup_wq = alloc_workqueue( + "zrdma-cleanup-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + if (!iwdev->cleanup_wq) + return -ENOMEM; + + zxdh_get_used_rsrc(iwdev); + init_waitqueue_head(&iwdev->suspend_wq); + + return 0; + } while (0); + + dev_err(idev_to_dev(dev), + "HW runtime init FAIL status = %d last cmpl = %d\n", status, + iwdev->init_state); + zxdh_rt_deinit_hw(iwdev); + + return status; +} + +static void zxdh_config_tx_regs(struct zxdh_sc_dev *dev) +{ + u32 temp; + + temp = FIELD_PREP(ZXDH_TX_CACHE_ID, 0) | + FIELD_PREP(ZXDH_TX_INDICATE_ID, ZXDH_INDICATE_HOST_NOSMMU) | + FIELD_PREP(ZXDH_TX_AXI_ID, (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_TX_WAY_PARTITION, 0); + + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_ACK_SQWQE_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_ACK_DDR_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_DB_SQWQE_ID_CFG)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_SQWQE_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_PAYLOAD_PARA_CFG)); + + temp = FIELD_PREP(ZXDH_TX_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_TX_INDICATE_ID, ZXDH_INDICATE_DPU_DDR) | + FIELD_PREP(ZXDH_TX_AXI_ID, (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_TX_WAY_PARTITION, 0); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + C_HMC_MRTE_TX2)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + C_HMC_PBLEMR_TX2)); + + writel((ZXDH_AXID_HOST_EP0 + dev->ep_id), + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_HOSTID_CFG)); + + /*adding token config to 200Gbps, equal to time(us)*size(Byte)*/ + writel(0x1, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_ADD_TOKEN_CHANGE_EN)); + writel(0x1900, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_TIME_ADD_TOKEN_CFG)); + writel(0x132d7, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_SIZE_ADD_TOKEN_CFG)); + writel(0x3FFFFFF, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_TOKEN_MAX_CFG)); +} + +static void zxdh_config_rx_regs(struct zxdh_sc_dev *dev) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + u32 temp; + + temp = FIELD_PREP(ZXDH_RX_CACHE_ID, 0) | + 
FIELD_PREP(ZXDH_RX_INDICATE_ID, ZXDH_INDICATE_HOST_NOSMMU) | + FIELD_PREP(ZXDH_RX_AXI_ID, (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_RX_WAY_PARTITION, 0); + + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_PLD_WR_AXIID_RAM)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RQ_AXI_RAM)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_SRQ_AXI_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_ACK_RQDB_AXI_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_CQ_CQE_AXI_INFO_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_CQ_DBSA_AXI_INFO_RAM)); + writel(dev->hmc_fn_id, (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_MUL_CACHE_CFG_SIDN_RAM)); + writel((ZXDH_AXID_HOST_EP0 + dev->ep_id), + (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_MUL_COPY_QPN_INDICATE)); + writel(RDMARX_MAX_MSG_SIZE, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_VHCA_MAX_SIZE_RAM)); + + if (rf->ftype == 0) { + // writel(ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_INDEX_SUM_RAM)); + // writel(1, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_VLD_RAM)); + // writel(dev->vhca_id, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_VHCA_RAM)); + } +} + +static void zxdh_config_io_regs(struct zxdh_sc_dev *dev) +{ + u32 temp0, temp1, temp2; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + temp0 = FIELD_PREP(ZXDH_IOTABLE2_SID, dev->hmc_fn_id); + writel(temp0, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE2)); + + temp1 = FIELD_PREP(ZXDH_IOTABLE4_EPID, + (ZXDH_HOST_EP0_ID + dev->ep_id)) | + FIELD_PREP(ZXDH_IOTABLE4_VFID, dev->vf_id) | + FIELD_PREP(ZXDH_IOTABLE4_PFID, rf->pf_id); + pr_info("%s %d hmc_epid:%d vf_id:%d hmc_fn_id:%d\n", __func__, __LINE__, + dev->hmc_epid, dev->vf_id, dev->hmc_fn_id); + writel(temp1, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE4)); + + temp0 = 0x10000; + writel(temp0, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE3)); + for (temp0 = 0; temp0 < 32; temp0++) { + if (temp0 < ZXDH_RW_PAYLOAD || temp0 == ZXDH_QPC_OBJ_ID) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMAIO_TABLE5_0 + + (temp0 * 4))); + } else { + writel((rf->ftype), (u32 __iomem *)(dev->hw->hw_addr + + C_RDMAIO_TABLE5_0 + + (temp0 * 4))); + } + } + + if (rf->ftype == 0) { + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_0)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_1)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_2)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_3)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_4)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_5)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_6)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_7)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_8)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_9)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_10)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_11)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_12)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_13)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_14)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_15)); + + temp2 = FIELD_PREP(ZXDH_IOTABLE7_PFID, rf->pf_id) | + 
FIELD_PREP(ZXDH_IOTABLE7_EPID, + (ZXDH_HOST_EP0_ID + rf->ep_id)); + writel(temp2, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE7)); + } +} + +static void zxdh_config_hw_regs(struct zxdh_sc_dev *dev) +{ + zxdh_config_tx_regs(dev); + zxdh_config_rx_regs(dev); + zxdh_config_io_regs(dev); +} + +/** + * zxdh_ctrl_init_hw - Initializes control portion of HW + * @rf: RDMA PCI function + * + * Create admin queues, HMC objects and RF resource objects + */ +int zxdh_ctrl_init_hw(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cnt = 0, k = 0; + int status = 0; + u64 pa = 0; + void *va = NULL; + struct zxdh_dma_write32_date dma_data = {}; + + do { + status = zxdh_setup_init_state(rf); + if (status) + break; + rf->init_state = INITIAL_STATE; + + zxdh_config_hw_regs(dev); + + status = zxdh_create_cqp(rf); + if (status) + break; + rf->init_state = CQP_CREATED; + zxdh_init_destroy_aeq(rf); + if (!rf->ftype) { + status = zxdh_smmu_pagetable_init(dev); + if (status) + break; + rf->init_state = SMMU_PAGETABLE_INITIALIZED; + + if (dev->hmc_use_dpu_ddr) { + status = zxdh_clear_dpuddr( + dev, (0x100000 * 0x63e), + true); //TODO:VF clear dpu ddr + if (status) { + if (dev->clear_dpu_mem.va) { + dma_free_coherent( + dev->hw->device, + dev->clear_dpu_mem.size, + dev->clear_dpu_mem.va, + dev->clear_dpu_mem.pa); + dev->clear_dpu_mem.va = NULL; + } + break; + } + status = zxdh_create_hmcobjs_dpuddr(rf); + } else + status = zxdh_hmc_setup(rf); + + if (dev->clear_dpu_mem.va) { + dma_free_coherent(dev->hw->device, + dev->clear_dpu_mem.size, + dev->clear_dpu_mem.va, + dev->clear_dpu_mem.pa); + dev->clear_dpu_mem.va = NULL; + } + } else if (rf->ftype == 1) { + zxdh_hmc_dpu_capability(dev); + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register( + dev, dev->hmc_info->hmc_obj, k, + dev->vhca_id); + } + zxdh_create_vf_pblehmc_entry(dev); + } else { + pr_err("invalid ftype\n"); + status = -EINVAL; + } + + if (status) + break; + rf->init_state = HMC_OBJS_CREATED; + + status = zxdh_initialize_hw_rsrc(rf); + if (status) + break; + rf->init_state = HW_RSRC_INITIALIZED; + status = zxdh_create_cqp_qp(rf); + if (status) + break; + rf->init_state = CQP_QP_CREATED; + + status = zxdh_setup_aeq(rf); + if (status) + break; + rf->init_state = AEQ_CREATED; + + status = zxdh_create_ccq(rf); + if (status) + break; + rf->init_state = CCQ_CREATED; + + status = zxdh_setup_ceq_0(rf); + if (status) + break; + + rf->sc_dev.ceq_0_ok = true; + rf->sc_dev.ceq_interrupt = false; + rf->init_state = CEQ0_CREATED; + /* Handles processing of CQP completions */ + rf->cqp_cmpl_wq = alloc_ordered_workqueue( + "cqp_cmpl_wq", WQ_HIGHPRI | WQ_UNBOUND); + if (!rf->cqp_cmpl_wq) { + status = -ENOMEM; + break; + } + INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); +#ifdef MSIX_SUPPORT + zxdh_sc_ccq_arm(dev->ccq); +#endif + + if (rf->ftype == 1 && !dev->hmc_use_dpu_ddr) { + struct zxdh_src_copy_dest src_dest = {}; + + va = kmalloc(8, GFP_KERNEL); + memset((void *)va, 0, 8); + pa = __pa(va); + dma_data.num = 1; + dma_data.addrbuf[0] = + C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE( + dev->vhca_id); + dma_data.databuf[0] = 0; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + + zxdh_sc_send_mailbox_cmd(dev, + ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN, + 0x12, 0x13, 0x15, dev->vf_id); + src_dest.src = dma_data.addrbuf[0]; + src_dest.dest = pa; + src_dest.len = 0x08; + do { + zxdh_cqp_rdma_readreg_cmd(dev, &src_dest); + if (cnt++ > dev->hw_attrs.max_done_count) { + status = -ETIMEDOUT; + break; + } + + udelay(dev->hw_attrs.max_sleep_count * 2); 
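+ /* Keep polling the readback buffer: the distribute-done word was cleared to 0 above and is presumably set non-zero by the PF once the VF's HMC/CQP/CQ resources have been distributed; the max_done_count check above bounds the wait with -ETIMEDOUT. */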
+ + } while (!(*(u64 *)va)); + + kfree(va); + } + + return 0; + } while (0); + + pr_err("ZRDMA hardware initialization FAILED init_state=%d status=%d\n", + rf->init_state, status); + zxdh_ctrl_deinit_hw(rf); + return status; +} + +/** + * zxdh_set_hw_rsrc - set hw memory resources. + * @rf: RDMA PCI function + */ +static void zxdh_set_hw_rsrc(struct zxdh_pci_f *rf) +{ +#ifdef Z_CONFIG_RDMA_ARP + rf->allocated_srqs = + (void *)(rf->mem_rsrc + + (sizeof(struct zxdh_arp_entry) * rf->arp_table_size)); +#else + rf->allocated_srqs = (void *)(rf->mem_rsrc); +#endif + rf->allocated_qps = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)]; + rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; + rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; + rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; + rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; + rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; +#ifdef Z_CONFIG_RDMA_ARP + rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; + rf->qp_table = (struct zxdh_qp **)(&rf->allocated_arps[BITS_TO_LONGS( + rf->arp_table_size)]); + +#else + rf->qp_table = + (struct zxdh_qp * + *)(&rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]); +#endif + rf->cq_table = (struct zxdh_cq **)(&rf->qp_table[rf->max_qp]); + rf->srq_table = (struct zxdh_srq **)(&rf->cq_table[rf->max_cq]); + + spin_lock_init(&rf->rsrc_lock); +#ifdef Z_CONFIG_RDMA_ARP + spin_lock_init(&rf->arp_lock); +#endif + spin_lock_init(&rf->qptable_lock); + spin_lock_init(&rf->cqtable_lock); + spin_lock_init(&rf->srqtable_lock); +} + +/** + * zxdh_calc_mem_rsrc_size - calculate memory resources size. + * @rf: RDMA PCI function + */ +static u32 zxdh_calc_mem_rsrc_size(struct zxdh_pci_f *rf) +{ + u32 rsrc_size; + +#ifdef Z_CONFIG_RDMA_ARP + rsrc_size = sizeof(struct zxdh_arp_entry) * rf->arp_table_size; + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); +#else + rsrc_size = sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); +#endif + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); +#ifdef Z_CONFIG_RDMA_ARP + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); +#endif + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); + rsrc_size += sizeof(struct zxdh_qp **) * rf->max_qp; + rsrc_size += sizeof(struct zxdh_cq **) * rf->max_cq; + rsrc_size += sizeof(struct zxdh_srq **) * rf->max_srq; + + return rsrc_size; +} + +/** + * zxdh_initialize_hw_rsrc - initialize hw resource tracking array + * @rf: RDMA PCI function + */ +u32 zxdh_initialize_hw_rsrc(struct zxdh_pci_f *rf) +{ + u32 rsrc_size; + u32 mrdrvbits; + u32 ret; + + rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; + rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt; + rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_MR].cnt; + rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt; + rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt; + rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; + rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_AH].cnt; + rf->max_mcg = rf->max_qp; + + rsrc_size = zxdh_calc_mem_rsrc_size(rf); + rf->mem_rsrc = vzalloc(rsrc_size); + if (!rf->mem_rsrc) { + ret = -ENOMEM; + 
goto mem_rsrc_vmalloc_fail; + } +#ifdef Z_CONFIG_RDMA_ARP + rf->arp_table = (struct zxdh_arp_entry *)rf->mem_rsrc; +#endif + + zxdh_set_hw_rsrc(rf); + + set_bit(0, rf->allocated_mrs); + set_bit(1, rf->allocated_mrs); + set_bit(0, rf->allocated_pds); + set_bit(0, rf->allocated_qps); +#ifdef Z_CONFIG_RDMA_ARP + set_bit(0, rf->allocated_arps); +#endif + set_bit(0, rf->allocated_ahs); + set_bit(0, rf->allocated_mcgs); + set_bit(0, rf->allocated_srqs); + + /* stag index mask has a minimum of 14 bits */ + mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); + rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); + + return 0; + +mem_rsrc_vmalloc_fail: + return ret; +} + +/** + * zxdh_cqp_ce_handler - handle cqp completions + * @rf: RDMA PCI function + * @cq: cq for cqp completions + */ +void zxdh_cqp_ce_handler(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq) +{ + struct zxdh_cqp_request *cqp_request; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cqe_count = 0; + struct zxdh_ccq_cqe_info info; + unsigned long flags; + int ret = 0; + + do { + memset(&info, 0, sizeof(info)); + spin_lock_irqsave(&rf->cqp.compl_lock, flags); + ret = zxdh_sc_ccq_get_cqe_info(cq, &info); + spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); + if (ret) + break; + + cqp_request = + (struct zxdh_cqp_request *)(unsigned long)info.scratch; + if (info.error && + zxdh_cqp_crit_err(dev, cqp_request->info.cqp_cmd, + info.maj_err_code, info.min_err_code)) + pr_err("cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", + info.op_code, info.maj_err_code, + info.min_err_code); + if (cqp_request && (info.mailbox_cqe != 1)) { + cqp_request->compl_info.maj_err_code = + info.maj_err_code; + cqp_request->compl_info.min_err_code = + info.min_err_code; + cqp_request->compl_info.op_ret_val = info.op_ret_val; + cqp_request->compl_info.error = info.error; + + if (info.op_code == ZXDH_CQP_OP_WQE_DMA_READ_USECQE) { + cqp_request->compl_info.addrbuf[0] = + info.addrbuf[0]; + cqp_request->compl_info.addrbuf[1] = + info.addrbuf[1]; + cqp_request->compl_info.addrbuf[2] = + info.addrbuf[2]; + cqp_request->compl_info.addrbuf[3] = + info.addrbuf[3]; + cqp_request->compl_info.addrbuf[4] = + info.addrbuf[4]; + } + + if (cqp_request->waiting) { + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + } else { + if (cqp_request->callback_fcn) + cqp_request->callback_fcn(cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + } + } else if (info.mailbox_cqe == 1) { + if (rf->ftype == 0) + zxdh_pf_recv_mb(dev, &info); + } + + cqe_count++; + } while (1); + + if (cqe_count) { + zxdh_sc_ccq_arm(dev->ccq); + dev->ceq_interrupt = false; + zxdh_process_bh(dev); + } + if (dev->ceq_interrupt == true) { + zxdh_sc_ccq_arm(dev->ccq); + dev->ceq_interrupt = false; + } +} + +/** + * cqp_compl_worker - Handle cqp completions + * @work: Pointer to work structure + */ +void cqp_compl_worker(struct work_struct *work) +{ + struct zxdh_pci_f *rf = + container_of(work, struct zxdh_pci_f, cqp_cmpl_work); + struct zxdh_sc_cq *cq = &rf->ccq.sc_cq; + + zxdh_cqp_ce_handler(rf, cq); +} + +/** + * zxdh_hw_flush_wqes - flush qp's wqe + * @rf: RDMA PCI function + * @qp: hardware control qp + * @info: info for flush + * @wait: flag wait for completion + */ +int zxdh_hw_flush_wqes(struct zxdh_pci_f *rf, struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, bool wait) +{ + int status; + struct zxdh_qp_flush_info *hw_info; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + 
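	/*
	 * Post a ZXDH_OP_QP_FLUSH_WQES command on the control QP: the flush
	 * parameters are copied into the CQP request payload and, when @wait
	 * is set, the completion's minor error code is used below to detect
	 * the case where a requested SQ/RQ flush did not generate flush CQEs,
	 * so the flush can be marked complete locally.  Callers typically
	 * fill a struct zxdh_qp_flush_info on the stack, e.g.
	 *
	 *	struct zxdh_qp_flush_info info = { .sq = true, .rq = true };
	 *
	 *	zxdh_hw_flush_wqes(rf, &iwqp->sc_qp, &info, true);
	 *
	 * see zxdh_flush_wqes() below for the full setup, including the
	 * user-flush error codes.
	 */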
cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; + memcpy(hw_info, info, sizeof(*hw_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_FLUSH_WQES; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_flush_wqes.qp = qp; + cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + if (status) { + qp->qp_uk.sq_flush_complete = true; + qp->qp_uk.rq_flush_complete = true; + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; + } + + if (!wait || cqp_request->compl_info.maj_err_code) + goto put_cqp; + + if (info->rq) { + if (cqp_request->compl_info.min_err_code == + ZXDH_CQP_COMPL_SQ_WQE_FLUSHED || + cqp_request->compl_info.min_err_code == 0) { + /* RQ WQE flush was requested but did not happen */ + qp->qp_uk.rq_flush_complete = true; + } + } + if (info->sq) { + if (cqp_request->compl_info.min_err_code == + ZXDH_CQP_COMPL_RQ_WQE_FLUSHED || + cqp_request->compl_info.min_err_code == 0) { + /* SQ WQE flush was requested but did not happen */ + qp->qp_uk.sq_flush_complete = true; + } + } + +put_cqp: + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_flush_wqes(struct zxdh_qp *iwqp, u32 flush_mask) +{ + struct zxdh_qp_flush_info info = {}; + struct zxdh_pci_f *rf = iwqp->iwdev->rf; + u8 flush_code = iwqp->sc_qp.flush_code; + + if (!(flush_mask & ZXDH_FLUSH_SQ) && !(flush_mask & ZXDH_FLUSH_RQ)) + return; + + if (iwqp->sc_qp.is_nvmeof_ioq) + return; + + /* Set flush info fields*/ + info.sq = flush_mask & ZXDH_FLUSH_SQ; + info.rq = flush_mask & ZXDH_FLUSH_RQ; + + /* Generate userflush errors in CQE */ + info.sq_major_code = ZXDH_FLUSH_MAJOR_ERR; + info.sq_minor_code = FLUSH_GENERAL_ERR; + info.rq_major_code = ZXDH_FLUSH_MAJOR_ERR; + info.rq_minor_code = FLUSH_GENERAL_ERR; + info.userflushcode = true; + + if (flush_mask & ZXDH_REFLUSH) { + if (info.sq) + iwqp->sc_qp.flush_sq = false; + if (info.rq) + iwqp->sc_qp.flush_rq = false; + } else { + if (flush_code) { + if (info.sq && iwqp->sc_qp.sq_flush_code) + info.sq_minor_code = flush_code; + if (info.rq && iwqp->sc_qp.rq_flush_code) + info.rq_minor_code = flush_code; + } + } + + /* Issue flush */ + (void)zxdh_hw_flush_wqes(rf, &iwqp->sc_qp, &info, + flush_mask & ZXDH_FLUSH_WAIT); + iwqp->flush_issued = true; +} diff --git a/src/rdma/src/icrdma_hw.c b/src/rdma/src/icrdma_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..15ea01bb5800d8210dc6ea57e70904e9d5025577 --- /dev/null +++ b/src/rdma/src/icrdma_hw.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "type.h" +#include "icrdma_hw.h" +#include "main.h" + +static u64 icrdma_masks[ZXDH_MAX_MASKS] = { + ICRDMA_CCQPSTATUS_CCQP_DONE, ICRDMA_CCQPSTATUS_CCQP_ERR, + ICRDMA_CQPSQ_STAG_PDID, ICRDMA_CQPSQ_CQ_CEQID, + ICRDMA_CQPSQ_CQ_CQID, ICRDMA_COMMIT_FPM_CQCNT, +}; + +static u8 icrdma_shifts[ZXDH_MAX_SHIFTS] = { + ICRDMA_CCQPSTATUS_CCQP_DONE_S, ICRDMA_CCQPSTATUS_CCQP_ERR_S, + ICRDMA_CQPSQ_STAG_PDID_S, ICRDMA_CQPSQ_CQ_CEQID_S, + ICRDMA_CQPSQ_CQ_CQID_S, ICRDMA_COMMIT_FPM_CQCNT_S, +}; + +static unsigned int zxdh_dbi_en = 1; +module_param(zxdh_dbi_en, uint, 0444); +MODULE_PARM_DESC(zxdh_dbi_en, "zxdh_dbi_en =1, enable dbi module"); + +static unsigned int zxdh_ep_addr = 0x948; +module_param(zxdh_ep_addr, uint, 0444); +MODULE_PARM_DESC(zxdh_ep_addr, + "zxdh_ep_addr = 0x948, dbi 
model ,0x948 is register addr"); + +static unsigned int zxdh_ep_id; +module_param(zxdh_ep_id, uint, 0444); +MODULE_PARM_DESC(zxdh_ep_id, + "zxdh_ep_id 0 is 5, 1 is 6, 2 is 7, 3 is 8, 4 is 9"); + +/** + * zxdh_rdma_ena_ceq_irq - Enable ceq interrupt + * @dev: pointer to the device structure + * @ceq_id: ceq id + */ +static void zxdh_rdma_ena_ceq_irq(struct zxdh_sc_dev *dev, u32 ceq_id) +{ + u32 hdr; + + hdr = FIELD_PREP(ZXDH_CEQ_ARM_VHCA_ID, dev->vhca_id) | + FIELD_PREP(ZXDH_CEQ_ARM_CEQ_ID, ceq_id); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_itr_enable); + // printk("%s hdr:0x%x\n",__func__,hdr); +} + +/** + * zxdh_rdma_ena_aeq_irq - Enable aeq interrupt + * @dev: pointer to the device structure + * @enable: enable value + */ +static void zxdh_rdma_ena_aeq_irq(struct zxdh_sc_dev *dev, bool enable) +{ + writel(enable, dev->aeq_itr_enable); +} + +static const struct zxdh_irq_ops zxdh_rdma_irq_ops = { + .zxdh_cfg_aeq = zxdh_cfg_aeq, + .zxdh_ceq_en_irq = zxdh_rdma_ena_ceq_irq, + .zxdh_aeq_en_irq = zxdh_rdma_ena_aeq_irq, +}; + +static void zxdh_init_ceq_hw(struct zxdh_sc_dev *dev) +{ + struct zxdh_pci_f *rf; + u32 hdr; + u8 __iomem *hw_addr; + + hw_addr = dev->hw->hw_addr; + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + dev->ceq_itr_enable = (u32 __iomem *)(hw_addr + C_CEQ_EQARM_RAM); + dev->ceq_axi.ceqe_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_CEQE_AXI_INFO_RAM); + dev->ceq_axi.rpble_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_RPBLE_AXI_INFO_RAM); + dev->ceq_axi.lpble_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_LPBLE_AXI_INFO_RAM); + dev->ceq_axi.int_info = (u32 __iomem *)(hw_addr + C_CEQ_INT_INFO_RAM); + + hdr = FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID, + dev->soc_tx_rx_cqp_ind) | //�1�7�1�7�1�7�1�7smmu + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.ceqe_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.rpble_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.lpble_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_INT_PCIE_DBI_EN, zxdh_dbi_en) | + FIELD_PREP(ZXDH_CEQ_INT_EP_ID, rf->ep_id) | + FIELD_PREP(ZXDH_CEQ_INT_PF_NUM, rf->pf_id) | + FIELD_PREP(ZXDH_CEQ_INT_VF_NUM, rf->vf_id) | + FIELD_PREP(ZXDH_CEQ_INT_VF_ACTIVE, rf->ftype); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.int_info); +} + +static void zxdh_init_aeq_hw(struct zxdh_sc_dev *dev) +{ + u8 __iomem *hw_addr; + u32 hdr; + + hw_addr = dev->hw->hw_addr; + + dev->aeq_itr_enable = (u32 __iomem *)(hw_addr + C_RDMA_CPU_AEQ_ARM); + dev->aeq_tail_pointer = + (u32 __iomem *)(hw_addr + C_RDMA_CPU_SOFTWARE_TAIL); + dev->aeq_vhca_pfvf.aeq_msix_data = + (u32 __iomem *)(hw_addr + RDMA_CPU_MSIX_DATA); + dev->aeq_vhca_pfvf.aeq_msix_config = + (u32 __iomem *)(hw_addr + RDMA_CPU_MSIX_CONFIG); + dev->aeq_vhca_pfvf.aeq_root_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_ROOT_AXI_DATA); + dev->aeq_vhca_pfvf.aeq_leaf_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_LEAF_AXI_DATA); + 
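	/*
	 * The write-AXI data and AEE flag registers are mapped next; the
	 * root/leaf/write report entries are then programmed with the
	 * device's cache id / AXI id routing, and the AEE flag is cleared.
	 */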
dev->aeq_vhca_pfvf.aeq_wr_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_WR_AXI_DATA); + dev->aeq_vhca_pfvf.aeq_aee_flag = + (u32 __iomem *)(hw_addr + AEQ_AEQC_AEE_FLAG); + + writel(0, dev->aeq_tail_pointer); + //soc hmc config + hdr = FIELD_PREP(ZXDH_AEQ_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_root_axi_data); + + hdr = FIELD_PREP(ZXDH_AEQ_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_leaf_axi_data); + //soc data config + hdr = FIELD_PREP(ZXDH_AEQ_INDICIATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_wr_axi_data); + //clear 0 after reading values during maintenance + writel(0, dev->aeq_vhca_pfvf.aeq_aee_flag); +} + +void zxdh_init_hw(struct zxdh_sc_dev *dev) +{ + int i; + u32 hdr; + u8 __iomem *hw_addr; + + dev->ceq_0_ok = false; + dev->soc_tx_rx_cqp_ind = ZXDH_SOC_TXRXCQP_IND_ACC_HOST_NOT_THROUGH_SMMU; + dev->soc_tx_rx_cqp_axid = ZXDH_SOC_TXRXCQP_AXID_DEST_EP5; + dev->soc_rdma_io_ind = ZXDH_SOC_RDMAIO_IND_ACC_HOST_NOT_THROUGH_SMMU; + + hw_addr = dev->hw->hw_addr; + + dev->wqe_alloc_db = (u32 __iomem *)(hw_addr + C_RDMA_SQ_DBINFO_LOW_DIN); + dev->cq_arm_db = (u32 __iomem *)(hw_addr + RDMARX_CQ_CQARM); + dev->cqp_db = (u32 __iomem *)(hw_addr + C_RDMA_CQP_DB); + + zxdh_init_ceq_hw(dev); + zxdh_init_aeq_hw(dev); + dev->hw_attrs.max_hw_vf_fpm_id = ZXDH_MAX_VF_FPM_ID; + dev->hw_attrs.first_hw_vf_fpm_id = ZXDH_FIRST_VF_FPM_ID; + + for (i = 0; i < ZXDH_MAX_SHIFTS; ++i) + dev->hw_shifts[i] = icrdma_shifts[i]; + + for (i = 0; i < ZXDH_MAX_MASKS; ++i) + dev->hw_masks[i] = icrdma_masks[i]; + + dev->srq_axi_ram.db = (u32 __iomem *)(hw_addr + C_DB_AXI_RAM); + dev->srq_axi_ram.srql = (u32 __iomem *)(hw_addr + C_SRQL_AXI_RAM); + + dev->irq_ops = &zxdh_rdma_irq_ops; + + dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; + dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; + dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; + dev->hw_attrs.max_stat_idx = ZXDH_HW_STAT_INDEX_MAX; + + dev->hw_attrs.uk_attrs.max_hw_sq_chunk = ZXDH_MAX_QUANTA_PER_WR; + dev->hw_attrs.uk_attrs.feature_flags |= + ZXDH_FEATURE_RTS_AE | ZXDH_FEATURE_CQ_RESIZE | + ZXDH_FEATURE_64_BYTE_CQE; /* RC UD both set to 64 Bytes*/ + + hdr = FIELD_PREP(ZXDH_SRQ_DB_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_SRQ_DB_INDICATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_SRQ_DB_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_SRQ_DB_WAY_PATION, 0); + wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->srq_axi_ram.db); + + hdr = FIELD_PREP(ZXDH_SRQ_DSRQL_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_SRQ_SRQL_INDICATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_SRQ_SRQL_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_SRQ_SRQL_WAY_PATION, 0); + wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->srq_axi_ram.srql); + + writel(IRDMARX_RD_TIME_LIMIT_VALUE, + (u32 __iomem *)(hw_addr + RDMATX_RD_TIME_LIMIT)); + writel(IRDMARX_RD_TIME_LIMIT_VALUE, + (u32 __iomem *)(hw_addr + RDMARX_RD_TIME_LIMIT)); +} + +void 
zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle) +{ + cc->config_ok = false; + cc->traffic_class = traffic_class; + cc->qs_handle = qs_handle; + cc->lfc_set = 0; + cc->pfc_set = 0; +} + +static bool zxdh_is_lfc_set(struct zxdh_config_check *cc, + struct zxdh_sc_vsi *vsi) +{ + u32 lfc = 1; + u8 fn_id = vsi->dev->hmc_fn_id; + + lfc &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> + 8); + lfc &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> + 8); + lfc &= rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id); + + if (lfc) + return true; + return false; +} + +static bool zxdh_check_tc_has_pfc(struct zxdh_sc_vsi *vsi, u64 reg_offset, + u16 traffic_class) +{ + u32 value, pfc = 0; + u32 i; + + value = rd32(vsi->dev->hw, reg_offset); + for (i = 0; i < 4; i++) + pfc |= (value >> (8 * i + traffic_class)) & 0x1; + + if (pfc) + return true; + return false; +} + +static bool zxdh_is_pfc_set(struct zxdh_config_check *cc, + struct zxdh_sc_vsi *vsi) +{ + u32 pause; + u8 fn_id = vsi->dev->hmc_fn_id; + + pause = (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> + cc->traffic_class) & + BIT(0); + pause &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> + cc->traffic_class) & + BIT(0); + + return zxdh_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) && + pause; +} + +bool zxdh_is_config_ok(struct zxdh_config_check *cc, struct zxdh_sc_vsi *vsi) +{ + cc->lfc_set = zxdh_is_lfc_set(cc, vsi); + cc->pfc_set = zxdh_is_pfc_set(cc, vsi); + + cc->config_ok = cc->lfc_set || cc->pfc_set; + + return cc->config_ok; +} + +#define ZXDH_RCV_WND_NO_FC 0x1FFFC +#define ZXDH_RCV_WND_FC 0x3FFFC + +#define ZXDH_CWND_NO_FC 0x20 +#define ZXDH_CWND_FC 0x400 + +#define ZXDH_RTOMIN_NO_FC 0x5 +#define ZXDH_RTOMIN_FC 0x32 + +#define ZXDH_ACKCREDS_NO_FC 0x02 +#define ZXDH_ACKCREDS_FC 0x1E + +static void zxdh_check_flow_ctrl(struct zxdh_sc_vsi *vsi, u8 user_prio, + u8 traffic_class) +{ +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[user_prio]; + struct zxdh_device *iwdev = vsi->back_vsi; + + if (!zxdh_is_config_ok(cfg_chk, vsi)) { + if (!iwdev->override_rcv_wnd) + iwdev->rcv_wnd = ZXDH_RCV_WND_NO_FC; + if (!iwdev->override_cwnd) + iwdev->roce_cwnd = ZXDH_CWND_NO_FC; + if (!iwdev->override_rtomin) + iwdev->roce_rtomin = ZXDH_RTOMIN_NO_FC; + if (!iwdev->override_ackcreds) + iwdev->roce_ackcreds = ZXDH_ACKCREDS_NO_FC; +#define ZXDH_READ_FENCE_RATE_NO_FC 4 + if (iwdev->roce_mode && !iwdev->override_rd_fence_rate) + iwdev->rd_fence_rate = ZXDH_READ_FENCE_RATE_NO_FC; + if (vsi->tc_print_warning[traffic_class]) { + pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", + traffic_class); + vsi->tc_print_warning[traffic_class] = false; + } + } else { + if (!iwdev->override_rcv_wnd) + iwdev->rcv_wnd = ZXDH_RCV_WND_FC; + if (!iwdev->override_cwnd) + iwdev->roce_cwnd = ZXDH_CWND_FC; + if (!iwdev->override_rtomin) + iwdev->roce_rtomin = ZXDH_RTOMIN_FC; + if (!iwdev->override_ackcreds) + iwdev->roce_ackcreds = ZXDH_ACKCREDS_FC; +#define ZXDH_READ_FENCE_RATE_FC 0 + if (!iwdev->override_rd_fence_rate) + iwdev->rd_fence_rate = ZXDH_READ_FENCE_RATE_FC; + if (vsi->tc_print_warning[traffic_class]) { + pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", + traffic_class); + vsi->tc_print_warning[traffic_class] = false; + } + } +#endif +} + +void zxdh_check_fc_for_tc_update(struct zxdh_sc_vsi 
*vsi, + struct zxdh_l2params *l2params) +{ + u8 i; + + if (!vsi->dev->privileged) + return; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + vsi->tc_print_warning[i] = true; + + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[i]; + u8 tc = l2params->up2tc[i]; + + cfg_chk->traffic_class = tc; + cfg_chk->qs_handle = vsi->qos[i].qs_handle; + zxdh_check_flow_ctrl(vsi, i, tc); + } +} + +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp) +{ + u8 i; + + if (!vsi->dev->privileged) + return; + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[i]; + + zxdh_init_config_check(cfg_chk, vsi->qos[i].traffic_class, + vsi->qos[i].qs_handle); + if (sc_qp->qs_handle == cfg_chk->qs_handle) + zxdh_check_flow_ctrl(vsi, i, cfg_chk->traffic_class); + } +} diff --git a/src/rdma/src/icrdma_hw.h b/src/rdma/src/icrdma_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcef47d6a436a3899061c0ad897d00dd6d687d4 --- /dev/null +++ b/src/rdma/src/icrdma_hw.h @@ -0,0 +1,852 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ICRDMA_HW_H +#define ICRDMA_HW_H + +#include "zrdma.h" + +#define VFPE_CQPTAIL1 0x0000a000 +#define VFPE_CQPDB1 0x0000bc00 +#define VFPE_CCQPSTATUS1 0x0000b800 +#define VFPE_CCQPHIGH1 0x00009800 +#define VFPE_CCQPLOW1 0x0000ac00 +#define VFPE_CQARM1 0x0000b400 +#define VFPE_CQARM1 0x0000b400 +#define VFPE_CQACK1 0x0000b000 +#define VFPE_AEQALLOC1 0x0000a400 +#define VFPE_CQPERRCODES1 0x00009c00 +#define VFPE_WQEALLOC1 0x0000c000 +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i)*4)) /* _i=0...63 */ + +#define PFPE_CQPTAIL 0x801C //0x00500880 +#define PFPE_CQPDB 0x8014 //0x00500800 +#define PFPE_CCQPSTATUS 0x8044 //0x0050a000 +#define PFPE_CCQPHIGH 0x8054 //0x0050a100 +#define PFPE_CCQPLOW 0x804C //0x0050a080 +#define PFPE_CQARM 0x8024 //0x00502c00 +#define PFPE_CQACK 0x8034 //0x00502c80 +#define PFPE_AEQALLOC 0x802C //0x00502d00 +#define GLINT_DYN_CTL(_INT) \ + (0x6004 + ((_INT)*4)) //(0x00160000 + ((_INT) * 4)) /* _i=0...2047 */ +#define GLPCI_LBARCTRL 0x800C //0x0009de74 +#define GLPE_CPUSTATUS0 0x8064 //0x0050ba5c +#define GLPE_CPUSTATUS1 0x806C //0x0050ba60 +#define GLPE_CPUSTATUS2 0x8074 //0x0050ba64 +#define PFINT_AEQCTL 0x8004 //0x0016cb00 +#define PFPE_CQPERRCODES 0x805C //0x0050a200 +#define PFPE_WQEALLOC 0x803C //0x00504400 +#define GLINT_CEQCTL(_INT) \ + (0x4004 + ((_INT)*4)) //(0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */ +#define VSIQF_PE_CTL1(_VSI) \ + (0x807C + ((_VSI)*4)) //(0x00414000 + ((_VSI) * 4)) /* _i=0...767 */ +#define PFHMC_PDINV 0x8C7C //0x00520300 +#define GLHMC_VFPDINV(_i) \ + (0x8C84 + ((_i)*4)) //(0x00528300 + ((_i) * 4)) /* _i=0...31 */ +#define GLPE_CRITERR 0x8D04 //0x00534000 +#define GLINT_RATE(_INT) \ + (0x2004 + \ + ((_INT)*4)) //(0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ + +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 0x001e3180 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_1 0x001e3184 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_2 0x001e3188 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_3 0x001e318c + +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 0x001e31a0 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_1 0x001e31a4 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e31a8 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e31aC + +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 0x001e34c0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_1 0x001e34c4 +#define 
PRTMAC_HSEC_CTL_RX_ENABLE_GPP_2 0x001e34c8 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_3 0x001e34cC + +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_0 0x001e35c0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_1 0x001e35c4 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_2 0x001e35c8 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_3 0x001e35cC + +#define GLDCB_TC2PFC 0x001d2694 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001e31c0 + +#define ICRDMA_DB_ADDR_OFFSET (8 * 1024) //(8 * 1024 * 1024 - 64 * 1024) + +#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024) + +#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0 +#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0) +#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31 +#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31) +#define ICRDMA_CQPSQ_STAG_PDID_S 46 +#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46) +#define ICRDMA_CQPSQ_CQ_CEQID_S 48 +#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(59, 48) +#define ICRDMA_CQPSQ_CQ_CQID_S 0 +#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(20, 0) +#define ICRDMA_COMMIT_FPM_CQCNT_S 0 +#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0) + +#define ZXDH_PRI_BASE_RD_BAD_QKEY_COUNTER GENMASK(31, 24) + +/**************** Start of ZTE RDMA Registers ***************/ +#define C_RDMA_BASE_ADDRESS (0x6204000000u) +#define C_RDMA_HW_BAR_PAGE_NUM 31 +#define C_RDMA_HW_BAR_PAGE_SIZE 0x1000 +extern u64 zxdh_hw_bar_pages[C_RDMA_HW_BAR_PAGE_NUM]; + +/****** RDMA REG BASE Address******/ +#define C_RDMA_CQP_PUBLIC_PAGE1 (zxdh_hw_bar_pages[0]) //hw addr:0x6204402000 +#define C_RDMA_CQP_PUBLIC_PAGE2 (zxdh_hw_bar_pages[1]) //hw addr:0x6204403000 +#define C_RDMA_CQP_VHCA_PAGE (zxdh_hw_bar_pages[2]) +//hw addr:0x6204800000 + vhca_id * 0x1000 +#define C_RDMA_CQP_VHCA_PF_PAGE (zxdh_hw_bar_pages[3]) +//hw addr:0x6204C00000 + vhca_id * 0x1000 + +#define C_RDMA_RX_PKT_PROC_PAGE (zxdh_hw_bar_pages[4]) //hw addr:0x6205400000 +#define C_RDMA_RX_PUBLIC_PAGE1 (zxdh_hw_bar_pages[5]) //hw addr:0x6205420000 +#define C_RDMA_RX_PUBLIC_PAGE2 (zxdh_hw_bar_pages[6]) //hw addr:0x6205440000 +#define C_RDMA_RX_CNP_GEN_PAGE (zxdh_hw_bar_pages[7]) //hw addr:0x6205460000 +#define C_RDMA_RX_RAM_SCHE_PAGE1 (zxdh_hw_bar_pages[8]) //hw addr:0x6205467000 +#define C_RDMA_RX_RAM_SCHE_PAGE2 (zxdh_hw_bar_pages[9]) //hw addr:0x6205468000 +#define C_RDMA_RX_RAM_SCHE_PAGE3 (zxdh_hw_bar_pages[10]) //hw addr:0x6205469000 +#define C_RDMA_RX_RAM_SCHE_PAGE4 (zxdh_hw_bar_pages[11]) //hw addr:0x620546A000 +#define C_RDMA_RX_RAM_SCHE_PAGE5 (zxdh_hw_bar_pages[12]) //hw addr:0x620546B000 +#define C_RDMA_RX_VHCA_PAGE (zxdh_hw_bar_pages[13]) +//hw addr:0x6205800000 + vhca_id * 0x1000 +#define C_RDMA_RX_VHCA_PF_PAGE (zxdh_hw_bar_pages[14]) +//hw addr:0x6205C00000 + vhca_id * 0x1000 + +#define C_RDMA_TX_ACK_RECV_PAGE (zxdh_hw_bar_pages[15]) //hw addr:0x62065E8000 +#define C_RDMA_TX_WQE_PARSE_PAGE (zxdh_hw_bar_pages[16]) //hw addr:0x62065F0000 + +#define C_RDMA_TX_RAM_SCHE_PAGE1 (zxdh_hw_bar_pages[17]) //hw addr:0x6206700000 +#define C_RDMA_TX_RAM_SCHE_PAGE2 (zxdh_hw_bar_pages[18]) //hw addr:0x6206701000 +#define C_RDMA_TX_RAM_SCHE_PAGE3 (zxdh_hw_bar_pages[19]) //hw addr:0x6206702000 +#define C_RDMA_TX_RAM_SCHE_PAGE4 (zxdh_hw_bar_pages[20]) //hw addr:0x6206703000 +#define C_RDMA_TX_RAM_SCHE_PAGE5 (zxdh_hw_bar_pages[21]) //hw addr:0x6206704000 +#define C_RDMA_TX_VHCA_PAGE (zxdh_hw_bar_pages[22]) +//hw addr:0x6206800000 + vhca_id * 0x1000 +#define C_RDMA_TX_VHCA_PF_PAGE (zxdh_hw_bar_pages[23]) +//hw addr:0x6206C00000 + vhca_id * 0x1000 + +#define C_RDMA_IO_VHCA_PAGE (zxdh_hw_bar_pages[24]) +//hw addr:0x6207801000 + vhca_id * 0x2000 size:0x1000 + +#define 
C_RDMA_NOF_L2D_PAGES \ + (zxdh_hw_bar_pages[25]) //hw addr:0x62008C2000 size:0x2000 + +#define C_RDMA_TX_RTT_PAGE \ + (zxdh_hw_bar_pages[27]) //hw addr:0x62065F8000 size:0x1000 +#define C_RDMA_RX_SIDN_PAGE (zxdh_hw_bar_pages[28]) +//hw addr:0x6205600000 + sid * 0x10000 size:0x1000 +#define C_RDMA_TX_SIDN_PAGE \ + (zxdh_hw_bar_pages[29]) //hw addr:0x6206600000 size:0x1000 +#define C_RDMA_IO_SIDN_PAGE (zxdh_hw_bar_pages[30]) +//hw addr:0x6207600000 + sid * 0x10000 size:0x1000 + +/****** CQP Module Register ******/ +#define C_RDMA_CQP_CONTEXT_0 (C_RDMA_CQP_VHCA_PAGE + 0x004u) +#define C_RDMA_CQP_CONTEXT_1 (C_RDMA_CQP_VHCA_PAGE + 0x008u) +#define C_RDMA_CQP_CONTEXT_2 (C_RDMA_CQP_VHCA_PAGE + 0x00Cu) +#define C_RDMA_CQP_CONTEXT_3 (C_RDMA_CQP_VHCA_PAGE + 0x010u) +#define C_RDMA_CQP_CONTEXT_4 (C_RDMA_CQP_VHCA_PAGE + 0x014u) +#define C_RDMA_CQP_CONTEXT_5 (C_RDMA_CQP_VHCA_PAGE + 0x018u) +#define C_RDMA_CQP_CONTEXT_6 (C_RDMA_CQP_VHCA_PAGE + 0x01Cu) +#define C_RDMA_CQP_CONTEXT_7 (C_RDMA_CQP_VHCA_PAGE + 0x020u) +#define C_RDMA_CQP_CONFIG_DONE (C_RDMA_CQP_VHCA_PAGE + 0x024u) +#define C_RDMA_CQP_DB (C_RDMA_CQP_VHCA_PAGE + 0x028u) +#define C_RDMA_CQP_TAIL (C_RDMA_CQP_VHCA_PAGE + 0x02Cu) +#define C_RDMA_CQP_STATUS (C_RDMA_CQP_VHCA_PAGE + 0x030u) +#define C_RDMA_CQP_ERROR (C_RDMA_CQP_VHCA_PAGE + 0x034u) +#define C_RDMA_CQP_ERRCODE (C_RDMA_CQP_VHCA_PAGE + 0x038u) +#define C_RDMA_CQP_CQ_NUM (C_RDMA_CQP_VHCA_PAGE + 0x03Cu) +#define C_RDMA_CQP_CQ_DISTRIBUTE_DONE (C_RDMA_CQP_VHCA_PAGE + 0x040u) +#define C_RDMA_CQP_STATE_RSV0 (C_RDMA_CQP_VHCA_PAGE + 0x058u) +#define C_RDMA_CQP_STATE_RSV1 (C_RDMA_CQP_VHCA_PAGE + 0x05Cu) + +#define C_RDMA_CQP_PF_VF_ID(_i) \ + (C_RDMA_CQP_PUBLIC_PAGE1 + 4 * (_i)) /* i= 0...1023 */ +#define C_RDMA_CQP_PF_VF_ID_INVLD(_i) \ + (C_RDMA_CQP_PUBLIC_PAGE2 + 0x1000u + 4 * (_i)) /* i= 0...1023 */ + +#define C_RDMA_CQP_MGC_BASE_HIGH (C_RDMA_CQP_VHCA_PF_PAGE + 0x004u) +#define C_RDMA_CQP_MGC_BASE_LOW (C_RDMA_CQP_VHCA_PF_PAGE + 0x008u) +#define C_RDMA_CQP_MRTE_CACHE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x00Cu) +#define C_RDMA_CQP_AH_CACHE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x010u) +#define C_RDMA_CQP_MGC_INDICATE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x014u) +#define C_RDMA_CQP_STATE_PF_RSV0 (C_RDMA_CQP_VHCA_PF_PAGE + 0x018u) + +/****** RDMA Flow Control Algorithms Register ******/ +/* DCQCN */ +#define RDMA_DCQCN_NP_CNP_DSCP (C_RDMA_RX_CNP_GEN_PAGE + 0x10u) +#define RDMA_DCQCN_NP_CNP_PRIO_MODE (C_RDMA_RX_CNP_GEN_PAGE + 0x14u) +#define RDMA_DCQCN_NP_CNP_PRIO (C_RDMA_RX_CNP_GEN_PAGE + 0x18u) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X (C_RDMA_RX_CNP_GEN_PAGE + 0x28u) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y (C_RDMA_RX_CNP_GEN_PAGE + 0xcu) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX \ + (C_RDMA_RX_CNP_GEN_PAGE + 0x2cu) +#define RDMA_DCQCN_PRG_TIME_RESET (0x6206008084u) +#define RDMA_DCQCN_RPG_CLAMP_TGT_RATE (0x6206008084u) +#define RDMA_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC (0x6206008080u) +#define RDMA_DCQCN_RP_DCE_TCP_RTT (0x6206008080u) +#define RDMA_DCQCN_DCE_TCP_G (0x6206008088u) +#define RDMA_DCQCN_RPG_GD (0x6206008090u) +#define RDMA_DCQCN_RPG_INITIAL_ALPHA_VALUE (0x620600808cu) +#define RDMA_DCQCN_RPG_MIN_DEC_FAC (0x6206008094u) +#define RDMA_DCQCN_RPG_THRESHOLD (0x6206008098u) +#define RDMA_DCQCN_RPG_RATIO_INCREASE (0x6206008088u) +#define RDMA_DCQCN_RPG_AI_RATIO (0x620600809cu) +#define RDMA_DCQCN_RPG_HAI_RATIO (0x62060080a0u) +#define RDMA_DCQCN_RPG_BYTE_RESET (C_RDMA_TX_VHCA_PAGE + 0x0900u) +#define RDMA_DCQCN_RPG_AI_RATE (C_RDMA_TX_VHCA_PAGE + 0x0908u) +#define RDMA_DCQCN_RPG_HAI_RATE 
(C_RDMA_TX_VHCA_PAGE + 0x090cu) +#define RDMA_RPG_MAX_RATE (C_RDMA_TX_VHCA_PAGE + 0x0910u) +#define RDMA_RPG_MIN_RATE (C_RDMA_TX_VHCA_PAGE + 0x0914u) +/* RTT */ +#define RDMA_RPG_VF_DELTA (C_RDMA_TX_VHCA_PAGE + 0x091cu) +/****** RDMA Flow Control Algorithms Parameters ******/ +#define RDMA_FLOW_CONTROL_RATE_200G 0x4C4B4000 +#define RDMA_FLOW_CONTROL_RATE_10M 0xFA00 +#define RDMA_FLOW_CONTROL_RATE_10G 0x3D09000 +#define RDMA_FLOW_CONTROL_RATE_1G 0x61A800 +#define RDMA_FLOW_BYTE_RESET_THRESHOLD 125 +#define RDMA_FLOW_MAX_RPG_HAI_RATIO 4096 +#define RDMA_FLOW_MAX_RPG_AI_RATIO 2048 +#define RDMA_FLOW_MAX_RPG_THRESHOLD 31 +#define RDMA_FLOW_MAX_RPG_MIN_DEC_FAC 32768 +#define RDMA_FLOW_MAX_RPG_INITIAL_ALPHA_VALUE 32768 +#define RDMA_FLOW_MAX_RPG_GD 15 +#define RDMA_FLOW_MAX_DCE_TCP_G 15 +#define RDMA_FLOW_MAX_RP_DCE_TCP_RTT 100 +#define RDMA_FLOW_MIN_RP_DCE_TCP_RTT 5 +#define RDMA_FLOW_MAX_PRG_TIME_RESET 200 +#define RDMA_FLOW_MIN_PRG_TIME_RESET 5 +#define RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y 10 +#define RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y_EX \ + (2 * RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y - 1) +#define RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS_X 65535 +#define RDMA_FLOW_MIN_MIN_TIME_BETWEEN_CNPS_X 8 +#define RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS \ + (RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y * \ + RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS_X) +#define RDMA_FLOW_MIN_NP_MIN_TIME_BETWEEN_CNPS \ + (RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y * \ + RDMA_FLOW_MIN_MIN_TIME_BETWEEN_CNPS_X) +#define RDMA_FLOW_MAX_NP_CNP_PRIO_MODE 7 +#define RDMA_FLOW_MAX_NP_CNP_PRIO 7 +#define RDMA_FLOW_MAX_NP_CNP_DSCP 63 +#define RDMA_FLOW_MAX_ALPHA_VALUE 32768 +#define RDMA_FLOW_MAX_TLOW_VALUE 65535 +#define RDMA_FLOW_MAX_THIGH_VALUE 65535 +#define RDMA_FLOW_MAX_AI_NUM_VALUE 65535 +#define RDMA_FLOW_MAX_HAI_N_VALUE 65534 +#define RDMA_FLOW_MAX_AI_N_VALUE 65534 +#define RDMA_FLOW_MAX_VF_DELTA_VALUE 65534 +#define RDMA_FLOW_MAX_THRED_GRADIENT 32768 + +/****** REQ Module Register ******/ +/* rdmatx_ack_recv */ +#define RDMATX_ACK_RSV_RO_REG_0 (C_RDMA_TX_ACK_RECV_PAGE + 0xA0u) +#define RDMATX_ACK_RSV_RO_REG_5 (C_RDMA_TX_ACK_RECV_PAGE + 0xB4u) +#define RDMATX_ACK_RSV_RO_REG_14 (C_RDMA_TX_ACK_RECV_PAGE + 0xD8u) +#define RDMATX_ACK_RSV_RO_REG_20 (C_RDMA_TX_ACK_RECV_PAGE + 0xF0u) +#define RDMATX_ACK_ERR_CQE_OUT_TASK_CNT (C_RDMA_TX_ACK_RECV_PAGE + 0x3C4u) +#define RDMATX_ACK_FLUSH_CQE_OUT_TASK_CNT (C_RDMA_TX_ACK_RECV_PAGE + 0x3C8u) + +/* rdmatx_wqe_parse */ +#define RDMATX_PKT_TIME_IN_CNT (C_RDMA_TX_WQE_PARSE_PAGE + 0x674u) +#define RDMATX_PKT_TIME_OUT_CNT (C_RDMA_TX_WQE_PARSE_PAGE + 0x678u) +#define RDMATX_HOST3_ERR_INFO_FIFO_OVERFLOW_CNT \ + (C_RDMA_TX_WQE_PARSE_PAGE + 0x8b0u) + +/* rdmatx_ram_scheduling */ +#define RDMATX_RAM_READ_FLAG (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x0u) +#define RDMATX_RAM_ADDR (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x04u) +#define RDMATX_RAM_READ_LENGTH (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x08u) +#define RDMATX_RAM_NUM (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x0Cu) +#define RDMATX_RAM_WIDTH (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x10u) +#define RDMATX_RAM_MAINTENANCE_RAM(_i) \ + (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x14u + (_i)*0x4) //i=0... 
14 +#define RDMATX_RD_TIME_LIMIT (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C0u) +#define RDMATX_READ_ERROR_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C4u) +#define RDMATX_ERROR_RAM_NUM (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C8u) +#define RDMATX_ERROR_RAM_ADDR (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0CCu) +#define RDMATX_READ_CNT_ERROR (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0DCu) +#define RDMATX_RAM_REDUN_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x110u) +#define RDMATX_DOUBLE_VLD_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x114u) + +/* rdmatx_ack_recv_vhca_pfvf */ +#define RDMATX_ACK_SQWQE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x004u) +#define RDMATX_ACK_DDR_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x010u) +#define RDMATX_ACK_PCI_MAX_MRTE_INDEX_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x014u) + +/* rdmatx_doorbell_mgr_vhca_pfvf */ +#define RDMATX_DB_PBLE_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x400u) +#define RDMATX_DB_SQWQE_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x40Cu) +#define RDMATX_QPN_BASEQPN_CFG (C_RDMA_TX_VHCA_PAGE + 0x424u) +#define RDMATX_QPN_CONTEXT_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x428u) +#define RDMATX_QUEUE_VHCA_FLAG (C_RDMA_TX_VHCA_PAGE + 0x448u) + +/* rdmatx_doorbell_mgr_vhca_pf */ +#define C_RDMA_SQ_DBINFO_LOW_DIN (C_RDMA_TX_VHCA_PF_PAGE + 0x404u) +#define C_RDMA_SQ_DBINFO_HIGH_DIN (C_RDMA_TX_VHCA_PF_PAGE + 0x42Cu) //not used + +/* rdmatx_wqe_parse_vhca_pfvf */ +#define RDMATX_SQWQE_PBLE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC00u) //hmc +#define RDMATX_SQWQE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC04u) +#define RDMATX_AH_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC08u) //hmc +#define RDMATX_LOCAL_MRTE_PARENT_PARA_CFG \ + (C_RDMA_TX_VHCA_PAGE + 0xC0Cu) //not used +#define RDMATX_LOCAL_MRTE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC10u) //hmc +#define RDMATX_SGETRAN_MRTE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC14u) //hmc +#define RDMATX_SGETRAN_PBLE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC18u) //hmc +#define RDMATX_PAYLOAD_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC1Cu) +#define RDMATX_HOSTID_CFG (C_RDMA_TX_VHCA_PAGE + 0xC20u) + +/* rdmatx_cm_vhca_pfvf */ +#define RDMA_CPU_MSIX_DATA (C_RDMA_TX_VHCA_PAGE + 0x00000804u) /* i=0...1023 */ +#define RDMA_CPU_MSIX_CONFIG \ + (C_RDMA_TX_VHCA_PAGE + 0x00000808u) /* i=0...1023 */ +#define AEQ_REPORT_ROOT_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x00000814u) /* i=0...1023 */ +#define AEQ_REPORT_LEAF_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x00000818u) /* i=0...1023 */ +#define AEQ_REPORT_WR_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x0000081Cu) /* i=0...1023 */ +#define AEQ_AEQC_AEE_FLAG (C_RDMA_TX_VHCA_PAGE + 0x00000820u) /* i=0...1023 */ +#define RDMATX_TXWINDOW_QPN_BASE (C_RDMA_TX_VHCA_PAGE + 0x810u) + +/* rdmatx_cm_vhca_pf */ +#define C_RDMA_CPU_SOFTWARE_TAIL (C_RDMA_TX_VHCA_PF_PAGE + 0x00000804u) +#define C_RDMA_CPU_AEQ_ARM (C_RDMA_TX_VHCA_PF_PAGE + 0x00000808u) + +/* rdmatx_sub_vhca_pfvf*/ +#define RDMATX_ADD_TOKEN_CHANGE_EN (C_RDMA_TX_VHCA_PAGE + 0x928u) +#define RDMATX_TIME_ADD_TOKEN_CFG (C_RDMA_TX_VHCA_PAGE + 0x92Cu) +#define RDMATX_SIZE_ADD_TOKEN_CFG (C_RDMA_TX_VHCA_PAGE + 0x930u) +#define RDMATX_TOKEN_MAX_CFG (C_RDMA_TX_VHCA_PAGE + 0x934u) + +/****** RES Module Register ******/ +/* rdmarx_pkt_proc */ +#define C_ICRC_CHECK_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x7cu) +#define C_ICRC_CHECK_SOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x8cu) +#define C_ICRC_PROC_SOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x84u) +#define C_ICRC_PROC_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x88u) +#define C_NHD_CHECK_ETH_DISGARD_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x4e4u) +#define C_NHD_CHECK_ICRC_REMOVAL_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x4f8u) +#define C_PLD_CACHE_PKT_PLD_PROC_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0xc64u) +#define 
C_PLD_CACHE_PLD_CACHE_CTRL_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0xc6cu) +#define C_TRPG_NP_RX_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x68u) + +/* rdmarx_cnp_gen */ +#define C_STATE_ERR_CFG (C_RDMA_RX_CNP_GEN_PAGE + 0x84u) + +/* rdma_tx_rtt_cfg*/ +#define RDMATX_RTT_CFG (C_RDMA_TX_RTT_PAGE + 0x468u) +/* rdma tx cap cfg*/ +#define RDMA_TX_SEL_NODE_MODULE_NUM 6 +#define RDMA_TX_SEL_NODE_MODULE_ACK 0 +#define RDMA_TX_SEL_NODE_MODULE_DB 1 +#define RDMA_TX_SEL_NODE_MODULE_AEQ 2 +#define RDMA_TX_SEL_NODE_MODULE_NONE 3 +#define RDMA_TX_SEL_NODE_MODULE_TXWINDOW 4 +#define RDMA_TX_SEL_NODE_MODULE_WQE 5 +#define RDMA_TX_CAP_WQE_MOD_NUM 3 +#define RDMA_TX_CAP_WQE_PRE_READ 0 +#define RDMA_TX_CAP_WQE_HANDLE 1 +#define RDMA_TX_CAP_WQE_PACK 2 + +#define C_RDMA_TX_CAP_BASE 0x62065E0000u +#define C_RDMA_TX_ACK_RECV_BASE 0x62065E8000u +#define C_RDMA_TX_SIDN_BASE 0x6206600000u +#define RDMATX_CAP_CHL_SEL_NODE0 (C_RDMA_TX_CAP_BASE + 0x224u) +#define RDMATX_CAP_CHL_OPEN_NODE0 (C_RDMA_TX_CAP_BASE + 0x228u) +#define RDMATX_CAP_NODE0_SEL (C_RDMA_TX_WQE_PARSE_PAGE + 0x144u) +#define RDMATX_CAP_NODE0_ACK (C_RDMA_TX_ACK_RECV_BASE + 0xF8u) +#define RDMATX_CAP_NODE0_DB (C_RDMA_TX_SIDN_BASE + 0x32Cu) +#define RDMATX_CAP_NODE0_AEQ (C_RDMA_TX_CAP_BASE + 0x214u) +#define RDMATX_CAP_NODE0_TXWINDOW (C_RDMA_TX_CAP_BASE + 0x21Cu) +#define RDMATX_CAP_NODE0_WQE_PRE_READ (C_RDMA_TX_WQE_PARSE_PAGE + 0x14Cu) +#define RDMATX_CAP_NODE0_WQE_HANDLE (C_RDMA_TX_WQE_PARSE_PAGE + 0x154u) +#define RDMATX_CAP_NODE0_PACKAGE (C_RDMA_TX_WQE_PARSE_PAGE + 0x15Cu) +#define RDMATX_CAP_COMPARE_BIT_EN0_NODE0 (C_RDMA_TX_CAP_BASE + 0x2FCu) +#define RDMATX_CAP_COMPARE_BIT_EN1_NODE0 (C_RDMA_TX_CAP_BASE + 0x300u) +#define RDMATX_CAP_COMPARE_BIT_EN2_NODE0 (C_RDMA_TX_CAP_BASE + 0x304u) +#define RDMATX_CAP_COMPARE_BIT_EN3_NODE0 (C_RDMA_TX_CAP_BASE + 0x308u) +#define RDMATX_CAP_COMPARE_BIT_EN4_NODE0 (C_RDMA_TX_CAP_BASE + 0x30Cu) +#define RDMATX_CAP_COMPARE_BIT_EN5_NODE0 (C_RDMA_TX_CAP_BASE + 0x310u) +#define RDMATX_CAP_COMPARE_BIT_EN6_NODE0 (C_RDMA_TX_CAP_BASE + 0x314u) +#define RDMATX_CAP_COMPARE_BIT_EN7_NODE0 (C_RDMA_TX_CAP_BASE + 0x318u) +#define RDMATX_CAP_COMPARE_BIT_EN8_NODE0 (C_RDMA_TX_CAP_BASE + 0x31Cu) +#define RDMATX_CAP_COMPARE_BIT_EN9_NODE0 (C_RDMA_TX_CAP_BASE + 0x320u) +#define RDMATX_CAP_COMPARE_BIT_EN10_NODE0 (C_RDMA_TX_CAP_BASE + 0x324u) +#define RDMATX_CAP_COMPARE_BIT_EN11_NODE0 (C_RDMA_TX_CAP_BASE + 0x328u) +#define RDMATX_CAP_COMPARE_BIT_EN12_NODE0 (C_RDMA_TX_CAP_BASE + 0x32Cu) +#define RDMATX_CAP_COMPARE_BIT_EN13_NODE0 (C_RDMA_TX_CAP_BASE + 0x330u) +#define RDMATX_CAP_COMPARE_BIT_EN14_NODE0 (C_RDMA_TX_CAP_BASE + 0x334u) +#define RDMATX_CAP_COMPARE_BIT_EN15_NODE0 (C_RDMA_TX_CAP_BASE + 0x338u) +#define RDMATX_CAP_COMPARE_DATA0_NODE0 (C_RDMA_TX_CAP_BASE + 0x234u) +#define RDMATX_CAP_COMPARE_DATA1_NODE0 (C_RDMA_TX_CAP_BASE + 0x238u) +#define RDMATX_CAP_COMPARE_DATA2_NODE0 (C_RDMA_TX_CAP_BASE + 0x23Cu) +#define RDMATX_CAP_COMPARE_DATA3_NODE0 (C_RDMA_TX_CAP_BASE + 0x240u) +#define RDMATX_CAP_COMPARE_DATA4_NODE0 (C_RDMA_TX_CAP_BASE + 0x244u) +#define RDMATX_CAP_COMPARE_DATA5_NODE0 (C_RDMA_TX_CAP_BASE + 0x248u) +#define RDMATX_CAP_COMPARE_DATA6_NODE0 (C_RDMA_TX_CAP_BASE + 0x24Cu) +#define RDMATX_CAP_COMPARE_DATA7_NODE0 (C_RDMA_TX_CAP_BASE + 0x250u) +#define RDMATX_CAP_COMPARE_DATA8_NODE0 (C_RDMA_TX_CAP_BASE + 0x254u) +#define RDMATX_CAP_COMPARE_DATA9_NODE0 (C_RDMA_TX_CAP_BASE + 0x258u) +#define RDMATX_CAP_COMPARE_DATA10_NODE0 (C_RDMA_TX_CAP_BASE + 0x25Cu) +#define RDMATX_CAP_COMPARE_DATA11_NODE0 (C_RDMA_TX_CAP_BASE + 0x260u) +#define 
RDMATX_CAP_COMPARE_DATA12_NODE0 (C_RDMA_TX_CAP_BASE + 0x264u) +#define RDMATX_CAP_COMPARE_DATA13_NODE0 (C_RDMA_TX_CAP_BASE + 0x268u) +#define RDMATX_CAP_COMPARE_DATA14_NODE0 (C_RDMA_TX_CAP_BASE + 0x26Cu) +#define RDMATX_CAP_COMPARE_DATA15_NODE0 (C_RDMA_TX_CAP_BASE + 0x270u) +#define RDMATX_CAP_AXI_WR_ADDR_LOW_NODE0 (C_RDMA_TX_CAP_BASE + 0x274u) +#define RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE0 (C_RDMA_TX_CAP_BASE + 0x278u) +#define RDMATX_CAP_AXI_WR_LEN_LOW_NODE0 (C_RDMA_TX_CAP_BASE + 0x27Cu) +#define RDMATX_CAP_AXI_WR_LEN_HIGH_NODE0 (C_RDMA_TX_CAP_BASE + 0x4ECu) +#define RDMATX_CAP_TIME_WRL2D_NODE0 (C_RDMA_TX_CAP_BASE + 0x2F4u) +#define RDMATX_CAP_VHCA_NUM_NODE0 (C_RDMA_TX_CAP_BASE + 0x22Cu) +#define RDMATX_CAP_AXI_ID_NODE0 (C_RDMA_TX_CAP_BASE + 0x2DCu) +#define RDMATX_CAP_CAP_ID_NODE0 (C_RDMA_TX_CAP_BASE + 0x2E4u) +#define RDMATX_CAP_CHL_SEL_NODE1 (C_RDMA_TX_CAP_BASE + 0x280u) +#define RDMATX_CAP_CHL_OPEN_NODE1 (C_RDMA_TX_CAP_BASE + 0x284u) +#define RDMATX_CAP_NODE1_SEL (C_RDMA_TX_WQE_PARSE_PAGE + 0x148u) +#define RDMATX_CAP_NODE1_ACK (C_RDMA_TX_ACK_RECV_BASE + 0xFCu) +#define RDMATX_CAP_NODE1_DB (C_RDMA_TX_SIDN_BASE + 0x330u) +#define RDMATX_CAP_NODE1_AEQ (C_RDMA_TX_CAP_BASE + 0x218u) +#define RDMATX_CAP_NODE1_TXWINDOW (C_RDMA_TX_CAP_BASE + 0x220u) +#define RDMATX_CAP_NODE1_WQE_PRE_READ (C_RDMA_TX_WQE_PARSE_PAGE + 0x150u) +#define RDMATX_CAP_NODE1_WQE_HANDLE (C_RDMA_TX_WQE_PARSE_PAGE + 0x158u) +#define RDMATX_CAP_NODE1_PACKAGE (C_RDMA_TX_WQE_PARSE_PAGE + 0x160u) +#define RDMATX_CAP_COMPARE_BIT_EN0_NODE1 (C_RDMA_TX_CAP_BASE + 0x33Cu) +#define RDMATX_CAP_COMPARE_BIT_EN1_NODE1 (C_RDMA_TX_CAP_BASE + 0x340u) +#define RDMATX_CAP_COMPARE_BIT_EN2_NODE1 (C_RDMA_TX_CAP_BASE + 0x344u) +#define RDMATX_CAP_COMPARE_BIT_EN3_NODE1 (C_RDMA_TX_CAP_BASE + 0x348u) +#define RDMATX_CAP_COMPARE_BIT_EN4_NODE1 (C_RDMA_TX_CAP_BASE + 0x34Cu) +#define RDMATX_CAP_COMPARE_BIT_EN5_NODE1 (C_RDMA_TX_CAP_BASE + 0x350u) +#define RDMATX_CAP_COMPARE_BIT_EN6_NODE1 (C_RDMA_TX_CAP_BASE + 0x354u) +#define RDMATX_CAP_COMPARE_BIT_EN7_NODE1 (C_RDMA_TX_CAP_BASE + 0x358u) +#define RDMATX_CAP_COMPARE_BIT_EN8_NODE1 (C_RDMA_TX_CAP_BASE + 0x35Cu) +#define RDMATX_CAP_COMPARE_BIT_EN9_NODE1 (C_RDMA_TX_CAP_BASE + 0x360u) +#define RDMATX_CAP_COMPARE_BIT_EN10_NODE1 (C_RDMA_TX_CAP_BASE + 0x364u) +#define RDMATX_CAP_COMPARE_BIT_EN11_NODE1 (C_RDMA_TX_CAP_BASE + 0x368u) +#define RDMATX_CAP_COMPARE_BIT_EN12_NODE1 (C_RDMA_TX_CAP_BASE + 0x36Cu) +#define RDMATX_CAP_COMPARE_BIT_EN13_NODE1 (C_RDMA_TX_CAP_BASE + 0x370u) +#define RDMATX_CAP_COMPARE_BIT_EN14_NODE1 (C_RDMA_TX_CAP_BASE + 0x374u) +#define RDMATX_CAP_COMPARE_BIT_EN15_NODE1 (C_RDMA_TX_CAP_BASE + 0x378u) +#define RDMATX_CAP_COMPARE_DATA0_NODE1 (C_RDMA_TX_CAP_BASE + 0x290u) +#define RDMATX_CAP_COMPARE_DATA1_NODE1 (C_RDMA_TX_CAP_BASE + 0x294u) +#define RDMATX_CAP_COMPARE_DATA2_NODE1 (C_RDMA_TX_CAP_BASE + 0x298u) +#define RDMATX_CAP_COMPARE_DATA3_NODE1 (C_RDMA_TX_CAP_BASE + 0x29Cu) +#define RDMATX_CAP_COMPARE_DATA4_NODE1 (C_RDMA_TX_CAP_BASE + 0x2A0u) +#define RDMATX_CAP_COMPARE_DATA5_NODE1 (C_RDMA_TX_CAP_BASE + 0x2ACu) +#define RDMATX_CAP_COMPARE_DATA6_NODE1 (C_RDMA_TX_CAP_BASE + 0x2A8u) +#define RDMATX_CAP_COMPARE_DATA7_NODE1 (C_RDMA_TX_CAP_BASE + 0x2ACu) +#define RDMATX_CAP_COMPARE_DATA8_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B0u) +#define RDMATX_CAP_COMPARE_DATA9_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B4u) +#define RDMATX_CAP_COMPARE_DATA10_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B8u) +#define RDMATX_CAP_COMPARE_DATA11_NODE1 (C_RDMA_TX_CAP_BASE + 0x2BCu) +#define RDMATX_CAP_COMPARE_DATA12_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C0u) 
+#define RDMATX_CAP_COMPARE_DATA13_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C4u) +#define RDMATX_CAP_COMPARE_DATA14_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C8u) +#define RDMATX_CAP_COMPARE_DATA15_NODE1 (C_RDMA_TX_CAP_BASE + 0x2CCu) +#define RDMATX_CAP_AXI_WR_ADDR_LOW_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D0u) +#define RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D4u) +#define RDMATX_CAP_AXI_WR_LEN_LOW_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D8u) +#define RDMATX_CAP_AXI_WR_LEN_HIGH_NODE1 (C_RDMA_TX_CAP_BASE + 0x4F0u) +#define RDMATX_CAP_TIME_WRL2D_NODE1 (C_RDMA_TX_CAP_BASE + 0x2F8u) +#define RDMATX_CAP_VHCA_NUM_NODE1 (C_RDMA_TX_CAP_BASE + 0x288u) +#define RDMATX_CAP_AXI_ID_NODE1 (C_RDMA_TX_CAP_BASE + 0x2E0u) +#define RDMATX_CAP_CAP_ID_NODE1 (C_RDMA_TX_CAP_BASE + 0x2E8u) +#define RDMATX_DATA_START_CAP (C_RDMA_TX_CAP_BASE + 0x230u) + +/* rdmarx_pri_base_rd */ +#define RDMARX_PRI_BASE_RD (C_RDMA_RX_PUBLIC_PAGE2 + 0x418u) + +/* rdmarx_cq_period_cfg*/ +#define RDMARX_CQ_PERIOD_CFG (C_RDMA_RX_CNP_GEN_PAGE + 0x0f7cu) + +/* rdmarx_ram_scheduling */ +#define RDMARX_RAM_NUM (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x50cu) +#define RDMARX_RAM_WIDTH (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x510u) +#define RDMARX_RAM_ADDR (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x504u) +#define RDMARX_RAM_READ_LENGTH (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x508u) +#define RDMARX_RD_TIME_LIMIT (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5c0u) +#define RDMARX_RAM_READ_FLAG (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x500u) +#define RDMARX_READ_ERROR_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5c4u) +#define RDMARX_READ_CNT_ERROR (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5dcu) +#define RDMARX_RAM_REDUN_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x610u) +#define RDMARX_DOUBLE_VLD_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x614u) +#define RDMARX_RAM_MAINTENANCE_RAM(_i) \ + (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x514u + (_i)*0x4) /* i=0...14 */ +#define RDMARX_RAM_USE_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E0u) +#define RDMARX_RAM_USE_VHCA_ID (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E4u) +#define RDMARX_RAM_USE_CNT (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E8u) + +/* rdma rx cap cfg*/ +#define RDMA_RX_SEL_NODE_MODULE_NUM 14 +#define RDMA_RX_SEL_NODE_MODULE_RTT_T4 0 +#define RDMA_RX_SEL_NODE_MODULE_PKT_PROC 1 +#define RDMA_RX_SEL_NODE_MODULE_HD_CACHE 2 +#define RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR 3 +#define RDMA_RX_SEL_NODE_MODULE_PSN_CHECK 4 +#define RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK 5 +#define RDMA_RX_SEL_NODE_MODULE_READ_SRQC 6 +#define RDMA_RX_SEL_NODE_MODULE_READ_WQE 7 +#define RDMA_RX_SEL_NODE_MODULE_CNP_GEN 8 +#define RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO 9 +#define RDMA_RX_SEL_NODE_MODULE_CEQ 10 +#define RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE 11 +#define RDMA_RX_SEL_NODE_MODULE_NOF 12 +#define RDMA_RX_SEL_NODE_MODULE_TX_SUB 13 + +#define C_RDMA_RX_CAP_BASE 0x6205478000u +#define C_RDMA_RX_PKT_PROC_BASE 0x6205400000u +#define C_RDMA_RX_PUBLIC_BASE1 0x6205420000u +#define C_RDMA_RX_PUBLIC_BASE2 0x6205440000u +#define C_RDMA_RX_CNP_GEN_BASE 0x6205460000u +#define C_RDMA_RTT_BASE 0x62065F8000u +#define C_RDMA_RX_NOF_BASE 0x6205465000u +#define RDMARX_CAP_CHL_SEL_NODE0 (C_RDMA_RX_CAP_BASE + 0x604u) +#define RDMARX_CAP_CHL_OPEN_NODE0 (C_RDMA_RX_CAP_BASE + 0x60Cu) +#define RDMARX_CAP_NODE0_SEL_RTT_T4 (C_RDMA_RX_CAP_BASE + 0x774u) +#define RDMARX_CAP_NODE0_SEL_PKT_PROC (C_RDMA_RX_PKT_PROC_BASE + 0x10u) +#define RDMARX_CAP_NODE_SEL_HD_CACHE (C_RDMA_RX_PUBLIC_BASE1 + 0x610u) +#define RDMARX_CAP_NODE_SEL_VAPA_DDRWR (C_RDMA_RX_PUBLIC_BASE1 + 0xA00u) +#define RDMARX_CAP_NODE0_SEL_PRIFIELD_CHECK (C_RDMA_RX_PUBLIC_BASE2 + 0x404u) +#define RDMARX_CAP_NODE0_SEL_READ_SRQC (C_RDMA_RX_PUBLIC_BASE2 + 0x870u) 
+#define RDMARX_CAP_NODE0_SEL_READ_WQE (C_RDMA_RX_PUBLIC_BASE2 + 0xC14u) +#define RDMARX_CAP_NODE0_SEL_CNP_GEN (C_RDMA_RX_CNP_GEN_BASE + 0x1Cu) +#define RDMARX_CAP_NODE_SEL_ACKNAKFIFO (C_RDMA_RX_CNP_GEN_BASE + 0x3CCu) +#define RDMARX_CAP_NODE0_SEL_CQE (C_RDMA_RX_CNP_GEN_BASE + 0x814u) +#define RDMARX_CAP_NODE0_SEL_COMPLQUEUE (C_RDMA_RX_CNP_GEN_BASE + 0xC70u) +#define RDMARX_CAP_NODE_SEL_NOF (C_RDMA_RX_NOF_BASE + 0x400u) +#define RDMARX_CAP_NODE0_SEL_TXSUB (C_RDMA_RTT_BASE + 0x460u) + +#define RDMARX_CAP_COMPARE_BIT_EN0_NODE0 (C_RDMA_RX_CAP_BASE + 0x6DCu) +#define RDMARX_CAP_COMPARE_BIT_EN1_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E0u) +#define RDMARX_CAP_COMPARE_BIT_EN2_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E4u) +#define RDMARX_CAP_COMPARE_BIT_EN3_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E8u) +#define RDMARX_CAP_COMPARE_BIT_EN4_NODE0 (C_RDMA_RX_CAP_BASE + 0x6ECu) +#define RDMARX_CAP_COMPARE_BIT_EN5_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F0u) +#define RDMARX_CAP_COMPARE_BIT_EN6_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F4u) +#define RDMARX_CAP_COMPARE_BIT_EN7_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F8u) +#define RDMARX_CAP_COMPARE_BIT_EN8_NODE0 (C_RDMA_RX_CAP_BASE + 0x6FCu) +#define RDMARX_CAP_COMPARE_BIT_EN9_NODE0 (C_RDMA_RX_CAP_BASE + 0x700u) +#define RDMARX_CAP_COMPARE_BIT_EN10_NODE0 (C_RDMA_RX_CAP_BASE + 0x704u) +#define RDMARX_CAP_COMPARE_BIT_EN11_NODE0 (C_RDMA_RX_CAP_BASE + 0x708u) +#define RDMARX_CAP_COMPARE_BIT_EN12_NODE0 (C_RDMA_RX_CAP_BASE + 0x70Cu) +#define RDMARX_CAP_COMPARE_BIT_EN13_NODE0 (C_RDMA_RX_CAP_BASE + 0x710u) +#define RDMARX_CAP_COMPARE_BIT_EN14_NODE0 (C_RDMA_RX_CAP_BASE + 0x714u) +#define RDMARX_CAP_COMPARE_BIT_EN15_NODE0 (C_RDMA_RX_CAP_BASE + 0x718u) +#define RDMARX_CAP_COMPARE_DATA0_NODE0 (C_RDMA_RX_CAP_BASE + 0x61Cu) +#define RDMARX_CAP_COMPARE_DATA1_NODE0 (C_RDMA_RX_CAP_BASE + 0x620u) +#define RDMARX_CAP_COMPARE_DATA2_NODE0 (C_RDMA_RX_CAP_BASE + 0x624u) +#define RDMARX_CAP_COMPARE_DATA3_NODE0 (C_RDMA_RX_CAP_BASE + 0x628u) +#define RDMARX_CAP_COMPARE_DATA4_NODE0 (C_RDMA_RX_CAP_BASE + 0x62Cu) +#define RDMARX_CAP_COMPARE_DATA5_NODE0 (C_RDMA_RX_CAP_BASE + 0x630u) +#define RDMARX_CAP_COMPARE_DATA6_NODE0 (C_RDMA_RX_CAP_BASE + 0x634u) +#define RDMARX_CAP_COMPARE_DATA7_NODE0 (C_RDMA_RX_CAP_BASE + 0x638u) +#define RDMARX_CAP_COMPARE_DATA8_NODE0 (C_RDMA_RX_CAP_BASE + 0x63Cu) +#define RDMARX_CAP_COMPARE_DATA9_NODE0 (C_RDMA_RX_CAP_BASE + 0x640u) +#define RDMARX_CAP_COMPARE_DATA10_NODE0 (C_RDMA_RX_CAP_BASE + 0x644u) +#define RDMARX_CAP_COMPARE_DATA11_NODE0 (C_RDMA_RX_CAP_BASE + 0x648u) +#define RDMARX_CAP_COMPARE_DATA12_NODE0 (C_RDMA_RX_CAP_BASE + 0x64Cu) +#define RDMARX_CAP_COMPARE_DATA13_NODE0 (C_RDMA_RX_CAP_BASE + 0x650u) +#define RDMARX_CAP_COMPARE_DATA14_NODE0 (C_RDMA_RX_CAP_BASE + 0x654u) +#define RDMARX_CAP_COMPARE_DATA15_NODE0 (C_RDMA_RX_CAP_BASE + 0x658u) +#define RDMARX_CAP_AXI_WR_ADDR_LOW_NODE0 (C_RDMA_RX_CAP_BASE + 0x6ACu) +#define RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B0u) +#define RDMARX_CAP_AXI_WR_LEN_LOW_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B4u) +#define RDMARX_CAP_AXI_WR_LEN_HIGH_NODE0 (C_RDMA_RX_CAP_BASE + 0x898u) +#define RDMARX_CAP_TIME_WRL2D_NODE0 (C_RDMA_RX_CAP_BASE + 0x6D4u) +#define RDMARX_CAP_VHCA_NUM_NODE0 (C_RDMA_RX_CAP_BASE + 0x69Cu) +#define RDMARX_CAP_AXI_ID_NODE0 (C_RDMA_RX_CAP_BASE + 0x6A4u) +#define RDMARX_CAP_CAP_ID_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B8u) + +#define RDMARX_CAP_CHL_SEL_NODE1 (C_RDMA_RX_CAP_BASE + 0x608u) +#define RDMARX_CAP_CHL_OPEN_NODE1 (C_RDMA_RX_CAP_BASE + 0x610u) +#define RDMARX_CAP_NODE1_SEL_RTT_T4 (C_RDMA_RX_CAP_BASE + 0x778u) +#define 
RDMARX_CAP_NODE1_SEL_PKT_PROC (C_RDMA_RX_PKT_PROC_BASE + 0x14u) +#define RDMARX_CAP_NODE1_SEL_PRIFIELD_CHECK (C_RDMA_RX_PUBLIC_BASE2 + 0x408u) +#define RDMARX_CAP_NODE1_SEL_READ_SRQC (C_RDMA_RX_PUBLIC_BASE2 + 0x874u) +#define RDMARX_CAP_NODE1_SEL_READ_WQE (C_RDMA_RX_PUBLIC_BASE2 + 0xC18u) +#define RDMARX_CAP_NODE1_SEL_CNP_GEN (C_RDMA_RX_CNP_GEN_BASE + 0x20u) +#define RDMARX_CAP_NODE1_SEL_CQE (C_RDMA_RX_CNP_GEN_BASE + 0x818u) +#define RDMARX_CAP_NODE1_SEL_COMPLQUEUE (C_RDMA_RX_CNP_GEN_BASE + 0xC74u) +#define RDMARX_CAP_NODE1_SEL_TXSUB (C_RDMA_RTT_BASE + 0x464u) + +#define RDMARX_CAP_COMPARE_BIT_EN0_NODE1 (C_RDMA_RX_CAP_BASE + 0x71Cu) +#define RDMARX_CAP_COMPARE_BIT_EN1_NODE1 (C_RDMA_RX_CAP_BASE + 0x720u) +#define RDMARX_CAP_COMPARE_BIT_EN2_NODE1 (C_RDMA_RX_CAP_BASE + 0x724u) +#define RDMARX_CAP_COMPARE_BIT_EN3_NODE1 (C_RDMA_RX_CAP_BASE + 0x728u) +#define RDMARX_CAP_COMPARE_BIT_EN4_NODE1 (C_RDMA_RX_CAP_BASE + 0x72Cu) +#define RDMARX_CAP_COMPARE_BIT_EN5_NODE1 (C_RDMA_RX_CAP_BASE + 0x730u) +#define RDMARX_CAP_COMPARE_BIT_EN6_NODE1 (C_RDMA_RX_CAP_BASE + 0x734u) +#define RDMARX_CAP_COMPARE_BIT_EN7_NODE1 (C_RDMA_RX_CAP_BASE + 0x738u) +#define RDMARX_CAP_COMPARE_BIT_EN8_NODE1 (C_RDMA_RX_CAP_BASE + 0x73Cu) +#define RDMARX_CAP_COMPARE_BIT_EN9_NODE1 (C_RDMA_RX_CAP_BASE + 0x740u) +#define RDMARX_CAP_COMPARE_BIT_EN10_NODE1 (C_RDMA_RX_CAP_BASE + 0x744u) +#define RDMARX_CAP_COMPARE_BIT_EN11_NODE1 (C_RDMA_RX_CAP_BASE + 0x748u) +#define RDMARX_CAP_COMPARE_BIT_EN12_NODE1 (C_RDMA_RX_CAP_BASE + 0x74Cu) +#define RDMARX_CAP_COMPARE_BIT_EN13_NODE1 (C_RDMA_RX_CAP_BASE + 0x750u) +#define RDMARX_CAP_COMPARE_BIT_EN14_NODE1 (C_RDMA_RX_CAP_BASE + 0x754u) +#define RDMARX_CAP_COMPARE_BIT_EN15_NODE1 (C_RDMA_RX_CAP_BASE + 0x758u) +#define RDMARX_CAP_COMPARE_DATA0_NODE1 (C_RDMA_RX_CAP_BASE + 0x65Cu) +#define RDMARX_CAP_COMPARE_DATA1_NODE1 (C_RDMA_RX_CAP_BASE + 0x660u) +#define RDMARX_CAP_COMPARE_DATA2_NODE1 (C_RDMA_RX_CAP_BASE + 0x664u) +#define RDMARX_CAP_COMPARE_DATA3_NODE1 (C_RDMA_RX_CAP_BASE + 0x668u) +#define RDMARX_CAP_COMPARE_DATA4_NODE1 (C_RDMA_RX_CAP_BASE + 0x66Cu) +#define RDMARX_CAP_COMPARE_DATA5_NODE1 (C_RDMA_RX_CAP_BASE + 0x670u) +#define RDMARX_CAP_COMPARE_DATA6_NODE1 (C_RDMA_RX_CAP_BASE + 0x674u) +#define RDMARX_CAP_COMPARE_DATA7_NODE1 (C_RDMA_RX_CAP_BASE + 0x678u) +#define RDMARX_CAP_COMPARE_DATA8_NODE1 (C_RDMA_RX_CAP_BASE + 0x67Cu) +#define RDMARX_CAP_COMPARE_DATA9_NODE1 (C_RDMA_RX_CAP_BASE + 0x680u) +#define RDMARX_CAP_COMPARE_DATA10_NODE1 (C_RDMA_RX_CAP_BASE + 0x684u) +#define RDMARX_CAP_COMPARE_DATA11_NODE1 (C_RDMA_RX_CAP_BASE + 0x688u) +#define RDMARX_CAP_COMPARE_DATA12_NODE1 (C_RDMA_RX_CAP_BASE + 0x68Cu) +#define RDMARX_CAP_COMPARE_DATA13_NODE1 (C_RDMA_RX_CAP_BASE + 0x690u) +#define RDMARX_CAP_COMPARE_DATA14_NODE1 (C_RDMA_RX_CAP_BASE + 0x694u) +#define RDMARX_CAP_COMPARE_DATA15_NODE1 (C_RDMA_RX_CAP_BASE + 0x698u) +#define RDMARX_CAP_AXI_WR_ADDR_LOW_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C0u) +#define RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C4u) +#define RDMARX_CAP_AXI_WR_LEN_LOW_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C8u) +#define RDMARX_CAP_AXI_WR_LEN_HIGH_NODE1 (C_RDMA_RX_CAP_BASE + 0x89Cu) +#define RDMARX_CAP_TIME_WRL2D_NODE1 (C_RDMA_RX_CAP_BASE + 0x6D8u) +#define RDMARX_CAP_VHCA_NUM_NODE1 (C_RDMA_RX_CAP_BASE + 0x6A0u) +#define RDMARX_CAP_AXI_ID_NODE1 (C_RDMA_RX_CAP_BASE + 0x6A8u) +#define RDMARX_CAP_CAP_ID_NODE1 (C_RDMA_RX_CAP_BASE + 0x6BCu) +#define RDMARX_DATA_START_CAP (C_RDMA_RX_CAP_BASE + 0x614u) + +/* rdmarx_ceq_pf */ +#define RDMARX_CEQ_EQARM_RAM (C_RDMA_RX_VHCA_PF_PAGE + 0x684u) //CEQ 
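/*
 * Note that C_CEQ_EQARM_RAM below resolves to the same per-PF offset
 * (0x684) as RDMARX_CEQ_EQARM_RAM above; the two macros appear to be
 * aliases for the same CEQ arm doorbell register.
 */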
+#define C_CEQ_EQARM_RAM (C_RDMA_RX_VHCA_PF_PAGE + 0x00000684u) + +/* rdmarx_completion_queue_pf */ +#define RDMARX_CQ_CQARM (C_RDMA_RX_VHCA_PF_PAGE + 0x588u) + +/* rdmarx_pkt_proc_pfvf */ +#define RDMARX_MUL_CACHE_CFG_SIDN_RAM (C_RDMA_RX_VHCA_PAGE + 0x108u) +#define RDMARX_MUL_COPY_QPN_INDICATE (C_RDMA_RX_VHCA_PAGE + 0x214u) + +/* rdma_rdmarx_hd_cache_top_pfvf */ +#define RDMARX_LIST_CACHE_BASE_QPN (C_RDMA_RX_VHCA_PAGE + 0x28Cu) +#define RDMARX_PLD_WR_AXIID_RAM (C_RDMA_RX_VHCA_PAGE + 0x304u) + +/* rdmarx_prifield_check_pfvf */ +#define RDMARX_VHCA_MAX_SIZE_RAM (C_RDMA_RX_VHCA_PAGE + 0x400u) + +/* rdmarx_read_srqc_top_pfvf */ +#define RDMARX_SRQN_BASE_RAM (C_RDMA_RX_VHCA_PAGE + 0x49Cu) +#define C_DB_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x48Cu) /* i=0...1023 */ +#define C_SRQL_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x494u) /* i=0...1023 */ + +/* rdmarx_completion_queue_pf */ +#define RDMARX_RQ_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x500u) +#define RDMARX_SRQ_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x508u) +#define RDMARX_PCI_MAX_MRTE_INDEX_RAM (C_RDMA_RX_VHCA_PAGE + 0x50Cu) + +/* rdmarx_ceq_pfvf */ +#define C_CEQ_CEQE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000680u) /* i=0...1023 */ +#define C_CEQ_RPBLE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000684u) /* i=0...1023 */ +#define C_CEQ_LPBLE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000688u) /* i=0...1023 */ +#define C_CEQ_INT_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x0000068Cu) /* i=0...1023 */ +#define RDMARX_ACK_RQDB_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x600u) + +/* rdmarx_completion_queue_pfvf */ +#define RDMARX_CQ_CQN_BASE_OFFSET_RAM (C_RDMA_RX_VHCA_PAGE + 0x580u) +#define RDMARX_CQ_CQE_AXI_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x594u) +#define RDMARX_CQ_DBSA_AXI_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x598u) + +/****** IO Module Register ******/ +#define C_RDMAIO_TABLE2 (C_RDMA_IO_VHCA_PAGE + 0x060u) +#define C_RDMAIO_TABLE4 (C_RDMA_IO_VHCA_PAGE + 0x080u) +#define C_RDMAIO_TABLE3 (C_RDMA_IO_VHCA_PAGE + 0x070u) + +#define C_RDMAIO_TABLE5_0 (C_RDMA_IO_VHCA_PAGE + 0x090u) +#define C_RDMAIO_TABLE5_1 (C_RDMA_IO_VHCA_PAGE + 0x094u) +#define C_RDMAIO_TABLE5_2 (C_RDMA_IO_VHCA_PAGE + 0x098u) +#define C_RDMAIO_TABLE5_3 (C_RDMA_IO_VHCA_PAGE + 0x09cu) + +#define C_RDMAIO_TABLE5_4 (C_RDMA_IO_VHCA_PAGE + 0x0a0u) +#define C_RDMAIO_TABLE5_5 (C_RDMA_IO_VHCA_PAGE + 0x0a4u) +#define C_RDMAIO_TABLE5_6 (C_RDMA_IO_VHCA_PAGE + 0x0a8u) +#define C_RDMAIO_TABLE5_7 (C_RDMA_IO_VHCA_PAGE + 0x0acu) + +#define C_RDMAIO_TABLE5_8 (C_RDMA_IO_VHCA_PAGE + 0x0b0u) +#define C_RDMAIO_TABLE5_9 (C_RDMA_IO_VHCA_PAGE + 0x0b4u) +#define C_RDMAIO_TABLE5_10 (C_RDMA_IO_VHCA_PAGE + 0x0b8u) +#define C_RDMAIO_TABLE5_11 (C_RDMA_IO_VHCA_PAGE + 0x0bcu) + +#define C_RDMAIO_TABLE5_12 (C_RDMA_IO_VHCA_PAGE + 0x0c0u) +#define C_RDMAIO_TABLE5_13 (C_RDMA_IO_VHCA_PAGE + 0x0c4u) +#define C_RDMAIO_TABLE5_14 (C_RDMA_IO_VHCA_PAGE + 0x0c8u) +#define C_RDMAIO_TABLE5_15 (C_RDMA_IO_VHCA_PAGE + 0x0ccu) + +#define C_RDMAIO_TABLE5_16 (C_RDMA_IO_VHCA_PAGE + 0x0d0u) +#define C_RDMAIO_TABLE5_17 (C_RDMA_IO_VHCA_PAGE + 0x0d4u) +#define C_RDMAIO_TABLE5_18 (C_RDMA_IO_VHCA_PAGE + 0x0d8u) +#define C_RDMAIO_TABLE5_19 (C_RDMA_IO_VHCA_PAGE + 0x0dcu) + +#define C_RDMAIO_TABLE5_20 (C_RDMA_IO_VHCA_PAGE + 0x0e0u) +#define C_RDMAIO_TABLE5_21 (C_RDMA_IO_VHCA_PAGE + 0x0e4u) +#define C_RDMAIO_TABLE5_22 (C_RDMA_IO_VHCA_PAGE + 0x0e8u) +#define C_RDMAIO_TABLE5_23 (C_RDMA_IO_VHCA_PAGE + 0x0ecu) + +#define C_RDMAIO_TABLE5_24 (C_RDMA_IO_VHCA_PAGE + 0x0f0u) +#define C_RDMAIO_TABLE5_25 (C_RDMA_IO_VHCA_PAGE + 0x0f4u) +#define C_RDMAIO_TABLE5_26 (C_RDMA_IO_VHCA_PAGE + 0x0f8u) 
+#define C_RDMAIO_TABLE5_27 (C_RDMA_IO_VHCA_PAGE + 0x0fcu) + +#define C_RDMAIO_TABLE5_28 (C_RDMA_IO_VHCA_PAGE + 0x100u) +#define C_RDMAIO_TABLE5_29 (C_RDMA_IO_VHCA_PAGE + 0x104u) +#define C_RDMAIO_TABLE5_30 (C_RDMA_IO_VHCA_PAGE + 0x108u) +#define C_RDMAIO_TABLE5_31 (C_RDMA_IO_VHCA_PAGE + 0x10cu) + +#define C_RDMAIO_TABLE7 (C_RDMA_IO_SIDN_PAGE + 0x000u) +#define C_RDMAIO_TABLE6_0 (C_RDMA_IO_SIDN_PAGE + 0x010u) +#define C_RDMAIO_TABLE6_1 (C_RDMA_IO_SIDN_PAGE + 0x014u) +#define C_RDMAIO_TABLE6_2 (C_RDMA_IO_SIDN_PAGE + 0x018u) +#define C_RDMAIO_TABLE6_3 (C_RDMA_IO_SIDN_PAGE + 0x01Cu) +#define C_RDMAIO_TABLE6_4 (C_RDMA_IO_SIDN_PAGE + 0x020u) +#define C_RDMAIO_TABLE6_5 (C_RDMA_IO_SIDN_PAGE + 0x024u) +#define C_RDMAIO_TABLE6_6 (C_RDMA_IO_SIDN_PAGE + 0x028u) +#define C_RDMAIO_TABLE6_7 (C_RDMA_IO_SIDN_PAGE + 0x02Cu) +#define C_RDMAIO_TABLE6_8 (C_RDMA_IO_SIDN_PAGE + 0x030u) +#define C_RDMAIO_TABLE6_9 (C_RDMA_IO_SIDN_PAGE + 0x034u) +#define C_RDMAIO_TABLE6_10 (C_RDMA_IO_SIDN_PAGE + 0x038u) +#define C_RDMAIO_TABLE6_11 (C_RDMA_IO_SIDN_PAGE + 0x03Cu) +#define C_RDMAIO_TABLE6_12 (C_RDMA_IO_SIDN_PAGE + 0x040u) +#define C_RDMAIO_TABLE6_13 (C_RDMA_IO_SIDN_PAGE + 0x044u) +#define C_RDMAIO_TABLE6_14 (C_RDMA_IO_SIDN_PAGE + 0x048u) +#define C_RDMAIO_TABLE6_15 (C_RDMA_IO_SIDN_PAGE + 0x04Cu) + +/**************************NVME OF L2D *************************/ +#define NOF_IOQ_VHCA_ID(_i) \ + (C_RDMA_NOF_L2D_PAGES + 0x000u + (_i)*0x8) /* i=nof_qid : 0...1023 */ +#define NOF_IOQ_PD_ID(_i) \ + (C_RDMA_NOF_L2D_PAGES + 0x004u + (_i)*0x8) /* i=nof_qid : 0...1023 */ + +/**************************PF HMC REGISTER *************************/ +/******************************PBLEMR***************************/ + +#define C_HMC_PBLEMR_RX1 (C_RDMA_RX_VHCA_PAGE + 0x308u) // ram num:0x35 +#define C_HMC_PBLEMR_RX2 (C_RDMA_RX_VHCA_PAGE + 0x610u) + +#define C_HMC_PBLEMR_TX1 (C_RDMA_TX_VHCA_PAGE + 0x00Cu) +#define C_HMC_PBLEMR_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC18u) + +#define C_HMC_PBLEMR_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x040u) +#define C_HMC_PBLEMR_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x000u) +#define C_HMC_PBLEMR_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x004u) + +/******************************PBLEQUEUE***************************/ + +#define C_HMC_PBLEQUEUE_RX1 (C_RDMA_RX_VHCA_PAGE + 0x404u) +#define C_HMC_PBLEQUEUE_RX2 (C_RDMA_RX_VHCA_PAGE + 0x688u) +#define C_HMC_PBLEQUEUE_RX3 (C_RDMA_RX_VHCA_PAGE + 0x684u) +#define C_HMC_PBLEQUEUE_RX4 (C_RDMA_RX_VHCA_PAGE + 0x5A0u) +#define C_HMC_PBLEQUEUE_RX5 (C_RDMA_RX_VHCA_PAGE + 0x59Cu) + +#define C_HMC_PBLEQUEUE_TX1 (C_RDMA_TX_VHCA_PAGE + 0x000u) +#define C_HMC_PBLEQUEUE_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC00u) + +#define C_HMC_PBLEQUEUE_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x044u) +#define C_HMC_PBLEQUEUE_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x008u) +#define C_HMC_PBLEQUEUE_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x00Cu) + +/******************************MRTE***************************/ +#define C_HMC_MRTE_RX1 (C_RDMA_RX_VHCA_PAGE + 0x300u) +#define C_HMC_MRTE_RX2 (C_RDMA_RX_VHCA_PAGE + 0x60Cu) + +#define C_HMC_MRTE_TX1 (C_RDMA_TX_VHCA_PAGE + 0x008u) +#define C_HMC_MRTE_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC14u) +#define C_HMC_MRTE_TX3 (C_RDMA_TX_VHCA_PAGE + 0xC10u) + +#define C_HMC_MRTE_CQP (C_RDMA_CQP_VHCA_PF_PAGE + 0x00Cu) + +#define C_HMC_MRTE_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x048u) +#define C_HMC_MRTE_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x010u) +#define C_HMC_MRTE_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x014u) + +/******************************AH***************************/ + 
+#define C_HMC_AH_TX (C_RDMA_TX_VHCA_PAGE + 0xC08u) + +#define C_HMC_AH_CQP (C_RDMA_CQP_VHCA_PF_PAGE + 0x010u) + +#define C_HMC_AH_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x04Cu) +#define C_HMC_AH_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x018u) +#define C_HMC_AH_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x01Cu) + +/******************************IRD***************************/ + +#define C_HMC_IRD_RX1 (C_RDMA_RX_VHCA_PAGE + 0x604u) +#define C_HMC_IRD_RX2 (C_RDMA_RX_VHCA_PAGE + 0x608u) +#define C_HMC_IRD_RX3 (C_RDMA_RX_VHCA_PAGE + 0x614u) + +#define C_HMC_IRD_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x050u) +#define C_HMC_IRD_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x020u) +#define C_HMC_IRD_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x024u) + +/******************************TX_WINDOW***************************/ +#define C_HMC_TX_WINDOW_TX (C_RDMA_TX_VHCA_PAGE + 0x80Cu) +// #define TXWINDOW_DDR_SIZE 0x62065e0100 +#define C_HMC_TX_WINDOW_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x054u) +#define C_HMC_TX_WINDOW_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x028u) +#define C_HMC_TX_WINDOW_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x02Cu) + +/******************************QPC***************************/ +#define C_HMC_QPC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x280u) +#define C_HMC_QPC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x284u) +#define C_HMC_QPC_RX (C_RDMA_RX_VHCA_PAGE + 0x288u) + +#define C_HMC_QPC_TX_BASE_LOW (C_RDMA_TX_VHCA_PAGE + 0x41Cu) +#define C_HMC_QPC_TX_BASE_HIGH (C_RDMA_TX_VHCA_PAGE + 0x438u) + +#define C_HMC_QPC_TX (C_RDMA_TX_VHCA_PAGE + 0x414u) + +/******************************SRQC***************************/ +#define C_HMC_SRQC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x480u) +#define C_HMC_SRQC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x484u) +#define C_HMC_SRQC_RX (C_RDMA_RX_VHCA_PAGE + 0x488u) + +/******************************CQC***************************/ +#define C_HMC_CQC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x584u) +#define C_HMC_CQC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x588u) +#define C_HMC_CQC_RX1 (C_RDMA_RX_VHCA_PAGE + 0x58Cu) +#define C_HMC_CQC_RX2 (C_RDMA_RX_VHCA_PAGE + 0x590u) + +/*******************************************MR Related Registers*************************************/ +#define C_TX_MRTE_INDEX_CFG (C_RDMA_TX_VHCA_PAGE + 0xC24u) + +#define C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE(_i) \ + (0x6204800040u + (0x1000 * (_i))) + +/**************** End of ZTE RDMA Registers ***************/ +#define C_QPC_IRD_SIZE 16 + +enum icrdma_device_caps_const { + //ICRDMA_MAX_WQ_FRAGMENT_COUNT = 30, + //ICRDMA_MAX_SGE_RD = 30, + ICRDMA_MAX_STATS_COUNT = 128, + + ICRDMA_MAX_IRD_SIZE = 16, + ICRDMA_MAX_ORD_SIZE = 16, + +}; + +void zxdh_init_hw(struct zxdh_sc_dev *dev); +void zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle); +bool zxdh_is_config_ok(struct zxdh_config_check *cc, struct zxdh_sc_vsi *vsi); +void zxdh_check_fc_for_tc_update(struct zxdh_sc_vsi *vsi, + struct zxdh_l2params *l2params); +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); +#endif /* ICRDMA_HW_H*/ diff --git a/src/rdma/src/iidc.h b/src/rdma/src/iidc.h new file mode 100644 index 0000000000000000000000000000000000000000..715c6b596c1964b7202124a9a6d9767b604d1259 --- /dev/null +++ b/src/rdma/src/iidc.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include +#include +#include +#include +#include +#include +//#include 
"../../../..//zxdh_kernel/include/linux/dinghai/auxiliary_bus.h" + +/* This major and minor version represent IDC API version information. + * + * The concept of passing an API version should be incorporated into the + * auxiliary drivers' probe handlers to check if they can communicate with the + * core PCI driver. During auxiliary driver probe, auxiliary driver should + * check major and minor version information (via iidc_core_dev_info:ver). If + * the version check fails, the auxiliary driver should fail the probe and log + * an appropriate message. + */ +#define IIDC_MAJOR_VER 10 +#define IIDC_MINOR_VER 1 + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_VF_RESET, + IIDC_EVENT_LINK_CHNG, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +enum iidc_rdma_protocol { + IIDC_RDMA_PROTOCOL_IWARP = BIT(0), + IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1), +}; + +enum iidc_rdma_gen { + IIDC_RDMA_GEN_1, + IIDC_RDMA_GEN_2, + IIDC_RDMA_GEN_3, + IIDC_RDMA_GEN_4, +}; + +struct iidc_rdma_caps { + u8 gen; /* Hardware generation */ + u8 protocols; /* bitmap of supported protocols */ +}; +/* This information is needed to handle auxiliary driver probe */ +struct iidc_ver_info { + u16 major; + u16 minor; + u64 support; +}; + +/* Struct to hold per DCB APP info */ +struct iidc_dcb_app_info { + u8 priority; + u8 selector; + u16 prot_id; +}; + +struct iidc_core_dev_info; + +#define IIDC_MAX_USER_PRIORITY 8 +#define IIDC_MAX_APPS 64 +#define IIDC_MAX_DSCP_MAPPING 64 +#define IIDC_VLAN_PFC_MODE 0x0 +#define IIDC_DSCP_PFC_MODE 0x1 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + u32 teid; /* qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the QSet should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to hold QoS info */ +struct iidc_qos_params { + // struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + // u8 up2tc[IIDC_MAX_USER_PRIORITY]; + // u8 vport_relative_bw; + // u8 vport_priority_type; + // u32 num_apps; + // u8 pfc_mode; + // struct iidc_dcb_app_info apps[IIDC_MAX_APPS]; + // u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +union iidc_event_info { + /* IIDC_EVENT_AFTER_TC_CHANGE */ + struct iidc_qos_params port_qos; + /* IIDC_EVENT_LINK_CHNG */ + bool link_up; + /* IIDC_EVENT_VF_RESET */ + u32 vf_id; + /* IIDC_EVENT_CRIT_ERR */ + u32 reg; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + union iidc_event_info info; +}; + +/* RDMA queue vector map info */ +struct iidc_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct iidc_qvlist_info { + u32 num_vectors; + struct iidc_qv_info qv_info[1]; +}; + +struct iidc_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +/* Following APIs are implemented by core PCI driver */ +struct iidc_core_ops { + /* APIs to allocate resources such as VEB, VSI, Doorbell queues, + * completion queues, Tx/Rx queues, etc... 
+ */ + int (*request_reset)(struct iidc_core_dev_info *cdev_info, + enum iidc_reset_type reset_type); +}; + +#define IIDC_RDMA_ROCE_NAME "roce" +#define IIDC_RDMA_IWARP_NAME "iwarp" +#define IIDC_RDMA_ID 0x00000010 +#define IIDC_IEPS_NAME "ieps" +#define IIDC_IEPS_ID 0x00000015 +#define IIDC_MAX_NUM_AUX 5 + +/* The const struct that instantiates cdev_info_id needs to be initialized + * in the .c with the macro ASSIGN_IIDC_INFO. + * For example: + * static const struct cdev_info_id cdev_info_ids[] = ASSIGN_IIDC_INFO; + */ +struct cdev_info_id { + char *name; + int id; +}; + +#define IIDC_RDMA_INFO { .name = IIDC_RDMA_ROCE_NAME, .id = IIDC_RDMA_ID }, +#define IIDC_IEPS_INFO { .name = IIDC_IEPS_NAME, .id = IIDC_IEPS_ID }, + +#define ASSIGN_IIDC_INFO \ + { \ + IIDC_IEPS_INFO \ + IIDC_RDMA_INFO \ + } + +enum iidc_function_type { + IIDC_FUNCTION_TYPE_PF, + IIDC_FUNCTION_TYPE_VF, +}; + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev, each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ +struct iidc_core_dev_info { + struct pci_dev *pdev; /* PCI device of corresponding to main function */ + struct auxiliary_device *adev; + /* KVA / Linear address corresponding to BAR0 of underlying + * pci_device. + */ + u8 __iomem *hw_addr; + int cdev_info_id; + struct iidc_ver_info ver; + + /* Opaque pointer for aux driver specific data tracking. This memory + * will be alloc'd and freed by the auxiliary driver and used for + * private data accessible only to the specific auxiliary driver. + * It is stored here so that when this struct is passed to the + * auxiliary driver via an IIDC call, the data can be accessed + * at that time. + */ + void *auxiliary_priv; + + enum iidc_function_type ftype; + u16 vport_id; + /* Current active RDMA protocol */ + enum iidc_rdma_protocol rdma_protocol; + + struct iidc_qos_params qos_info; + struct net_device *netdev; + + struct msix_entry *msix_entries; + u16 msix_count; /* How many vectors are reserved for this device */ + // struct iidc_rdma_caps rdma_caps; + /* Following struct contains function pointers to be initialized + * by core PCI driver and called by auxiliary driver + */ + const struct iidc_core_ops *ops; + u8 pf_id; +}; + +struct iidc_auxiliary_dev { + struct auxiliary_device adev; + struct iidc_core_dev_info *cdev_info; +}; + +struct zxdh_core_dev_info { + struct pci_dev *pdev; /* PCI device of corresponding to main function */ + struct auxiliary_device *adev; + /* KVA / Linear address corresponding to BAR0 of underlying + * pci_device. + */ + u8 __iomem *hw_addr; + int cdev_info_id; + struct iidc_ver_info ver; + + /* Opaque pointer for aux driver specific data tracking. This memory + * will be alloc'd and freed by the auxiliary driver and used for + * private data accessible only to the specific auxiliary driver. + * It is stored here so that when this struct is passed to the + * auxiliary driver via an IIDC call, the data can be accessed + * at that time. 
+ */ + void *auxiliary_priv; + + enum iidc_function_type ftype; + u16 vport_id; + /* Current active RDMA protocol */ + enum iidc_rdma_protocol rdma_protocol; + + struct iidc_qos_params qos_info; + //struct net_device *netdev; + + struct msix_entry msix_entries; + u16 msix_count; /* How many vectors are reserved for this device */ + // struct iidc_rdma_caps rdma_caps; + /* Following struct contains function pointers to be initialized + * by core PCI driver and called by auxiliary driver + */ + const struct iidc_core_ops *ops; + u8 pf_id; +}; + +struct zxdh_rdma_if { + void *(*get_rdma_netdev)(void *dh_dev); +}; + +struct zxdh_auxiliary_dev { + struct auxiliary_device adev; + struct zxdh_core_dev_info *zxdh_info; + + struct zxdh_rdma_if *rdma_ops; + void *ops; + void *parent; + int32_t aux_id; +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. + */ +struct iidc_auxiliary_drv { + struct auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. + */ + void (*event_handler)(struct iidc_core_dev_info *cdev_info, + struct iidc_event *event); + int (*vc_receive)(struct iidc_core_dev_info *cdev_info, u32 vf_id, + u8 *msg, u16 len); +}; + +#endif /* _IIDC_H_*/ diff --git a/src/rdma/src/linux_kcompat.h b/src/rdma/src/linux_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..b6931be0f2004a3701c85fa7fe83568e0e8be82d --- /dev/null +++ b/src/rdma/src/linux_kcompat.h @@ -0,0 +1,441 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef LINUX_KCOMPAT_H +#define LINUX_KCOMPAT_H + +/* IB_IW_PKEY */ +#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE +#define IB_IW_PKEY +#endif + +/* DEV_OPS_FILL_ENTRY */ +#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE +#define IB_DEV_OPS_FILL_ENTRY +#endif + +/* KMAP_LOCAL_PAGE */ +#if KERNEL_VERSION(5, 11, 0) > LINUX_VERSION_CODE +#define USE_KMAP +#endif + +/* CREATE_AH */ +#if KERNEL_VERSION(5, 8, 0) <= LINUX_VERSION_CODE +#define CREATE_AH_VER_5 +#elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define CREATE_AH_VER_2 +#elif KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE +#define CREATE_AH_VER_3 +#elif KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE +#define CREATE_AH_VER_1_2 +#define ETHER_COPY_VER_2 +#else +#define CREATE_AH_VER_1_1 +#define ETHER_COPY_VER_1 +#endif + +/* PROCESS_MAD */ +#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE +#define PROCESS_MAD_VER_3 +#elif KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE +#define PROCESS_MAD_VER_2 +#else +#define PROCESS_MAD_VER_1 +#endif + +/* ZRDMA_CREATE_SRQ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) +#define ZRDMA_CREATE_SRQ_VER_1 +#else +#define ZRDMA_CREATE_SRQ_VER_2 +#endif + +/* ZRDMA_DESTROY_SRQ */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +#define ZRDMA_DESTROY_SRQ_VER_3 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) +#define ZRDMA_DESTROY_SRQ_VER_2 +#else +#define ZRDMA_DESTROY_SRQ_VER_1 +#endif /* LINUX_VERSION_CODE */ + +/* DESTROY_AH */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +#define DESTROY_AH_VER_4 +#else +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define DESTROY_AH_VER_3 +#else +#if 
KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE +#define DESTROY_AH_VER_2 +#else +#define DESTROY_AH_VER_1 +#endif +#endif +#endif + +/* CREAT_QP */ +#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE +#define CREATE_QP_VER_2 +#define GLOBAL_QP_MEM +#else +#define CREATE_QP_VER_1 +#endif + +/* DESTROY_QP */ +#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE +#define DESTROY_QP_VER_1 +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp) +#else +#define DESTROY_QP_VER_2 +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp, udata) +#endif + +/* CREATE_CQ */ +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE +#define CREATE_CQ_VER_3 +#elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define CREATE_CQ_VER_2 +#else +#define CREATE_CQ_VER_1 +#endif + +/* COPY_USER_PGDADDR */ +#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE +#define COPY_USER_PGADDR_VER_4 +#elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define COPY_USER_PGADDR_VER_3 +#elif KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE +#define COPY_USER_PGADDR_VER_1 +#elif KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE +#define COPY_USER_PGADDR_VER_2 +#endif + +/* ALLOC_UCONTEXT/ DEALLOC_UCONTEXT */ +#if KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE +#define ALLOC_UCONTEXT_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#else +#define ALLOC_UCONTEXT_VER_2 +#define DEALLOC_UCONTEXT_VER_2 +#endif + +/* ALLOC_PD , DEALLOC_PD */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +#define DEALLOC_PD_VER_4 +#define ALLOC_PD_VER_3 +#else +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define ALLOC_PD_VER_3 +#define DEALLOC_PD_VER_3 +#else +#if KERNEL_VERSION(5, 1, 0) <= LINUX_VERSION_CODE +#define ALLOC_PD_VER_2 +#define DEALLOC_PD_VER_2 +#else +#define ALLOC_PD_VER_1 +#define DEALLOC_PD_VER_1 +#endif +#endif +#endif + +#if KERNEL_VERSION(5, 16, 0) <= LINUX_VERSION_CODE +#define ALLOC_HW_STATS_STRUCT_V2 +#else +#define ALLOC_HW_STATS_STRUCT_V1 +#endif + +#if KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE +#define ALLOC_HW_STATS_V3 +#elif KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE +#define ALLOC_HW_STATS_V2 +#else +#define ALLOC_HW_STATS_V1 +#endif + +#if KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE +#define QUERY_GID_ROCE_V2 +#define MODIFY_PORT_V2 +#define QUERY_PKEY_V2 +#define ROCE_PORT_IMMUTABLE_V2 +#define GET_HW_STATS_V2 +#define GET_LINK_LAYER_V2 +#define IW_PORT_IMMUTABLE_V2 +#define QUERY_GID_V2 +#define QUERY_PORT_V2 +#else +#define QUERY_GID_ROCE_V1 +#define MODIFY_PORT_V1 +#define QUERY_PKEY_V1 +#define ROCE_PORT_IMMUTABLE_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IW_PORT_IMMUTABLE_V1 +#define QUERY_GID_V1 +#define QUERY_PORT_V1 +#endif + +#if KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE +#define GET_ETH_SPEED_AND_WIDTH_V1 +#else +#define GET_ETH_SPEED_AND_WIDTH_V2 +#endif + +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE +#define VMA_DATA +#endif + +#if KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE +/* https://lore.kernel.org/linux-rdma/20191217210406.GC17227@ziepe.ca/ + * This series adds mmap DB support and also extends rdma_user_mmap_io API + * with an extra param + */ +#define RDMA_MMAP_DB_SUPPORT +#endif + +/* ZXDH_ALLOC_MW */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +#define ZXDH_ALLOC_MW_VER_2 +#else +#define ZXDH_ALLOC_MW_VER_1 +#endif + +/* ZXDH_ALLOC_MR */ +#if (KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE && \ + KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE) +#define ZXDH_ALLOC_MR_VER_1 +#else +#define ZXDH_ALLOC_MR_VER_0 +#endif + +#if KERNEL_VERSION(4, 16, 0) > 
LINUX_VERSION_CODE +#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION +#endif + +/* ZXDH_DESTROY_CQ */ +#if KERNEL_VERSION(5, 9, 3) <= LINUX_VERSION_CODE +#define ZXDH_DESTROY_CQ_VER_4 +#elif KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE +#define ZXDH_DESTROY_CQ_VER_3 +#elif KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define ZXDH_DESTROY_CQ_VER_2 +#else +#define ZXDH_DESTROY_CQ_VER_1 +#endif /* LINUX_VERSION_CODE */ + +/* max_sge, ip_gid, gid_attr_network_type, deref_sgid_attr */ +#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#define IB_GET_CACHED_GID +#else +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#endif + +#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE +#define kc_typeq_ib_wr const +#else +#define kc_typeq_ib_wr +#endif + +/* ib_register_device */ +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#elif (KERNEL_VERSION(4, 20, 0) <= LINUX_VERSION_CODE) && \ + (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#elif (KERNEL_VERSION(5, 1, 0) <= LINUX_VERSION_CODE) && \ + (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#else +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#endif + +#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE +#define HAS_IB_SET_DEVICE_OP +#endif /* >= 5.0.0 */ + +#if KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) +#else +#define kc_set_ibdev_add_del_gid(ibdev) +#endif + +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) +#else +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* < 4.20.0 */ + +#if (KERNEL_VERSION(4, 17, 0) <= LINUX_VERSION_CODE && \ + KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE) +#define ZXDH_SET_DRIVER_ID +#endif + +#if (KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + 
to_ucontext(ibpd->uobject->context) +#define SET_BEST_PAGE_SZ_V1 +#else +#define SET_BEST_PAGE_SZ_V2 +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#endif + +#if (KERNEL_VERSION(5, 11, 0) > LINUX_VERSION_CODE) +#define UVERBS_CMD_MASK +#else +#define USE_QP_ATTRS_STANDARD +#endif + +#if KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#define ib_device_put(dev) +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) +#else +#define NETDEV_TO_IBDEV_SUPPORT +#define IB_DEALLOC_DRIVER_SUPPORT +#endif /* < 5.1.0 */ + +/******PORT_PHYS_STATE enums***************************************************/ +#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE) +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; +#endif +/*********************************************************/ + +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#else +#define kc_get_ucontext(udata) to_ucontext(context) +#endif + +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE +#define IN_IFADDR +#else +#define FOR_IFA +#endif + +#if KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE +struct ib_ucontext *zxdh_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata); +int zxdh_dealloc_ucontext(struct ib_ucontext *context); +struct ib_pd *zxdh_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +int zxdh_dealloc_pd(struct ib_pd *ibpd); +#else +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +void zxdh_dealloc_ucontext(struct ib_ucontext *context); +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#else +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +void zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +#else +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); +void zxdh_dealloc_pd(struct ib_pd *ibpd); +#endif +#endif +#endif + +/*****SETUP DMA_DEVICE***************************************************/ +#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE +#define set_ibdev_dma_device(ibdev, dev) ibdev.dma_device = dev +#else +#define set_ibdev_dma_device(ibdev, dev) +#endif /* < 4.11.0 */ +/*********************************************************/ + +#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE +#define rdma_ah_attr ib_ah_attr +#define ah_attr_to_dmac(attr) ((attr).dmac) +#else +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#endif /* < 4.12.0 */ + +#if KERNEL_VERSION(4, 13, 0) > LINUX_VERSION_CODE +#define wait_queue_entry __wait_queue +#endif /* < 4.13.0 */ + +#if KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE +#define ZXDH_ADD_DEL_GID +#endif + +#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE +#define SET_ROCE_CM_INFO_VER_1 +#define IB_IW_MANDATORY_AH_OP +#elif KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE 
+#define SET_ROCE_CM_INFO_VER_2 +#else +#define SET_ROCE_CM_INFO_VER_3 +#endif + +#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE +#define IB_UMEM_GET_V3 +#elif KERNEL_VERSION(5, 5, 0) <= LINUX_VERSION_CODE +#define IB_UMEM_GET_V2 +#else +#define IB_UMEM_GET_V1 +#endif + +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE +#define DEREG_MR_VER_2 +#else +#define DEREG_MR_VER_1 +#endif + +/* REREG MR */ +#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE +#define REREG_MR_VER_2 +#else +#define REREG_MR_VER_1 +#endif + +#endif /* LINUX_KCOMPAT_H */ diff --git a/src/rdma/src/main.c b/src/rdma/src/main.c new file mode 100644 index 0000000000000000000000000000000000000000..757c579fab0dc928d5eab514ccd98c56882f8e31 --- /dev/null +++ b/src/rdma/src/main.c @@ -0,0 +1,1577 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "main.h" +/* TODO: Adding this here is not ideal. Can we remove this warning now? */ +#include "icrdma_hw.h" +#include +#include "zrdma_kcompat.h" + +#define DRV_VER_MAJOR 1 +#define DRV_VER_MINOR 8 +#define DRV_VER_BUILD 46 +#define DRV_VER \ + __stringify(DRV_VER_MAJOR) "." __stringify( \ + DRV_VER_MINOR) "." __stringify(DRV_VER_BUILD) + +static u8 resource_profile; +module_param(resource_profile, byte, 0444); +MODULE_PARM_DESC( + resource_profile, + "Resource Profile: 0=PF only(default), 1=Weighted VF, 2=Even Distribution"); + +static u8 max_rdma_vfs = 1; +module_param(max_rdma_vfs, byte, 0444); +MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, default=1"); + +bool zxdh_upload_context; +module_param(zxdh_upload_context, bool, 0444); +MODULE_PARM_DESC(zxdh_upload_context, "Upload QP context, default=false"); + +static unsigned int limits_sel = 3; +module_param(limits_sel, uint, 0444); +MODULE_PARM_DESC(limits_sel, "Resource limits selector, Range: 0-7, default=3"); + +static unsigned int gen1_limits_sel = 1; +module_param(gen1_limits_sel, uint, 0444); +MODULE_PARM_DESC(gen1_limits_sel, + "x722 resource limits selector, Range: 0-5, default=1"); + +static unsigned int roce_ena = 1; +module_param(roce_ena, uint, 0444); +MODULE_PARM_DESC( + roce_ena, + "RoCE enable: 1=enable RoCEv2 on all ports (not supported on x722), 0=iWARP(default)"); + +static ulong roce_port_cfg; +module_param(roce_port_cfg, ulong, 0444); +MODULE_PARM_DESC( + roce_port_cfg, + "RoCEv2 per port enable: 1=port0 RoCEv2 all others iWARP, 2=port1 RoCEv2 etc. 
not supported on X722"); + +static bool en_rem_endpoint_trk; +module_param(en_rem_endpoint_trk, bool, 0444); +MODULE_PARM_DESC( + en_rem_endpoint_trk, + "Remote Endpoint Tracking: 1=enabled (not supported on x722), 0=disabled(default)"); + +static u8 fragment_count_limit = 6; +module_param(fragment_count_limit, byte, 0444); +MODULE_PARM_DESC( + fragment_count_limit, + "adjust maximum values for queue depth and inline data size, default=4, Range: 2-13"); + +/******************Advanced RoCEv2 congestion knobs***********************************************/ +static bool dcqcn_enable; +module_param(dcqcn_enable, bool, 0444); +MODULE_PARM_DESC( + dcqcn_enable, + "enables DCQCN algorithm for RoCEv2 on all ports, default=false "); + +static bool dcqcn_cc_cfg_valid; +module_param(dcqcn_cc_cfg_valid, bool, 0444); +MODULE_PARM_DESC(dcqcn_cc_cfg_valid, + "set DCQCN parameters to be valid, default=false"); + +static u8 dcqcn_min_dec_factor = 1; +module_param(dcqcn_min_dec_factor, byte, 0444); +MODULE_PARM_DESC( + dcqcn_min_dec_factor, + "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1"); + +static u8 dcqcn_min_rate_MBps; +module_param(dcqcn_min_rate_MBps, byte, 0444); +MODULE_PARM_DESC(dcqcn_min_rate_MBps, + "set minimum rate limit value, in MBits per second, default=0"); + +static u8 dcqcn_F; +module_param(dcqcn_F, byte, 0444); +MODULE_PARM_DESC( + dcqcn_F, + "set number of times to stay in each stage of bandwidth recovery, default=0"); + +static unsigned short dcqcn_T; +module_param(dcqcn_T, ushort, 0444); +MODULE_PARM_DESC( + dcqcn_T, + "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0"); + +static unsigned int dcqcn_B; +module_param(dcqcn_B, uint, 0444); +MODULE_PARM_DESC( + dcqcn_B, + "set number of MSS to add to the congestion window in additive increase mode, default=0"); + +static unsigned short dcqcn_rai_factor; +module_param(dcqcn_rai_factor, ushort, 0444); +MODULE_PARM_DESC( + dcqcn_rai_factor, + "set number of MSS to add to the congestion window in additive increase mode, default=0"); + +static unsigned short dcqcn_hai_factor; +module_param(dcqcn_hai_factor, ushort, 0444); +MODULE_PARM_DESC( + dcqcn_hai_factor, + "set number of MSS to add to the congestion window in hyperactive increase mode, default=0"); + +static unsigned int dcqcn_rreduce_mperiod; +module_param(dcqcn_rreduce_mperiod, uint, 0444); +MODULE_PARM_DESC( + dcqcn_rreduce_mperiod, + "set minimum time between 2 consecutive rate reductions for a single flow, default=0"); + +/****************************************************************************************************************/ + +MODULE_ALIAS("zrdma"); +MODULE_AUTHOR("ZTE Corporation"); +MODULE_DESCRIPTION("ZTE(R) Ethernet Protocol Driver for RDMA"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VER); + +struct zxdh_vport_t { + u32 tpid /* : 16; */; + u32 vhca /* : 10; */; + u32 uplink_port /* : 6; */; + + u32 rss_hash_factor /* : 8; */; + u32 hash_alg /* : 4; */; + u32 panel_id /* : 4; */; + + u32 lag_id /* : 3; */; + u32 pf_vqm_vfid /* : 11; */; + u32 rsv3 /* : 2; */; + + u32 mtu /* : 16; */; + + u32 port_base_qid /* : 12; */; + u32 hash_search_index /* : 3; */; + u32 rsv1 /* : 1; */; + + u32 tm_enable /* : 1; */; + u32 ingress_meter_enable /* : 1; */; + u32 egress_meter_enable /* : 1; */; + u32 ingress_meter_mode /* : 1; */; + u32 egress_meter_mode /* : 1; */; + u32 fd_enable /* : 1; */; + u32 vepa_enable /* : 1; */; + u32 spoof_check_enable /* : 1; */; + + u32 
inline_sec_offload /* : 1; */; + u32 ovs_enable /* : 1; */; + u32 lag_enable /* : 1; */; + u32 is_passthrough /* : 1; */; + u32 is_vf /* : 1; */; + u32 virtion_version /* : 2; */; + u32 virtio_enable /* : 1; */; + + u32 accelerator_offload_flag /* : 1; */; + u32 lro_offload /* : 1; */; + u32 ip_fragment_offload /* : 1; */; + u32 tcp_udp_checksum_offload /* : 1; */; + u32 ip_checksum_offload /* : 1; */; + u32 outer_ip_checksum_offload /* : 1; */; + u32 is_up /* : 1; */; + u32 allmulticast_enable /* : 1; */; + + u32 hw_bond_enable /* : 1; */; + u32 rdma_offload_enable /* : 1; */; + u32 vlan_filter_enable /* : 1; */; + u32 vlan_strip_offload /* : 1; */; + u32 qinq_vlan_strip_offload /* : 1; */; + u32 rss_enable /* : 1; */; + u32 mtu_offload_enable /* : 1; */; + u32 hit_flag /*: 1; */; +}; + +#ifdef offsetof +#undef offsetof +#define offsetof(TYPE, MEMBER) ((size_t)(&((TYPE *)0)->MEMBER)) +#endif +#define EGR_FLAG_VHCA ((u32)(offsetof(struct zxdh_vport_t, vhca) / sizeof(u32))) +#define EGR_FLAG_RDMA_OFFLOAD_EN_OFF \ + ((u32)(offsetof(struct zxdh_vport_t, rdma_offload_enable) / \ + sizeof(u32))) + +//todo: extern remove +#define EGR_RDMA_OFFLOAD_EN 0x1 +extern u32 dpp_vport_vhca_id_add(dpp_pf_info_t *pf_info, u32 vhca_id); +extern u32 dpp_egr_port_attr_set(dpp_pf_info_t *pf_info, u32 mode, u32 value); + +static int zxdh_netdevice_event(struct notifier_block *not_blk, + unsigned long event, void *arg) +{ + struct ib_device *ibdev; + struct zxdh_device *iwdev; + struct net_device *netdev = netdev_notifier_info_to_dev(arg); + + ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_ZXDH); + if (!ibdev) + return NOTIFY_DONE; + iwdev = to_iwdev(ibdev); + + switch (event) { + case NETDEV_UP: + if (refcount_read(&iwdev->trace_switch.t_switch)) + ibdev_notice(ibdev, "IB port active\n"); + break; + case NETDEV_DOWN: + if (refcount_read(&iwdev->trace_switch.t_switch)) + ibdev_notice(ibdev, "IB port down\n"); + break; + case NETDEV_CHANGEMTU: + pr_info("%s changed mtu to %d\n", netdev->name, netdev->mtu); + break; + default: + pr_info("ignoring netdev event = %ld for %s\n", event, + netdev->name); + break; + } + + ib_device_put(ibdev); + return NOTIFY_DONE; +} + +static struct notifier_block zxdh_netdevice_notifier = { + .notifier_call = zxdh_netdevice_event +}; + +static void zxdh_register_notifiers(void) +{ + register_netdevice_notifier(&zxdh_netdevice_notifier); +} + +static void zxdh_unregister_notifiers(void) +{ + unregister_netdevice_notifier(&zxdh_netdevice_notifier); +} + +#ifdef Z_DH_DEBUG +u64 dh_rf_pointer[128] = { 0 }; + +static ssize_t dh_debug_proc_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + u64 cmd_id = 0; + u64 query_type = 0; + u64 vhca_id = 0; + u64 query_id = 0; + struct zxdh_pci_f *rf = NULL; + struct zxdh_sc_dev *dev = NULL; + struct zxdh_qp *iwqp = NULL; + struct zxdh_cq *iwcq = NULL; + struct zxdh_dma_mem qpc_buf = {}; + char kbuf[64]; + int ret; + + memset(kbuf, 0, sizeof(kbuf)); + + if (copy_from_user(kbuf, buf, count)) + return -EFAULT; + + if (sscanf(kbuf, "0x%llx", &cmd_id) != 1) { + pr_err("invalid para\n"); + return -EFAULT; + } + + query_type = cmd_id >> 32; + vhca_id = cmd_id & 0xff000000; + query_id = cmd_id & 0xffffff; + + rf = (struct zxdh_pci_f *)dh_rf_pointer[vhca_id]; + dev = &rf->sc_dev; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(dev->hw->device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) + return -ENOMEM; + + switch (query_type) { + case 1: + if (query_id < 
(dev->base_qpn + 1) || + query_id > (dev->base_qpn + rf->max_qp - 1)) + break; + if (rf->qp_table[query_id - dev->base_qpn]) { + iwqp = rf->qp_table[query_id - dev->base_qpn]; + ret = zxdh_query_qpc(&iwqp->sc_qp, &qpc_buf); + if (!ret) + goto free_rsrc; + +#ifdef Z_DH_DEBUG + mutex_lock(&dev->ws_mutex); + pr_info("***QP %d HW QPC info print start***\n", + iwqp->sc_qp.qp_ctx_num); + zxdh_print_hw_qpc(qpc_buf.va); + pr_info("****QP %d HW QPC info print end****\n", + iwqp->sc_qp.qp_ctx_num); + mutex_unlock(&dev->ws_mutex); +#endif + } + break; + case 2: + if (query_id < (dev->base_cqn) || + query_id > (dev->base_cqn + rf->max_cq - 1)) + break; + if (rf->cq_table[query_id - dev->base_cqn]) { + iwcq = rf->cq_table[query_id - dev->base_cqn]; + zxdh_query_cqc(&iwcq->sc_cq); + } + break; + default: + break; + } + + return count; +free_rsrc: + dma_free_coherent(dev->hw->device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + return ret; +} + +static const struct proc_ops dh_debug_proc_fops = { + .proc_write = dh_debug_proc_write, +}; +#endif + +#ifdef Z_DH_DEBUG +static char debug_msg[256] = ""; +#ifdef DCQCN_INFO +static u32 mp_debug_info[41] = { 0 }; +#endif +static u32 rtt_debug_info[37] = { 0 }; +static void read_rtt_debug_info(u32 data, u32 data_id) +{ + /*255:0*/ + u32 delta_data = 0; + u32 num_QP_perQP = 0; + u32 num_vf_acQPS = 0; + u32 pkt_cnt = 0; + u32 num_QP_perQP_scal = 0; + u32 num_vf_acQPS_scal = 0; + u32 QP_type = 0; + u32 RTT_num = 0; + u32 Flag_newRTT = 0; + u32 CNP_num = 0; + u32 tx_token = 0; + u32 rx_token = 0; + u32 T1 = 0; + u32 T2 = 0; + u32 T4 = 0; + u32 T5 = 0; + + /*383:256*/ + u32 burst_congestion = 0; + u32 CNT = 0; + u32 token_period = 0; + u32 rtt_period = 0; + u32 token_loaned_QP_tx = 0; + u32 token_step_tx = 0; + u32 deactivation_flag = 0; + u32 updata_flag = 0; + u32 token_step_rx = 0; + u32 token_loaned_QP_rx = 0; + u32 read_flg = 0; + /*511:384*/ + u32 rate_old = 0; + u32 prev_rtt = 0; + u32 rtt_diff_old = 0; + u32 ai_count = 0; + u32 state_congetion = 0; + u32 time_ac = 0; + + if (data_id == 0) { + delta_data = data; + rtt_debug_info[0] = delta_data; + } + if (data_id == 1) { + num_QP_perQP = data & 0xffff; + num_vf_acQPS = data >> 16 & 0xffff; + rtt_debug_info[1] = num_QP_perQP; + rtt_debug_info[2] = num_vf_acQPS; + } + if (data_id == 2) { + pkt_cnt = data & 0x7ff; + num_QP_perQP_scal = data >> 11 & 0x1f; + num_vf_acQPS_scal = data >> 16 & 0xf; + QP_type = data >> 20 & 0x1; + RTT_num = data >> 21 & 0x3; + Flag_newRTT = data >> 23 & 0x1; + CNP_num = data >> 24 & 0xff; + + rtt_debug_info[3] = pkt_cnt; + rtt_debug_info[4] = num_QP_perQP_scal; + rtt_debug_info[5] = num_vf_acQPS_scal; + rtt_debug_info[6] = QP_type; + rtt_debug_info[7] = RTT_num; + rtt_debug_info[8] = Flag_newRTT; + rtt_debug_info[9] = CNP_num; + } + if (data_id == 3) { + tx_token = data & 0xffff; + rx_token = data >> 16 & 0xffff; + rtt_debug_info[10] = tx_token; + rtt_debug_info[11] = rx_token; + } + + if (data_id == 4) { + T1 = data & 0xffff; + T2 = data >> 16 & 0xffff; + rtt_debug_info[12] = T1; + rtt_debug_info[13] = T2; + } + if (data_id == 5) { + T4 = data & 0xffff; + T5 = data >> 16 & 0xffff; + rtt_debug_info[14] = T4; + rtt_debug_info[15] = T5; + } + if (data_id == 6) { + T1 = data & 0xffff; + T2 = data >> 16 & 0xffff; + rtt_debug_info[16] = T1; + rtt_debug_info[17] = T2; + } + if (data_id == 7) { + T4 = data & 0xffff; + T5 = data >> 16 & 0xffff; + rtt_debug_info[18] = T4; + rtt_debug_info[19] = T5; + } + + if (data_id == 8) { + burst_congestion = data & 0xf; + CNT = data >> 4 & 0xfffffff; + 
rtt_debug_info[20] = burst_congestion; + rtt_debug_info[21] = CNT; + } + + if (data_id == 9) { + token_period = data & 0xffff; + rtt_period = data >> 16 & 0x1f; + rtt_debug_info[22] = token_period; + rtt_debug_info[23] = rtt_period; + } + if (data_id == 10) { + token_loaned_QP_tx = data & 0xfff; + token_step_tx = data >> 12 & 0x3ffff; + deactivation_flag = data >> 30 & 0x1; + updata_flag = data >> 31 & 0x1; + rtt_debug_info[24] = token_loaned_QP_tx; + rtt_debug_info[25] = token_step_tx; + rtt_debug_info[26] = deactivation_flag; + rtt_debug_info[27] = updata_flag; + } + if (data_id == 11) { + token_step_rx = data & 0x3ffff; + token_loaned_QP_rx = data >> 18 & 0xfff; + read_flg = data >> 30 & 0x1; + + rtt_debug_info[28] = token_step_rx; + rtt_debug_info[29] = token_loaned_QP_rx; + rtt_debug_info[30] = read_flg; + } + if (data_id == 12) { + rate_old = data; + rtt_debug_info[31] = rate_old; + } + if (data_id == 13) { + prev_rtt = data & 0xffff; + rtt_diff_old = data >> 16 & 0xffff; + rtt_debug_info[32] = prev_rtt; + rtt_debug_info[33] = rtt_diff_old; + } + + if (data_id == 14) { + ai_count = data & 0xffff; + rtt_debug_info[34] = ai_count; + } + + if (data_id == 15) { + state_congetion = data >> 8 & 0x7; + time_ac = data >> 16 & 0xffff; + + rtt_debug_info[35] = state_congetion; + rtt_debug_info[36] = time_ac; + } +} + +#ifdef DCQCN_INFO +static void read_mp_debug_info(u32 data, u32 data_id) +{ + /*127:0*/ + u32 delta_data = 0; + u32 num_QP_perQP = 0; + u32 num_vf_acQPS = 0; + u32 pkt_cnt = 0; + u32 num_QP_perQP_scal = 0; + u32 num_vf_acQPS_scal = 0; + u32 QP_type = 0; + u32 RTT_num = 0; + u32 Flag_newRTT = 0; + u32 CNP_num = 0; + u32 tx_token = 0; + u32 rx_token = 0; + /*255:128*/ + u32 burst_congestion = 0; + u32 CNT = 0; + u32 token_period = 0; + u32 rtt_period = 0; + u32 rsv1 = 0; + u32 token_loaned_QP_tx = 0; + u32 token_step_tx = 0; + u32 deactivation_flag = 0; + u32 updata_flag = 0; + u32 token_step_rx = 0; + u32 token_loaned_QP_rx = 0; + u32 read_flg = 0; + u32 rsv2 = 0; + /*511:256*/ + u32 Rc = 0; + u32 Rt = 0; + u32 alpha = 0; + u32 time_alpha = 0; + u32 BC = 0; + u32 T = 0; + u32 couter = 0; + u32 RatioStep = 0; + u32 time_T = 0; + u32 i = 0; + u32 CNP_resi = 0; + u32 state_congetion = 0; + u32 rsv3 = 0; + u32 S = 0; + u32 rsv4 = 0; + u32 time_ac = 0; + + if (data_id == 0) { + delta_data = data; + mp_debug_info[0] = delta_data; + } + if (data_id == 1) { + num_QP_perQP = data & 0xffff; + num_vf_acQPS = data >> 16 & 0xffff; + mp_debug_info[1] = num_QP_perQP; + mp_debug_info[2] = num_vf_acQPS; + } + if (data_id == 2) { + pkt_cnt = data & 0x7ff; + num_QP_perQP_scal = data >> 11 & 0x1f; + num_vf_acQPS_scal = data >> 16 & 0xf; + QP_type = data >> 20 & 0x1; + RTT_num = data >> 21 & 0x3; + Flag_newRTT = data >> 23 & 0x1; + CNP_num = data >> 24 & 0xff; + + mp_debug_info[3] = pkt_cnt; + mp_debug_info[4] = num_QP_perQP_scal; + mp_debug_info[5] = num_vf_acQPS_scal; + mp_debug_info[6] = QP_type; + mp_debug_info[7] = RTT_num; + mp_debug_info[8] = Flag_newRTT; + mp_debug_info[9] = CNP_num; + } + if (data_id == 3) { + tx_token = data & 0xffff; + rx_token = data >> 16 & 0xffff; + mp_debug_info[10] = tx_token; + mp_debug_info[11] = rx_token; + } + if (data_id == 4) { + burst_congestion = data & 0xf; + CNT = data >> 4 & 0xfffffff; + mp_debug_info[12] = burst_congestion; + mp_debug_info[13] = CNT; + } + + if (data_id == 5) { + token_period = data & 0xffff; + rtt_period = data >> 16 & 0x1f; + rsv1 = data >> 21 & 0x7ff; + mp_debug_info[14] = token_period; + mp_debug_info[15] = rtt_period; + 
mp_debug_info[16] = rsv1; + } + if (data_id == 6) { + token_loaned_QP_tx = data & 0xfff; + token_step_tx = data >> 12 & 0x3ffff; + deactivation_flag = data >> 30 & 0x1; + updata_flag = data >> 31 & 0x1; + mp_debug_info[17] = token_loaned_QP_tx; + mp_debug_info[18] = token_step_tx; + mp_debug_info[19] = deactivation_flag; + mp_debug_info[20] = updata_flag; + } + if (data_id == 7) { + token_step_rx = data & 0x3ffff; + token_loaned_QP_rx = data >> 18 & 0xfff; + read_flg = data >> 30 & 0x1; + rsv2 = data >> 31 & 0x1; + mp_debug_info[21] = token_step_rx; + mp_debug_info[22] = token_loaned_QP_rx; + mp_debug_info[23] = read_flg; + mp_debug_info[24] = rsv2; + } + + if (data_id == 8) { + Rc = data; + mp_debug_info[25] = Rc; + } + if (data_id == 9) { + Rt = data; + mp_debug_info[26] = Rt; + } + if (data_id == 10) { + alpha = data & 0xffff; + time_alpha = data >> 16 & 0xffff; + mp_debug_info[27] = alpha; + mp_debug_info[28] = time_alpha; + } + if (data_id == 11) { + BC = data & 0xffff; + T = data >> 16 & 0xffff; + mp_debug_info[29] = BC; + mp_debug_info[30] = T; + } + if (data_id == 12) { + couter = data; + mp_debug_info[31] = couter; + } + if (data_id == 13) { + RatioStep = data; + mp_debug_info[32] = RatioStep; + } + if (data_id == 14) { + time_T = data & 0xffff; + i = data >> 16 & 0xffff; + mp_debug_info[33] = time_T; + mp_debug_info[34] = i; + } + if (data_id == 15) { + CNP_resi = data & 0xff; + state_congetion = data >> 8 & 0x7; + rsv3 = data >> 11 & 0x1; + S = data >> 12 & 0x3; + rsv4 = data >> 14 & 0x3; + time_ac = data >> 16 & 0xffff; + mp_debug_info[35] = CNP_resi; + mp_debug_info[36] = state_congetion; + mp_debug_info[37] = rsv3; + mp_debug_info[38] = S; + mp_debug_info[39] = rsv4; + mp_debug_info[40] = time_ac; + } +} +#endif + +static u32 mp_depth = 7; +static u64 mp_base_addr = 0x3C0000000; +static void print_kernel_info(struct seq_file *m) +{ + u32 i = 0, k = 0; + u32 info_id = 0; + u32 block_size = 0x200000; + u32 ddrData = 0; + u64 ddr_base_addr = 0; + u8 *base_addr; + int status; + u64 offset = 0; + u64 per_offset = 0; + u32 idx = 0; + u32 line = 0; + struct zxdh_pci_f *rf = NULL; + struct zxdh_dma_mem ddr1 = {}; + struct zxdh_src_copy_dest src_dest = {}; + + for (i = 0; i < 128; i++) { + if (dh_rf_pointer[i] != 0) { + rf = (struct zxdh_pci_f *)dh_rf_pointer[i]; + pr_info("this is mp_debug rf %u\n", i); + break; + } + } + if (!rf) + return; + + ddr1.size = block_size; + ddr1.va = dma_alloc_coherent(rf->hw.device, ddr1.size, &ddr1.pa, + GFP_KERNEL); + if (!ddr1.va) { + pr_info("no memory\n"); + return; + } + + for (i = 0; i < 4; i++) { //ddr index + ddr_base_addr = mp_base_addr + i * 0x3200000; + per_offset = 0; + for (k = 0; k < mp_depth; k++) { + src_dest.src = ddr_base_addr + k * block_size; + src_dest.dest = ddr1.pa; + src_dest.len = ddr1.size; + status = + zxdh_dpuddr_to_host_cmd(&rf->sc_dev, &src_dest); + if (status != 0) { + pr_info("status:%d\n", status); + sprintf(debug_msg, "status:%d\n", status); + seq_puts(m, debug_msg); + return; + } + pr_info("zxdh_dpuddr_to_host_cmd status:%d\n", status); + + base_addr = (u8 *)(uintptr_t)ddr1.va; + idx = 0; + line = 0; + for (offset = 0; offset < ddr1.size; offset += 64) { + sprintf(debug_msg, "DDR[%u] group:%llu\n", i, + per_offset / 64); + seq_puts(m, debug_msg); + per_offset += 64; + for (line = 0; line < 4; line++) { + sprintf(debug_msg, "0x%llx: ", + src_dest.src + offset + + line * 16); + seq_puts(m, debug_msg); + for (idx = 0; idx < 4; idx++) { + info_id = line * 4 + idx; + sprintf(debug_msg, "0x%08x ", + *((u32 *)(base_addr + + 
offset + + 16 * line + + idx * 4))); + seq_puts(m, debug_msg); + ddrData = *((u32 *)(base_addr + + offset + + 16 * line + + idx * 4)); +#ifdef DCQCN_INFO + read_mp_debug_info(ddrData, + info_id); +#else + read_rtt_debug_info(ddrData, + info_id); +#endif + } + seq_puts(m, "\n"); + } + +#ifdef DCQCN_INFO + sprintf(debug_msg, + "delta_data=0x%04x, num_QP_perQP=0x%04x, num_vf_acQPS=0x%04x, pkt_cnt=0x%04x\n", + mp_debug_info[0], mp_debug_info[1], + mp_debug_info[2], mp_debug_info[3]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "num_QP_perQP_scal=0x%04x, num_vf_acQPS_scal=0x%04x, QP_type=0x%04x, RTT_num=0x%04x\n", + mp_debug_info[4], mp_debug_info[5], + mp_debug_info[6], mp_debug_info[7]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "Flag_newRTT=0x%04x, CNP_num=0x%04x, tx_token=0x%04x, rx_token=0x%04x\n", + mp_debug_info[8], mp_debug_info[9], + mp_debug_info[10], mp_debug_info[11]); + seq_puts(m, debug_msg); + + sprintf(debug_msg, + "burst_congestion=0x%04x, CNT=0x%04x, token_period=0x%04x, rtt_period=0x%04x\n", + mp_debug_info[12], mp_debug_info[13], + mp_debug_info[14], mp_debug_info[15]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "rsv=0x%04x, token_loaned_QP_tx=0x%04x, token_step_tx=0x%04x, deactivation_flag=0x%04x\n", + mp_debug_info[16], mp_debug_info[17], + mp_debug_info[18], mp_debug_info[19]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "updata_flag=0x%04x, token_step_rx=0x%04x, token_loaned_QP_rx=0x%04x, read_flg=0x%04x, rsv=0x%04x\n", + mp_debug_info[20], mp_debug_info[21], + mp_debug_info[22], mp_debug_info[23], + mp_debug_info[24]); + seq_puts(m, debug_msg); + + sprintf(debug_msg, + "Rc=0x%04x, Rt=0x%04x, alpha=0x%04x, time_alpha=0x%04x\n", + mp_debug_info[25], mp_debug_info[26], + mp_debug_info[27], mp_debug_info[28]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "BC=0x%04x, T=0x%04x, couter=0x%04x, RatioStep=0x%04x\n", + mp_debug_info[29], mp_debug_info[30], + mp_debug_info[31], mp_debug_info[32]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "time_T=0x%04x, i=0x%04x, CNP_resi=0x%04x, state_congetion=0x%04x\n", + mp_debug_info[33], mp_debug_info[34], + mp_debug_info[35], mp_debug_info[36]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "rsv=0x%04x, S=0x%04x, rsv=0x%04x, time_ac=0x%04x\n", + mp_debug_info[37], mp_debug_info[38], + mp_debug_info[39], mp_debug_info[40]); + seq_puts(m, debug_msg); +#else + sprintf(debug_msg, + "delta_data=0x%04x, num_QP_perQP=0x%04x, num_vf_acQPS=0x%04x, pkt_cnt=0x%04x\n", + rtt_debug_info[0], rtt_debug_info[1], + rtt_debug_info[2], rtt_debug_info[3]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "num_QP_perQP_scal=0x%04x, num_vf_acQPS_scal=0x%04x, QP_type=0x%04x, RTT_num=0x%04x\n", + rtt_debug_info[4], rtt_debug_info[5], + rtt_debug_info[6], rtt_debug_info[7]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "Flag_newRTT=0x%04x, CNP_num=0x%04x, tx_token=0x%04x, rx_token=0x%04x\n", + rtt_debug_info[8], rtt_debug_info[9], + rtt_debug_info[10], rtt_debug_info[11]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "T1-1=0x%04x, T2-1=0x%04x, T4-1=0x%04x, T5-1=0x%04x\n", + rtt_debug_info[12], rtt_debug_info[13], + rtt_debug_info[14], rtt_debug_info[15]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "T1-2=0x%04x, T2-2=0x%04x, T4-2=0x%04x, T5-2=0x%04x\n", + rtt_debug_info[16], rtt_debug_info[17], + rtt_debug_info[18], rtt_debug_info[19]); + seq_puts(m, debug_msg); + + sprintf(debug_msg, + "burst_congestion=0x%04x, CNT=0x%04x, token_period=0x%04x, rtt_period=0x%04x\n", + rtt_debug_info[20], rtt_debug_info[21], + 
rtt_debug_info[22], rtt_debug_info[23]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "token_loaned_QP_tx=0x%04x,token_step_tx=0x%04x, deactivation_flag=0x%04x, updata_flag=0x%04x\n", + rtt_debug_info[24], rtt_debug_info[25], + rtt_debug_info[26], rtt_debug_info[27]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "token_step_rx=0x%04x, token_loaned_QP_rx=0x%04x, read_flg=0x%04x\n", + rtt_debug_info[28], rtt_debug_info[29], + rtt_debug_info[30]); + seq_puts(m, debug_msg); + + sprintf(debug_msg, + "rate_old=0x%04x, prev_rtt=0x%04x, rtt_diff_old=0x%04x, ai_count=0x%04x\n", + rtt_debug_info[31], rtt_debug_info[32], + rtt_debug_info[33], rtt_debug_info[34]); + seq_puts(m, debug_msg); + sprintf(debug_msg, + "state_congetion=0x%04x, time_ac=0x%04x,\n", + rtt_debug_info[35], rtt_debug_info[36]); + seq_puts(m, debug_msg); + +#endif + } + seq_puts(m, "\n"); + } + } + if (ddr1.va) { + dma_free_coherent(rf->hw.device, ddr1.size, ddr1.va, ddr1.pa); + ddr1.va = NULL; + } +} + +static int proc_test_show(struct seq_file *m, void *v) +{ + print_kernel_info(m); + return 0; +} + +static int proc_test_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_test_show, NULL); +} + +static char mpbuf[100]; +static ssize_t proc_mp_debug_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + memset(mpbuf, 0, sizeof(mpbuf)); + if (count >= sizeof(mpbuf)) + return -EINVAL; + if (copy_from_user(mpbuf, buf, count)) + return -EFAULT; + if (sscanf(mpbuf, "%u 0x%llx", &mp_depth, &mp_base_addr) != 2) { + pr_err("echo: write error: invalid para\n"); + return -EFAULT; + } + if (mp_depth < 1 || mp_depth > 16) { + pr_err("echo: mp_depth write error: 1 <= mp_depth <= 16.\n"); + return -EFAULT; + } + if (mp_base_addr < 0x3C0000000 || mp_base_addr >= 0x3C3200000) { + pr_err("echo: mp_base_addr write error: 0x3C0000000 <= mp_base_addr < 0x3C3200000.\n"); + return -EFAULT; + } + pr_info("%s mp_depth:%u, mp_base_addr:0x%llx\n", __func__, mp_depth, + mp_base_addr); + return count; +} + +static const struct proc_ops mp_debug_proc_fops = { + .proc_open = proc_test_open, + .proc_read = seq_read, + .proc_write = proc_mp_debug_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +#endif + +#ifdef Z_DH_DEBUG +static u32 depth = 2; +static char kebuf[32]; +static char cap_data_debug_msg[256] = ""; +static u32 get_last_num(unsigned int cur_line, unsigned int last_line, + unsigned int count, unsigned int numperline) +{ + if (cur_line == last_line) + return (count - 1) % numperline; + else + return numperline - 1; +} +static void print_kernel_cap_data_info(struct seq_file *m, u32 index) +{ + u32 i = 0, j = 0, len = 0x200000; + u32 numPerLine = 16; + u32 per_line_index = 0; + u32 ddrLen = (2 * 1024 * 1024) / sizeof(u32); + u8 *base_addr; + u32 last_id = 0; + int status; + u32 base_ddr; + u32 line_id = 1; + u32 max_num = 0; + u32 *read_data; + + struct zxdh_pci_f *rf = NULL; + struct zxdh_dma_mem ddr1 = {}; + struct zxdh_src_copy_dest src_dest = {}; + + for (i = 0; i < 128; i++) { + if (dh_rf_pointer[i] != 0) { + rf = (struct zxdh_pci_f *)dh_rf_pointer[i]; + pr_info("this is mp_debug rf %u\n", i); + break; + } + } + if (!rf) + return; + + base_ddr = 0xBF000000; + + if (index == 1) { + base_ddr = base_ddr + depth * 1024 * 1024; + } else if (index == 2) { + base_ddr = base_ddr + depth * 1024 * 1024 * 2; + } else if (index == 3) { + base_ddr = base_ddr + depth * 1024 * 1024 * 3; + } + + pr_info("base_ddr = 0x%08x, depth = %u\n", base_ddr, depth); + + for (i = 0; i < 
depth / 2; i++) { + ddr1.size = 0x200000; + ddr1.va = dma_alloc_coherent(rf->hw.device, ddr1.size, &ddr1.pa, + GFP_KERNEL); + + if (!ddr1.va) { + pr_info("no memory\n"); + return; + } + + src_dest.src = base_ddr + i * 0x200000; + src_dest.dest = ddr1.pa; + src_dest.len = len; + status = zxdh_dpuddr_to_host_cmd(&rf->sc_dev, &src_dest); + + pr_info("zxdh_dpuddr_to_host_cmd status:%d\n", status); + + base_addr = (u8 *)(uintptr_t)ddr1.va; + + last_id = ((ddrLen - 1) / numPerLine) + 1; + max_num = get_last_num(line_id, last_id, ddrLen, numPerLine); + + seq_puts(m, "0x00000000: "); + for (j = 1; j <= ddrLen; j++) { + per_line_index = (j - 1) % numPerLine; + read_data = (u32 *)base_addr + + (max_num - per_line_index) + + (line_id - 1) * 16; + sprintf(cap_data_debug_msg, "[%u:%u]%08x ", + (max_num - per_line_index + 1) * 32 - 1, + (max_num - per_line_index) * 32, *read_data); + seq_puts(m, cap_data_debug_msg); + if (j % numPerLine == 0) { + line_id++; + max_num = get_last_num(line_id, last_id, ddrLen, + numPerLine); + seq_puts(m, "\n"); + if (j != ddrLen) { + sprintf(cap_data_debug_msg, "0x%08lx: ", + (j / numPerLine) * numPerLine * + sizeof(u32)); + seq_puts(m, cap_data_debug_msg); + } + } + } + if (ddr1.va) { + dma_free_coherent(rf->hw.device, ddr1.size, ddr1.va, + ddr1.pa); + ddr1.va = NULL; + pr_info("ddr1.pa has released!\n"); + } + } +} + +static int proc_cap_rx0_show(struct seq_file *m, void *v) +{ + print_kernel_cap_data_info(m, 0); + return 0; +} +static int proc_cap_rx1_show(struct seq_file *m, void *v) +{ + print_kernel_cap_data_info(m, 1); + return 0; +} +static int proc_cap_tx0_show(struct seq_file *m, void *v) +{ + print_kernel_cap_data_info(m, 2); + return 0; +} +static int proc_cap_tx1_show(struct seq_file *m, void *v) +{ + print_kernel_cap_data_info(m, 3); + return 0; +} + +static int proc_cap_rx1_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cap_rx1_show, NULL); +} +static int proc_cap_rx0_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cap_rx0_show, NULL); +} +static int proc_cap_tx1_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cap_tx1_show, NULL); +} +static int proc_cap_tx0_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cap_tx0_show, NULL); +} + +static ssize_t proc_cap_rx0_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + memset(kebuf, 0, sizeof(kebuf)); + if (count >= sizeof(kebuf)) + return -EINVAL; + if (copy_from_user(kebuf, buf, count)) + return -EFAULT; + if (sscanf(kebuf, "%u", &depth) != 1) { + pr_err("echo: write error: invalid para\n"); + return -EFAULT; + } + if (depth < 2 || depth > 128) { + pr_err("echo: write error: invalid para\n"); + return -EFAULT; + } + pr_info("%s depth:%u\n", __func__, depth); + return count; +} + +static const struct proc_ops cap_rx0_proc_fops = { + .proc_open = proc_cap_rx0_open, + .proc_read = seq_read, + .proc_write = proc_cap_rx0_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +static const struct proc_ops cap_rx1_proc_fops = { + .proc_open = proc_cap_rx1_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +static const struct proc_ops cap_tx0_proc_fops = { + .proc_open = proc_cap_tx0_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +static const struct proc_ops cap_tx1_proc_fops = { + .proc_open = proc_cap_tx1_open, + .proc_read = seq_read, + .proc_lseek = 
seq_lseek, + .proc_release = single_release, +}; +#endif + +/** + * set_protocol_used - set protocol_used against HW generation and roce_ena flag + * @rf: RDMA PCI function + * @roce_ena: RoCE enabled bit flag + */ +static inline void set_protocol_used(struct zxdh_pci_f *rf, uint roce_ena) +{ + switch (rf->rdma_ver) { + case ZXDH_GEN_2: + rf->protocol_used = + roce_ena & BIT(PCI_FUNC(rf->pcidev->devfn)) ? + ZXDH_ROCE_PROTOCOL_ONLY : + ZXDH_IWARP_PROTOCOL_ONLY; + + break; + case ZXDH_GEN_1: + rf->protocol_used = ZXDH_IWARP_PROTOCOL_ONLY; + break; + } +} + +/** + * zxdh_set_rf_user_cfg_params - Setup RF configurations from module parameters + * @rf: RDMA PCI function + */ +void zxdh_set_rf_user_cfg_params(struct zxdh_pci_f *rf) +{ + /*TODO: Fixup range checks on all integer module params */ + if (limits_sel > 7) + limits_sel = 7; + + if (gen1_limits_sel > 5) + gen1_limits_sel = 5; + + rf->limits_sel = (rf->rdma_ver == ZXDH_GEN_1) ? gen1_limits_sel : + limits_sel; + if (roce_ena) + pr_warn_once( + "zrdma: Because roce_ena is ENABLED, roce_port_cfg will be ignored."); + set_protocol_used(rf, roce_ena ? 0xFFFFFFFF : roce_port_cfg); + rf->rsrc_profile = + (resource_profile < ZXDH_HMC_PROFILE_EQUAL) ? + (u8)resource_profile + ZXDH_HMC_PROFILE_DEFAULT : + ZXDH_HMC_PROFILE_DEFAULT; + if (max_rdma_vfs > ZXDH_MAX_PE_ENA_VF_COUNT) { + pr_warn_once( + "zrdma: Requested VF count [%d] is above max supported. Setting to %d.", + max_rdma_vfs, ZXDH_MAX_PE_ENA_VF_COUNT); + max_rdma_vfs = ZXDH_MAX_PE_ENA_VF_COUNT; + } + //rf->max_rdma_vfs = (rf->rsrc_profile != ZXDH_HMC_PROFILE_DEFAULT)? + //max_rdma_vfs : 0; + rf->en_rem_endpoint_trk = en_rem_endpoint_trk; + rf->fragcnt_limit = fragment_count_limit; + if (rf->fragcnt_limit > 13 || rf->fragcnt_limit < 2) { + rf->fragcnt_limit = 6; + pr_warn_once( + "zrdma: Requested [%d] fragment count limit out of range (2-13), setting to default=6.", + fragment_count_limit); + } + rf->dcqcn_ena = dcqcn_enable; + + /* Skip over all checking if no dcqcn */ + if (!dcqcn_enable) + return; + + rf->dcqcn_params.cc_cfg_valid = dcqcn_cc_cfg_valid; + rf->dcqcn_params.dcqcn_b = dcqcn_B; + +#define DCQCN_B_MAX GENMASK(25, 0) + if (rf->dcqcn_params.dcqcn_b > DCQCN_B_MAX) { + rf->dcqcn_params.dcqcn_b = DCQCN_B_MAX; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_b value too high, setting to %d.", + dcqcn_B, rf->dcqcn_params.dcqcn_b); + } + +#define DCQCN_F_MAX 8 + rf->dcqcn_params.dcqcn_f = dcqcn_F; + if (dcqcn_F > DCQCN_F_MAX) { + rf->dcqcn_params.dcqcn_f = DCQCN_F_MAX; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_f value too high, setting to %d.", + dcqcn_F, DCQCN_F_MAX); + } + + rf->dcqcn_params.dcqcn_t = dcqcn_T; + rf->dcqcn_params.hai_factor = dcqcn_hai_factor; + rf->dcqcn_params.min_dec_factor = dcqcn_min_dec_factor; + if (dcqcn_min_dec_factor < 1 || dcqcn_min_dec_factor > 100) { + rf->dcqcn_params.dcqcn_b = 1; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_min_dec_factor out of range (1-100) , setting to default=1", + dcqcn_min_dec_factor); + } + + rf->dcqcn_params.min_rate = dcqcn_min_rate_MBps; + rf->dcqcn_params.rai_factor = dcqcn_rai_factor; + rf->dcqcn_params.rreduce_mperiod = dcqcn_rreduce_mperiod; +} + +static void zxdh_iidc_event_handler(struct iidc_core_dev_info *cdev_info, + struct iidc_event *event) +{ +} + +/** + * zxdh_request_reset - Request a reset + * @rf: RDMA PCI function + */ +static void zxdh_request_reset(struct zxdh_pci_f *rf) +{ + struct iidc_core_dev_info *cdev_info = rf->cdev; + + dev_warn(idev_to_dev(&rf->sc_dev), "Requesting a reset\n"); + 
rf->sc_dev.vchnl_up = false; + cdev_info->ops->request_reset(rf->cdev, IIDC_PFR); +} + +static void zxdh_remove(struct auxiliary_device *aux_dev) +{ + struct iidc_auxiliary_dev *iidc_adev = + container_of(aux_dev, struct iidc_auxiliary_dev, adev); + struct iidc_core_dev_info *cdev_info = iidc_adev->cdev_info; + struct zxdh_device *iwdev = dev_get_drvdata(&aux_dev->dev); + + if (cdev_info->vport_id == 0x300) + return; + //if ( 0 == iwdev->rf->ftype) // PF + //zxdh_free_all_vf_rsrc(&iwdev->rf->sc_dev); + zrdma_cleanup_debugfs_entry(iwdev->rf); + zxdh_ib_unregister_device(iwdev); + +#ifndef IB_DEALLOC_DRIVER_SUPPORT + /* In newer kernels core issues callback zxdh_ib_dealloc_device to cleanup on ib unregister + * Older kernels require cleanup here + */ + + zxdh_rt_deinit_hw(iwdev); + zxdh_ctrl_deinit_hw(iwdev->rf); + zxdh_del_handler(iwdev->hdl); +#ifdef MSIX_DEBUG + pci_free_irq_vectors(cdev_info->pdev); +#endif + if (iwdev->rf->iw_msixtbl) { + kfree(iwdev->rf->iw_msixtbl); + iwdev->rf->iw_msixtbl = NULL; + } + kfree(iwdev->hdl); + kfree(iwdev->rf); + + ib_dealloc_device(&iwdev->ibdev); + +#endif /* IB_DEALLOC_DRIVER_SUPPORT */ + pr_info("INIT: Gen2 PF[%d] device remove success\n", + PCI_FUNC(cdev_info->pdev->devfn)); + return; +} + +/** + * zxdh_shutdown - trigger when reboot + * @aux_dev: auxiliary device ptr + */ +static void zxdh_shutdown(struct auxiliary_device *aux_dev) +{ + zxdh_remove(aux_dev); +} + +#ifdef MSIX_DEBUG + +static int ft_debug_msix_interrupt(struct pci_dev *pdev, + struct msix_entry *msix, u32 msix_num) +{ + struct msix_entry *temp_msix; + int ret; + int i; + + temp_msix = msix; + if (pci_enable_device(pdev)) { + pr_info("%s enable pcie msix failed!\n", __func__); + return -1; + } + ret = pci_alloc_irq_vectors_affinity(pdev, msix_num, msix_num, + PCI_IRQ_MSIX, NULL); + if (ret < 0) { + pr_info("%s alloc irq vectors failed!\n", __func__); + return -1; + } + pr_info("%s alloc irq vectors ret:%d\n", __func__, ret); + + for (i = 0; i < msix_num; i++) { + temp_msix->vector = pci_irq_vector(pdev, i); + temp_msix->entry = i; + pr_info("%s vector:%d entry:%d\n", __func__, temp_msix->vector, + temp_msix->entry); + temp_msix++; + } + + return 0; +} +#endif +static void zxdh_cfg_dpp(struct zxdh_device *iwdev, + struct iidc_core_dev_info *cdev_info) +{ + dpp_pf_info_t pf_info = { 0 }; + + pf_info.slot = cdev_info->pdev->bus->number; + pf_info.vport = cdev_info->vport_id; + + dpp_vport_vhca_id_add(&pf_info, iwdev->rf->sc_dev.vhca_id); + dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VHCA, + iwdev->rf->sc_dev.vhca_id); + dpp_egr_port_attr_set(&pf_info, EGR_FLAG_RDMA_OFFLOAD_EN_OFF, + EGR_RDMA_OFFLOAD_EN); +} + +static void zxdh_fill_device_info(struct zxdh_device *iwdev, + struct iidc_core_dev_info *cdev_info) +{ + struct zxdh_pci_f *rf = iwdev->rf; + + rf->ftype = (cdev_info->vport_id >> 11) & 0x1; + rf->pf_id = (cdev_info->vport_id >> 8) & 0x7; + rf->sc_dev.ep_id = (cdev_info->vport_id >> 12) & 0x7; + + rf->cdev = cdev_info; + rf->pcidev = cdev_info->pdev; + rf->hw.pci_hw_addr = cdev_info->hw_addr; + + rf->msix_count = cdev_info->msix_count; +#ifdef MSIX_DEBUG + ft_debug_msix_interrupt(cdev_info->pdev, cdev_info->msix_entries, + rf->msix_count); +#endif + rf->msix_entries = cdev_info->msix_entries; + rf->sc_dev.max_ceqs = (rf->msix_count - 1); + rf->protocol_used = cdev_info->rdma_protocol == + IIDC_RDMA_PROTOCOL_ROCEV2 ? 
+ ZXDH_ROCE_PROTOCOL_ONLY : + ZXDH_IWARP_PROTOCOL_ONLY; + rf->rdma_ver = ZXDH_GEN_2; + rf->rsrc_profile = ZXDH_HMC_PROFILE_DEFAULT; + rf->rst_to = ZXDH_RST_TIMEOUT_HZ; + rf->gen_ops.request_reset = zxdh_request_reset; + rf->check_fc = zxdh_check_fc_for_qp; + + /* Can override limits_sel, protocol_used */ + zxdh_set_rf_user_cfg_params(rf); + rf->iwdev = iwdev; + + INIT_LIST_HEAD(&iwdev->ah_list); + mutex_init(&iwdev->ah_list_lock); + iwdev->netdev = cdev_info->netdev; + iwdev->init_state = INITIAL_STATE; + iwdev->roce_cwnd = ZXDH_ROCE_CWND_DEFAULT; + iwdev->roce_ackcreds = ZXDH_ROCE_ACKCREDS_DEFAULT; + iwdev->rcv_wnd = ZXDH_CM_DEFAULT_RCV_WND_SCALED; + iwdev->rcv_wscale = ZXDH_CM_DEFAULT_RCV_WND_SCALE; + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + iwdev->iwarp_ecn_en = true; + iwdev->iwarp_rtomin = 5; + iwdev->up_up_map = ZXDH_DEFAULT_UP_UP_MAP; +#endif + if (rf->protocol_used == ZXDH_ROCE_PROTOCOL_ONLY) { +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + iwdev->roce_rtomin = 5; +#endif + //iwdev->roce_dcqcn_en = iwdev->rf->dcqcn_ena; + iwdev->roce_dcqcn_en = true; //dcqcn/ecn is set to default on + iwdev->roce_mode = true; + } +} + +/* The netdev field in zxdh_auxiliary_dev has been moved up; reassign it here. */ +static void zxdh_to_iidc(struct iidc_core_dev_info *cdev_info, + struct zxdh_auxiliary_dev *iidc_adev) +{ + cdev_info->pdev = iidc_adev->zxdh_info->pdev; + cdev_info->adev = iidc_adev->zxdh_info->adev; + cdev_info->hw_addr = iidc_adev->zxdh_info->hw_addr; + cdev_info->cdev_info_id = iidc_adev->zxdh_info->cdev_info_id; + cdev_info->ver = iidc_adev->zxdh_info->ver; + cdev_info->auxiliary_priv = iidc_adev->zxdh_info->auxiliary_priv; + cdev_info->vport_id = iidc_adev->zxdh_info->vport_id; + cdev_info->rdma_protocol = iidc_adev->zxdh_info->rdma_protocol; + cdev_info->qos_info = iidc_adev->zxdh_info->qos_info; + cdev_info->msix_entries = &iidc_adev->zxdh_info->msix_entries; + cdev_info->msix_count = iidc_adev->zxdh_info->msix_count; + cdev_info->ops = iidc_adev->zxdh_info->ops; + cdev_info->netdev = + iidc_adev->rdma_ops->get_rdma_netdev(iidc_adev->parent); +} + +static int zxdh_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *id) +{ + struct zxdh_auxiliary_dev *iidc_adev = + container_of(aux_dev, struct zxdh_auxiliary_dev, adev); + struct zxdh_device *iwdev; + struct zxdh_pci_f *rf; + int err; + struct zxdh_handler *hdl; + struct iidc_core_dev_info *cdev_info = + kzalloc(sizeof(struct iidc_core_dev_info), GFP_KERNEL); + + if (!cdev_info) + return -ENOMEM; + zxdh_to_iidc(cdev_info, iidc_adev); + + if (cdev_info->vport_id == 0x300) { + kfree(cdev_info); + return 0; //EVB_EP0 bond_pf inserts 2 adevs for 1 vhca + } + + if (cdev_info->ver.major != IIDC_MAJOR_VER) { + pr_err("version mismatch:\n"); + pr_err("expected major ver %d, caller specified major ver %d\n", + IIDC_MAJOR_VER, cdev_info->ver.major); + pr_err("expected minor ver %d, caller specified minor ver %d\n", + IIDC_MINOR_VER, cdev_info->ver.minor); + kfree(cdev_info); + return -EINVAL; + } + if (cdev_info->ver.minor != IIDC_MINOR_VER) + pr_info("probe: minor version mismatch: expected %0d.%0d caller specified %0d.%0d\n", + IIDC_MAJOR_VER, IIDC_MINOR_VER, cdev_info->ver.major, + cdev_info->ver.minor); + + pr_info("probe: cdev_info=%p, cdev_info->dev.aux_dev.bus->number=%d, cdev_info->netdev=%p\n", + cdev_info, cdev_info->pdev->bus->number, cdev_info->netdev); + + iwdev = ib_alloc_device(zxdh_device, ibdev); + if (!iwdev) { + kfree(cdev_info); + return -ENOMEM; + } + iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL); + if (!iwdev->rf) { +
ib_dealloc_device(&iwdev->ibdev); + kfree(cdev_info); + return -ENOMEM; + } + zxdh_fill_device_info(iwdev, cdev_info); + + err = zxdh_manager_init(iwdev->rf, cdev_info); + if (err != 0) { + pr_info("dh_rdma_manager_init faile\n"); + goto err_mgr_init; + } + + zxdh_cfg_dpp(iwdev, cdev_info); + + rf = iwdev->rf; + + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) { + kfree(iwdev->rf); + ib_dealloc_device(&iwdev->ibdev); + return -ENOMEM; + } + + hdl->iwdev = iwdev; + iwdev->hdl = hdl; + + err = zxdh_ctrl_init_hw(rf); + if (err) + goto err_ctrl_init; + + err = zxdh_rt_init_hw(iwdev); + if (err) + goto err_rt_init; + + err = zxdh_ib_register_device(iwdev); + if (err) + goto err_ibreg; + + zxdh_add_handler(hdl); + refcount_set(&iwdev->trace_switch.t_switch, 0); + dev_set_drvdata(&aux_dev->dev, iwdev); +#ifdef Z_DH_DEBUG + dh_rf_pointer[rf->sc_dev.vhca_id] = (uintptr_t)rf; +#endif + create_debugfs_entry(rf); + + return 0; + +err_ibreg: + zxdh_rt_deinit_hw(iwdev); +err_rt_init: + zxdh_ctrl_deinit_hw(rf); +#ifdef MSIX_DEBUG + pci_free_irq_vectors(cdev_info->pdev); +#endif +err_ctrl_init: + kfree(hdl); +err_mgr_init: + kfree(iwdev->rf); + ib_dealloc_device(&iwdev->ibdev); + kfree(cdev_info); + + return err; +} + +static const struct auxiliary_device_id zxdh_auxiliary_id_table[] = { + { + .name = "zxdh_pf.rdma_aux", + }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, zxdh_auxiliary_id_table); + +static struct iidc_auxiliary_drv zxdh_auxiliary_drv = { + .adrv = { + .id_table = zxdh_auxiliary_id_table, + .probe = zxdh_probe, + .remove = zxdh_remove, + .shutdown = zxdh_shutdown, + }, + .event_handler = zxdh_iidc_event_handler, +}; + +static int __init zxdh_init_module(void) +{ + int ret; + + pr_info("zrdma driver version: %d.%d.%d\n", DRV_VER_MAJOR, + DRV_VER_MINOR, DRV_VER_BUILD); + zrdma_register_debugfs(); + ret = auxiliary_driver_register(&zxdh_auxiliary_drv.adrv); + if (ret) + return ret; + +#ifdef Z_DH_DEBUG + proc_create("dh_debug", 0, NULL, &dh_debug_proc_fops); + proc_create("mp_debug", 0, NULL, &mp_debug_proc_fops); + proc_create("cap_rx1", 0, NULL, &cap_rx1_proc_fops); + proc_create("cap_rx0", 0, NULL, &cap_rx0_proc_fops); + proc_create("cap_tx1", 0, NULL, &cap_tx1_proc_fops); + proc_create("cap_tx0", 0, NULL, &cap_tx0_proc_fops); +#endif + + zxdh_register_notifiers(); + + return 0; +} + +static void __exit zxdh_exit_module(void) +{ + zxdh_unregister_notifiers(); + +#ifdef Z_DH_DEBUG + remove_proc_entry("cap_tx0", NULL); + remove_proc_entry("cap_tx1", NULL); + remove_proc_entry("cap_rx0", NULL); + remove_proc_entry("cap_rx1", NULL); + remove_proc_entry("mp_debug", NULL); + remove_proc_entry("dh_debug", NULL); +#endif + + auxiliary_driver_unregister(&zxdh_auxiliary_drv.adrv); + zrdma_unregister_debugfs(); +} + +module_init(zxdh_init_module); +module_exit(zxdh_exit_module); diff --git a/src/rdma/src/main.h b/src/rdma/src/main.h new file mode 100644 index 0000000000000000000000000000000000000000..b6c0d4240486f1c4e4c05fe763238949cf7680e1 --- /dev/null +++ b/src/rdma/src/main.h @@ -0,0 +1,646 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_MAIN_H +#define ZXDH_MAIN_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_64BIT +#include +#endif +#include +//#include "../../../../zxdh_kernel/include/linux/dinghai/auxiliary_bus.h" +#ifndef RDMA_MMAP_DB_SUPPORT 
+#include +#endif +#include +#ifdef __OFED_4_8__ +#include +#endif /* __OFED_4_8__ */ + +#ifdef Z_DH_DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "status.h" +#include "osdep.h" +#include "defs.h" +#include "hmc.h" +#include "type.h" +#include "ws.h" +#include "protos.h" +#include "pble.h" +#include "cm.h" +#include "iidc.h" +#include "zrdma_kcompat.h" +#include "zrdma-abi.h" +#include "verbs.h" +#include "user.h" +#include "puda.h" +#include "srq.h" +#include "manager.h" +#include "dbgfs.h" + +extern struct list_head zxdh_handlers; +extern spinlock_t zxdh_handler_lock; +extern bool zxdh_upload_context; +extern struct auxiliary_driver i40iw_auxiliary_drv; + +typedef struct dpp_pf_info { + u16 slot; + u16 vport; +} dpp_pf_info_t; + +#define ZXDH_MAX_IRQ_COUNT 4 +#define ZXDH_CEQ_IRQ_COUNT 3 + +#define ZXDH_FW_VER_DEFAULT 2 +#define ZXDH_HW_VER 2 + +#define ZXDH_ARP_ADD 1 +#define ZXDH_ARP_DELETE 2 +#define ZXDH_ARP_RESOLVE 3 + +#define ZXDH_MACIP_ADD 1 +#define ZXDH_MACIP_DELETE 2 + +#define IW_CCQ_SIZE ZXDH_CQP_SW_SQSIZE_2048 +#define IW_CEQ_SIZE 2048 +#define IW_AEQ_SIZE 2048 + +#define RX_BUF_SIZE (1536 + 8) +#define IW_REG0_SIZE (4 * 1024) +#define IW_TX_TIMEOUT (6 * HZ) +#define IW_FIRST_QPN 1 + +#define IW_SW_CONTEXT_ALIGN 1024 + +#define MAX_DPC_ITERATIONS 128 + +#define ZXDH_EVENT_TIMEOUT_MS 5000 +#define ZXDH_VCHNL_EVENT_TIMEOUT_MS 10000 +#define ZXDH_RST_TIMEOUT_HZ 4 + +#define ZXDH_NO_QSET 0xffff + +#define IW_CFG_FPM_QP_COUNT 32768 +#define ZXDH_MAX_PAGES_PER_FMR 512 +#define ZXDH_MIN_PAGES_PER_FMR 1 +#define ZXDH_CQP_COMPL_RQ_WQE_FLUSHED 2 +#define ZXDH_CQP_COMPL_SQ_WQE_FLUSHED 3 + +#define ZXDH_Q_TYPE_PE_AEQ 0x80 +#define ZXDH_Q_INVALID_IDX 0xffff +#define ZXDH_REM_ENDPOINT_TRK_QPID 3 + +#define ZXDH_DRV_OPT_ENA_MPA_VER_0 0x00000001 +#define ZXDH_DRV_OPT_DISABLE_MPA_CRC 0x00000002 +#define ZXDH_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004 +#define ZXDH_DRV_OPT_DISABLE_INTF 0x00000008 +#define ZXDH_DRV_OPT_ENA_MSI 0x00000010 +#define ZXDH_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020 +#define ZXDH_DRV_OPT_NO_INLINE_DATA 0x00000080 +#define ZXDH_DRV_OPT_DISABLE_INT_MOD 0x00000100 +#define ZXDH_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 +#define ZXDH_DRV_OPT_ENA_PAU 0x00000400 +#define ZXDH_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800 + +#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) +#define ZXDH_ROCE_CWND_DEFAULT 0x400 +#define ZXDH_ROCE_RTOMIN_DEFAULT 0x5 +#define ZXDH_ROCE_ACKCREDS_DEFAULT 0x1E +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +#define ZXDH_DEFAULT_UP_UP_MAP 0x0706050403020100l +#endif + +#define ZXDH_FLUSH_SQ BIT(0) +#define ZXDH_FLUSH_RQ BIT(1) +#define ZXDH_REFLUSH BIT(2) +#define ZXDH_FLUSH_WAIT BIT(3) + +#define SINGLE_EP0 1 +#define MULTI_EP_NO_ZF 0 +#define MULTI_EP_WITH_ZF 0 + +struct dev_log_trace { + refcount_t t_switch; +}; + +enum init_completion_state { + INVALID_STATE = 0, + INITIAL_STATE, + CQP_CREATED, + SMMU_PAGETABLE_INITIALIZED, + HMC_OBJS_CREATED, + HW_RSRC_INITIALIZED, + CQP_QP_CREATED, + AEQ_CREATED, + CCQ_CREATED, + CEQ0_CREATED, /* Last state of probe */ + ILQ_CREATED, + IEQ_CREATED, + REM_ENDPOINT_TRK_CREATED, + CEQS_CREATED, + PBLE_CHUNK_MEM, + IP_ADDR_REGISTERED, /* Last state of open */ +}; + +enum { + MCODE_TYPE_DCQCN = 1, + MCODE_TYPE_RTT = 2, +}; + +struct zxdh_cqp_err_info { + u16 maj; + u16 min; + const char *desc; +}; + +struct zxdh_cqp_compl_info { + u64 op_ret_val; + u16 
maj_err_code; + u16 min_err_code; + bool error; + u8 op_code; + __le64 addrbuf[5]; +}; + +struct zxdh_cqp_request { + struct cqp_cmds_info info; + wait_queue_head_t waitq; + struct list_head list; + refcount_t refcnt; + void (*callback_fcn)(struct zxdh_cqp_request *cqp_request); + void *param; + struct zxdh_cqp_compl_info compl_info; + u8 waiting : 1; + u8 request_done : 1; + u8 dynamic : 1; +}; + +struct zxdh_cqp { + struct zxdh_sc_cqp sc_cqp; + spinlock_t req_lock; /* protect CQP request list */ + spinlock_t compl_lock; /* protect CQP completion processing */ + wait_queue_head_t waitq; + wait_queue_head_t remove_wq; + struct zxdh_dma_mem sq; + struct zxdh_dma_mem host_ctx; + u64 *scratch_array; + struct zxdh_cqp_request *cqp_requests; + struct list_head cqp_avail_reqs; + struct list_head cqp_pending_reqs; +}; + +struct zxdh_ccq { + struct zxdh_sc_cq sc_cq; + struct zxdh_dma_mem mem_cq; + struct zxdh_dma_mem shadow_area; +}; + +struct zxdh_ceq { + struct zxdh_sc_ceq sc_ceq; + struct zxdh_dma_mem mem; + u32 irq; + u32 msix_idx; + struct zxdh_pci_f *rf; + struct tasklet_struct dpc_tasklet; + + spinlock_t + ce_lock; /* sync cq destroy with cq completion event notification */ +}; + +struct zxdh_aeq { + struct zxdh_sc_aeq sc_aeq; + struct zxdh_dma_mem mem; + struct zxdh_pble_alloc palloc; + bool virtual_map; +}; + +struct zxdh_arp_entry { + u32 ip_addr[4]; + u8 mac_addr[ETH_ALEN]; +}; + +struct zxdh_msix_vector { + u32 idx; + u32 irq; + u32 cpu_affinity; + u32 ceq_id; + cpumask_t mask; +}; + +struct zxdh_mc_table_info { + u32 mgn; + u32 dest_ip[4]; + u8 lan_fwd : 1; + u8 ipv4_valid : 1; +}; + +struct mc_table_list { + struct list_head list; + struct zxdh_mc_table_info mc_info; + struct zxdh_mcast_grp_info mc_grp_ctx; +}; + +struct zxdh_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct zxdh_qvlist_info { + u32 num_vectors; + struct zxdh_qv_info qv_info[1]; +}; + +struct zxdh_gen_ops { + void (*request_reset)(struct zxdh_pci_f *rf); + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); +}; + +struct zxdh_pci_f { + u8 reset : 1; + u8 rsrc_created : 1; + u8 ftype : 1; + u8 rsrc_profile; + u8 max_rdma_vfs; + u8 *hmc_info_mem; + u8 *mem_rsrc; + u8 rdma_ver; + u8 rst_to; + /* Not used in SRIOV VF mode */ + u8 pf_id; + u8 vf_id; + u8 ep_id; + u8 fragcnt_limit; + enum zxdh_protocol_used protocol_used; + u8 en_rem_endpoint_trk : 1; + u8 dcqcn_ena : 1; + u32 sd_type; + u32 msix_count; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 max_ah; + u32 next_ah; + u32 max_mcg; + u32 next_mcg; + u32 max_pd; + u32 next_qp; + u32 next_cq; + u32 next_pd; + u32 next_mr; + u32 max_mr_size; + u32 max_cqe; + u32 mr_stagmask; + u32 used_pds; + u32 used_cqs; + u32 used_mrs; + u32 used_qps; + u32 max_srq; + u32 next_srq; + u32 used_srqs; +#ifdef Z_CONFIG_RDMA_ARP + u32 arp_table_size; + u32 next_arp_index; + unsigned long *allocated_arps; + struct zxdh_arp_entry *arp_table; + spinlock_t arp_lock; /*protect ARP table access*/ +#endif + u32 ceqs_count; + u32 limits_sel; + + unsigned long *allocated_qps; + unsigned long *allocated_cqs; + unsigned long *allocated_mrs; + unsigned long *allocated_pds; + unsigned long *allocated_mcgs; + unsigned long *allocated_ahs; + unsigned long *allocated_srqs; + + enum init_completion_state init_state; + struct zxdh_sc_dev sc_dev; + struct zxdh_handler *hdl; + struct pci_dev *pcidev; + void *cdev; + struct zxdh_hw hw; + struct 
zxdh_cqp cqp; + struct zxdh_ccq ccq; + struct zxdh_aeq aeq; + struct zxdh_ceq *ceqlist; + struct zxdh_hmc_pble_rsrc *pble_rsrc; + struct zxdh_hmc_pble_rsrc *pble_mr_rsrc; + struct zxdh_dma_mem cqp_host_ctx; + + spinlock_t rsrc_lock; /* protect HW resource array access */ + spinlock_t qptable_lock; /*protect QP table access*/ + spinlock_t cqtable_lock; /*protect CQ table access*/ + struct zxdh_qp **qp_table; + struct zxdh_cq **cq_table; + struct zxdh_msix_vector *iw_msixtbl; + struct zxdh_qvlist_info *iw_qvlist; + spinlock_t srqtable_lock; /*protect SRQ table access*/ + struct zxdh_srq **srq_table; + struct tasklet_struct dpc_tasklet; + struct msix_entry *msix_entries; + struct workqueue_struct *cqp_cmpl_wq; + struct work_struct cqp_cmpl_work; + struct zxdh_gen_ops gen_ops; + void (*check_fc)(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); + struct zxdh_dcqcn_cc_params dcqcn_params; + struct zxdh_device *iwdev; + struct zrdma_debugfs_entries debugfs_entry; + u8 vlan_parse_en; + u8 mcode_type; +}; + +struct zxdh_cap_mmap_info { + struct rdma_user_mmap_entry *cap_mmap_entry_node0; + struct rdma_user_mmap_entry *cap_mmap_entry_node1; + struct rdma_user_mmap_entry *mp_cap_mmap_entry; +}; + +struct zxdh_device { + struct ib_device ibdev; + struct zxdh_pci_f *rf; + struct net_device *netdev; + struct zxdh_handler *hdl; + struct workqueue_struct *cleanup_wq; + struct zxdh_sc_vsi vsi; + struct zxdh_cm_core cm_core; + struct list_head ah_list; + struct mutex ah_list_lock; + struct dev_log_trace trace_switch; + u32 ah_list_cnt; + u32 ah_list_hwm; + u32 roce_cwnd; + u32 roce_ackcreds; + u32 vendor_id; + u32 vendor_part_id; + u32 device_cap_flags; + u32 push_mode; + u32 rcv_wnd; + u16 mac_ip_table_idx; + u16 vsi_num; + u8 rcv_wscale; + u8 iw_status; + u8 rd_fence_rate; +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + u64 up_up_map; + u8 cnp_up_override; + u8 iwarp_rtomin; + u32 ceq_intrl; /* Interrupt rate limit per second: 0-disabled, 4237 - 250,000 */ + u8 up_map_en : 1; + u8 iwarp_dctcp_en : 1; + u8 iwarp_timely_en : 1; + u8 iwarp_bolt_en : 1; + u8 iwarp_ecn_en : 1; + u8 override_rcv_wnd : 1; + u8 override_cwnd : 1; + u8 override_ackcreds : 1; + u8 override_ooo : 1; + u8 override_rtomin : 1; + u8 override_rd_fence_rate : 1; + u8 roce_rtomin; + u8 roce_ecn_en : 1; + u8 roce_timely_en : 1; + u8 roce_no_icrc_en : 1; + u8 roce_dctcp_en : 1; +#endif /* CONFIG_CONFIGFS_FS */ + u8 roce_mode : 1; + u8 roce_dcqcn_en : 1; + u8 dcb_vlan_mode : 1; + u8 iw_ooo : 1; + enum init_completion_state init_state; + dma_addr_t cap_dma_addr_node0; + void *cap_cpu_addr_node0; + dma_addr_t cap_dma_addr_node1; + void *cap_cpu_addr_node1; + struct zxdh_cap_mmap_info cap_mmap_info; + void *mp_cap_cpu_addr; + dma_addr_t mp_cap_dma_addr; + u64 mp_cap_media_addr_base; + wait_queue_head_t suspend_wq; +}; + +struct zxdh_handler { + struct list_head list; + struct zxdh_device *iwdev; + bool shared_res_created; +}; + +static inline struct zxdh_device *to_iwdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct zxdh_device, ibdev); +} + +static inline struct zxdh_ucontext *to_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct zxdh_ucontext, ibucontext); +} + +#ifdef RDMA_MMAP_DB_SUPPORT +static inline struct zxdh_user_mmap_entry * +to_zxdh_mmap_entry(struct rdma_user_mmap_entry *rdma_entry) +{ + return container_of(rdma_entry, struct zxdh_user_mmap_entry, + rdma_entry); +} + +#endif +static inline struct zxdh_pd *to_iwpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct zxdh_pd, ibpd); 
+} + +static inline struct zxdh_ah *to_iwah(struct ib_ah *ibah) +{ + return container_of(ibah, struct zxdh_ah, ibah); +} + +static inline struct zxdh_mr *to_iwmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct zxdh_mr, ibmr); +} + +static inline struct zxdh_mr *to_iwmw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct zxdh_mr, ibmw); +} + +static inline struct zxdh_cq *to_iwcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct zxdh_cq, ibcq); +} + +static inline struct zxdh_qp *to_iwqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct zxdh_qp, ibqp); +} + +static inline struct zxdh_pci_f *dev_to_rf(struct zxdh_sc_dev *dev) +{ + return container_of(dev, struct zxdh_pci_f, sc_dev); +} + +/** + * zxdh_alloc_resource - allocate a resource + * @iwdev: device pointer + * @resource_array: resource bit array: + * @max_resources: maximum resource number + * @req_resources_num: Allocated resource number + * @next: next free id + **/ +static inline int zxdh_alloc_rsrc(struct zxdh_pci_f *rf, + unsigned long *rsrc_array, u32 max_rsrc, + u32 *req_rsrc_num, u32 *next) +{ + u32 rsrc_num; + unsigned long flags; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next); + if (rsrc_num >= max_rsrc) { + rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc); + if (rsrc_num >= max_rsrc) { + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + pr_err("ERR: resource [%d] allocation failed\n", + rsrc_num); + return -EOVERFLOW; + } + } + __set_bit(rsrc_num, rsrc_array); + *next = rsrc_num + 1; + if (*next == max_rsrc) + *next = 0; + *req_rsrc_num = rsrc_num; + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + + return 0; +} + +/** + * zxdh_free_resource - free a resource + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to free + **/ +static inline void zxdh_free_rsrc(struct zxdh_pci_f *rf, + unsigned long *rsrc_array, u32 rsrc_num) +{ + unsigned long flags; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + __clear_bit(rsrc_num, rsrc_array); + spin_unlock_irqrestore(&rf->rsrc_lock, flags); +} + +int zxdh_ctrl_init_hw(struct zxdh_pci_f *rf); +void zxdh_ctrl_deinit_hw(struct zxdh_pci_f *rf); +int zxdh_rt_init_hw(struct zxdh_device *iwdev); +void zxdh_rt_deinit_hw(struct zxdh_device *iwdev); +void zxdh_qp_add_ref(struct ib_qp *ibqp); +void zxdh_qp_rem_ref(struct ib_qp *ibqp); +void zxdh_flush_wqes(struct zxdh_qp *iwqp, u32 flush_mask); +struct zxdh_cqp_request *zxdh_alloc_and_get_cqp_request(struct zxdh_cqp *cqp, + bool wait); +void zxdh_free_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request); +void zxdh_put_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request); +u32 zxdh_initialize_hw_rsrc(struct zxdh_pci_f *rf); +void zxdh_port_ibevent(struct zxdh_device *iwdev); +void zxdh_aeq_qp_disconn(struct zxdh_qp *qp); +void zxdh_aeq_process_retry_err(struct zxdh_qp *iwqp); +void zxdh_aeq_process_entry_err(struct zxdh_qp *iwqp); + +bool zxdh_cqp_crit_err(struct zxdh_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, + u16 min_err_code); +int zxdh_handle_cqp_op(struct zxdh_pci_f *rf, + struct zxdh_cqp_request *cqp_request); +int zxdh_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +void zxdh_cq_add_ref(struct ib_cq *ibcq); +void zxdh_cq_rem_ref(struct ib_cq *ibcq); +void zxdh_cq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq); + +void zxdh_cleanup_pending_cqp_op(struct zxdh_pci_f *rf); +int 
zxdh_hw_modify_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_modify_qp_info *info, bool wait); +int zxdh_qp_suspend_resume(struct zxdh_sc_qp *qp, bool suspend); +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp); +int zxdh_hw_flush_wqes(struct zxdh_pci_f *rf, struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, bool wait); +void zxdh_copy_ip_ntohl(u32 *dst, __be32 *src); +void zxdh_copy_ip_htonl(__be32 *dst, u32 *src); +u16 zxdh_get_vlan_ipv4(u32 *addr); +struct net_device *zxdh_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac); +struct ib_mr *zxdh_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size, int acc, + u64 *iova_start); +int zxdh_upload_qp_context(struct zxdh_qp *iwqp, bool freeze, bool raw); +void zxdh_del_hmc_objects(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info); + +void zxdh_cqp_ce_handler(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq); +int zxdh_ah_cqp_op(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, u8 cmd, + bool wait, + void (*callback_fcn)(struct zxdh_cqp_request *cqp_request), + void *cb_param); +void zxdh_gsi_ud_qp_ah_cb(struct zxdh_cqp_request *cqp_request); +bool zxdh_cq_empty(struct zxdh_cq *iwcq); +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +struct zxdh_device *zxdh_get_device_by_name(const char *name); +#endif + +void zxdh_set_rf_user_cfg_params(struct zxdh_pci_f *rf); +void zxdh_add_handler(struct zxdh_handler *hdl); +void zxdh_del_handler(struct zxdh_handler *hdl); +void cqp_compl_worker(struct work_struct *work); + +int zxdh_manager_init(struct zxdh_pci_f *rf, + struct iidc_core_dev_info *cdev_info); + +#endif /* ZRDMA_MAIN_H */ diff --git a/src/rdma/src/manager.c b/src/rdma/src/manager.c new file mode 100644 index 0000000000000000000000000000000000000000..028a35ec5b85d9bd857a2974d8d8c0ba150f7cbd --- /dev/null +++ b/src/rdma/src/manager.c @@ -0,0 +1,752 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include + +//#include "/home/chenhuan/code/rdma_dev/zxdh_kernel/incldue/linux/dinghai/dh_cmd.h" +//#include "dh_cmd.h" +#include "iidc.h" +#include "main.h" +#include "manager.h" +#include "icrdma_hw.h" + +u64 zxdh_hw_bar_pages[C_RDMA_HW_BAR_PAGE_NUM] = { 0 }; + +static int dh_rdma_pf_pcie_id_get(struct zxdh_mgr *mgr) +{ + u32 pos = 0; + u8 type = 0; + u16 padding = 0; + struct pci_dev *pdev = mgr->pdev; + + for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) { + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), + &type); + + if (type == ZXDH_PCI_CAP_PCI_CFG) { + pci_read_config_word( + pdev, + pos + offsetof(struct zxdh_pf_pci_cap, + padding[0]), + &padding); + mgr->pcie_id = padding; + pr_info("pf_dev->pcie_id: 0x%x\n", mgr->pcie_id); + return 0; + } + } + return -1; +} + +int zxdh_chan_sync_send(struct zxdh_mgr *pmgr, struct zxdh_chan_msg *pmsg, + u32 *pdata, u32 rep_len) +{ + u16 buffer_len = 0; + void *recv_buffer = NULL; + int ret = 0; + u8 *reply_ptr = NULL; + u16 reply_msg_len = 0; + + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + if (pmgr == NULL || pmsg == NULL || pdata == NULL) + return -1; + + buffer_len = rep_len + ZXDH_CHAN_REPS_LEN; + recv_buffer = (void *)kmalloc(buffer_len, GFP_KERNEL); + if (recv_buffer == NULL) + return -1; + + in.virt_addr = + (u64)pmgr->pci_hw_addr + ZXDH_BAR_CHAN_OFFSET; //bar空间偏移? 
+ in.payload_addr = pmsg->msg; + in.payload_len = pmsg->msg_len; + + if (!pmgr->ftype) + in.src = MSG_CHAN_END_PF; + else + in.src = MSG_CHAN_END_VF; + + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + + if (0 == dh_rdma_pf_pcie_id_get(pmgr)) + in.src_pcieid = pmgr->pcie_id; + else + return -1; + + result.buffer_len = buffer_len; + result.recv_buffer = recv_buffer; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != 0) { + pr_err("zxdh_bar_chan_sync_msg_send faile, ret=%d\n", ret); + return -1; + } + + reply_ptr = (u8 *)result.recv_buffer; + if (*reply_ptr == MSG_REP_VALID) { + reply_msg_len = *(u16 *)(reply_ptr + MSG_REP_LEN_OFFSET); + memcpy(pdata, reply_ptr + ZXDH_CHAN_REPS_LEN, + ((reply_msg_len > rep_len) ? rep_len : reply_msg_len)); + kfree(recv_buffer); + return 0; + } + + kfree(recv_buffer); + return 0; +} + +int zxdh_mgr_par_get(struct zxdh_mgr *dh_mgr) +{ + int ret = 0; + + struct zxdh_mgr_msg *cmd = + kzalloc(sizeof(struct zxdh_mgr_msg), GFP_KERNEL); + struct zxdh_chan_msg *pmsg = + kzalloc(sizeof(struct zxdh_chan_msg), GFP_KERNEL); + struct zxdh_mgr_par param; + + if (pmsg == NULL) { + kfree(cmd); + return -ENOMEM; + } + + if (cmd == NULL) { + kfree(pmsg); + return -ENOMEM; + } + + cmd->op_code = 0; + cmd->pf_id = dh_mgr->pf_id; + cmd->vport_vf_id = dh_mgr->vport_vf_id; + cmd->ftype = dh_mgr->ftype; + cmd->ep_id = dh_mgr->ep_id; + + pmsg->msg_len = sizeof(struct zxdh_mgr_msg); + pmsg->msg = (void *)cmd; + + ret = zxdh_chan_sync_send(dh_mgr, pmsg, (void *)&dh_mgr->param, + sizeof(struct zxdh_mgr_par)); + param = dh_mgr->param; + pr_info("mgr cfg param:"); + pr_info("ftype=%d, ep_id=%d, pf_id=%d, max_vf_num=%d, vhca_id=%d, bar_offset=0x%x.\n", + param.ftype, param.ep_id, param.pf_id, param.max_vf_num, + param.vhca_id, param.bar_offset); + pr_info("l2d_smmu_addr=0x%llx, vf_id=%d, vhca_id_pf=%d, l2d_smmu_l2_offset=%d.\n", + param.l2d_smmu_addr, param.vf_id, param.vhca_id_pf, + param.l2d_smmu_l2_offset); + pr_info("qp_cnt=%d, cq_cnt=%d, srq_cnt=%d, ceq_cnt=%d, ah_cnt=%d, mr_cnt=%d, pbleq_cnt=%d, pblem_cnt=%d.\n", + param.qp_cnt, param.cq_cnt, param.srq_cnt, param.ceq_cnt, + param.ah_cnt, param.mr_cnt, param.pbleq_cnt, param.pblem_cnt); + pr_info("base_qpn=%d, base_cqn=%d, base_srqn=%d, base_ceqn=%d.\n", + param.base_qpn, param.base_cqn, param.base_srqn, + param.base_ceqn); + pr_info("qp_hmc_base=0x%llx, cq_hmc_base=0x%llx, srq_hmc_base=0x%llx, txwindow_hmc_base=0x%llx.\n", + param.qp_hmc_base, param.cq_hmc_base, param.srq_hmc_base, + param.txwindow_hmc_base); + pr_info("ird_hmc_base=0x%llx,ah_hmc_base=0x%llx,mr_hmc_base=0x%llx,pbleq_hmc_base=0x%llx,pblem_hmc_base=0x%llx.\n", + param.ird_hmc_base, param.ah_hmc_base, param.mr_hmc_base, + param.pbleq_hmc_base, param.pblem_hmc_base); + pr_info("mcode_type=%d, max_hw_wq_frags=%d, max_hw_read_sges=%d\n", + param.mcode_type, param.max_hw_wq_frags, + param.max_hw_read_sges); + + if (ret != 0) { + pr_info("get pf param faile, ret=%d.\n", ret); + kfree(cmd); + kfree(pmsg); + return -EPIPE; + } + + if (param.ftype != dh_mgr->ftype || param.ep_id != dh_mgr->ep_id || + param.pf_id != dh_mgr->pf_id) { + kfree(cmd); + kfree(pmsg); + return -EPIPE; + } + + kfree(cmd); + kfree(pmsg); + + return 0; +} + +static int zxdh_sc_init_hmccnt(struct zxdh_pci_f *rf, + struct zxdh_mgr_par *param) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 hmc_info_mem_size; + + hmc_info_mem_size = + sizeof(struct zxdh_hmc_pble_rsrc) * 2 + + sizeof(struct zxdh_hmc_info) + + (sizeof(struct zxdh_hmc_obj_info) * ZXDH_HMC_IW_MAX); + + 
rf->hmc_info_mem = kzalloc(hmc_info_mem_size, GFP_KERNEL); + if (!rf->hmc_info_mem) + return -ENOMEM; + + rf->pble_mr_rsrc = (struct zxdh_hmc_pble_rsrc *)rf->hmc_info_mem; + rf->pble_rsrc = (struct zxdh_hmc_pble_rsrc *)(rf->pble_mr_rsrc + 1); + dev->hmc_info = &rf->hw.hmc; + dev->hmc_info->hmc_obj = + (struct zxdh_hmc_obj_info *)(rf->pble_rsrc + 1); + + rf->max_rdma_vfs = param->max_vf_num; + dev->hmc_use_dpu_ddr = param->hmc_use_dpu_ddr; + pr_info("hmc_use_dpu_ddr=%d\n", dev->hmc_use_dpu_ddr); + if (!rf->sc_dev.hmc_use_dpu_ddr) { + dev->l2d_smmu_addr = param->l2d_smmu_addr; + dev->l2d_smmu_l2_offset = param->l2d_smmu_l2_offset; + } + + dev->hmc_pf_manager_info.hmc_base = param->qp_hmc_base; + + rf->max_qp = param->qp_cnt; + rf->max_cq = param->cq_cnt; + rf->max_srq = param->srq_cnt; + rf->max_ah = param->ah_cnt; + rf->max_mr = param->mr_cnt; + + dev->base_qpn = param->base_qpn; + dev->base_cqn = param->base_cqn; + dev->base_srqn = param->base_srqn; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].max_cnt = rf->max_cq; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].max_cnt = rf->max_srq; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_IRD].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].max_cnt = rf->max_ah; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].max_cnt = param->mr_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].max_cnt = param->pbleq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].max_cnt = param->pblem_cnt; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].base = param->qp_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].base = param->cq_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].base = param->srq_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW].base = + param->txwindow_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_IRD].base = param->ird_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].base = param->ah_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].base = param->mr_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].base = param->pbleq_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base = + param->pblem_hmc_base; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt = param->qp_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt = param->cq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt = param->srq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].cnt = param->ah_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].cnt = param->mr_cnt; + + if (!rf->ftype) { + dev->hmc_pf_manager_info.total_qp_cnt = + param->qp_cnt + param->max_vf_num * param->vf_qp_cnt; + dev->hmc_pf_manager_info.total_cq_cnt = + param->cq_cnt + param->max_vf_num * param->vf_cq_cnt; + dev->hmc_pf_manager_info.total_srq_cnt = + param->srq_cnt + param->max_vf_num * param->vf_srq_cnt; + dev->hmc_pf_manager_info.total_ah_cnt = + param->ah_cnt + param->max_vf_num * param->vf_ah_cnt; + dev->hmc_pf_manager_info.total_mrte_cnt = + param->mr_cnt + param->max_vf_num * param->vf_mr_cnt; + + dev->hmc_pf_manager_info.pf_pblemr_cnt = param->pblem_cnt; + dev->hmc_pf_manager_info.pf_pblequeue_cnt = param->pbleq_cnt; + + dev->hmc_pf_manager_info.vf_qp_cnt = param->vf_qp_cnt; + dev->hmc_pf_manager_info.vf_pblemr_cnt = param->vf_pblem_cnt; + dev->hmc_pf_manager_info.vf_pblequeue_cnt = param->vf_pbleq_cnt; + } + return 0; +} + +static void zxdh_init_hw_bar_pages(u8 ep_id, u64 bar_offset) +{ + int i; + u64 page_bar_offset; + u64 bar_offset_low; + u64 bar_offset_high; + + page_bar_offset = 
bar_offset; + for (i = 0; i < C_RDMA_HW_BAR_PAGE_NUM; i++) { + if (ep_id == ZXDH_ZF_EPID) { + bar_offset_low = page_bar_offset & 0xFFFF; + bar_offset_high = page_bar_offset & 0xF0000; + zxdh_hw_bar_pages[i] = + bar_offset_low + (bar_offset_high << 4); + } else + zxdh_hw_bar_pages[i] = page_bar_offset; + + page_bar_offset += C_RDMA_HW_BAR_PAGE_SIZE; + pr_info("zxdh_hw_bar_pages[%d] = 0x%llx\n", i, + zxdh_hw_bar_pages[i]); + } +} + +int zxdh_manager_init(struct zxdh_pci_f *rf, + struct iidc_core_dev_info *cdev_info) +{ + int ret = 0; + + struct zxdh_mgr *dh_mgr = kzalloc(sizeof(struct zxdh_mgr), GFP_KERNEL); + + if (dh_mgr == NULL) + return -ENOMEM; + + dh_mgr->pdev = cdev_info->pdev; + dh_mgr->pf_id = rf->pf_id; + dh_mgr->vport_vf_id = (cdev_info->vport_id) & 0xFF; + dh_mgr->ftype = rf->ftype; + dh_mgr->ep_id = rf->sc_dev.ep_id; + + dh_mgr->device_id = cdev_info->pdev->subsystem_device; + dh_mgr->pci_hw_addr = cdev_info->hw_addr; + + pr_info("manager pcie_id=%d, device_id=%d\n", dh_mgr->pcie_id, + dh_mgr->device_id); + + ret = zxdh_mgr_par_get(dh_mgr); + if (ret != 0) { + kfree(dh_mgr); + pr_info("dh_rdma_mgr_par_get failed.\n"); + return ret; + } + + rf->vf_id = dh_mgr->param.vf_id; + rf->sc_dev.vhca_id = dh_mgr->param.vhca_id; + rf->sc_dev.vhca_id_pf = dh_mgr->param.vhca_id_pf; + rf->sc_dev.hmc_fn_id = dh_mgr->param.hmc_sid; + rf->sc_dev.total_vhca = dh_mgr->param.dh_total_vhca; + rf->sc_dev.np_mode_low_lat = dh_mgr->param.np_mode_low_lat; + rf->sc_dev.np_mode_low_lat = false; + rf->sc_dev.nof_ioq_ddr_addr = dh_mgr->param.nof_ioq_ddr_addr; + rf->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags = + dh_mgr->param.max_hw_wq_frags; + rf->sc_dev.hw_attrs.uk_attrs.max_hw_read_sges = + dh_mgr->param.max_hw_read_sges; + pr_info("nof_ioq_ddr_addr:0x%llx\n", rf->sc_dev.nof_ioq_ddr_addr); + + rf->hw.hw_addr = cdev_info->hw_addr; + zxdh_init_hw_bar_pages(rf->sc_dev.ep_id, dh_mgr->param.bar_offset); + + ret = zxdh_sc_init_hmccnt(rf, &dh_mgr->param); + if (ret != 0) { + kfree(dh_mgr); + pr_info("init_hmccnt failed.\n"); + return ret; + } + + rf->sc_dev.max_ceqs = dh_mgr->param.ceq_cnt; + rf->sc_dev.base_ceqn = dh_mgr->param.base_ceqn; + + rf->msix_count = min(rf->msix_count, (rf->sc_dev.max_ceqs + 1)); + if (rf->msix_count > 1) + rf->sc_dev.max_ceqs = (rf->msix_count - 1); + else + rf->sc_dev.max_ceqs = rf->msix_count; + if (rf->msix_count == 0) { + kfree(dh_mgr); + pr_info("msix_count is 0\n"); + return -EINVAL; + } + rf->mcode_type = dh_mgr->param.mcode_type; + kfree(dh_mgr); + + return 0; +} + +int zxdh_rdma_reg_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + struct zxdh_reg_read_cmd *read_cmd; + size_t recv_len; + void *recv_buffer; + struct dh_rdma_reg_read_resp *read_resp; + + if (!rf || !outdata) + return -EINVAL; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + read_cmd = (struct zxdh_reg_read_cmd *)kzalloc( + sizeof(struct zxdh_reg_read_cmd), GFP_KERNEL); + if (!read_cmd) { + return -ENOMEM; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_reg_read_resp) + + 1 * sizeof(uint32_t); // data + recv_buffer = (void *)kzalloc(recv_len,
GFP_KERNEL); + if (!recv_buffer) { + kfree(read_cmd); + return -ENOMEM; + } + + // commnad preparation + read_cmd->op_code = RDMA_REG_READ; + read_cmd->req.phy_addr = phy_addr; + read_cmd->req.reg_num = 1; + + // send message preparation + in.payload_addr = (void *)read_cmd; + in.payload_len = sizeof(struct zxdh_reg_read_cmd); + in.src = rf->ftype == 0 ? MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + kfree(read_cmd); + + if (ret) { + pr_err("[%s] message send failed, ret=%d\n", __func__, ret); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + read_resp = + (struct dh_rdma_reg_read_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (read_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, read_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: addr=0x%llx num=%d status_code=%d val=%x\n", + read_resp->phy_addr, read_resp->reg_num, read_resp->status_code, + read_resp->data[0]); + + *outdata = read_resp->data[0]; + + kfree(recv_buffer); + return 0; +} + +int zxdh_rdma_reg_write(struct zxdh_pci_f *rf, uint64_t phy_addr, uint32_t val) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t write_cmd_len; + size_t recv_len; + void *recv_buffer; + struct zxdh_reg_write_cmd *write_cmd; + struct dh_rdma_reg_write_resp *write_resp; + + if (!rf) + return -EINVAL; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + write_cmd_len = + sizeof(struct zxdh_reg_write_cmd) + 1 * sizeof(uint32_t); + write_cmd = + (struct zxdh_reg_write_cmd *)kzalloc(write_cmd_len, GFP_KERNEL); + if (!write_cmd) { + return -ENOMEM; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_reg_write_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) { + kfree(write_cmd); + return -ENOMEM; + } + + // commnad preparation + write_cmd->op_code = RDMA_REG_WRITE; + write_cmd->req.phy_addr = phy_addr; + write_cmd->req.reg_num = 1; + write_cmd->req.data[0] = val; + + // send message preparation + in.payload_addr = (void *)write_cmd; + in.payload_len = write_cmd_len; + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + kfree(write_cmd); + + if (ret) { + pr_err("[%s] message send failed, ret=%d\n", __func__, ret); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + write_resp = + (struct dh_rdma_reg_write_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (write_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, write_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: addr=0x%llx num=%d status_code=%d\n", + write_resp->phy_addr, write_resp->reg_num, + write_resp->status_code); + + kfree(recv_buffer); + return 0; +} + +int zxdh_mp_dtcm_para_get(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t *outdata) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mp_dtcm_para_get_cmd get_cmd = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t recv_len; + void *recv_buffer; + struct dh_mp_dtcm_para_get_resp *get_resp; + + if (!rf || !outdata) + return -EINVAL; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_mp_dtcm_para_get_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + get_cmd.op_code = RDMA_MP_DTCM_PARA_GET; + get_cmd.req.mcode_type = mcode_type; + get_cmd.req.para_id = para_id; + + // get message preparation + in.payload_addr = (void *)&get_cmd; + in.payload_len = sizeof(struct zxdh_mp_dtcm_para_get_cmd); + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret) { + pr_err("[%s] message send failed, ret=%d\n", __func__, ret); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + get_resp = (struct dh_mp_dtcm_para_get_resp *)(rep_ptr + + ZXDH_CHAN_REPS_LEN); + if (get_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, get_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: para_id=%d val=%d\n", get_resp->para_id, get_resp->val); + + *outdata = get_resp->val; + kfree(recv_buffer); + return 0; +} + +int zxdh_mp_dtcm_para_set(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t val) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mp_dtcm_para_set_cmd set_cmd = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t recv_len; + void *recv_buffer; + struct dh_mp_dtcm_para_set_resp *set_resp; + + if (!rf) + return -EINVAL; + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_mp_dtcm_para_set_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + set_cmd.op_code = RDMA_MP_DTCM_PARA_SET; + set_cmd.req.mcode_type = mcode_type; + set_cmd.req.para_id = para_id; + set_cmd.req.val = val; + + // get message preparation + in.payload_addr = (void *)&set_cmd; + in.payload_len = sizeof(struct zxdh_mp_dtcm_para_set_cmd); + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // recv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret) { + pr_err("[%s] message send failed, ret=%d\n", __func__, ret); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + set_resp = (struct dh_mp_dtcm_para_set_resp *)(rep_ptr + + ZXDH_CHAN_REPS_LEN); + if (set_resp->status_code != 200) { + pr_err("[%s] response status invalid, status_code=0x%x\n", + __func__, set_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: para_id=%d\n", para_id); + kfree(recv_buffer); + + return 0; +} diff --git a/src/rdma/src/manager.h b/src/rdma/src/manager.h new file mode 100644 index 0000000000000000000000000000000000000000..fdb5aa82142ba215cf5d0d14da4d03c026520178 --- /dev/null +++ b/src/rdma/src/manager.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _MGR_H +#define _MGR_H + +/* Common configuration */ +#define ZXDH_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define ZXDH_PCI_CAP_NOTIFY_CFG 2 +/* ISR access */ +#define ZXDH_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define ZXDH_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define ZXDH_PCI_CAP_PCI_CFG 5 + +#define ZXDH_ZF_EPID 4 +#define ZXDH_BAR_CHAN_OFFSET 0x2000 +#define ZXDH_CHAN_REPS_LEN 4 +#define MSG_REP_VALID 0xff +#define MSG_REP_LEN_OFFSET 1 + +// #define MSG_CHAN_END_PF 1 +// #define MSG_CHAN_END_VF 2 +// #define MSG_CHAN_END_RISC 3 + +#define MODULE_RDMA 4 + +#define RDMA_MGR_INIT (0) +#define RDMA_REG_READ (1) +#define RDMA_REG_WRITE (2) +#define RDMA_MP_DTCM_PARA_GET (3) +#define RDMA_MP_DTCM_PARA_SET (4) + +typedef enum BAR_DRIVER_TYPE { + MSG_CHAN_END_MPF = 0, + MSG_CHAN_END_PF, + MSG_CHAN_END_VF, + MSG_CHAN_END_RISC, + MSG_CHAN_END_ERR, +} BAR_DRIVER_TYPE; + +struct zxdh_pci_bar_msg { + uint64_t virt_addr; /**< 4K space address; ignored when src is MPF >**/ + void *payload_addr; /**< message payload address >**/ + uint16_t payload_len; /**< message payload length >**/ + uint16_t emec; /**< message urgency type >**/ + uint16_t src; /**< message source, see BAR_DRIVER_TYPE >**/ + uint16_t dst; /**< message destination, see BAR_DRIVER_TYPE >**/ + uint32_t event_id; /**< event id >**/ + uint16_t src_pcieid; /**< source pcie_id >**/ + uint16_t dst_pcieid; /**< destination pcie_id >**/ +}; + +struct zxdh_msg_recviver_mem { + void *recv_buffer; /**< message receive buffer >**/ + uint16_t buffer_len; /**< receive buffer length >**/ +}; + +/* This is the PCI capability header: */ +struct zxdh_pf_pci_cap { + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. */ + __u8 bar; /* Where to find it. */ + __u8 id; /* Multiple capabilities of the same type */ + __u8 padding[2]; /* Pad to full dword. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length of the structure, in bytes. */ +}; + +struct dh_rdma_board_glb_cfg { + u32 cqp_size; //CQP queue depth + u32 qp_size; //QP queue depth + u32 cq_size; //CQ queue depth + u32 ceq_size; //CEQ queue depth + u32 srq_size; //SRQ queue depth + u32 wr_cnt; //number of work requests + u32 sge_cnt; //number of SGEs +}; + +struct dh_rdma_vf_param { + u32 vf_id; + u32 vf_vhca_id; + u32 pf_id; + u32 pf_vhca_id; + + u32 vf_bar_offset; //BAR space address offset + + u32 qp_cnt; // QP count per vhca + u32 cq_cnt; // CQ count per vhca + u32 srq_cnt; // SRQ count per vhca + u32 ceq_cnt; // CEQ count per vhca + u32 ah_cnt; // AH count per vhca + + u32 qp_id_min; // lowest QP queue number + u32 cq_id_min; // lowest CQ queue number + u32 ceq_id_min; // lowest CEQ queue number + u32 srq_id_min; // lowest SRQ queue number +}; + +//The PF needs to know the maximum number of queues it can actually use, as well as the maximum number it must allocate (including the queues used by its VFs) +struct dh_rdma_pf_param { + u8 pf_id; // ID of this PF under the current EP + u32 max_vf_num; // maximum number of VFs under this PF + u8 sid; // SID of this PF (0~31) + //u8 has_vf; // whether this PF has VFs: 0 = none, 1 = has VFs + u32 vhca_id; // vhca ID + + u32 pf_bar_offset; //BAR space address offset + + u32 qp_cnt; // QP count per vhca + u32 cq_cnt; // CQ count per vhca + u32 srq_cnt; // SRQ count per vhca + u32 ceq_cnt; // CEQ count per vhca + //u32 aeq_cnt; // AEQ count per vhca + u32 ah_cnt; // AH count per vhca + + u32 qp_id_min; // lowest QP queue number + u32 cq_id_min; // lowest CQ queue number + u32 ceq_id_min; // lowest CEQ queue number + u32 srq_id_min; // lowest SRQ queue number + + u32 assign_qp_cnt; //QP count allocated to the PF + u32 assign_cq_cnt; //CQ count allocated to the PF + u32 assign_ceq_cnt; //CEQ count allocated to the PF + u32 assign_srq_cnt; //SRQ count allocated to the PF + + u32 qp_size; // QP queue depth + u32 cq_size; // CQ queue depth + u32 ceq_size; // CEQ queue depth + u32 aeq_size; // AEQ queue depth + u32 srq_size; // SRQ queue depth +}; + +struct zxdh_mgr_par { + u16 ftype; + u16 ep_id; + u16 pf_id; + u16 vf_id; + u32 bar_offset; + u32 l2d_smmu_l2_offset; + u64 l2d_smmu_addr; + u64 nof_ioq_ddr_addr; + + u16 vhca_id; + u16 vhca_id_pf; + u32 max_vf_num; + + u32 qp_cnt; + u32 cq_cnt; + u32 srq_cnt; + u32 ceq_cnt; + u32 ah_cnt; + u32 mr_cnt; + u32 pbleq_cnt; + u32 pblem_cnt; + + u32 vf_qp_cnt; + u32 vf_cq_cnt; + u32 vf_srq_cnt; + u32 vf_ceq_cnt; + u32 vf_ah_cnt; + u32 vf_mr_cnt; + u32 vf_pbleq_cnt; + u32 vf_pblem_cnt; + + u32 base_qpn; + u32 base_cqn; + u32 base_srqn; + u32 base_ceqn; + + u64 qp_hmc_base; + u64 cq_hmc_base; + u64 srq_hmc_base; + u64 txwindow_hmc_base; + u64 ird_hmc_base; + u64 ah_hmc_base; + u64 mr_hmc_base; + u64 pbleq_hmc_base; + u64 pblem_hmc_base; + + u8 hmc_sid; + u8 hmc_use_dpu_ddr; + u8 np_mode_low_lat; + u8 mcode_type; + + u32 max_hw_read_sges; + u32 max_hw_wq_frags; + u32 dh_total_vhca; +}; + +typedef struct zxdh_chan_msg { + u32 msg_len; + void *msg; +} T_DH_RDMA_CHAN_MSG; + +enum chan_cmd_type { + GET_PF_PARAM = 1, + GET_VF_PARAM = 2, +}; + +struct zxdh_mgr_msg { + u32 op_code; + u8 ep_id; + u8 pf_id; + u16 vport_vf_id; + u8 ftype; // function type: 0 = PF, 1 = VF + u8 rsv[3]; +}; + +struct zxdh_mgr { + //struct irdma_device *iwdev; + struct pci_dev *pdev; + u32 pf_id; + u32 vport_vf_id; + u32 ep_id; + u8 ftype; // function type: 0 = PF, 1 = VF + u16 pcie_id; + u16 device_id; + u8 __iomem *pci_hw_addr; + struct zxdh_mgr_par param; +}; + +enum e_dtcm_para_id_dcqcn { + E_PARA_DCQCN_RPG_TIME_RESET, + E_PARA_DCQCN_CLAMP_TGT_RAGE, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + E_PARA_DCQCN_DCE_TCP_RTT, + E_PARA_DCQCN_DCE_TCP_G, + E_PARA_DCQCN_RPG_GD, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, + E_PARA_DCQCN_MIN_DEC_FAC, + E_PARA_DCQCN_RPG_THRESHOLD, + E_PARA_DCQCN_RPG_RATIO_INCREASE, + E_PARA_DCQCN_RPG_AI_RATIO, + E_PARA_DCQCN_RPG_HAI_RATIO, + E_PARA_DCQCN_NUM +}; + +enum e_dtcm_para_id_rtt { + E_PARA_RTT_ALPHA, + E_PARA_RTT_TLOW, + E_PARA_RTT_THIGH, + E_PARA_RTT_MINRTT, + E_PARA_RTT_BETA, + E_PARA_RTT_AI_NUM, + E_PARA_RTT_THRED_GRADIENT, +
E_PARA_RTT_HAI_N, + E_PARA_RTT_AI_N, + E_PARA_RTT_NUM +}; + +struct dh_rdma_reg_read_req { + u64 phy_addr; + u32 reg_num; +} __attribute__((__packed__)); + +struct dh_rdma_reg_read_resp { + u64 phy_addr; + u32 reg_num; + u32 status_code; + u32 data[]; +} __attribute__((__packed__)); + +struct dh_rdma_reg_write_req { + u64 phy_addr; + u32 reg_num; + u32 data[]; +} __attribute__((__packed__)); + +struct dh_rdma_reg_write_resp { + u64 phy_addr; + u32 reg_num; + u32 status_code; +} __attribute__((__packed__)); + +// mp dtcm para set/get messages +struct dh_mp_dtcm_para_set_req { + u16 mcode_type; + u16 para_id; + u32 val; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_set_resp { + u16 para_id; + u32 status_code; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_get_req { + u16 mcode_type; + u16 para_id; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_get_resp { + u16 para_id; + u32 status_code; + u32 val; +} __attribute__((__packed__)); + +// channel message struct +struct zxdh_reg_read_cmd { + u32 op_code; + struct dh_rdma_reg_read_req req; +} __attribute__((__packed__)); +struct zxdh_reg_write_cmd { + u32 op_code; + struct dh_rdma_reg_write_req req; +} __attribute__((__packed__)); + +struct zxdh_mp_dtcm_para_get_cmd { + u32 op_code; + struct dh_mp_dtcm_para_get_req req; +} __attribute__((__packed__)); + +struct zxdh_mp_dtcm_para_set_cmd { + u32 op_code; + struct dh_mp_dtcm_para_set_req req; +} __attribute__((__packed__)); + +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); +int zxdh_chan_sync_send(struct zxdh_mgr *pmgr, struct zxdh_chan_msg *pmsg, + u32 *pdata, u32 rep_len); +int zxdh_mgr_par_get(struct zxdh_mgr *dh_mgr); + +int zxdh_rdma_reg_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata); +int zxdh_rdma_reg_write(struct zxdh_pci_f *rf, uint64_t phy_addr, uint32_t val); + +int zxdh_mp_dtcm_para_get(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t *outdata); + +int zxdh_mp_dtcm_para_set(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t val); + +#endif diff --git a/src/rdma/src/ofed_kcompat.h b/src/rdma/src/ofed_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..94a768b2172b81ac33736f6a78248d74aaa90f00 --- /dev/null +++ b/src/rdma/src/ofed_kcompat.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef OFED_KCOMPAT_H +#define OFED_KCOMPAT_H + +#include +#if defined(RHEL_7_2) +#include +#include +#include +#include +#include +#include + +#define refcount_inc atomic_inc +#define refcount_read atomic_read +#define refcount_set atomic_set +#define refcount_dec atomic_dec +#define refcount_dec_and_test atomic_dec_and_test +#define refcount_sub_and_test atomic_sub_and_test +#define refcount_add atomic_add +#define refcount_inc_not_zero atomic_inc_not_zero +#define rdma_ah_attr ib_ah_attr +#define ah_attr_to_dmac(attr) ((attr).dmac) +#define ib_device_put(dev) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define set_ibdev_dma_device(ibdev, dev) ibdev.dma_device = dev + +struct zxdh_cm_node; +struct zxdh_device; +struct zxdh_pci_f; +struct zxdh_qp; + +enum ib_mtu ib_mtu_int_to_enum(int mtu); + +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, 
+ IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define kc_get_ucontext(udata) to_ucontext(context) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) +#define kc_typeq_ib_wr +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) +#define wait_queue_entry __wait_queue + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_0 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_MTU_CONVERSIONS +#define IB_UMEM_GET_V0 +#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +enum ib_uverbs_ex_create_cq_flags { + IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, + IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, +}; + +enum rdma_create_ah_flags { + /* In a sleepable context */ + RDMA_CREATE_AH_SLEEPABLE = BIT(0), +}; + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#endif /* RHEL_7_2 */ + +#if defined(RHEL_7_2) || defined(RHEL_7_4) +#ifdef MODULE +#undef MODULE_DEVICE_TABLE +#define MODULE_DEVICE_TABLE(type, name) \ + ({ \ + extern typeof(name) __mod_##type##__##name##_device_table \ + __attribute__((unused, alias(__stringify(name)))); \ + }) +#endif /* MODULE */ +#endif /* RHEL_7_2 or RHEL_7_4 */ + +#if defined(RHEL_7_4) || defined(RHEL_7_5) || defined(RHEL_7_6) +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 
+#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_SET_DRIVER_ID +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext((ibpd)->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_set_ibdev_add_del_gid(ibdev) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#endif /* RHEL_7_5 */ + +#if defined(SLES_15) || defined(SLES_12_SP_4) || defined(SLES_12_SP_3) +#ifdef SLES_12_SP_3 +#define wait_queue_entry __wait_queue +#endif + +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID 
+#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define ZXDH_SET_DRIVER_ID +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext((ibpd)->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_set_ibdev_add_del_gid(ibdev) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* SLES_15 */ + +#endif /* OFED_KCOMPAT_H */ diff --git a/src/rdma/src/osdep.h b/src/rdma/src/osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..76e334723310b22adf17a7c852473888688d31ab --- /dev/null +++ b/src/rdma/src/osdep.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_OSDEP_H +#define ZXDH_OSDEP_H + +#include +#ifdef FIELD_PREP +#include +#endif +#include +#include +#include +#if defined(__OFED_4_8__) +#define refcount_t atomic_t +#define refcount_inc atomic_inc +#define refcount_dec_and_test atomic_dec_and_test +#define refcount_set atomic_set +#else +#include +#endif /* OFED_4_8 */ +#define STATS_TIMER_DELAY 60000 + +/* + * See include/linux/compiler_attributes.h in kernel >=5.4 for fallthrough. + * This code really should be in zxdh_kcompat.h but to cover shared code + * it had to be here. + * The two #if checks implements fallthrough definition for kernels < 5.4 + * The first check is for new compiler, GCC >= 5.0. If code in compiler_attributes.h + * is not invoked and compiler supports __has_attribute. + * If fallthrough is not defined after the first check, the second check against fallthrough + * will define the macro for the older compiler. + */ +#if !defined(fallthrough) && !defined(__GCC4_has_attribute___noclone__) && \ + defined(__has_attribute) +#define fallthrough __attribute__((__fallthrough__)) +#endif +#ifndef fallthrough +#define fallthrough \ + do { \ + } while (0) +#endif +#define idev_to_dev(ptr) (((ptr)->hw->device)) +#ifndef ibdev_dbg +#define zxdh_dbg(idev, fmt, ...) dev_dbg(idev_to_dev(idev), fmt, ##__VA_ARGS__) +#define ibdev_err(ibdev, fmt, ...) 
dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_warn(ibdev, fmt, ...) \ + dev_warn(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_info(ibdev, fmt, ...) \ + dev_info(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_notice(ibdev, fmt, ...) \ + dev_notice(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#else +#define zxdh_dbg(idev, fmt, ...) \ + do { \ + struct ib_device *ibdev = zxdh_get_ibdev(idev); \ + if (ibdev) \ + ibdev_dbg(ibdev, fmt, ##__VA_ARGS__); \ + else \ + dev_dbg(idev_to_dev(idev), fmt, ##__VA_ARGS__); \ + } while (0) +#endif + +struct zxdh_dma_info { + dma_addr_t *dmaaddrs; +}; + +struct zxdh_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +struct zxdh_virt_mem { + void *va; + u32 size; +} __packed; + +struct zxdh_sc_vsi; +struct zxdh_sc_dev; +struct zxdh_sc_qp; +struct zxdh_puda_buf; +struct zxdh_puda_cmpl_info; +struct zxdh_update_sds_info; +struct zxdh_hmc_fcn_info; +struct zxdh_manage_vf_pble_info; +struct zxdh_hw; +struct zxdh_pci_f; +struct zxdh_virtchnl_req; + +#ifndef FIELD_PREP + +#if defined(__OFED_4_8__) +/* Special handling for 7.2/OFED. The GENMASK macros need to be updated */ +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) +#endif +/* Compat for rdma-core-27.0 and OFED 4.8/RHEL 7.2. Not for UPSTREAM */ +#define __bf_shf(x) (__builtin_ffsll(x) - 1) +#define FIELD_PREP(_mask, _val) \ + ({ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); }) + +#define FIELD_GET(_mask, _reg) \ + ({ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); }) +#endif /* FIELD_PREP */ +struct ib_device *zxdh_get_ibdev(struct zxdh_sc_dev *dev); +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev); +void zxdh_terminate_del_timer(struct zxdh_sc_qp *qp); +void zxdh_hw_stats_start_timer(struct zxdh_sc_vsi *vsi); +void zxdh_hw_stats_stop_timer(struct zxdh_sc_vsi *vsi); +void wr32(struct zxdh_hw *hw, u32 reg, u32 val); +u32 rd32(struct zxdh_hw *hw, u32 reg); +u64 rd64(struct zxdh_hw *hw, u32 reg); +int zxdh_map_vm_page_list(struct zxdh_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt); +void zxdh_unmap_vm_page_list(struct zxdh_hw *hw, dma_addr_t *pg_dma, + u32 pg_cnt); +#endif /* ZXDH_OSDEP_H */ diff --git a/src/rdma/src/pble.c b/src/rdma/src/pble.c new file mode 100644 index 0000000000000000000000000000000000000000..43e00e6f007d4aa87c52cdc90a8b0fd979277e59 --- /dev/null +++ b/src/rdma/src/pble.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "pble.h" +#include "main.h" + +static int add_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc); + +/** + * zxdh_destroy_pble_prm - destroy prm during module unload + * @pble_rsrc: pble resources + */ +void zxdh_destroy_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc) +{ + struct zxdh_chunk *chunk; + struct zxdh_pble_prm *pinfo = &pble_rsrc->pinfo; + + while (!list_empty(&pinfo->clist)) { + chunk = (struct zxdh_chunk *)pinfo->clist.next; + list_del(&chunk->list); + if (chunk->type == PBLE_SD_PAGED) + zxdh_pble_free_paged_mem(chunk); + if (chunk->bitmapbuf) + kfree(chunk->bitmapmem.va); + kfree(chunk->chunkmem.va); + } +} + +/** + * zxdh_hmc_init_pble - Initialize pble resources during module load + * @dev: 
zxdh_sc_dev struct + * @pble_rsrc: pble resources + * @mr: Queue or Memory area + */ +int zxdh_hmc_init_pble(struct zxdh_sc_dev *dev, + struct zxdh_hmc_pble_rsrc *pble_rsrc, int mr) +{ + struct zxdh_hmc_info *hmc_info; + u32 fpm_idx = 0; + int status = 0; + + hmc_info = dev->hmc_info; + pble_rsrc->dev = dev; + pble_rsrc->pble_copy = true; + pble_rsrc->pble_type = mr; + + /* Start pble' on 4k boundary */ + if (pble_rsrc->fpm_base_addr & 0xfff) + fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3; + + if (mr == PBLE_QUEUE) { + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt - fpm_idx; + } else { + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt - fpm_idx; + } + + pble_rsrc->fpm_base_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3); + pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr; + + pble_rsrc->pinfo.pble_shift = PBLE_SHIFT; + + mutex_init(&pble_rsrc->pble_mutex_lock); + + spin_lock_init(&pble_rsrc->pinfo.prm_lock); + INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); + if (add_pble_prm(pble_rsrc)) { + zxdh_destroy_pble_prm(pble_rsrc); + status = -ENOMEM; + } + + return status; +} + +/** + * add_sd_direct - add sd direct for pble + * @pble_rsrc: pble resource ptr + * @info: page info for sd + */ +static int add_sd_direct(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_add_page_info *info) +{ + int ret_code = 0; + struct sd_pd_idx *idx = &info->idx; + struct zxdh_chunk *chunk = info->chunk; + struct zxdh_hmc_info *hmc_info = info->hmc_info; + struct zxdh_hmc_sd_entry *sd_entry = info->sd_entry; + u32 offset = 0; + struct zxdh_pci_f *rf = (struct zxdh_pci_f *)container_of( + pble_rsrc->dev, struct zxdh_pci_f, sc_dev); + + if (rf->ftype == 1) { + if (pble_rsrc->pble_type == PBLE_QUEUE) { + if (!sd_entry->valid && + (hmc_info->pble_hmc_index < + hmc_info->hmc_first_entry_pble_mr)) { + ret_code = zxdh_vf_add_pble_hmc_obj( + pble_rsrc->dev, hmc_info, pble_rsrc, + info->pages); + } + } else { + if (!sd_entry->valid && + (hmc_info->pble_mr_hmc_index < + hmc_info->hmc_entry_total + 1)) { + ret_code = zxdh_vf_add_pble_hmc_obj( + pble_rsrc->dev, hmc_info, pble_rsrc, + info->pages); + } + } + } else { + if (pble_rsrc->pble_type == PBLE_QUEUE) { + if (!sd_entry->valid && + (hmc_info->pble_hmc_index < + hmc_info->hmc_first_entry_pble_mr)) { + ret_code = zxdh_add_pble_hmc_obj( + hmc_info, pble_rsrc, info->pages); + } + } else { + if (!sd_entry->valid && + (hmc_info->pble_mr_hmc_index < + hmc_info->hmc_entry_total + 1)) { + ret_code = zxdh_add_pble_hmc_obj( + hmc_info, pble_rsrc, info->pages); + } + } + } + + if (ret_code) + return ret_code; + + chunk->type = PBLE_SD_CONTIGOUS; + + offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT; + chunk->size = info->pages << HMC_PAGED_BP_SHIFT; + + chunk->vaddr = sd_entry->u.bp.addr.va + offset; + chunk->pa = sd_entry->u.bp.addr.pa + offset; // + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + + return 0; +} + +/** + * fpm_to_idx - given fpm address, get pble index + * @pble_rsrc: pble resource management + * @addr: fpm address for index + */ +static u32 fpm_to_idx(struct zxdh_hmc_pble_rsrc *pble_rsrc, u64 addr) +{ + u64 idx; + + idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3; + + return (u32)idx; +} + +/** + * add_pble_prm - add a sd entry for pble resoure + * @pble_rsrc: pble resource management + */ +static int add_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc) +{ + struct zxdh_sc_dev *dev = pble_rsrc->dev; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_hmc_info *hmc_info; + struct zxdh_chunk *chunk; + struct 
zxdh_add_page_info info; + struct sd_pd_idx *idx = &info.idx; + int ret_code = 0; + struct zxdh_virt_mem chunkmem; + u32 pages; + + if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) + return -ENOMEM; + + chunkmem.size = sizeof(*chunk); + chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL); + if (!chunkmem.va) + return -ENOMEM; + + chunk = chunkmem.va; + chunk->chunkmem = chunkmem; + hmc_info = dev->hmc_info; + chunk->dev = dev; + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + + if (pble_rsrc->pble_type == PBLE_QUEUE) { + sd_entry = + &hmc_info->sd_table + .sd_entry[hmc_info->pble_hmc_index]; /* code */ + } else { + sd_entry = &hmc_info->sd_table + .sd_entry[hmc_info->pble_mr_hmc_index]; + } + + idx->pd_idx = + (u32)((pble_rsrc->next_fpm_addr - pble_rsrc->fpm_base_addr) / + ZXDH_HMC_PAGED_BP_SIZE); //4096 + idx->rel_pd_idx = (idx->pd_idx % ZXDH_HMC_PD_CNT_IN_SD); // 512 + pages = (idx->rel_pd_idx) ? (ZXDH_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) : + ZXDH_HMC_PD_CNT_IN_SD; + + pages = (u32)min(pages, pble_rsrc->unallocated_pble >> + PBLE_512_SHIFT); // PBLE_512_SHIFT==9 + + info.chunk = chunk; + info.hmc_info = hmc_info; + info.pages = pages; + info.sd_entry = sd_entry; + + ret_code = add_sd_direct(pble_rsrc, &info); + + if (ret_code) + goto error; + + ret_code = zxdh_prm_add_pble_mem(&pble_rsrc->pinfo, chunk); + if (ret_code) + goto error; + + pble_rsrc->next_fpm_addr += chunk->size; + pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3); + + sd_entry->valid = true; + list_add(&chunk->list, &pble_rsrc->pinfo.clist); + + return 0; + +error: + if (chunk->bitmapbuf) + kfree(chunk->bitmapmem.va); + kfree(chunk->chunkmem.va); + + return ret_code; +} + +/** + * free_lvl2 - fee level 2 pble + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static void free_lvl2(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + u32 i; + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *root = &lvl2->root; + struct zxdh_pble_info *leaf = lvl2->leaf; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + if (leaf->addr) + zxdh_prm_return_pbles(&pble_rsrc->pinfo, + &leaf->chunkinfo); + else + break; + } + + if (root->addr) + zxdh_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo); + + kfree(lvl2->leafmem.va); + lvl2->leaf = NULL; + lvl2->leafmem.va = NULL; +} + +/** + * get_lvl2_pble - get level 2 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static int get_lvl2_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + u32 lf4k, lflast, total, i; + u32 pblcnt = PBLE_PER_PAGE; + u64 *addr; + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *root = &lvl2->root; + struct zxdh_pble_info *leaf; + int ret_code; + u64 fpm_addr; + dma_addr_t paaddr; + + /* number of full 512 (4K) leafs) */ + lf4k = palloc->total_cnt >> 9; + lflast = palloc->total_cnt % PBLE_PER_PAGE; + total = (lflast == 0) ? 
lf4k : lf4k + 1; + lvl2->leaf_cnt = total; + + lvl2->leafmem.size = (sizeof(*leaf) * total); + lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL); + if (!lvl2->leafmem.va) + return -ENOMEM; + + lvl2->leaf = lvl2->leafmem.va; + leaf = lvl2->leaf; + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo, + total << 3, &root->addr, &fpm_addr, + &paaddr); + if (ret_code) { + kfree(lvl2->leafmem.va); + lvl2->leaf = NULL; + return -ENOMEM; + } + + root->smmu_fpm_addr = fpm_addr; + root->pa = paaddr; + root->idx = fpm_to_idx(pble_rsrc, fpm_addr); + root->cnt = total; + addr = root->addr; + for (i = 0; i < total; i++, leaf++) { + pblcnt = (lflast && ((i + 1) == total)) ? lflast : + PBLE_PER_PAGE; + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, + &leaf->chunkinfo, pblcnt << 3, + &leaf->addr, &fpm_addr, &paaddr); + if (ret_code) + goto error; + + leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr); + leaf->smmu_fpm_addr = fpm_addr; + leaf->pa = paaddr; + leaf->cnt = pblcnt; + *addr = (u64)leaf->idx; + addr++; + } + + if (pble_rsrc->pble_copy) { + zxdh_cqp_config_pble_table_cmd(pble_rsrc->dev, root, total << 3, + pble_rsrc->pble_type); + } + + palloc->level = PBLE_LEVEL_2; + pble_rsrc->stats_lvl2++; + return 0; + +error: + free_lvl2(pble_rsrc, palloc); + + return -ENOMEM; +} + +/** + * get_lvl1_pble - get level 1 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 1 pble allocation + */ +static int get_lvl1_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + int ret_code; + u64 fpm_addr; + dma_addr_t paaddr; + struct zxdh_pble_info *lvl1 = &palloc->level1; + + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo, + palloc->total_cnt << 3, &lvl1->addr, + &fpm_addr, &paaddr); + if (ret_code) + return -ENOMEM; + + palloc->level = PBLE_LEVEL_1; + lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr); + lvl1->cnt = palloc->total_cnt; + lvl1->smmu_fpm_addr = fpm_addr; + lvl1->pa = paaddr; + pble_rsrc->stats_lvl1++; + + return 0; +} + +/** + * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @level1_only: flag for a level 1 PBLE + */ +static int get_lvl1_lvl2_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc, bool level1_only) +{ + int status = 0; + + status = get_lvl1_pble(pble_rsrc, palloc); + if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) + return status; + + status = get_lvl2_pble(pble_rsrc, palloc); + + return status; +} + +/** + * zxdh_get_pble - allocate pbles from the prm + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @pble_cnt: #of pbles requested + * @level1_only: true if only pble level 1 to acquire + */ +int zxdh_get_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc, u32 pble_cnt, + bool level1_only) +{ + int status = 0; + int max_sds = 0; + int i; + + palloc->total_cnt = pble_cnt; + palloc->level = PBLE_LEVEL_0; + + mutex_lock(&pble_rsrc->pble_mutex_lock); + + /*check first to see if we can get pble's without acquiring + * additional sd's + */ + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + if (!status) + goto exit; + + max_sds = (palloc->total_cnt >> 18) + 1; + for (i = 0; i < max_sds; i++) { + status = add_pble_prm(pble_rsrc); + if (status) + break; + + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + /* if level1_only, only go through it once */ + 
if (!status || level1_only) + break; + } + +exit: + if (!status) { + pble_rsrc->allocdpbles += pble_cnt; + pble_rsrc->stats_alloc_ok++; + } else { + pble_rsrc->stats_alloc_fail++; + } + mutex_unlock(&pble_rsrc->pble_mutex_lock); + + return status; +} + +/** + * zxdh_free_pble - put pbles back into prm + * @pble_rsrc: pble resources + * @palloc: contains all information regarding pble resource being freed + */ +void zxdh_free_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + pble_rsrc->freedpbles += palloc->total_cnt; + + if (palloc->level == PBLE_LEVEL_2) + free_lvl2(pble_rsrc, palloc); + else + zxdh_prm_return_pbles(&pble_rsrc->pinfo, + &palloc->level1.chunkinfo); + pble_rsrc->stats_alloc_freed++; +} diff --git a/src/rdma/src/pble.h b/src/rdma/src/pble.h new file mode 100644 index 0000000000000000000000000000000000000000..e04d03cdb3bfaef8d3f729cc3fe63aa166985ad3 --- /dev/null +++ b/src/rdma/src/pble.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PBLE_H +#define ZXDH_PBLE_H + +#define PBLE_SHIFT 6 +#define PBLE_PER_PAGE 512 +#define HMC_PAGED_BP_SHIFT 12 +#define PBLE_512_SHIFT 9 +#define PBLE_INVALID_IDX 0xffffffff + +enum zxdh_pble_level { + PBLE_LEVEL_0 = 0, + PBLE_LEVEL_1 = 1, + PBLE_LEVEL_2 = 2, +}; + +enum zxdh_alloc_type { + PBLE_NO_ALLOC = 0, + PBLE_SD_CONTIGOUS = 1, + PBLE_SD_PAGED = 2, +}; + +struct zxdh_chunk; + +struct zxdh_pble_chunkinfo { + struct zxdh_chunk *pchunk; + u64 bit_idx; + u64 bits_used; +}; + +struct zxdh_pble_info { + u64 *addr; + dma_addr_t pa; + u64 smmu_fpm_addr; + u32 idx; + u32 cnt; + struct zxdh_pble_chunkinfo chunkinfo; + bool pble_copy; +}; + +struct zxdh_pble_level2 { + struct zxdh_pble_info root; + struct zxdh_pble_info *leaf; + struct zxdh_virt_mem leafmem; + u32 leaf_cnt; +}; + +struct zxdh_pble_alloc { + u32 total_cnt; + enum zxdh_pble_level level; + union { + struct zxdh_pble_info level1; + struct zxdh_pble_level2 level2; + }; +}; + +struct sd_pd_idx { + u32 sd_idx; + u32 pd_idx; + u32 rel_pd_idx; +}; + +struct zxdh_add_page_info { + struct zxdh_chunk *chunk; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_hmc_info *hmc_info; + struct sd_pd_idx idx; + u32 pages; +}; + +struct zxdh_chunk { + struct list_head list; + struct zxdh_dma_info dmainfo; + void *bitmapbuf; + + u32 sizeofbitmap; + u64 size; + void *vaddr; + dma_addr_t pa; + u64 fpm_addr; + u32 pg_cnt; + enum zxdh_alloc_type type; + struct zxdh_sc_dev *dev; + struct zxdh_virt_mem bitmapmem; + struct zxdh_virt_mem chunkmem; +}; + +struct zxdh_pble_prm { + struct list_head clist; + spinlock_t prm_lock; /* protect prm bitmap */ + u64 total_pble_alloc; + u64 free_pble_cnt; + u8 pble_shift; +}; + +struct zxdh_hmc_pble_rsrc { + u32 unallocated_pble; + struct mutex pble_mutex_lock; /* protect PBLE resource */ + struct zxdh_sc_dev *dev; + u64 fpm_base_addr; + u64 next_fpm_addr; + struct zxdh_pble_prm pinfo; + u64 allocdpbles; + u64 freedpbles; + u32 stats_direct_sds; + u32 stats_paged_sds; + u64 stats_alloc_ok; + u64 stats_alloc_fail; + u64 stats_alloc_freed; + u64 stats_lvl1; + u64 stats_lvl2; + u32 pble_type; + bool pble_copy; +}; + +void zxdh_destroy_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc); +int zxdh_hmc_init_pble(struct zxdh_sc_dev *dev, + struct zxdh_hmc_pble_rsrc *pble_rsrc, int mr); + +void zxdh_free_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc); +int zxdh_get_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc 
*palloc, u32 pble_cnt, + bool level1_only); +int zxdh_prm_add_pble_mem(struct zxdh_pble_prm *pprm, + struct zxdh_chunk *pchunk); +int zxdh_prm_get_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr, dma_addr_t *paaddr); +void zxdh_prm_return_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo); +void zxdh_pble_free_paged_mem(struct zxdh_chunk *chunk); +#endif /* ZXDH_PBLE_H */ diff --git a/src/rdma/src/private_verbs_cmd.c b/src/rdma/src/private_verbs_cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..1c8ba230410e9c8f583b523fd0da6d2512a76d39 --- /dev/null +++ b/src/rdma/src/private_verbs_cmd.c @@ -0,0 +1,1904 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#define UVERBS_MODULE_NAME zxdh_ib +#include +#include +#include "zxdh_user_ioctl_cmds.h" +#include "private_verbs_cmd.h" +#include "main.h" +#include "zxdh_user_ioctl_verbs.h" +#include "icrdma_hw.h" + +#define DATA_ADDR_BASE 0x620610E0B4u +#define READ_RAM_REG_BASE 0x620610E0B8u +#define MP_DATA_NUM_GEG 0x620610E0BCu +#define BYPASS_REG 0x620740000Cu +#define REPLACE_REG 0x62074000A0u +#define BASE_FOR_LITTLE_GQP 0x6206008000u +#define BASE_FOR_BIG_GQP 0x6206108000u +#define RAM_ADDR 0xA1F40u +#define MP_OFFSET 0x200u +#define REG_BYTE 0x4u +#define CAP_ENABLE_REG_IDX 0x2Du +#define WRITE_RAM_REG_IDX 0x2Eu +#define GQP_MOD 0x14u +#define MP_MOD 0x37u +#define GQP_OFFSET 0x4u +#define GQP_ID_1023 0x3FF +#define GQP_ID_1103 0x44F +#define GQP_ID_2047 0x7FF +#define MP_IDX_INC 0x1u +#define MP_DATA_BYTE 0x40u +#define DDR_MP_DATA_NUM 0x30D3Fu +#define DDR_ADDR_BASE 0x3C0000000u +#define DDR_SIZE 0x3200000u +#define REPLACE_VALUE 0x20000000u +#define CLOSE_MP_CAP_VALUE (RAM_ADDR + (0xFF * MP_OFFSET)) +int write_cap_tx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_tx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_rx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_rx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +static int write_mp_cap_regs(struct zxdh_device *iwdev, bool is_l2d_used, + struct zxdh_mp_cap_resp *resp); + +extern u16 zxdh_get_rc_gqp_id(u16 ws_index, u16 vhca_id, u32 total_vhca); +extern u16 get_ud_gqp_id(u16 vhca_id, u32 total_vhca); + +#define WRITE_REGISTER_AND_CHECK(rf, reg, value) \ + do { \ + int ok = zxdh_rdma_reg_write(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define READ_REGISTER_AND_CHECK(rf, reg, value) \ + do { \ + int ok = zxdh_rdma_reg_read(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define REG_OP_AND_CHECK(opfunc, rf, reg, value) \ + do { \ + int ok = opfunc(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +static int process_hw_modify_qpc_cmd(struct zxdh_qp *iwqp, + struct zxdh_modify_qpc_item *modify_item, + u64 modify_mask) +{ + unsigned long flags; + struct zxdh_device *iwdev; + struct zxdh_modify_qp_info info = { 0 }; + u64 qpc_tx_mask_low = 0; + u64 qpc_tx_mask_high = 0; + iwdev = iwqp->iwdev; + if (modify_mask & ZXDH_TX_READ_RETRY_FLAG_SET) { + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RETRY_FLAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_CUR_RETRY_CNT; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_READ_RETRY_FLAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_FLAG; + } + if (modify_mask & ZXDH_ERR_FLAG_SET) { + qpc_tx_mask_high |= 
RDMAQPC_TX_MASKH_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_ACK_ERR_FLAG; + } + if (modify_mask & ZXDH_RETRY_CQE_SQ_OPCODE) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RETRY_CQE_SQ_OPCODE; + + if (modify_mask & ZXDH_PACKAGE_ERR_FLAG) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_PACKAGE_ERR_FLAG; + + if (modify_mask & ZXDH_TX_LAST_ACK_PSN) + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_PSN; + + if (modify_mask & ZXDH_TX_LAST_ACK_WQE_OFFSET_SET) { + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_WQE_OFFSET; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_HW_SQ_TAIL_UNA; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_TIME; + } + if (modify_mask & ZXDH_TX_RDWQE_PYLD_LENGTH) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RDWQE_PYLD_LENGTH; + + if (modify_mask & ZXDH_TX_RECV_READ_FLAG_SET) { + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_CNT; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_READ_FLAG; + } + if (modify_mask & ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET) { + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RD_MSG_LOSS_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_PKTCHK_RD_MSG_LOSS_ERR_CNT; + } + + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_tx_mask_high = qpc_tx_mask_high; + spin_lock_irqsave(&iwqp->lock, flags); + zxdh_sc_qp_modify_private_cmd_qpc(&iwqp->sc_qp, iwqp->host_ctx.va, + modify_item); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return EINVAL; + + return 0; +} + +static u16 get_tx_wqe_pointer(uint8_t *buf) +{ + __le16 ddd = ((*(__le32 *)(buf + 7)) & 0x1FFFE) >> 1; + + return le16_to_cpu(ddd); +} + +void copy_tx_window_to_win_item(void *va, struct zxdh_qp_tx_win_item *info) +{ + info->start_psn = ZXDH_GET_QPC_ITEM(u32, va, + ZXDH_TX_WIN_START_PSN_BYTE_OFFSET, + IRDMATX_WIN_START_PSN); + info->wqe_pointer = get_tx_wqe_pointer(va); +} + +static void copy_qpc_to_tx_retry_item(void *va, + struct zxdh_reset_qp_retry_tx_item *info) +{ + info->tx_win_raddr = + ZXDH_GET_QPC_ITEM(u16, va, ZXDH_QPC_TX_WIN_RADDR_BYTE_OFFSET, + RDMAQPC_TX_WIN_RADDR); + info->tx_last_ack_psn = + ZXDH_GET_QPC_ITEM(u32, va, ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET, + RDMAQPC_TX_LAST_ACK_PSN); + info->rnr_retry_time_l = ZXDH_GET_QPC_ITEM( + u32, va, ZXDH_QPC_RNR_RETRY_TIME_L_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_TIME_L); + info->rnr_retry_time_h = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RNR_RETRY_TIME_H_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_TIME_H); + info->rnr_retry_threshold = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RNR_RETRY_THRESHOLD_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_THRESHOLD); + info->cur_retry_count = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET, + RDMAQPC_TX_CUR_RETRY_CNT); + info->retry_cqe_sq_opcode = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET, + RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG); +} + +static int zxdh_query_tx_window_info(struct zxdh_device *iwdev, u64 tx_addr, + struct zxdh_dma_mem *qpc_buf) +{ + int err_code; + struct zxdh_src_copy_dest src_dest = { 0 }; + + src_dest.src = tx_addr; + src_dest.dest = qpc_buf->pa; + src_dest.len = qpc_buf->size; + err_code = + zxdh_cqp_rdma_read_tx_window_cmd(&iwdev->rf->sc_dev, &src_dest); + if (err_code) { + pr_err("zxdh query tx window info failed:%d\n", err_code); + return err_code; + } + return 0; +} + +void set_retry_modify_qpc_item( + struct zxdh_modify_qpc_item *modify_qpc_item, + struct 
zxdh_reset_qp_retry_tx_item *retry_item_info, + struct zxdh_qp_tx_win_item *tx_win_item_info, u64 *modify_mask) +{ + modify_qpc_item->tx_last_ack_psn = tx_win_item_info->start_psn - 1; + *modify_mask |= ZXDH_TX_LAST_ACK_PSN; + + modify_qpc_item->last_ack_wqe_offset = 0; + modify_qpc_item->hw_sq_tail_una = tx_win_item_info->wqe_pointer; + modify_qpc_item->rnr_retry_time_l = retry_item_info->rnr_retry_time_l; + modify_qpc_item->rnr_retry_time_h = retry_item_info->rnr_retry_time_h; + modify_qpc_item->rnr_retry_threshold = + retry_item_info->rnr_retry_threshold; + *modify_mask |= ZXDH_TX_LAST_ACK_WQE_OFFSET_SET; + + modify_qpc_item->retry_flag = 0; + modify_qpc_item->rnr_retry_flag = 0; + modify_qpc_item->read_retry_flag = 0; + modify_qpc_item->cur_retry_count = retry_item_info->cur_retry_count; + *modify_mask |= ZXDH_TX_READ_RETRY_FLAG_SET; + + modify_qpc_item->rdwqe_pyld_length_l = 0; + modify_qpc_item->rdwqe_pyld_length_h = 0; + *modify_mask |= ZXDH_TX_RDWQE_PYLD_LENGTH; + + modify_qpc_item->recv_read_flag = 0; + modify_qpc_item->recv_err_flag = 0; + modify_qpc_item->recv_rd_msg_loss_err_cnt = 0; + modify_qpc_item->recv_rd_msg_loss_err_flag = 0; + *modify_mask |= ZXDH_TX_RECV_READ_FLAG_SET; + + modify_qpc_item->rd_msg_loss_err_flag = 0; + modify_qpc_item->pktchk_rd_msg_loss_err_cnt = 0; + *modify_mask |= ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET; + + modify_qpc_item->ack_err_flag = 0; + modify_qpc_item->err_flag = 0; + *modify_mask |= ZXDH_ERR_FLAG_SET; + + modify_qpc_item->package_err_flag = 0; + *modify_mask |= ZXDH_PACKAGE_ERR_FLAG; + + modify_qpc_item->retry_cqe_sq_opcode = + retry_item_info->retry_cqe_sq_opcode & + ZXDH_RESET_RETRY_CQE_SQ_OPCODE_ERR; + *modify_mask |= ZXDH_RETRY_CQE_SQ_OPCODE; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_QP_RESET_QP)(struct uverbs_attr_bundle *attrs) +{ + struct zxdh_dma_mem qpc_buf = { 0 }; + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct ib_ucontext *ucontext; + struct zxdh_pci_f *rf; + struct zxdh_sc_dev *dev; + struct zxdh_reset_qp_retry_tx_item retry_item_info = { 0 }; + struct zxdh_modify_qpc_item modify_qpc_item = { 0 }; + struct zxdh_qp_tx_win_item tx_win_item_info = { 0 }; + int ret; + int err_code = 0; + u64 tx_addr; + u64 modify_mask = 0; + u64 reset_opcode; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_RESET_QP_HANDLE); + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + iwqp = to_iwqp(qp); + rf = iwdev->rf; + dev = &rf->sc_dev; + ret = uverbs_copy_from(&reset_opcode, attrs, + ZXDH_IB_ATTR_QP_RESET_OP_CODE); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (reset_opcode <= 0) + return EINVAL; + + switch (reset_opcode) { + case ZXDH_RESET_RETRY_TX_ITEM_FLAG: + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, + qpc_buf.size, &qpc_buf.pa, + GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp alloc dma failed:ENOMEM\n"); + return ENOMEM; + } + err_code = zxdh_fill_qpc(&iwqp->sc_qp, &qpc_buf); + if (err_code) { + pr_err("reset qp fill qpc failed:%d\n", err_code); + goto free_exit; + } + copy_qpc_to_tx_retry_item(qpc_buf.va, &retry_item_info); + tx_addr = (qp->qp_num - dev->base_qpn) * + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW] + .size + + retry_item_info.tx_win_raddr * 64; + + memset(qpc_buf.va, 0, qpc_buf.size); + qpc_buf.size = 16; + err_code = zxdh_query_tx_window_info(iwdev, tx_addr, &qpc_buf); 
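+		/*
+		 * qpc_buf now holds the 16-byte TX window entry read back for this
+		 * QP.  copy_tx_window_to_win_item() extracts the window start PSN
+		 * and WQE pointer from it, and set_retry_modify_qpc_item() folds
+		 * them together with the retry fields saved from the QPC into the
+		 * mask and values that process_hw_modify_qpc_cmd() applies to reset
+		 * the QP's TX retry state.
+		 */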
+ if (err_code) { + pr_err("reset qp dma read tx window failed:%d\n", + err_code); + goto free_exit; + } + copy_tx_window_to_win_item(qpc_buf.va, &tx_win_item_info); + set_retry_modify_qpc_item(&modify_qpc_item, &retry_item_info, + &tx_win_item_info, &modify_mask); + err_code = process_hw_modify_qpc_cmd(iwqp, &modify_qpc_item, + modify_mask); + if (err_code) { + pr_err("reset qp process modify qpc cmd failed:%d\n", + err_code); + goto free_exit; + } + break; + default: + pr_err("reset qp unknow opcode:%lld\n", reset_opcode); + err_code = EINVAL; + break; + } +free_exit: + if (qpc_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, + ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT), + qpc_buf.va, qpc_buf.pa); + qpc_buf.va = NULL; + } + return err_code; +} + +static void copy_qpc_to_resp(void *va, struct zxdh_query_qpc_resp *resp) +{ + resp->retry_flag = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_FALG_BYTE_OFFSET, RDMAQPC_TX_RETRY_FLAG); + resp->rnr_retry_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RNR_RETRY_FALG_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_FLAG); + resp->read_retry_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_READ_RETRY_FALG_BYTE_OFFSET, + RDMAQPC_TX_READ_RETRY_FLAG); + resp->cur_retry_count = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET, + RDMAQPC_TX_CUR_RETRY_CNT); + resp->retry_cqe_sq_opcode = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET, + RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG); + resp->err_flag = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_ERR_FLAG_BYTE_OFFSET, RDMAQPC_TX_ERR_FLAG); + resp->ack_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_ACK_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_ACK_ERR_FLAG); + resp->package_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_PACKAGE_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_PACKAGE_ERR_FLAG); + resp->recv_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RECV_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_RECV_ERR_FLAG); + resp->tx_last_ack_psn = + ZXDH_GET_QPC_ITEM(u32, va, ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET, + RDMAQPC_TX_LAST_ACK_PSN); + resp->retry_count = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETY_COUNT_BYTE_OFFSET, RDMAQPC_TX_RETRY_CNT); +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_QP_QUERY_QPC)(struct uverbs_attr_bundle *attrs) +{ + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct ib_ucontext *ucontext; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + struct zxdh_query_qpc_resp resp = { 0 }; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_QUERY_HANDLE); + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + iwqp = to_iwqp(qp); + + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("query qpc alloc dma failed:ENOMEM\n"); + return ENOMEM; + } + err_code = zxdh_fill_qpc(&iwqp->sc_qp, &qpc_buf); + if (err_code) { + pr_err("query qpc fill qpc failed:%d\n", err_code); + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + return EFAULT; + } + copy_qpc_to_resp(qpc_buf.va, &resp); + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + return uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_QP_QUERY_RESP, + &resp, sizeof(resp)); +} + +static void +transfer_modify_qpc_req_to_item(const struct zxdh_modify_qpc_req *req, + struct zxdh_modify_qpc_item *modify_item) 
+{ + modify_item->retry_flag = req->retry_flag; + modify_item->rnr_retry_flag = req->rnr_retry_flag; + modify_item->read_retry_flag = req->read_retry_flag; + modify_item->cur_retry_count = req->cur_retry_count; + modify_item->retry_cqe_sq_opcode = req->retry_cqe_sq_opcode; + modify_item->err_flag = req->err_flag; + modify_item->ack_err_flag = req->ack_err_flag; + modify_item->package_err_flag = req->package_err_flag; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_QPC)(struct uverbs_attr_bundle *attrs) +{ + struct zxdh_qp *iwqp; + struct ib_ucontext *ucontext; + struct zxdh_modify_qpc_req req = { 0 }; + int ret; + struct zxdh_modify_qpc_item modify_item = { 0 }; + u64 modify_mask; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE); + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + iwqp = to_iwqp(qp); + ret = uverbs_copy_from(&modify_mask, attrs, + ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + ret = uverbs_copy_from_or_zero(&req, attrs, + ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ); + if (ret) + return ret; + transfer_modify_qpc_req_to_item(&req, &modify_item); + ret = process_hw_modify_qpc_cmd(iwqp, &modify_item, modify_mask); + if (ret) { + pr_err("modify qpc process modify qpc cmd failed:%d\n", ret); + return ret; + } + return 0; +} + +static int UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT)( + struct uverbs_attr_bundle *attrs) +{ + struct zxdh_pd *iwpd; + struct zxdh_device *iwdev; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + struct zxdh_qp *iwqp = NULL; + struct zxdh_udp_offload_info *udp_info; + struct zxdh_qp_host_ctx_info *ctx_info; + struct zxdh_pci_f *rf; + struct zxdh_sc_dev *dev; + struct zxdh_modify_qp_info info = {}; + u64 qpc_tx_mask_low = 0; + u64 qpc_rx_mask_low = 0; + unsigned long flags; + u16 udp_sport = 0; + u32 qpn = 0; + int ret; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&udp_sport, attrs, ZXDH_IB_ATTR_QP_UDP_PORT); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + ret = uverbs_copy_from(&qpn, attrs, ZXDH_IB_ATTR_QP_QPN); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + rf = iwdev->rf; + dev = &rf->sc_dev; + if (qpn < (dev->base_qpn + 1) || qpn > (dev->base_qpn + rf->max_qp - 1)) + return EINVAL; + + iwqp = iwdev->rf->qp_table[qpn - dev->base_qpn]; + if (iwqp == NULL) + return EINVAL; + + if (iwqp->ibqp.qp_type != IB_QPT_RC || + !(iwqp->ibqp_state == IB_QPS_RTR || iwqp->ibqp_state == IB_QPS_RTS)) + return EOPNOTSUPP; + + iwpd = to_iwpd(iwqp->ibqp.pd); + udp_info = &iwqp->udp_info; + ctx_info = &iwqp->ctx_info; + ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; + + udp_info->src_port = udp_sport; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SRC_PORT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_SRC_PORT; + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_rx_mask_low = qpc_rx_mask_low; + spin_lock_irqsave(&iwqp->lock, flags); + zxdh_sc_qp_modify_ctx_udp_sport(&iwqp->sc_qp, iwqp->host_ctx.va, + ctx_info); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + + if (!refcount_read(&iwdev->trace_switch.t_switch)) + return 0; + + if (udp_info->ipv4) { + struct sockaddr_in saddr_in4 = { 0 }; + struct sockaddr_in daddr_in4 = { 0 }; + + saddr_in4.sin_addr.s_addr = htonl(udp_info->local_ipaddr[3]); + daddr_in4.sin_addr.s_addr = htonl(udp_info->dest_ip_addr[3]); + + 
ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP udp sport, type %d, ib qpn 0x%X, state: %s, dest_qpn:%d, src_port:%d, src_ip:%pI4, dest_ip:%pI4\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(iwqp->ibqp_state), + iwqp->roce_info.dest_qp, udp_sport, &saddr_in4.sin_addr, + &daddr_in4.sin_addr); + } else { + struct sockaddr_in6 saddr_in6 = { 0 }; + struct sockaddr_in6 daddr_in6 = { 0 }; + + zxdh_copy_ip_htonl(saddr_in6.sin6_addr.in6_u.u6_addr32, + udp_info->local_ipaddr); + zxdh_copy_ip_htonl(daddr_in6.sin6_addr.in6_u.u6_addr32, + udp_info->dest_ip_addr); + + ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP udp sport, type %d, ib qpn 0x%X, state: %s, dest_qpn:%d, src_port:%d, src_ip:%pI6, dest_ip:%pI6\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(iwqp->ibqp_state), + iwqp->roce_info.dest_qp, udp_sport, + &saddr_in6.sin6_addr, &daddr_in6.sin6_addr); + } + return 0; +} + +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_LOG_TRACE)( + struct uverbs_attr_bundle *attrs) +{ + struct zxdh_device *iwdev; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + u8 trace_switch; + int ret; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + trace_switch = refcount_read(&iwdev->trace_switch.t_switch); + ret = uverbs_copy_to(attrs, ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH, + &trace_switch, sizeof(trace_switch)); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + return 0; +} + +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_SET_LOG_TRACE)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_ucontext *ucontext; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 trace_switch; + int ret; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&trace_switch, attrs, + ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (trace_switch >= SWITCH_ERROR) + return EINVAL; + + refcount_set(&iwdev->trace_switch.t_switch, trace_switch); + return 0; +} + +int write_cap_tx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 node_select, node_choose, comapre_loop; + u64 wqe_offset[RDMA_TX_CAP_WQE_MOD_NUM] = { + RDMATX_CAP_NODE0_WQE_PRE_READ, RDMATX_CAP_NODE0_WQE_HANDLE, + RDMATX_CAP_NODE0_PACKAGE + }; + u64 node0offset[RDMA_TX_SEL_NODE_MODULE_NUM - 1] = { + RDMATX_CAP_NODE0_ACK, RDMATX_CAP_NODE0_DB, RDMATX_CAP_NODE0_AEQ, + 0, RDMATX_CAP_NODE0_TXWINDOW + }; + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_BIT_EN0_NODE0, + RDMATX_CAP_COMPARE_BIT_EN1_NODE0, + RDMATX_CAP_COMPARE_BIT_EN2_NODE0, + RDMATX_CAP_COMPARE_BIT_EN3_NODE0, + RDMATX_CAP_COMPARE_BIT_EN4_NODE0, + RDMATX_CAP_COMPARE_BIT_EN5_NODE0, + RDMATX_CAP_COMPARE_BIT_EN6_NODE0, + RDMATX_CAP_COMPARE_BIT_EN7_NODE0, + RDMATX_CAP_COMPARE_BIT_EN8_NODE0, + RDMATX_CAP_COMPARE_BIT_EN9_NODE0, + RDMATX_CAP_COMPARE_BIT_EN10_NODE0, + RDMATX_CAP_COMPARE_BIT_EN11_NODE0, + RDMATX_CAP_COMPARE_BIT_EN12_NODE0, + RDMATX_CAP_COMPARE_BIT_EN13_NODE0, + RDMATX_CAP_COMPARE_BIT_EN14_NODE0, + RDMATX_CAP_COMPARE_BIT_EN15_NODE0 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_DATA0_NODE0, + RDMATX_CAP_COMPARE_DATA1_NODE0, + RDMATX_CAP_COMPARE_DATA2_NODE0, + RDMATX_CAP_COMPARE_DATA3_NODE0, + RDMATX_CAP_COMPARE_DATA4_NODE0, + 
RDMATX_CAP_COMPARE_DATA5_NODE0, + RDMATX_CAP_COMPARE_DATA6_NODE0, + RDMATX_CAP_COMPARE_DATA7_NODE0, + RDMATX_CAP_COMPARE_DATA8_NODE0, + RDMATX_CAP_COMPARE_DATA9_NODE0, + RDMATX_CAP_COMPARE_DATA10_NODE0, + RDMATX_CAP_COMPARE_DATA11_NODE0, + RDMATX_CAP_COMPARE_DATA12_NODE0, + RDMATX_CAP_COMPARE_DATA13_NODE0, + RDMATX_CAP_COMPARE_DATA14_NODE0, + RDMATX_CAP_COMPARE_DATA15_NODE0 + }; + + u32 node0_mask, val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE0]; + if (chl_sel_idx >= RDMA_TX_SEL_NODE_MODULE_NUM) { + return EINVAL; + } + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_SEL_NODE0, + (cap_cfg->channel_select[NODE0] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_OPEN_NODE0, + (cap_cfg->channel_open[NODE0] & 0xF)); + + node_choose = cap_cfg->node_choose[NODE0] & 0xFF; + node0_mask = ~(0xff); + if (chl_sel_idx == RDMA_TX_SEL_NODE_MODULE_WQE) { + node_select = (cap_cfg->node_select[NODE0] & 0xFF); + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + val = ((val & node0_mask) | node_select); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + val = ((val & node0_mask) | node_choose); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + } else { + if (chl_sel_idx != RDMA_TX_SEL_NODE_MODULE_NONE) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + + READ_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], + &val); + val = ((val & node0_mask) | node_choose); + WRITE_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], + val); + } + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE0]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE0]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_TIME_WRL2D_NODE0, + cap_cfg->rdma_time_wrl2d[NODE0]); + return 0; +} + +int write_cap_tx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 node_select, node_choose, comapre_loop; + u64 wqe_offset[RDMA_TX_CAP_WQE_MOD_NUM] = { + RDMATX_CAP_NODE1_WQE_PRE_READ, RDMATX_CAP_NODE1_WQE_HANDLE, + RDMATX_CAP_NODE1_PACKAGE + }; + u64 node1offset[RDMA_TX_SEL_NODE_MODULE_NUM - 1] = { + RDMATX_CAP_NODE1_ACK, RDMATX_CAP_NODE1_DB, RDMATX_CAP_NODE1_AEQ, + 0, RDMATX_CAP_NODE1_TXWINDOW + }; + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_BIT_EN0_NODE1, + RDMATX_CAP_COMPARE_BIT_EN1_NODE1, + RDMATX_CAP_COMPARE_BIT_EN2_NODE1, + RDMATX_CAP_COMPARE_BIT_EN3_NODE1, + RDMATX_CAP_COMPARE_BIT_EN4_NODE1, + RDMATX_CAP_COMPARE_BIT_EN5_NODE1, + RDMATX_CAP_COMPARE_BIT_EN6_NODE1, + RDMATX_CAP_COMPARE_BIT_EN7_NODE1, + RDMATX_CAP_COMPARE_BIT_EN8_NODE1, + RDMATX_CAP_COMPARE_BIT_EN9_NODE1, + RDMATX_CAP_COMPARE_BIT_EN10_NODE1, + RDMATX_CAP_COMPARE_BIT_EN11_NODE1, + RDMATX_CAP_COMPARE_BIT_EN12_NODE1, + RDMATX_CAP_COMPARE_BIT_EN13_NODE1, + RDMATX_CAP_COMPARE_BIT_EN14_NODE1, + RDMATX_CAP_COMPARE_BIT_EN15_NODE1 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_DATA0_NODE1, + RDMATX_CAP_COMPARE_DATA1_NODE1, + RDMATX_CAP_COMPARE_DATA2_NODE1, + RDMATX_CAP_COMPARE_DATA3_NODE1, + RDMATX_CAP_COMPARE_DATA4_NODE1, + RDMATX_CAP_COMPARE_DATA5_NODE1, + RDMATX_CAP_COMPARE_DATA6_NODE1, + 
RDMATX_CAP_COMPARE_DATA7_NODE1, + RDMATX_CAP_COMPARE_DATA8_NODE1, + RDMATX_CAP_COMPARE_DATA9_NODE1, + RDMATX_CAP_COMPARE_DATA10_NODE1, + RDMATX_CAP_COMPARE_DATA11_NODE1, + RDMATX_CAP_COMPARE_DATA12_NODE1, + RDMATX_CAP_COMPARE_DATA13_NODE1, + RDMATX_CAP_COMPARE_DATA14_NODE1, + RDMATX_CAP_COMPARE_DATA15_NODE1 + }; + u32 node1_mask, val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE1]; + if (chl_sel_idx >= RDMA_TX_SEL_NODE_MODULE_NUM) { + return EINVAL; + } + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_SEL_NODE1, + (cap_cfg->channel_select[NODE1] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_OPEN_NODE1, + (cap_cfg->channel_open[NODE1] & 0xF)); + node_choose = cap_cfg->node_choose[NODE1] & 0xFF; + node1_mask = ~(0xff); + if (chl_sel_idx == RDMA_TX_SEL_NODE_MODULE_WQE) { + node_select = (cap_cfg->node_select[NODE1] & 0xFF); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + val = ((val & node1_mask) | node_select); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + val = ((val & node1_mask) | node_choose); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + } else { + if (chl_sel_idx != RDMA_TX_SEL_NODE_MODULE_NONE) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + + READ_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], + &val); + val = ((val & node1_mask) | node_choose); + WRITE_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], + val); + } + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE1]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE1]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_TIME_WRL2D_NODE1, + cap_cfg->rdma_time_wrl2d[NODE1]); + return 0; +} + +int write_cap_rx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 comapre_loop; + u64 node0offset[RDMA_RX_SEL_NODE_MODULE_NUM] = { + RDMARX_CAP_NODE0_SEL_RTT_T4, + RDMARX_CAP_NODE0_SEL_PKT_PROC, + RDMARX_CAP_NODE_SEL_HD_CACHE, + RDMARX_CAP_NODE_SEL_VAPA_DDRWR, + 0, + RDMARX_CAP_NODE0_SEL_PRIFIELD_CHECK, + RDMARX_CAP_NODE0_SEL_READ_SRQC, + RDMARX_CAP_NODE0_SEL_READ_WQE, + RDMARX_CAP_NODE0_SEL_CNP_GEN, + RDMARX_CAP_NODE_SEL_ACKNAKFIFO, + RDMARX_CAP_NODE0_SEL_CQE, + RDMARX_CAP_NODE0_SEL_COMPLQUEUE, + RDMARX_CAP_NODE_SEL_NOF, + RDMARX_CAP_NODE0_SEL_TXSUB + }; + + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_BIT_EN0_NODE0, + RDMARX_CAP_COMPARE_BIT_EN1_NODE0, + RDMARX_CAP_COMPARE_BIT_EN2_NODE0, + RDMARX_CAP_COMPARE_BIT_EN3_NODE0, + RDMARX_CAP_COMPARE_BIT_EN4_NODE0, + RDMARX_CAP_COMPARE_BIT_EN5_NODE0, + RDMARX_CAP_COMPARE_BIT_EN6_NODE0, + RDMARX_CAP_COMPARE_BIT_EN7_NODE0, + RDMARX_CAP_COMPARE_BIT_EN8_NODE0, + RDMARX_CAP_COMPARE_BIT_EN9_NODE0, + RDMARX_CAP_COMPARE_BIT_EN10_NODE0, + RDMARX_CAP_COMPARE_BIT_EN11_NODE0, + RDMARX_CAP_COMPARE_BIT_EN12_NODE0, + RDMARX_CAP_COMPARE_BIT_EN13_NODE0, + RDMARX_CAP_COMPARE_BIT_EN14_NODE0, + RDMARX_CAP_COMPARE_BIT_EN15_NODE0 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_DATA0_NODE0, + RDMARX_CAP_COMPARE_DATA1_NODE0, + RDMARX_CAP_COMPARE_DATA2_NODE0, + RDMARX_CAP_COMPARE_DATA3_NODE0, + RDMARX_CAP_COMPARE_DATA4_NODE0, + 
RDMARX_CAP_COMPARE_DATA5_NODE0, + RDMARX_CAP_COMPARE_DATA6_NODE0, + RDMARX_CAP_COMPARE_DATA7_NODE0, + RDMARX_CAP_COMPARE_DATA8_NODE0, + RDMARX_CAP_COMPARE_DATA9_NODE0, + RDMARX_CAP_COMPARE_DATA10_NODE0, + RDMARX_CAP_COMPARE_DATA11_NODE0, + RDMARX_CAP_COMPARE_DATA12_NODE0, + RDMARX_CAP_COMPARE_DATA13_NODE0, + RDMARX_CAP_COMPARE_DATA14_NODE0, + RDMARX_CAP_COMPARE_DATA15_NODE0 + }; + u32 node0_mask = 0; + u32 node0_value = 0; + u32 val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE0]; + if (chl_sel_idx >= RDMA_RX_SEL_NODE_MODULE_NUM) { + return EINVAL; + }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_SEL_NODE0, + (cap_cfg->channel_select[NODE0] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_OPEN_NODE0, + (cap_cfg->channel_open[NODE0] & 0xF)); + + switch (chl_sel_idx) { + case RDMA_RX_SEL_NODE_MODULE_RTT_T4: + case RDMA_RX_SEL_NODE_MODULE_PKT_PROC: + case RDMA_RX_SEL_NODE_MODULE_CEQ: + case RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE: + case RDMA_RX_SEL_NODE_MODULE_TX_SUB: + node0_mask = ~(0xffffffff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_HD_CACHE: + case RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR: + node0_mask = ~(0xf); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK: + case RDMA_RX_SEL_NODE_MODULE_READ_SRQC: + case RDMA_RX_SEL_NODE_MODULE_READ_WQE: + node0_mask = ~(0xff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_CNP_GEN: + node0_mask = ~(0x7); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO: + node0_mask = ~(0xffff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_NOF: + node0_mask = ~(0xffff << 16); + node0_value = cap_cfg->node_select[NODE0] << 16; + break; + default: + break; + } + if (chl_sel_idx != RDMA_RX_SEL_NODE_MODULE_PSN_CHECK) { + READ_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], &val); + val = ((val & node0_mask) | node0_value); + WRITE_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], val); + + pr_info("val=%u, node0_value=%u, channel_select=%u, node0_select val= 0x%08llx\n", + val, node0_value, chl_sel_idx, + node0offset[chl_sel_idx]); + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE0]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE0]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_TIME_WRL2D_NODE0, + cap_cfg->rdma_time_wrl2d[NODE0]); + return 0; +} + +int write_cap_rx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 comapre_loop; + u64 node1offset[RDMA_RX_SEL_NODE_MODULE_NUM] = { + RDMARX_CAP_NODE1_SEL_RTT_T4, + RDMARX_CAP_NODE1_SEL_PKT_PROC, + RDMARX_CAP_NODE_SEL_HD_CACHE, + RDMARX_CAP_NODE_SEL_VAPA_DDRWR, + 0, + RDMARX_CAP_NODE1_SEL_PRIFIELD_CHECK, + RDMARX_CAP_NODE1_SEL_READ_SRQC, + RDMARX_CAP_NODE1_SEL_READ_WQE, + RDMARX_CAP_NODE1_SEL_CNP_GEN, + RDMARX_CAP_NODE_SEL_ACKNAKFIFO, + RDMARX_CAP_NODE1_SEL_CQE, + RDMARX_CAP_NODE1_SEL_COMPLQUEUE, + RDMARX_CAP_NODE_SEL_NOF, + RDMARX_CAP_NODE1_SEL_TXSUB + }; + + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_BIT_EN0_NODE1, + RDMARX_CAP_COMPARE_BIT_EN1_NODE1, + RDMARX_CAP_COMPARE_BIT_EN2_NODE1, + RDMARX_CAP_COMPARE_BIT_EN3_NODE1, + 
RDMARX_CAP_COMPARE_BIT_EN4_NODE1, + RDMARX_CAP_COMPARE_BIT_EN5_NODE1, + RDMARX_CAP_COMPARE_BIT_EN6_NODE1, + RDMARX_CAP_COMPARE_BIT_EN7_NODE1, + RDMARX_CAP_COMPARE_BIT_EN8_NODE1, + RDMARX_CAP_COMPARE_BIT_EN9_NODE1, + RDMARX_CAP_COMPARE_BIT_EN10_NODE1, + RDMARX_CAP_COMPARE_BIT_EN11_NODE1, + RDMARX_CAP_COMPARE_BIT_EN12_NODE1, + RDMARX_CAP_COMPARE_BIT_EN13_NODE1, + RDMARX_CAP_COMPARE_BIT_EN14_NODE1, + RDMARX_CAP_COMPARE_BIT_EN15_NODE1 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_DATA0_NODE1, + RDMARX_CAP_COMPARE_DATA1_NODE1, + RDMARX_CAP_COMPARE_DATA2_NODE1, + RDMARX_CAP_COMPARE_DATA3_NODE1, + RDMARX_CAP_COMPARE_DATA4_NODE1, + RDMARX_CAP_COMPARE_DATA5_NODE1, + RDMARX_CAP_COMPARE_DATA6_NODE1, + RDMARX_CAP_COMPARE_DATA7_NODE1, + RDMARX_CAP_COMPARE_DATA8_NODE1, + RDMARX_CAP_COMPARE_DATA9_NODE1, + RDMARX_CAP_COMPARE_DATA10_NODE1, + RDMARX_CAP_COMPARE_DATA11_NODE1, + RDMARX_CAP_COMPARE_DATA12_NODE1, + RDMARX_CAP_COMPARE_DATA13_NODE1, + RDMARX_CAP_COMPARE_DATA14_NODE1, + RDMARX_CAP_COMPARE_DATA15_NODE1 + }; + u32 node1_mask = 0; + u32 node1_value = 0; + u32 val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE1]; + if (chl_sel_idx >= RDMA_RX_SEL_NODE_MODULE_NUM) { + return EINVAL; + }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_SEL_NODE1, + (cap_cfg->channel_select[NODE1] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_OPEN_NODE1, + (cap_cfg->channel_open[NODE1] & 0xF)); + + switch (chl_sel_idx) { + case RDMA_RX_SEL_NODE_MODULE_RTT_T4: + case RDMA_RX_SEL_NODE_MODULE_PKT_PROC: + case RDMA_RX_SEL_NODE_MODULE_CEQ: + case RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE: + case RDMA_RX_SEL_NODE_MODULE_TX_SUB: + node1_mask = ~(0xffffffff); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_HD_CACHE: + case RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR: + node1_mask = ~(0xf << 16); + node1_value = cap_cfg->node_select[NODE1] << 16; + break; + case RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK: + case RDMA_RX_SEL_NODE_MODULE_READ_SRQC: + case RDMA_RX_SEL_NODE_MODULE_READ_WQE: + case RDMA_RX_SEL_NODE_MODULE_NOF: + node1_mask = ~(0xff); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_CNP_GEN: + node1_mask = ~(0x7); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO: + node1_mask = ~(0xffff << 16); + node1_value = cap_cfg->node_select[NODE1] << 16; + break; + default: + break; + } + if (chl_sel_idx != RDMA_RX_SEL_NODE_MODULE_PSN_CHECK) { + READ_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], &val); + val = (val & node1_mask) | node1_value; + WRITE_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], val); + + pr_info("val=%u, node1_value=%u, channel_select_node1=%u, node1_select val= 0x%08llx\n", + val, node1_value, chl_sel_idx, + node1offset[chl_sel_idx]); + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE1]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE1]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_TIME_WRL2D_NODE1, + cap_cfg->rdma_time_wrl2d[NODE1]); + return 0; +} + +static bool check_cap_cfg(struct zxdh_cap_cfg *cap_cfg) +{ + if (cap_cfg->cap_data_start_cap == 0x0) { + pr_err("zxdh cap_data_start_cap cfg err!\n"); + return false; + } + + if (cap_cfg->cap_data_start_cap == 0x1) { + if 
(cap_cfg->cap_position == CAP_TX && + (cap_cfg->channel_select[NODE0] == + RDMA_TX_SEL_NODE_MODULE_NONE || + cap_cfg->channel_select[NODE0] > + RDMA_TX_SEL_NODE_MODULE_WQE)) { + pr_err("zxdh cap_data_start_cap cfg tx node0 channel_select:%u err!\n", + cap_cfg->channel_select[NODE0]); + return false; + } + + if (cap_cfg->cap_position == CAP_RX && + cap_cfg->channel_select[NODE0] >= + RDMA_RX_SEL_NODE_MODULE_NUM) { + pr_err("zxdh cap_data_start_cap cfg rx node0 channel_select:%u err!\n", + cap_cfg->channel_select[NODE0]); + return false; + } + } + + if (cap_cfg->cap_data_start_cap == 0x2) { + if (cap_cfg->cap_position == CAP_TX && + (cap_cfg->channel_select[NODE1] == + RDMA_TX_SEL_NODE_MODULE_NONE || + cap_cfg->channel_select[NODE1] > + RDMA_TX_SEL_NODE_MODULE_WQE)) { + pr_err("zxdh cap_data_start_cap cfg tx node1 channel_select:%u err!\n", + cap_cfg->channel_select[NODE1]); + return false; + } + + if (cap_cfg->cap_position == CAP_RX && + cap_cfg->channel_select[NODE1] >= + RDMA_RX_SEL_NODE_MODULE_NUM) { + pr_err("zxdh cap_data_start_cap cfg rx node1 channel_select:%u err!\n", + cap_cfg->channel_select[NODE1]); + return false; + } + } + return true; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_START)(struct uverbs_attr_bundle *attrs) +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct ib_device *ib_dev; + struct zxdh_cap_cfg cap_cfg = { 0 }; + struct zxdh_cap_start_resp cap_resp = { 0 }; + int ret; + struct zxdh_ucontext *ucontext; + u32 dma_addr_low, dma_addr_high, cap_id; + + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&cap_cfg, attrs, ZXDH_IB_ATTR_DEV_CAP_START); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (!check_cap_cfg(&cap_cfg)) + return EPERM; + + ucontext = to_ucontext(ib_uctx); + dev = &iwdev->rf->sc_dev; + + iwdev->cap_cpu_addr_node0 = 0; + iwdev->cap_cpu_addr_node1 = 0; + iwdev->cap_dma_addr_node0 = 0; + iwdev->cap_dma_addr_node1 = 0; + memset(&iwdev->cap_mmap_info, 0, sizeof(iwdev->cap_mmap_info)); + if ((cap_cfg.cap_data_start_cap & 0x1) == 0x1) { + iwdev->cap_cpu_addr_node0 = zxdh_zalloc_mapped( + iwdev, &iwdev->cap_dma_addr_node0, + ZXDH_CAP_DATA_MEM_SIZE, DMA_BIDIRECTIONAL); + if (iwdev->cap_cpu_addr_node0 == NULL) { + pr_err("zxdh_zalloc_mapped for node0 fail!\n"); + ret = -1; + return ret; + } + + dma_addr_low = (u32)(iwdev->cap_dma_addr_node0 & 0xFFFFFFFF); + dma_addr_high = + (u32)((iwdev->cap_dma_addr_node0 >> 32) & 0xFFFFFFFF); + // access host, smmu not used + cap_id = 0x58; + pr_info("iwdev->cap_dma_addr_node0:%llX,dma_addr_low:%X,dma_addr_high:%X.\n", + iwdev->cap_dma_addr_node0, dma_addr_low, dma_addr_high); + pr_info("vhca_id:%u,cap_id:%X.\n", dev->vhca_id, cap_id); + + iwdev->cap_mmap_info.cap_mmap_entry_node0 = + zxdh_cap_mmap_entry_insert(ucontext, + iwdev->cap_cpu_addr_node0, + ZXDH_CAP_DATA_MEM_SIZE, + ZXDH_MMAP_PFN, + &cap_resp.cap_pa_node0); + if (!iwdev->cap_mmap_info.cap_mmap_entry_node0) { + pr_err("zxdh_user_mmap_entry_insert node0 err!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node0); + return -ENOMEM; + } + + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_LOW_NODE0, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE0, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_LOW_NODE0, + ZXDH_CAP_DATA_MEM_SIZE); 
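+ /* The ZXDH_CAP_DATA_MEM_SIZE (2 MiB) capture window length is likewise programmed as a LOW/HIGH register pair; only the low half is non-zero here. */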
+ WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_HIGH_NODE0, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_VHCA_NUM_NODE0, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_AXI_ID_NODE0, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_CAP_ID_NODE0, cap_id); + ret = write_cap_tx_reg_node0(dev, &cap_cfg); + if (ret != 0) + return ret; + } + + if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_LOW_NODE0, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE0, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_LOW_NODE0, + ZXDH_CAP_DATA_MEM_SIZE); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_VHCA_NUM_NODE0, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_AXI_ID_NODE0, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_CAP_ID_NODE0, cap_id); + ret = write_cap_rx_reg_node0(dev, &cap_cfg); + if (ret != 0) + return ret; + } + } + + if ((cap_cfg.cap_data_start_cap & 0x2) == 0x2) { + iwdev->cap_cpu_addr_node1 = zxdh_zalloc_mapped( + iwdev, &iwdev->cap_dma_addr_node1, + ZXDH_CAP_DATA_MEM_SIZE, DMA_BIDIRECTIONAL); + if (iwdev->cap_cpu_addr_node1 == NULL) { + pr_err("zxdh_zalloc_mapped for node1 fail!\n"); + ret = -1; + return ret; + } + dma_addr_low = (u32)(iwdev->cap_dma_addr_node1 & 0xFFFFFFFF); + dma_addr_high = + (u32)((iwdev->cap_dma_addr_node1 >> 32) & 0xFFFFFFFF); + // access host, smmu not used + cap_id = 0x58; + pr_info("iwdev->cap_dma_addr_node1:%llX,dma_addr_low:%X,dma_addr_high:%X.\n", + iwdev->cap_dma_addr_node1, dma_addr_low, dma_addr_high); + pr_info("vhca_id:%u,cap_id:%X.\n", dev->vhca_id, cap_id); + + iwdev->cap_mmap_info.cap_mmap_entry_node1 = + zxdh_cap_mmap_entry_insert(ucontext, + iwdev->cap_cpu_addr_node1, + ZXDH_CAP_DATA_MEM_SIZE, + ZXDH_MMAP_PFN, + &cap_resp.cap_pa_node1); + if (!iwdev->cap_mmap_info.cap_mmap_entry_node1) { + pr_err("zxdh_user_mmap_entry_insert node1 err!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node1); + return -ENOMEM; + } + + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_LOW_NODE1, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE1, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_LOW_NODE1, + ZXDH_CAP_DATA_MEM_SIZE); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_HIGH_NODE1, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_VHCA_NUM_NODE1, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_AXI_ID_NODE1, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_CAP_ID_NODE1, cap_id); + ret = write_cap_tx_reg_node1(dev, &cap_cfg); + if (ret != 0) + return ret; + } + + if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_LOW_NODE1, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE1, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_LOW_NODE1, + ZXDH_CAP_DATA_MEM_SIZE); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_HIGH_NODE1, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_VHCA_NUM_NODE1, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_AXI_ID_NODE1, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_CAP_ID_NODE1, cap_id); + ret = write_cap_rx_reg_node1(dev, &cap_cfg); + if (ret != 0) + return ret; + } + }
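Illustrative sketch (not part of the patch): every node/direction branch above programs the 64-bit capture buffer DMA address as a LOW/HIGH pair of 32-bit registers. A hypothetical helper factoring out that split could look like the following; the helper name is invented, while the dev->hw->hw_addr writel() pattern and the register-offset parameters mirror the node-select writes earlier in this file.

static inline void zxdh_cap_set_axi_addr(struct zxdh_sc_dev *dev, u64 low_reg,
					 u64 high_reg, dma_addr_t dma_addr)
{
	/* Split the coherent DMA address into the 32-bit LOW/HIGH registers. */
	writel(lower_32_bits(dma_addr),
	       (u32 __iomem *)(dev->hw->hw_addr + low_reg));
	writel(upper_32_bits(dma_addr),
	       (u32 __iomem *)(dev->hw->hw_addr + high_reg));
}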
+ + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMATX_DATA_START_CAP, + cap_cfg.cap_data_start_cap); + } else if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMARX_DATA_START_CAP, + cap_cfg.cap_data_start_cap); + } + pr_info("zxdh_rdma received cap_start msg:cap_position=%u\n", + cap_cfg.cap_position); + + if (uverbs_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_CAP_START_RESP, + &cap_resp, sizeof(cap_resp))) { + if ((cap_cfg.cap_data_start_cap & 0x1) == 0x1) { + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node0); + } + + if ((cap_cfg.cap_data_start_cap & 0x2) == 0x2) { + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node1); + } + pr_err("zxdh_rdma ib_copy_to_udata failed!\n"); + return -EFAULT; + } + return 0; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_STOP)(struct uverbs_attr_bundle *attrs) +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 cap_position; + int ret; + + pr_info("cap stop node0!\n"); + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&cap_position, attrs, ZXDH_IB_ATTR_DEV_CAP_STOP); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMATX_DATA_START_CAP, 0); + } else if (cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMARX_DATA_START_CAP, 0); + } else { + pr_info("cap %u stop err!\n", cap_position); + return EINVAL; + } + + pr_info("cap %u stop!1:tx,2:rx\n", cap_position); + return 0; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_FREE)(struct uverbs_attr_bundle *attrs) +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 cap_type; + int ret; + + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&cap_type, attrs, ZXDH_IB_ATTR_DEV_CAP_FREE); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (cap_type == 0) { + if (iwdev->cap_mmap_info.cap_mmap_entry_node0 != NULL) { + pr_info("rdma_user_mmap_entry_remove node0!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node0); + } + + if (iwdev->cap_mmap_info.cap_mmap_entry_node1 != NULL) { + pr_info("rdma_user_mmap_entry_remove node1!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.cap_mmap_entry_node1); + } + if (iwdev->cap_cpu_addr_node0 != NULL) { + zxdh_free_mapped(iwdev, iwdev->cap_cpu_addr_node0, + iwdev->cap_dma_addr_node0, + ZXDH_CAP_DATA_MEM_SIZE, + DMA_BIDIRECTIONAL); + pr_info("cap free node0!\n"); + } + + if (iwdev->cap_cpu_addr_node1 != NULL) { + zxdh_free_mapped(iwdev, iwdev->cap_cpu_addr_node1, + iwdev->cap_dma_addr_node1, + ZXDH_CAP_DATA_MEM_SIZE, + DMA_BIDIRECTIONAL); + pr_info("cap free node1!\n"); + } + } + + if (cap_type == 1) { + if (iwdev->cap_mmap_info.mp_cap_mmap_entry != NULL) { + pr_info("rdma_user_mmap_entry_remove mp cap!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.mp_cap_mmap_entry); + } + + if (iwdev->mp_cap_cpu_addr != NULL) { + dma_free_coherent(iwdev->rf->sc_dev.hw->device, + ALIGN(ZXDH_L2D_MPCAP_BUFF_SIZE, + ZXDH_HW_PAGE_SIZE), + iwdev->mp_cap_cpu_addr, + iwdev->mp_cap_dma_addr); + iwdev->mp_cap_cpu_addr = NULL; + pr_info("dma_free_coherent mp_cap_cpu_addr!\n"); + } + } + + return 0; +} + +static int 
write_mp_cap_regs(struct zxdh_device *iwdev, bool is_l2d_used, + struct zxdh_mp_cap_resp *resp) +{ + int i; + uint8_t mp_idx, gqp_idx; + uint64_t reg_addr, cap_size, addr_val; + uint32_t reg_val; + uint32_t read_reg_val = 0; + bool is_addr_replace = false; + + for (i = 0; i < resp->cap_gqp_num && i < MAX_CAP_QPS; i++) { + mp_idx = 0; + gqp_idx = 0; + if (resp->cap_gqpid[i] <= GQP_ID_1023) { + mp_idx = resp->cap_gqpid[i] / GQP_MOD; + gqp_idx = resp->cap_gqpid[i] % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + reg_addr, &read_reg_val); + reg_val = (uint64_t)(RAM_ADDR + (i * MP_OFFSET)); + if (reg_val == read_reg_val) { + pr_err("reg_addr:%llx map cap for gqp:%u is working!\n", + reg_addr, gqp_idx); + return EINVAL; + } + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else if (resp->cap_gqpid[i] > GQP_ID_1023 && + resp->cap_gqpid[i] <= GQP_ID_1103) { + mp_idx = ((resp->cap_gqpid[i] - GQP_OFFSET) / GQP_MOD) + + MP_IDX_INC; + gqp_idx = (resp->cap_gqpid[i] - GQP_OFFSET) % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (4 * CAP_ENABLE_REG_IDX); + reg_val = RAM_ADDR + i * MP_OFFSET; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + } else if (resp->cap_gqpid[i] <= GQP_ID_2047) { + mp_idx = ((resp->cap_gqpid[i] - GQP_OFFSET) / GQP_MOD) - + MP_MOD; + gqp_idx = (resp->cap_gqpid[i] - GQP_OFFSET) % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + reg_val = RAM_ADDR + i * MP_OFFSET; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else { + pr_err("gqpid:%u err!\n", resp->cap_gqpid[i]); + return EINVAL; + } + + if (is_l2d_used) { + iwdev->mp_cap_media_addr_base = + iwdev->rf->sc_dev.l2d_smmu_addr + 0x40000 + + 0x8000; + cap_size = ZXDH_L2D_MPCAP_BUFF_SIZE / resp->cap_gqp_num; + } else { + iwdev->mp_cap_media_addr_base = DDR_ADDR_BASE; + cap_size = DDR_SIZE; + } + reg_addr = DATA_ADDR_BASE + i * MP_OFFSET; + addr_val = iwdev->mp_cap_media_addr_base + (i * cap_size); + if ((addr_val >> 32) != 0) { + reg_val = (REPLACE_VALUE | (addr_val & 0x0FFFFFFF)); + is_addr_replace = true; + } else { + reg_val = addr_val; + } + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, + reg_val); + + reg_addr = READ_RAM_REG_BASE + i * MP_OFFSET; + reg_val = RAM_ADDR + i * MP_OFFSET; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, + reg_val); + + reg_addr = MP_DATA_NUM_GEG + i * MP_OFFSET; + if (is_l2d_used) { + reg_val = ((cap_size / MP_DATA_BYTE) - 1); + } else { + reg_val = DDR_MP_DATA_NUM; /* 20w MP data */ + } + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, + reg_val); + } + + if (is_addr_replace) { + reg_addr = BYPASS_REG; + reg_val = 0x1; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, + reg_val); + + reg_addr = REPLACE_REG; + reg_val = (addr_val >> 28); + 
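+ /* Address replacement: each per-MP data-address register above only carried the low 28 bits of addr_val (tagged with REPLACE_VALUE); the bits above bit 27 are written once to REPLACE_REG below so the hardware can reassemble the full media address. */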
REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, + reg_val); + } + return 0; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP)(struct uverbs_attr_bundle *attrs) +{ + int ret, i, j; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_mp_cap_cfg mp_cap_cfg = { 0 }; + struct zxdh_qp *iwqp; + struct zxdh_ucontext *ucontext; + uint16_t gqp_id; + uint64_t mp_reg_addrs[MAX_CAP_QPS]; + bool same_gqp_exist; + struct zxdh_mp_cap_resp mp_cap_resp; + + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&mp_cap_cfg, attrs, ZXDH_IB_ATTR_DEV_MP_CAP); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + pr_info("qpn_num:%u\n", mp_cap_cfg.qpn_num); + if (mp_cap_cfg.qpn_num == 0 || mp_cap_cfg.qpn_num > MAX_CAP_QPS) + return EINVAL; + + memset(&mp_cap_resp, 0, sizeof(struct zxdh_mp_cap_resp)); + if (mp_cap_cfg.cap_use_l2d) { + iwdev->mp_cap_cpu_addr = NULL; + iwdev->mp_cap_cpu_addr = dma_alloc_coherent( + iwdev->rf->sc_dev.hw->device, ZXDH_L2D_MPCAP_BUFF_SIZE, + &iwdev->mp_cap_dma_addr, GFP_KERNEL); + if (!iwdev->mp_cap_cpu_addr) { + pr_err("dma_alloc_coherent for mp cap fail!\n"); + ret = -1; + return ret; + } + + ucontext = to_ucontext(ib_uctx); + iwdev->cap_mmap_info.mp_cap_mmap_entry = + zxdh_cap_mmap_entry_insert(ucontext, + iwdev->mp_cap_cpu_addr, + ZXDH_L2D_MPCAP_BUFF_SIZE, + ZXDH_MMAP_PFN, + &mp_cap_resp.cap_pa); + if (!iwdev->cap_mmap_info.mp_cap_mmap_entry) { + pr_err("zxdh_user_mmap_entry_insert mp cap err!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.mp_cap_mmap_entry); + return -ENOMEM; + } + } + + memset(mp_reg_addrs, 0, sizeof(mp_reg_addrs)); + for (i = 0; i < mp_cap_cfg.qpn_num; i++) { + if (mp_cap_cfg.qpn[i] < (iwdev->rf->sc_dev.base_qpn + 1) || + mp_cap_cfg.qpn[i] > (iwdev->rf->sc_dev.base_qpn + + iwdev->rf->max_qp - 1)) { + pr_err("qpn:%u, base_qpn:%u overload", + mp_cap_cfg.qpn[i], iwdev->rf->sc_dev.base_qpn); + return EINVAL; + } + + iwqp = NULL; + iwqp = iwdev->rf->qp_table[mp_cap_cfg.qpn[i] - + iwdev->rf->sc_dev.base_qpn]; + if (iwqp == NULL) + return EINVAL; + + if (iwqp->sc_qp.qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) { + gqp_id = + zxdh_get_rc_gqp_id(iwqp->sc_qp.qp_uk.ws_index, + iwqp->sc_qp.dev->vhca_id, + iwqp->sc_qp.dev->total_vhca); + } else { + gqp_id = get_ud_gqp_id(iwqp->sc_qp.dev->vhca_id, + iwqp->sc_qp.dev->total_vhca); + } + + pr_info("mp cap qp_type:%u (1:RC,2:UD),qpn:%u,gqp_id:%u,vhcaid:%u!\n", + iwqp->sc_qp.qp_uk.qp_type, mp_cap_cfg.qpn[i], gqp_id, + iwqp->sc_qp.dev->vhca_id); + + same_gqp_exist = false; + for (j = 0; j < mp_cap_resp.cap_gqp_num; j++) { + if (mp_cap_resp.cap_gqpid[j] == gqp_id) { + same_gqp_exist = true; + pr_info("same gqp_id:%u for qpn:%u vhcaid:%u!\n", + gqp_id, mp_cap_cfg.qpn[i], + iwqp->sc_qp.dev->vhca_id); + break; + } + } + if (same_gqp_exist) + continue; + mp_cap_resp.cap_gqpid[mp_cap_resp.cap_gqp_num] = gqp_id; + mp_cap_resp.cap_gqp_num += 1; + } + + ret = write_mp_cap_regs(iwdev, mp_cap_cfg.cap_use_l2d, &mp_cap_resp); + if (ret != 0) { + pr_err("write_mp_cap_regs err! 
gqp_num:%u\n", + mp_cap_resp.cap_gqp_num); + return ret; + } + mp_cap_resp.mcode_type = iwdev->rf->mcode_type; + pr_info("zxdh_rdma mp cap ib_copy_to_udata gqpid:%u,gqp_num:%u,cap_pa:%llx.mcode_type:%u!\n", + mp_cap_resp.cap_gqpid[0], mp_cap_resp.cap_gqp_num, + mp_cap_resp.cap_pa, mp_cap_resp.mcode_type); + + if (uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_MP_CAP_RESP, + &mp_cap_resp, sizeof(mp_cap_resp))) { + pr_err("zxdh_rdma mp cap ib_copy_to_udata failed!\n"); + rdma_user_mmap_entry_remove( + iwdev->cap_mmap_info.mp_cap_mmap_entry); + return -EFAULT; + } + + return 0; +} + +static int +UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_GET_DATA)(struct uverbs_attr_bundle *attrs) +{ + int ret = 0; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_src_copy_dest src_dest = {}; + int status; + u8 param; + + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(¶m, attrs, ZXDH_IB_ATTR_DEV_MP_GET_DATA); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + src_dest.src = iwdev->mp_cap_media_addr_base; + src_dest.dest = iwdev->mp_cap_dma_addr; + src_dest.len = ZXDH_L2D_MPCAP_BUFF_SIZE; + status = zxdh_dpuddr_to_host_cmd(&iwdev->rf->sc_dev, &src_dest); + if (status != 0) { + pr_info("status:%d\n", status); + return -EFAULT; + } + + pr_info("zxdh_dpuddr_to_host_cmd succ!\n"); + return 0; +} + +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR)( + struct uverbs_attr_bundle *attrs) +{ + int ret = 0; + uint8_t i; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_cap_gqp cap_gqp; + uint64_t reg_addr; + uint32_t reg_val; + uint8_t mp_idx; + + ib_uctx = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ret = uverbs_copy_from(&cap_gqp, attrs, ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + if (cap_gqp.gqp_num == 0 || cap_gqp.gqp_num > MAX_CAP_QPS) + return EINVAL; + + reg_val = CLOSE_MP_CAP_VALUE; + for (i = 0; i < cap_gqp.gqp_num; i++) { + mp_idx = 0; + if (cap_gqp.gqpid[i] <= GQP_ID_1023) { + mp_idx = cap_gqp.gqpid[i] / GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + } else if (cap_gqp.gqpid[i] > GQP_ID_1023 && + cap_gqp.gqpid[i] <= GQP_ID_1103) { + mp_idx = + ((cap_gqp.gqpid[i] - GQP_OFFSET) / GQP_MOD) + 1; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else if (cap_gqp.gqpid[i] <= GQP_ID_2047) { + mp_idx = ((cap_gqp.gqpid[i] - GQP_OFFSET) / GQP_MOD) - + MP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else { + pr_err("gqpid:%u err!\n", cap_gqp.gqpid[i]); + ret = EINVAL; + } + } + + return ret; +} + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_QP_RESET_QP, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_RESET_QP_HANDLE, + UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_RESET_OP_CODE, + UVERBS_ATTR_TYPE(u64), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_QP_MODIFY_QPC, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE, 
UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ, + UVERBS_ATTR_STRUCT(struct zxdh_modify_qpc_req, + package_err_flag), + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK, + UVERBS_ATTR_TYPE(u64), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_QP_QUERY_QPC, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_QUERY_HANDLE, UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_QP_QUERY_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_query_qpc_resp, + tx_last_ack_psn), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_UDP_PORT, + UVERBS_ATTR_TYPE(u16), + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_QPN, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_LOG_TRACE, + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH, + UVERBS_ATTR_TYPE(u8), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_SET_LOG_TRACE, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH, + UVERBS_ATTR_TYPE(u8), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_CAP_START, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_START, + UVERBS_ATTR_STRUCT(struct zxdh_cap_cfg, + cap_data_start_cap), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_CAP_START_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_cap_start_resp, + cap_pa_node1), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_CAP_STOP, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_STOP, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_CAP_FREE, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_FREE, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_MP_CAP, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_CAP, + UVERBS_ATTR_STRUCT(struct zxdh_mp_cap_cfg, qpn_num), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_MP_CAP_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_mp_cap_resp, cap_pa), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_MP_GET_DATA, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_GET_DATA, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR, + UVERBS_ATTR_STRUCT(struct zxdh_cap_gqp, gqp_num), + UA_MANDATORY), ); + +DECLARE_UVERBS_GLOBAL_METHODS(ZXDH_IB_OBJECT_DEV, + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_LOG_TRACE), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_SET_LOG_TRACE), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_START), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_STOP), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_FREE), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_CAP), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_GET_DATA), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR), ); + +DECLARE_UVERBS_GLOBAL_METHODS(ZXDH_IB_OBJECT_QP_OBJ, + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_QUERY_QPC), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_MODIFY_QPC), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_RESET_QP), ); + +const struct uapi_definition zxdh_ib_dev_defs[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(ZXDH_IB_OBJECT_DEV), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(ZXDH_IB_OBJECT_QP_OBJ), + {}, +}; diff --git a/src/rdma/src/private_verbs_cmd.h b/src/rdma/src/private_verbs_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..953b38d92829ea6924a6ac8d89187a43f17a1151 --- /dev/null +++ 
b/src/rdma/src/private_verbs_cmd.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PRIVATE_VERBS_CMD_H +#define ZXDH_PRIVATE_VERBS_CMD_H +#include +#define ZXDH_RESET_RETRY_CQE_SQ_OPCODE_ERR 0x1f +enum switch_status { + SWITCH_CLOSE = 0, + SWITCH_OPEN = 1, + SWITCH_ERROR, +}; + +enum zxdh_qp_modify_qpc_mask { + ZXDH_RETRY_CQE_SQ_OPCODE = 1 << 0, + ZXDH_ERR_FLAG_SET = 1 << 1, + ZXDH_PACKAGE_ERR_FLAG = 1 << 2, + ZXDH_TX_LAST_ACK_PSN = 1 << 3, + ZXDH_TX_LAST_ACK_WQE_OFFSET_SET = 1 << 4, + ZXDH_TX_READ_RETRY_FLAG_SET = 1 << 5, + ZXDH_TX_RDWQE_PYLD_LENGTH = 1 << 6, + ZXDH_TX_RECV_READ_FLAG_SET = 1 << 7, + ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET = 1 << 8, +}; + +enum zxdh_qp_reset_qp_code { + ZXDH_RESET_RETRY_TX_ITEM_FLAG = 1, +}; + +struct zxdh_reset_qp_retry_tx_item { + u16 tx_win_raddr; + u32 tx_last_ack_psn; + u32 last_ack_wqe_offset; + u16 hw_sq_tail_una; + u32 rnr_retry_time_l; + u8 rnr_retry_time_h; + u8 rnr_retry_threshold; + u8 read_retry_flag; + u8 rnr_retry_flag; + u8 retry_flag; + u8 cur_retry_count; + u32 rdwqe_pyld_length; + u8 recv_read_flag; + u8 recv_err_flag; + u8 recv_rd_msg_loss_err_flag; + u8 recv_rd_msg_loss_err_cnt; + u8 rd_msg_loss_err_flag; + u8 pktchk_rd_msg_loss_err_cnt; + u8 ack_err_flag; + u8 err_flag; + u8 package_err_flag; + u8 retry_cqe_sq_opcode; +}; + +struct zxdh_qp_tx_win_item { + u32 start_psn; + u16 wqe_pointer; +}; + +struct zxdh_modify_qpc_item { + u32 tx_last_ack_psn; + u32 last_ack_wqe_offset; + u16 hw_sq_tail_una; + u32 rnr_retry_time_l; + u8 rnr_retry_time_h; + u8 rnr_retry_threshold; + u8 read_retry_flag; + u8 rnr_retry_flag; + u8 retry_flag; + u8 cur_retry_count; + u8 rdwqe_pyld_length_l; + u32 rdwqe_pyld_length_h; + u8 recv_read_flag; + u8 recv_err_flag; + u8 recv_rd_msg_loss_err_flag; + u8 recv_rd_msg_loss_err_cnt; + u8 rd_msg_loss_err_flag; + u8 pktchk_rd_msg_loss_err_cnt; + u8 ack_err_flag; + u8 err_flag; + u8 package_err_flag; + u8 retry_cqe_sq_opcode; +}; + +#define CAP_NODE_NUM 2 +#define NODE1 1 +#define NODE0 0 +#define EN_32bit_GROUP_NUM 16 +#define BIT_O_31 0 +#define BIT_32_63 1 +#define BIT_64_95 2 +#define BIT_96_127 3 +#define BIT_128_159 4 +#define BIT_160_191 5 +#define BIT_192_223 6 +#define BIT_224_255 7 +#define BIT_256_287 8 +#define BIT_288_319 9 +#define BIT_320_351 10 +#define BIT_352_383 11 +#define BIT_384_415 12 +#define BIT_416_447 13 +#define BIT_448_479 14 +#define BIT_480_511 15 +#define CAP_TX 1 +#define CAP_RX 2 +struct zxdh_cap_cfg { + uint8_t cap_position; + uint32_t channel_select[CAP_NODE_NUM]; + uint32_t channel_open[CAP_NODE_NUM]; + uint32_t node_choose[CAP_NODE_NUM]; + uint32_t node_select[CAP_NODE_NUM]; + uint32_t compare_bit_en[EN_32bit_GROUP_NUM][CAP_NODE_NUM]; + uint32_t compare_data[EN_32bit_GROUP_NUM][CAP_NODE_NUM]; + uint32_t rdma_time_wrl2d[CAP_NODE_NUM]; + uint32_t extra[CAP_NODE_NUM][EN_32bit_GROUP_NUM]; + uint32_t cap_data_start_cap; +}; +#define MAX_CAP_QPS 4 +struct zxdh_mp_cap_cfg { + bool cap_use_l2d; + uint32_t qpn[MAX_CAP_QPS]; + uint8_t qpn_num; +}; + +struct zxdh_cap_gqp { + uint16_t gqpid[MAX_CAP_QPS]; + uint8_t gqp_num; +}; + +/* ZXDH Devices ID */ +#define ZXDH_DEV_ID_ADAPTIVE_EVB_PF 0x8040 /* ZXDH EVB PF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_EVB_VF 0x8041 /* ZXDH EVB VF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_E312_PF 0x8049 /* ZXDH E312 PF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_E312_VF 0x8060 /* ZXDH E312 VF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_X512_PF 0x806B /* ZXDH X512 PF DEVICE ID*/ 
+#define ZXDH_DEV_ID_ADAPTIVE_X512_VF 0x806C /* ZXDH X512 VF DEVICE ID*/ + +#define ZXDH_L2D_MPCAP_BUFF_SIZE 0x17000u +#define ZXDH_CAP_DATA_MEM_SIZE (2 * 1024 * 1024) +#define ZXDH_LOG_BUF_SIZE 4096 +extern const struct uapi_definition zxdh_ib_dev_defs[]; + +void copy_tx_window_to_win_item(void *va, struct zxdh_qp_tx_win_item *info); +void set_retry_modify_qpc_item( + struct zxdh_modify_qpc_item *modify_qpc_item, + struct zxdh_reset_qp_retry_tx_item *retry_item_info, + struct zxdh_qp_tx_win_item *tx_win_item_info, u64 *modify_mask); + +#endif diff --git a/src/rdma/src/protos.h b/src/rdma/src/protos.h new file mode 100644 index 0000000000000000000000000000000000000000..ee827bcab79a7f44d096f0904be60af3658e3d9b --- /dev/null +++ b/src/rdma/src/protos.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PROTOS_H +#define ZXDH_PROTOS_H +#include + +#define PAUSE_TIMER_VAL 0xffff +#define REFRESH_THRESHOLD 0x7fff +#define HIGH_THRESHOLD 0x800 +#define LOW_THRESHOLD 0x200 +#define ALL_TC2PFC 0xff +#define CQP_COMPL_WAIT_TIME_MS 6 +#define CQP_TIMEOUT_THRESHOLD 20000 + +/* init operations */ +void zxdh_sc_dev_init(enum zxdh_rdma_vers ver, struct zxdh_sc_dev *dev, + struct zxdh_device_init_info *info); +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp); +__le64 *zxdh_sc_cqp_get_next_send_wqe(struct zxdh_sc_cqp *cqp, u64 scratch); +int zxdh_sc_mr_fast_register(struct zxdh_sc_qp *qp, + struct zxdh_fast_reg_stag_info *info, + bool post_sq); +void zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle); +/* HMC/FPM functions */ + +/* stats misc */ +void zxdh_rdma_stats_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats); + +int zxdh_process_pma_cmd(struct zxdh_sc_dev *dev, u8 port, + const struct ib_mad *in_mad, struct ib_mad *out_mad); +void zxdh_hw_stats_read_all(struct zxdh_vsi_pestat *stats, + const u64 *hw_stats_regs); +int zxdh_cqp_up_map_cmd(struct zxdh_sc_dev *dev, u8 cmd, + struct zxdh_up_info *map_info); +int zxdh_cqp_ceq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *sc_ceq, + u8 op); +int zxdh_cqp_aeq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_aeq *sc_aeq, + u8 op); +void zxdh_sc_dev_qplist_init(struct zxdh_sc_dev *dev); +int zxdh_sc_add_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq); +void zxdh_sc_remove_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq); +/* misc L2 param change functions */ +void zxdh_qp_add_qos(struct zxdh_sc_qp *qp); +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp); +struct zxdh_sc_qp *zxdh_get_qp_from_list(struct list_head *head, + struct zxdh_sc_qp *qp); +/* dynamic memory allocation */ +/* misc */ +u8 zxdh_get_encoded_wqe_size(u32 wqsize, enum zxdh_queue_type queue_type); +void zxdh_modify_qp_to_err(struct zxdh_sc_qp *sc_qp); +int zxdh_cfg_fpm_val(struct zxdh_sc_dev *dev, u32 qp_count); +void free_sd_mem(struct zxdh_sc_dev *dev); +int zxdh_process_cqp_cmd(struct zxdh_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo); +int zxdh_process_bh(struct zxdh_sc_dev *dev); +extern void dump_ctx(struct zxdh_sc_dev *dev, u32 pf_num, u32 qp_num); +void dumpCSR(struct zxdh_sc_dev *dev); +void dumpCSRx(struct zxdh_sc_dev *dev); +void dumpcls(struct zxdh_sc_dev *dev); +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev); + +int zxdh_sc_config_pte_table(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest); +int zxdh_cqp_config_pte_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest); + +void 
zxdh_hmc_dpu_capability(struct zxdh_sc_dev *dev); +u32 zxdh_hmc_register_config_comval(struct zxdh_sc_dev *dev, u32 rsrc_type); +u32 zxdh_hmc_register_config_cqpval(struct zxdh_sc_dev *dev, u32 max_cnt, + u32 rsrc_type); +u64 zxdh_get_path_index(struct zxdh_path_index *path_index); +int zxdh_cqp_config_pble_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_pble_info *pbleinfo, u32 len, + bool pbletype); +u16 zxdh_get_ws_index(struct zxdh_sc_qp *qp, u32 dest_ip); +u16 get_dev_rc_ws_offset(u16 vhca_id, u32 total_vhca); +u16 zxdh_get_dev_ud_ws(u16 vhca_id, u32 total_vhca); +u16 zxdh_get_tc_ws_offset(u32 total_vhca, u8 traffic_class, u16 *tc_ws_num); + +int zxdh_sc_send_mailbox(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, u64 msg3, + u64 msg4); +int zxdh_sc_send_mailbox_cmd(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, + u64 msg3, u64 msg4, u16 dst_vf_id); +int zxdh_sc_commit_hmc_register_val(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, + bool post_sq, u8 wait_type); + +int zxdh_sc_dma_read_usecqe(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_dam_read_bycqe *readbuf, + struct zxdh_path_index *spath_index, bool post_sq); + +int zxdh_sc_dma_read(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq); + +int zxdh_sc_dma_write64(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write64_date *dma_data, bool post_sq); + +int zxdh_sc_dma_write32(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, bool post_sq); + +int zxdh_sc_dma_write(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq); + +int zxdh_sc_mb_create(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_mailboxhead_data *mbhead_data, bool post_sq, + u32 dst_vf_id); +int zxdh_sc_query_mkey_cmd(struct zxdh_sc_dev *dev, u32 mekyindex); + +int zxdh_clear_dpuddr(struct zxdh_sc_dev *dev, u64 size, bool clear); +int zxdh_vf_clear_dpuddr(struct zxdh_sc_dev *dev, u64 size, bool clear); + +int zxdh_clear_nof_ioq(struct zxdh_sc_dev *dev, u64 size, u64 ioq_pa); + +int zxdh_dpuddr_to_host_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_write_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir); +int zxdh_cqp_rdma_read_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir); +int zxdh_cqp_damreadbycqe_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dam_read_bycqe *dmadata, + struct zxdh_path_index *src_path_index, u64 *arr); +int zxdh_cqp_rdma_write32_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dma_write32_date *dma_data); +int zxdh_cqp_rdma_readreg_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_read_mrte_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_read_tx_window_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +u64 zxdh_get_hmc_align_512(u64 paaddr); +u16 zxdh_txwind_ddr_size(u8 num); + +u64 zxdh_get_hmc_align_2M(u64 paaddr); +u64 zxdh_get_hmc_align_4K(u64 paaddr); +int zxdh_create_vf_pblehmc_entry(struct zxdh_sc_dev *dev); +int zxdh_sc_query_mkey(struct zxdh_sc_cqp *cqp, u32 mkeyindex, u64 scratch, + bool post_sq); + +int 
zxdh_sc_query_qpc(struct zxdh_sc_qp *qp, u64 qpc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_cqc(struct zxdh_sc_cq *cq, u64 cqc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_ceqc(struct zxdh_sc_ceq *ceq, u64 ceqc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_aeqc(struct zxdh_sc_aeq *aeq, u64 aeqc_buf_pa, u64 scratch, + bool post_sq); + +int zxdh_cq_round_up(u32 wqdepth); + +int zxdh_cqp_aeq_create(struct zxdh_sc_aeq *aeq); +int zxdh_init_destroy_aeq(struct zxdh_pci_f *rf); +int zxdh_create_cqp_qp(struct zxdh_pci_f *rf); +int zxdh_destroy_cqp_qp(struct zxdh_pci_f *rf); +const char *zxdh_qp_state_to_string(enum ib_qp_state state); +void get_pci_board_bdf(char *pci_board_bdf, struct zxdh_pci_f *rf); +#endif /* ZXDH_PROTOS_H */ diff --git a/src/rdma/src/puda.c b/src/rdma/src/puda.c new file mode 100644 index 0000000000000000000000000000000000000000..c6dacb3dfc254314e96f6e78dfef843fc090edca --- /dev/null +++ b/src/rdma/src/puda.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "puda.h" +#include "ws.h" + +/** + * zxdh_puda_get_listbuf - get buffer from puda list + * @list: list to use for buffers (ILQ or IEQ) + */ +static struct zxdh_puda_buf *zxdh_puda_get_listbuf(struct list_head *list) +{ + struct zxdh_puda_buf *buf = NULL; + + if (!list_empty(list)) { + buf = (struct zxdh_puda_buf *)list->next; + list_del((struct list_head *)&buf->list); + } + + return buf; +} + +/** + * zxdh_puda_ret_bufpool - return buffer to rsrc list + * @rsrc: resource to use for buffer + * @buf: buffer to return to resource + */ +void zxdh_puda_ret_bufpool(struct zxdh_puda_rsrc *rsrc, + struct zxdh_puda_buf *buf) +{ + unsigned long flags; + + buf->do_lpb = false; + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + list_add(&buf->list, &rsrc->bufpool); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->avail_buf_count++; +} + +/** + * zxdh_puda_get_next_send_wqe - return next wqe for processing + * @qp: puda qp for wqe + * @wqe_idx: wqe index for caller + */ +static __le64 *zxdh_puda_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx) +{ + __le64 *wqe = NULL; + int ret_code = 0; + + *wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + ZXDH_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return wqe; + + wqe = qp->sq_base[*wqe_idx].elem; + + return wqe; +} + +/** + * zxdh_puda_send - complete send wqe for transmit + * @qp: puda qp for send + * @info: buffer information for transmit + */ +int zxdh_puda_send(struct zxdh_sc_qp *qp, struct zxdh_puda_send_info *info) +{ + __le64 *wqe; + u32 iplen, l4len; + u64 hdr[2]; + u32 wqe_idx; + u8 iipt; + + /* number of 32 bits DWORDS in header */ + l4len = info->tcplen >> 2; + if (info->ipv4) { + iipt = 3; + iplen = 5; + } else { + iipt = 1; + iplen = 10; + } + + wqe = zxdh_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); + if (!wqe) + return -ENOSPC; + + qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; + /* Third line of WQE descriptor */ + /* maclen is in words */ + + if (qp->dev->hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) { + hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */ + hdr[1] = FIELD_PREP(ZXDH_UDA_QPSQ_OPCODE, ZXDH_OP_TYPE_SEND) | + FIELD_PREP(ZXDH_UDA_QPSQ_L4LEN, l4len) | + FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) | + 
FIELD_PREP(ZXDH_UDA_QPSQ_SIGCOMPL, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity); + + /* Forth line of WQE descriptor */ + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity)); + } else { + hdr[0] = FIELD_PREP(ZXDH_UDA_QPSQ_MACLEN, info->maclen >> 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_IPLEN, iplen) | + FIELD_PREP(ZXDH_UDA_QPSQ_L4T, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_IIPT, iipt) | + FIELD_PREP(ZXDH_GEN1_UDA_QPSQ_L4LEN, l4len); + + hdr[1] = FIELD_PREP(ZXDH_UDA_QPSQ_OPCODE, ZXDH_OP_TYPE_SEND) | + FIELD_PREP(ZXDH_UDA_QPSQ_SIGCOMPL, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_DOLOOPBACK, info->do_lpb) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity); + + /* Forth line of WQE descriptor */ + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len)); + } + + set_64bit_val(wqe, 16, hdr[0]); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr[1]); + + print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, 32, false); + zxdh_uk_qp_post_wr(&qp->qp_uk); + return 0; +} + +/** + * zxdh_puda_send_buf - transmit puda buffer + * @rsrc: resource to use for buffer + * @buf: puda buffer to transmit + */ +void zxdh_puda_send_buf(struct zxdh_puda_rsrc *rsrc, struct zxdh_puda_buf *buf) +{ + struct zxdh_puda_send_info info = {}; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + /* if no wqe available or not from a completion and we have + * pending buffers, we must queue new buffer + */ + if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { + list_add_tail(&buf->list, &rsrc->txpend); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->stats_sent_pkt_q++; + if (rsrc->type == ZXDH_PUDA_RSRC_TYPE_ILQ) + pr_err("PUDA: adding to txpend\n"); + return; + } + rsrc->tx_wqe_avail_cnt--; + /* if we are coming from a completion and have pending buffers + * then Get one from pending list + */ + if (!buf) { + buf = zxdh_puda_get_listbuf(&rsrc->txpend); + if (!buf) + goto done; + } + + info.scratch = buf; + info.paddr = buf->mem.pa; + info.len = buf->totallen; + info.tcplen = buf->tcphlen; + info.ipv4 = buf->ipv4; + + if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) { + info.ah_id = buf->ah_id; + } else { + info.maclen = buf->maclen; + info.do_lpb = buf->do_lpb; + } + + /* Synch buffer for use by device */ + dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa, + buf->mem.size, DMA_BIDIRECTIONAL); + ret = zxdh_puda_send(&rsrc->qp, &info); + if (ret) { + rsrc->tx_wqe_avail_cnt++; + rsrc->stats_sent_pkt_q++; + list_add(&buf->list, &rsrc->txpend); + if (rsrc->type == ZXDH_PUDA_RSRC_TYPE_ILQ) + pr_info("PUDA: adding to puda_send\n"); + } else { + rsrc->stats_pkt_sent++; + } +done: + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); +} diff --git a/src/rdma/src/puda.h b/src/rdma/src/puda.h new file mode 100644 index 0000000000000000000000000000000000000000..ea292937c276524cdaa080c443e5470eca21fd3e --- /dev/null +++ b/src/rdma/src/puda.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PUDA_H +#define ZXDH_PUDA_H + +#define ZXDH_IEQ_MPA_FRAMING 6 +#define ZXDH_TCP_OFFSET 40 +#define ZXDH_IPV4_PAD 20 +#define ZXDH_MRK_BLK_SZ 512 + +enum puda_rsrc_type { + ZXDH_PUDA_RSRC_TYPE_ILQ = 1, + ZXDH_PUDA_RSRC_TYPE_IEQ, + 
ZXDH_PUDA_RSRC_TYPE_MAX, /* Must be last entry */ +}; + +enum puda_rsrc_complete { + PUDA_CQ_CREATED = 1, + PUDA_QP_CREATED, + PUDA_TX_COMPLETE, + PUDA_RX_COMPLETE, + PUDA_HASH_CRC_COMPLETE, +}; + +struct zxdh_sc_dev; +struct zxdh_sc_qp; +struct zxdh_sc_cq; + +struct zxdh_puda_cmpl_info { + struct zxdh_qp_uk *qp; + u8 q_type; + u8 l3proto; + u8 l4proto; + u16 vlan; + u32 payload_len; + u32 compl_error; /* No_err=0, else major and minor err code */ + u32 qp_id; + u32 wqe_idx; + u8 ipv4 : 1; + u8 smac_valid : 1; + u8 vlan_valid : 1; + u8 smac[ETH_ALEN]; +}; + +struct zxdh_puda_send_info { + u64 paddr; /* Physical address */ + u32 len; + u32 ah_id; + u8 tcplen; + u8 maclen; + u8 ipv4 : 1; + u8 do_lpb : 1; + void *scratch; +}; + +struct zxdh_puda_buf { + struct list_head list; /* MUST be first entry */ + struct zxdh_dma_mem mem; /* DMA memory for the buffer */ + struct zxdh_puda_buf *next; /* for alloclist in rsrc struct */ + struct zxdh_virt_mem buf_mem; /* Buffer memory for this buffer */ + void *scratch; + u8 *iph; + u8 *tcph; + u8 *data; + u16 datalen; + u16 vlan_id; + u8 tcphlen; /* tcp length in bytes */ + u8 maclen; /* mac length in bytes */ + u32 totallen; /* machlen+iphlen+tcphlen+datalen */ + refcount_t refcount; + u8 hdrlen; + u8 ipv4 : 1; + u8 vlan_valid : 1; + u8 do_lpb : 1; /* Loopback buffer */ + u8 smac_valid : 1; + u32 seqnum; + u32 ah_id; + u8 smac[ETH_ALEN]; + struct zxdh_sc_vsi *vsi; +}; + +struct zxdh_puda_rsrc_info { + void (*receive)(struct zxdh_sc_vsi *vsi, struct zxdh_puda_buf *buf); + void (*xmit_complete)(struct zxdh_sc_vsi *vsi, void *sqwrid); + enum puda_rsrc_type type; /* ILQ or IEQ */ + u32 count; + u32 pd_id; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */ + u16 buf_size; + u8 stats_idx; + u8 stats_idx_valid : 1; + int abi_ver; +}; + +struct zxdh_puda_rsrc { + struct zxdh_sc_cq cq; + struct zxdh_sc_qp qp; + struct zxdh_sc_pd sc_pd; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + struct zxdh_dma_mem cqmem; + struct zxdh_dma_mem qpmem; + struct zxdh_virt_mem ilq_mem; + enum puda_rsrc_complete cmpl; + enum puda_rsrc_type type; + u16 buf_size; /*buf must be max datalen + tcpip hdr + mac */ + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 cq_size; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 compl_rxwqe_idx; + u32 rx_wqe_idx; + u32 rxq_invalid_cnt; + u32 tx_wqe_avail_cnt; + struct shash_desc *hash_desc; + struct list_head txpend; + struct list_head bufpool; /* free buffers pool list for recv and xmit */ + u32 alloc_buf_count; + u32 avail_buf_count; /* snapshot of currently available buffers */ + spinlock_t bufpool_lock; + struct zxdh_puda_buf *alloclist; + void (*receive)(struct zxdh_sc_vsi *vsi, struct zxdh_puda_buf *buf); + void (*xmit_complete)(struct zxdh_sc_vsi *vsi, void *sqwrid); + /* puda stats */ + u64 stats_buf_alloc_fail; + u64 stats_pkt_rcvd; + u64 stats_pkt_sent; + u64 stats_rcvd_pkt_err; + u64 stats_sent_pkt_q; + u64 stats_bad_qp_id; + /* IEQ stats */ + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 pmode_count; + u64 partials_handled; + u8 stats_idx; + u8 check_crc : 1; + u8 stats_idx_valid : 1; +}; + +void zxdh_puda_ret_bufpool(struct zxdh_puda_rsrc *rsrc, + struct zxdh_puda_buf *buf); +void zxdh_puda_send_buf(struct zxdh_puda_rsrc *rsrc, struct zxdh_puda_buf *buf); +int zxdh_puda_send(struct zxdh_sc_qp *qp, struct zxdh_puda_send_info *info); + +int zxdh_cqp_qp_destroy_cmd(struct zxdh_sc_dev *dev, struct 
zxdh_sc_qp *qp); +#endif /*ZXDH_PROTOS_H */ diff --git a/src/rdma/src/restrack.c b/src/rdma/src/restrack.c new file mode 100644 index 0000000000000000000000000000000000000000..2a1fbd9067d861f2402d39951c6b7ae5076076b7 --- /dev/null +++ b/src/rdma/src/restrack.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#include +#include "restrack.h" +#include "main.h" + +#ifdef IB_DEV_OPS_FILL_ENTRY +static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr) +{ + struct zxdh_mr *mr = to_iwmr(ibmr); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + switch (mr->type) { + case ZXDH_MEMREG_TYPE_MEM: + if (rdma_nl_put_driver_string(msg, "type", "mem")) + goto err; + break; + case ZXDH_MEMREG_TYPE_QP: + if (rdma_nl_put_driver_string(msg, "type", "qp")) + goto err; + break; + case ZXDH_MEMREG_TYPE_CQ: + if (rdma_nl_put_driver_string(msg, "type", "cq")) + goto err; + break; + case ZXDH_MEMREG_TYPE_SRQ: + if (rdma_nl_put_driver_string(msg, "type", "srq")) + goto err; + break; + default: + goto err; + break; + } + nla_nest_end(msg, table_attr); + return 0; + +err: + pr_err("res mr entry failed\n"); + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_device *iwdev = to_iwdev(ibmr->device); + struct zxdh_src_copy_dest src_dest = { 0 }; + struct zxdh_dma_mem qpc_buf = { 0 }; + int err_code = 0; + + qpc_buf.size = 64; + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res mr entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + err_code = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, 0, qpc_buf.va); + goto free_buff; + } + src_dest.src = 64 * (iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S); + src_dest.dest = qpc_buf.pa; + src_dest.len = qpc_buf.size; + err_code = zxdh_cqp_rdma_read_mrte_cmd(&iwdev->rf->sc_dev, &src_dest); + if (err_code) { + pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, qpc_buf.size, qpc_buf.va); +free_buff: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(&iwqp->sc_qp, &qpc_buf); + if (err_code) { + pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, qpc_buf.size, qpc_buf.va); +free_buff: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_device *iwdev = to_iwdev(ibcq->device); + struct zxdh_dma_mem cqc_buf; + int err_code = 0; + + 
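+ /* Copy the CQ context into a DMA-coherent bounce buffer via zxdh_fill_cqc() and return the raw bytes through the RDMA_NLDEV_ATTR_RES_RAW netlink attribute; the buffer is freed before returning. */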
cqc_buf.va = NULL; + cqc_buf.size = ALIGN(ZXDH_CQ_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + cqc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, cqc_buf.size, + &cqc_buf.pa, GFP_KERNEL); + if (!cqc_buf.va) { + pr_err("res cq entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_cqc(&iwcq->sc_cq, &cqc_buf); + if (err_code) { + pr_err("res cq entry raw fill cqc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, cqc_buf.size, cqc_buf.va); +free_buff: + dma_free_coherent(iwdev->rf->hw.device, cqc_buf.size, cqc_buf.va, + cqc_buf.pa); + cqc_buf.va = NULL; + return err_code; +} + +static const struct ib_device_ops restrack_ops = { + .fill_res_cq_entry_raw = fill_res_cq_entry_raw, + .fill_res_mr_entry = fill_res_mr_entry, + .fill_res_qp_entry_raw = fill_res_qp_entry_raw, + .fill_res_mr_entry_raw = fill_res_mr_entry_raw, + +}; + +int zxdh_set_restrack_ops(struct ib_device *ibdev) +{ + ib_set_device_ops(ibdev, &restrack_ops); + return 0; +} +#endif \ No newline at end of file diff --git a/src/rdma/src/restrack.h b/src/rdma/src/restrack.h new file mode 100644 index 0000000000000000000000000000000000000000..8a6cb137ef103ad15fe009f3b837c9043c26704e --- /dev/null +++ b/src/rdma/src/restrack.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_ZRDMA_H +#define ZXDH_ZRDMA_H + +int zxdh_set_restrack_ops(struct ib_device *ibdev); +#endif diff --git a/src/rdma/src/rhel_kcompat.h b/src/rdma/src/rhel_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..be410128638652b57570ba18c5749a15da03ee61 --- /dev/null +++ b/src/rdma/src/rhel_kcompat.h @@ -0,0 +1,1001 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef RHEL_KCOMPAT_H +#define RHEL_KCOMPAT_H + +#ifdef RHEL_8_5 +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_QP_VER_1 +#define CREATE_CQ_VER_3 +#define DESTROY_AH_VER_4 +#define DEALLOC_PD_VER_4 +#define DESTROY_QP_VER_2 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V2 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IW_PORT_IMMUTABLE_V1 +#define IB_UMEM_GET_V2 +#define IN_IFADDR +#define ZXDH_ALLOC_MW_VER_2 +#define ZXDH_DESTROY_CQ_VER_4 +#define ZXDH_ALLOC_MR_VER_0 +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_2 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_typeq_ib_wr const + +#define 
kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* RHEL_8_5 */ + +#ifdef RHEL_8_4 +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_QP_VER_1 +#define CREATE_CQ_VER_3 +#define DESTROY_AH_VER_3 +#define DEALLOC_PD_VER_3 +#define DESTROY_QP_VER_2 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IW_PORT_IMMUTABLE_V1 +#define IB_UMEM_GET_V2 +#define IN_IFADDR +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define ZXDH_ALLOC_MR_VER_0 +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_typeq_ib_wr const + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* RHEL_8_4 */ + +#ifdef RHEL_8_3 + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_2 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_3 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define NETDEV_TO_IBDEV_SUPPORT +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V2 +#define IN_IFADDR +#define ZXDH_ALLOC_MR_VER_1 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define 
RDMA_MMAP_DB_SUPPORT +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_typeq_ib_wr const + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* RHEL_8_3 */ + +#ifdef RHEL_7_9 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_4 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define CREATE_QP_VER_1 +#define DESTROY_QP_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_UMEM_GET_V1 +#define IB_IW_PKEY +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#define kc_typeq_ib_wr const +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define 
kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#endif + +#ifdef RHEL_8_2 +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_2 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DESTROY_AH_VER_3 +#define CREATE_QP_VER_1 +#define DESTROY_QP_VER_2 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define NETDEV_TO_IBDEV_SUPPORT +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define IN_IFADDR +#define ZXDH_ALLOC_MR_VER_1 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_typeq_ib_wr const + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* RHEL_8_2 */ + +#ifdef RHEL_8_1 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_3 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DESTROY_AH_VER_2 +#define DEALLOC_UCONTEXT_VER_1 +#define DEALLOC_PD_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_QP_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_GET_NETDEV_OP_NOT_DEPRECATED +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define ZXDH_SET_DRIVER_ID +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 
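/*
 * How these per-release blocks are consumed (a minimal sketch; the call site
 * below is illustrative only: zxdh_ib_device_register(), the iwdev layout and
 * the "zxdh%d" name are assumptions, not part of this header). Driver code is
 * expected to call only the kc_ and set_ wrapper macros, and the RHEL_x_y
 * block selected at build time maps each wrapper onto whichever form that
 * kernel provides, e.g. ib_register_device() with or without a struct device
 * argument, or max_sge versus max_send_sge/max_recv_sge in ib_device_attr.
 *
 *	static int zxdh_ib_device_register(struct zxdh_device *iwdev,
 *					   struct ib_device_attr *props)
 *	{
 *		struct ib_device *ibdev = &iwdev->ibdev; // field name assumed
 *
 *		set_max_sge(props, iwdev->rf);
 *		kc_set_ibdev_add_del_gid(ibdev);
 *		return kc_ib_register_device(ibdev, "zxdh%d",
 *					     iwdev->rf->hw.device);
 *	}
 */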
+#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#define kc_typeq_ib_wr const +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_set_ibdev_add_del_gid(ibdev) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* RHEL_8_1 */ + +#ifdef RHEL_7_8 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_4 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define ZXDH_ALLOC_MR_VER_0 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#define kc_typeq_ib_wr const +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + 
(rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* RHEL_7_8 */ + +#ifdef RHEL_7_7 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_4 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_set_ibdev_add_del_gid(ibdev) +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#define kc_typeq_ib_wr const +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* RHEL_7_7 */ + +#ifdef RHEL_8_0 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define 
DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEALLOC_PD_VER_1 +#define DEREG_MR_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_UMEM_GET_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define ZXDH_SET_DRIVER_ID +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_set_ibdev_add_del_gid(ibdev) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#endif /* RHEL_8_0 */ + +#ifdef RHEL_7_6 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define IW_PORT_IMMUTABLE_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define 
ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#endif /* RHEL_7_6 */ + +#ifdef RHEL_7_5 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ 
+ ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#endif /* RHEL_7_5 */ + +#ifdef RHEL_7_4 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define CREATE_AH_VER_1_1 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEALLOC_PD_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_IW_PKEY +#define IB_IW_MANDATORY_AH_OP +#define IB_GET_CACHED_GID +#define IB_UMEM_GET_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define wait_queue_entry __wait_queue +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define rdma_ah_attr ib_ah_attr +#define ah_attr_to_dmac(attr) ((attr).dmac) + +#define ib_alloc_device(zxdh_device, ibdev) \ 
+ ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) + +#define set_ibdev_dma_device(ibdev, dev) ibdev.dma_device = dev + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) +#endif /* RHEL_7_4 */ +#endif /* RHEL_KCOMPAT_H */ diff --git a/src/rdma/src/smmu/kernel/adk_mmu600.c b/src/rdma/src/smmu/kernel/adk_mmu600.c new file mode 100644 index 0000000000000000000000000000000000000000..009e52bd73883c2c34bbe66b3096815bed3b39ed --- /dev/null +++ b/src/rdma/src/smmu/kernel/adk_mmu600.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#include +#include +#include + +#define USE_CMA +#ifdef USE_CMA +//#include +#include +#include +#include +#endif + +#include "cmdk_mmu600.h" +#include "adk_mmu600.h" +#include "ioctl_mmu600.h" + +#include "common_define.h" + +int dh_rdma_chan_smmu_invalid_tlb_send(struct zxdh_sc_dev *dev); +/************************************************************************** + * Macro * + **************************************************************************/ +#define CMA_PAGE_COUNT (16 * 1024) // 64M, 16*1024*4k + +//-------structures------------------------------------ + +//-------translation table------------------------------ +// SID[4:0] 一个SID对应一个PF,共32个PF,32个PF共享这32G,所以需要32套页表 +// 32个PF * 每个PF对应32个L1页表项映射32G = 一共需要L1页表项的个数是1024 个 +#define SMMU_L1_PER_PT_SIZE \ + (0x100) // 32个pte,共占用32 * 8Byte = 256Byte内存空间,映射32G +#define SMMU_L1_PT_ALIGN_SIZE (0x100) // 0x100 = 256 +#define SMMU_L1_PT_NUM (32) // 32套页表 +#define SMMU_L1_PT_SIZE (SMMU_L1_PT_NUM * SMMU_L1_PER_PT_SIZE) // 8K = 0x2000 + +// 32个PF共用同一套L2,动态维护管理 +#define SMMU_L2_PER_PT_SIZE \ + (0x1000) // 4k = 0x1000, 每个块表示1G,包含 512个2M 共占用 512*8=4K 内存 +#define SMMU_L2_PT_ALIGN_SIZE (0x1000) // 4k = 0x1000 +#define SMMU_L2_PT_NUM (32) // 物理上只有32G,共需要32个 512*2M 就可以表示 +#define SMMU_L2_PT_SIZE (SMMU_L2_PT_NUM * SMMU_L2_PER_PT_SIZE) // 128k = 0x20000 + +// 32个PF共用同一套L3,动态维护管理 +#define SMMU_L3_PER_PT_SIZE \ + (0x1000) // 4k = 0x1000, 每个块表示2M,包含 512个4K 共占用 512*8=4K 内存 +#define SMMU_L3_PT_ALIGN_SIZE (0x1000) // 4k = 0x1000 +#define SMMU_L3_PT_NUM (0x3DE) // 0x3DE = 990 +#define SMMU_L3_PT_SIZE (SMMU_L3_PT_NUM * SMMU_L3_PER_PT_SIZE) // 3M + 896K + +#define SMMU_PT_TOTAL (SMMU_L1_PT_SIZE + SMMU_L2_PT_SIZE + SMMU_L3_PT_SIZE) + +// L1要求0x100对齐 +// #define PTE_L2D_START_PA (0x6200B2E800) +// 4k对齐 +//#define PTE_L2D_START_PA (0x6200B2F000) +#define PTE_L2D_START_PA (0x6200630000) + +/************************************************************************** + * Functions * + **************************************************************************/ +int BspMmu600EnStreamBypass(u32 
udSid) +{ + //需要向risc-v的smmu驱动发送命令,TODO + + return CMDK_OK; +} +EXPORT_SYMBOL(BspMmu600EnStreamBypass); + +/************************************************************************** + * 函数名称: zxdh_smmu_dma_l1_l2_to_risc_test + * 功能描述: 在host上调用,通过dma模块把建立好的L1和L2搬移到L2D中 + * + * 输入参数: struct zxdh_sc_dev *ptMmuMmapCfg + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:测试接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/07/03 V1.0 guoll + ***************************************************************************/ +void zxdh_smmu_dma_l1_l2_to_risc_test(struct stPteRequest *ptMmuMmapCfg, + struct zxdh_sc_dev *dev) +{ + static int s_count; + struct zxdh_src_copy_dest src_dest = {}; + + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + + //cpy data from host to l2d + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + SMMU_L1_PT_SIZE + + s_count * 8; // dev->pte_l2d_startpa + + *(u64 *)dev->pte_address->uddPTETempVirAddr = (++s_count); + + dev->cqp->process_config_pte_table(dev, src_dest); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_use_l1_test + * 功能描述: 在host上调用,通过dma模块出发smmu进行查表,测试L1的转换功能 + * + * 输入参数: struct zxdh_sc_dev *ptMmuMmapCfg + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:测试接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/07/03 V1.0 guoll + ***************************************************************************/ +void zxdh_smmu_use_l1_test(struct stPteRequest *ptMmuMmapCfg, + struct zxdh_sc_dev *dev) +{ + static int s_count; + struct zxdh_src_copy_dest src_dest = {}; + + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + + /* cpy data from host to l2d */ + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + SMMU_L1_PT_SIZE + s_count * 8; + + *(u64 *)dev->pte_address->uddPTETempVirAddr = (++s_count); + dev->cqp->process_config_pte_table(dev, src_dest); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_use_l2_test + * 功能描述: 在host上调用,通过dma模块出发smmu进行查表,测试L3的转换功能 + * + * 输入参数: struct zxdh_sc_dev *ptMmuMmapCfg + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:测试接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/07/03 V1.0 guoll + ***************************************************************************/ +void zxdh_smmu_use_l2_test(struct stPteRequest *ptMmuMmapCfg, + struct zxdh_sc_dev *dev) +{ + static int s_count; + struct zxdh_src_copy_dest src_dest = {}; + + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + + /* cpy data from host to l2d */ + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + SMMU_L1_PT_SIZE + s_count * 8; + + *(u64 *)dev->pte_address->uddPTETempVirAddr = (++s_count); + dev->cqp->process_config_pte_table(dev, src_dest); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_use_l3_test + * 功能描述: 在host上调用,通过dma模块出发smmu进行查表,测试L3的转换功能 + * + * 输入参数: struct zxdh_sc_dev *ptMmuMmapCfg + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:测试接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/07/03 V1.0 guoll + ***************************************************************************/ +void zxdh_smmu_use_l3_test(struct stPteRequest *ptMmuMmapCfg, + struct zxdh_sc_dev 
*dev) +{ + static int s_count; + struct zxdh_src_copy_dest src_dest = {}; + + // ======================================================================== + // ======================================================================== + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + + /* cpy data from host to l2d */ + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + SMMU_L1_PT_SIZE + s_count * 8; + + *(u64 *)dev->pte_address->uddPTETempVirAddr = (++s_count); + + dev->cqp->process_config_pte_table(dev, src_dest); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_set_pte + * 功能描述: 在host上调用,配置页表 + * + * 输入参数: struct zxdh_sc_dev * + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:对外接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +u32 zxdh_smmu_set_pte(struct stPteRequest *ptMmuMmapCfg, + struct zxdh_sc_dev *dev) +{ + u32 ret = 0; + + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptMmuMmapCfg); + + // tmpFlag = g_ucMmu600PrintModuleId; + + // g_ucMmu600PrintModuleId = 1; + + // 临时规避方法 + // ptMmuMmapCfg->udRWFlag = 3; + + // 仅用2M粒度 + //if((0 == ptMmuMmapCfg->udStreamid) && (1 > (s0_count)) && (0x200000 == ptMmuMmapCfg->uddSize)) + //if((0 == ptMmuMmapCfg->udStreamid) && (s0_count) && (0x200000 == ptMmuMmapCfg->uddSize)) + // 仅用4K粒度 + //if((0 == ptMmuMmapCfg->udStreamid) && (1 > s0_count) && (0x1000 == ptMmuMmapCfg->uddSize)) + //if((0 == ptMmuMmapCfg->udStreamid) && (1 > s0_count)) + ret = zxdh_smmu_mmap((struct stPteRequest *)ptMmuMmapCfg, dev); + + // g_ucMmu600PrintModuleId = tmpFlag; + + return ret; +} +EXPORT_SYMBOL(zxdh_smmu_set_pte); + +u32 bspSmmuDeletePTE(u32 udSid, u64 uddVa, struct zxdh_sc_dev *dev) +{ + return CmdkSysMmuCmdTlbSync(); +} +EXPORT_SYMBOL(bspSmmuDeletePTE); + +static int zxdh_smmu_alloc_from_cma(struct device *device, + struct smmu_pte_address *pstPteAddress) +{ +#ifdef USE_CMA + SMMU_POINTER_CHECK(device); + SMMU_POINTER_CHECK(pstPteAddress); + pstPteAddress->CmaPageMemBaseVA = (u64)dma_alloc_coherent( + device, SMMU_PT_TOTAL, + (dma_addr_t *)(&(pstPteAddress->CmaPageMemBasePA)), GFP_KERNEL); + + SMMU_POINTER_CHECK(pstPteAddress->CmaPageMemBaseVA); + SMMU_POINTER_CHECK(pstPteAddress->CmaPageMemBasePA); + + memset_8byte((u64 *)pstPteAddress->CmaPageMemBaseVA, 0, SMMU_PT_TOTAL); + + // self : init pte base va = page base va + pstPteAddress->CmaMemBaseVA_pte = pstPteAddress->CmaPageMemBaseVA; +#endif + + return CMDK_OK; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_pagetable_init + * 功能描述: 初始化入口函数,在host上调用; + * 申请分配存储SMMU数据结构的内存; + * 并且初始化页表数据结构。 + * 输入参数: struct zxdh_sc_dev * + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明:对外接口 + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +int zxdh_smmu_pagetable_init(struct zxdh_sc_dev *dev) +{ + int ret = 0; + + // 存放页表初始化参数的数据结构 + struct stPagetableParam stPage = { 0 }; + + SMMU_POINTER_CHECK(dev); + + dh_rdma_chan_smmu_invalid_tlb_send(dev); + + dev->pte_l2d_startpa = dev->l2d_smmu_addr; + dev->pte_address = (struct smmu_pte_address *)kmalloc( + sizeof(struct smmu_pte_address), GFP_KERNEL); + SMMU_POINTER_CHECK(dev->pte_address); + MEMSET((void *)dev->pte_address, sizeof(struct 
smmu_pte_address), 0, + sizeof(struct smmu_pte_address)); + zxdh_smmu_alloc_from_cma(dev->hw->device, dev->pte_address); + + stPage.udPageTableSize = SMMU_PT_TOTAL; + stPage.udL1PageTableNum = SMMU_L1_PT_NUM; + stPage.udL2PageTableNum = SMMU_L2_PT_NUM; + stPage.udL3PageTableNum = SMMU_L3_PT_NUM; + stPage.uddPageTablePhyAddr = dev->pte_address->CmaPageMemBasePA; + stPage.uddPageTableVirAddr = dev->pte_address->CmaPageMemBaseVA; + + dev->pte_address->l2d_smmu_l2_offset = dev->l2d_smmu_l2_offset; + + ret = zxdh_smmu_struct_init(&stPage, dev->pte_address); + if (ret) + return CMDK_ERROR; + + return CMDK_OK; +} + +int zxdh_smmu_pagetable_exit(struct zxdh_sc_dev *dev) +{ + struct smmu_pte_address *pstPteAddress = dev->pte_address; + + //mmu600test_dbg_exit(dev); + + if (pstPteAddress->uddPTETempVirAddr) + kfree((void *)pstPteAddress->uddPTETempVirAddr); + + if (pstPteAddress->ptPteRecords) + kfree((void *)pstPteAddress->ptPteRecords); + + if (pstPteAddress->uddSmmuMapManageAddr) + kfree((void *)pstPteAddress->uddSmmuMapManageAddr); + + kfree((void *)dev->pte_address); + return CMDK_OK; +} + +int dh_rdma_chan_smmu_invalid_tlb_send(struct zxdh_sc_dev *dev) +{ + int ret = 0; + u64 recv_buffer = 0; + u8 *reply_ptr = NULL; + //uint32_t pf_id = 0; + uint8_t *risc_smmu_back_result = NULL; + uint16_t *risc_smmu_back_len = NULL; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct rsvMsgSmmuInfo tRscMsgSmmuInfo = { 0 }; + + result.recv_buffer = &recv_buffer; + result.buffer_len = sizeof(u64); + + //pf_id = pmgr->iwdev->rf->pf_id; + + tRscMsgSmmuInfo.udIsTlbInvalid = 1; + tRscMsgSmmuInfo.stInvalidTlbCfg.cmd = CMDQ_OP_TLBI_NSNH_ALL; + //tRscMsgSmmuInfo.stInvalidTlbCfg.vmid = pf_id; + + in.payload_addr = (uint8_t *)&tRscMsgSmmuInfo; + in.payload_len = sizeof(struct rsvMsgSmmuInfo); + + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.virt_addr = (u64)dev->hw->pci_hw_addr + 0x2000; // bar空间偏移 + + in.event_id = RSC_MSG_SMMU_EVENT_ID; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != 0) { + SMMU_PRINT(PM_ERROR, + "zxdh_bar_chan_sync_msg_send error, ret = %d\n", + ret); + } + + reply_ptr = (u8 *)result.recv_buffer; // common 通道处理状态信息 + if (*reply_ptr == 0xFF) { + risc_smmu_back_result = (u8 *)&result + 4; + risc_smmu_back_len = (u16 *)(reply_ptr + 1); + + //risc_len = *(u16 *)(reply_ptr + MSG_REP_LEN_OFFSET); + + SMMU_PRINT( + PM_ERROR, + "risc_back_result = 0x%x, risc_smmu_back_len = 0x%x\n", + *(u8 *)risc_smmu_back_result, + *(u8 *)risc_smmu_back_len); + } + + return 0; +} + +MODULE_AUTHOR("ZTE, Inc"); +MODULE_LICENSE("GPL"); diff --git a/src/rdma/src/smmu/kernel/adk_mmu600.h b/src/rdma/src/smmu/kernel/adk_mmu600.h new file mode 100644 index 0000000000000000000000000000000000000000..ce5c2efa473bb6434276260369dfc0ede3fa2d60 --- /dev/null +++ b/src/rdma/src/smmu/kernel/adk_mmu600.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _ADK_MMU600_H_ +#define _ADK_MMU600_H_ + +#include "../../manager.h" + +/************************************************************************** + * Macro * + **************************************************************************/ +/* max stream id num */ +#define SEC0_MAX_STREAM_NUM (64) + +/* udRWFlag value */ +#define SMMU_PTE_AP_EL_RW (0) // EL1 or higer R/W, EL0 none +#define SMMU_PTE_AP_RW (1) // R/W in all EL +#define SMMU_PTE_AP_EL_RO (2) // EL1 or higer RO, EL0 none +#define SMMU_PTE_AP_RO (3) // RO in all EL + +/* 
udMemAttr value */ +#define SMMU_PTE_MEMATTR_DEVICE (0) +#define SMMU_PTE_MEMATTR_NM_WB_WA (1) +#define SMMU_PTE_MEMATTR_NC (2) + +/* udShare value */ +#define SMMU_PAGETABLE_NONSHAREABLE (0) /**< non share*/ +#define SMMU_PAGETABLE_OUTERSHARE (2) /**< outer share*/ +#define SMMU_PAGETABLE_INNERSHARE (3) /**< inner share*/ + +#ifndef SMMU_PTE_REQUEST_ST +#define SMMU_PTE_REQUEST_ST + +#define RSC_MSG_SMMU_EVENT_ID (6) // commom通道,SMMU EVENT ID +#define CMDQ_OP_TLBI_NSNH_ALL (0x30) + +struct stInvalidTlbCfg { + u32 cmd; + u32 scale; + u32 num; + u32 TG; + u32 leaf; + u32 TTL; + u32 vmid; + u32 asid; + u64 addr; +}; + +struct rsvMsgSmmuInfo { + u32 udIsTlbInvalid; + struct stInvalidTlbCfg stInvalidTlbCfg; +}; + +struct stPteRequest { + u64 uddPhyAddr; /* Request physical address */ + u64 uddVirAddr; /* Request virual address */ + u32 udStreamid; // stream id + u64 uddSize; // 映射地址范围大小 + u32 udRWFlag; /* AP */ + u32 udMemAttr; /* memory attribute */ + u32 udShare; /* share, 0-nonshare, 2-outershare, 3-innershare*/ +}; + +#endif +struct pteRecord { + u32 udValid; + u32 udSid; + u64 uddVa; + u64 uddPa; + u64 uddSize; +}; + +struct stPagetableParam { + u64 uddPageTablePhyAddr; ///<页表存放区起始地址 + u64 uddPageTableVirAddr; + u32 udPageTableSize; ///<页表存放区大小 + + u64 uddExPageTablePhyAddr; ///<扩展页表存放区起始地址,ddr,只放4K + u32 udPExPableSize; ///<扩展页表存放区大小,ddr,只放4K + + u32 udL1PageTableNum; /// +#include +#include +#include + +#include "hal_smmu.h" +#include "cmdk_mmu600_inner.h" +#include "cmdk_mmu600.h" +#include "pub_print.h" + +u8 g_ucMmu600PrintModuleId = 0x2; + +u32 uswap_32(u32 v) +{ + return v; +} + +u64 uswap_64(u64 v) +{ + return v; +} + +u32 memset_8byte(u64 *p, u64 data, u64 size) +{ + u32 i = 0; + + if (size % 8) + return -1; + + for (i = 0; i < (size / 8); i++) + *(p + i) = data; + + return 0; +} + +u32 mpf_sync_msg_send(u8 type, u8 module_id, u8 *msg, u16 len) +{ + //communication channel, other module supply + return CMDK_OK; +} + +u32 mpf_async_msg_send(u8 type, u8 module_id, u8 *msg, u16 len) +{ + //communication channel, other module supply + return CMDK_OK; +} + +u32 CmdkSysMmuCmdTlbSync(void) +{ + struct st2RiscMsg recv_msg = { 0 }; + + recv_msg.type = SMMU_MSG_TLB_SYNC; + + mpf_sync_msg_send(0x4, 2, (u8 *)&recv_msg, sizeof(struct st2RiscMsg)); + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdTlbSync); + +u32 CmdkSysMmuCmdTlbCleanByVa(u32 udSid, u32 udSsid, u64 uddVa, u32 udPageLvl) +{ + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdTlbCleanByVa); + +u32 CmdkSysMmuCmdTlbCleanByIpa(u32 udSid, u64 uddVa, u32 udPageLvl) +{ + //需覝坑risc-v的smmu驱动坑逝命�?TODO + struct st2RiscMsg recv_msg = { 0 }; + + recv_msg.type = SMMU_MSG_TLB_IPA; + recv_msg.udStreamid = udSid; + recv_msg.vaddr = uddVa; + recv_msg.uddSize = udPageLvl; + + mpf_sync_msg_send(0x4, 2, (u8 *)&recv_msg, sizeof(struct st2RiscMsg)); + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdTlbCleanByIpa); + +u32 CmdkSysMmuCmdDeletePte(u32 udSid, u64 uddVa, u64 uddSize, u64 uddPteL2DAddr, + u64 uddPteL2DLen) +{ + //需覝坑risc-v的smmu驱动坑逝命�?TODO + struct st2RiscMsg recv_msg = { 0 }; + + recv_msg.type = SMMU_MSG_DEL_PTE; + recv_msg.udStreamid = udSid; + recv_msg.vaddr = uddVa; + recv_msg.uddSize = uddSize; + recv_msg.uddPteL2DAddr = uddPteL2DAddr; + recv_msg.uddPteL2DLen = uddPteL2DLen; + + mpf_sync_msg_send(0x4, 2, (u8 *)&recv_msg, sizeof(struct st2RiscMsg)); + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdDeletePte); + +u32 CmdkSysMmuCmdTlbCleanByVmid(u32 udSid) +{ + //需覝坑risc-v的smmu驱动坑逝命�?TODO + + return CMDK_OK; +} 
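/*
 * A possible shape for the TODO in CmdkSysMmuCmdTlbCleanByVmid() above, kept
 * as a sketch only: it follows the message pattern already used by
 * CmdkSysMmuCmdTlbCleanByIpa() and CmdkSysMmuCmdDeletePte() in this file.
 * SMMU_MSG_TLB_VMID is a hypothetical message type that this patch does not
 * define; the real command id expected by the risc-v smmu driver must be used.
 *
 *	struct st2RiscMsg recv_msg = { 0 };
 *
 *	recv_msg.type = SMMU_MSG_TLB_VMID;	// hypothetical type value
 *	recv_msg.udStreamid = udSid;
 *	mpf_sync_msg_send(0x4, 2, (u8 *)&recv_msg, sizeof(struct st2RiscMsg));
 */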
+EXPORT_SYMBOL(CmdkSysMmuCmdTlbCleanByVmid); + +u32 CmdkSysMmuCmdTlbCleanByAsid(u32 udSid, u32 udSsid) +{ + //a command needs to be sent to the risc-v smmu driver, TODO + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdTlbCleanByAsid); + +u32 CmdkSysMmuCmdSteSync(void) +{ + //a command needs to be sent to the risc-v smmu driver, TODO + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdSteSync); + +u32 CmdkSysMmuCmdCdSync(u32 udSid) +{ + //a command needs to be sent to the risc-v smmu driver, TODO + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuCmdCdSync); + +u32 CmdkSysMmuSetPrintLevel(u32 udPrintLvl) +{ + g_ucMmu600PrintModuleId = (u8)udPrintLvl; + + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuSetPrintLevel); + +u8 CmdkSysMmuGetPrintLevel(void) +{ + return g_ucMmu600PrintModuleId; +} +EXPORT_SYMBOL(CmdkSysMmuGetPrintLevel); diff --git a/src/rdma/src/smmu/kernel/cmdk_mmu600.h b/src/rdma/src/smmu/kernel/cmdk_mmu600.h new file mode 100644 index 0000000000000000000000000000000000000000..69f60f79328b20696001e6dc9f529092376ba8f8 --- /dev/null +++ b/src/rdma/src/smmu/kernel/cmdk_mmu600.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +/** + * @file cmdk_mmu600.h + * @brief external interface header for the mmu600 sdk + * @details mainly contains the mmu600 initialization, configuration and maintenance/diagnostic interfaces + * @author 陈港文 + * @date 2021-04-27 + * @version V1.0 + * @copyright Copyright (c) 2018-2020 ZTE Corporation + ********************************************************************************** + * @attention + * - Hardware platform: msc4.0 + * - Architecture support: arm64 + * @warning + * - xxxxx + * - xxxxxxx + * @bug + * - + * @par Change log: + *
Date Version Author Description + *
2021/04/27 1.0 陈港文 Initial version + *
+ * + ********************************************************************************** + */ + +#ifndef _CMDK_MMU600_H_ +#define _CMDK_MMU600_H_ + +#include "cmdk.h" +#include "../../type.h" +#include "adk_mmu600.h" + +/************************************************************************** + * Macro * + **************************************************************************/ +#define PAGE_SIZE_4K 0x1000 ///<页表大小4K +#define PAGE_SIZE_2M 0x200000 ///<页表大小2M +#define PAGE_SIZE_1G 0x40000000 ///<页表大小1G + +#define SMMU_S1CDMAX_VALUE (1) + +// page table mask +#define PAGE_MASK_4K 0xfffffffff000ULL +#define PAGE_MASK_64K 0xffffffff0000ULL +#define PAGE_MASK_1M 0xfffffff00000ULL +#define PAGE_MASK_2M 0xffffffe00000ULL +#define PAGE_MASK_16M 0xffffff000000ULL +#define PAGE_MASK_512M 0xffffe0000000ULL +#define PAGE_MASK_1G 0xffffc0000000ULL + +// reverse mask +#define REV_PAGE_MASK_4K 0x0000000fffULL +#define REV_PAGE_MASK_64K 0x000000ffffULL +#define REV_PAGE_MASK_1M 0x00000fffffULL +#define REV_PAGE_MASK_2M 0x00001fffffULL +#define REV_PAGE_MASK_16M 0x0000ffffffULL +#define REV_PAGE_MASK_512M 0x001fffffffULL +#define REV_PAGE_MASK_1G 0x003fffffffULL + +/* udRWFlag value */ +#define SMMU_PTE_AP_EL_RW \ + (0) /// +#include +#include +#include +#include "hal_smmu.h" +#include "cmdk_mmu600.h" +#include "cmdk_mmu600_inner.h" +#include "pub_print.h" + +//#include "../../../../../../../net/msg_chan_driver/msg_chan_pub.h" +//#include "../../../../../msg_chan_driver/msg_chan_pub.h" +/************************************************************************** + * Macro * + **************************************************************************/ +#define MAX_PTE_RECORDS_NUM (2000) + +//-------translation table------------------------------ +// SID[4:0] 一个SID对应一个PF,共32个PF,32个PF共享这32G,所以需要32套页表 +// 32个PF * 每个PF对应32个L1页表项映射32G = 一共需要L1页表项的个数是1024 个 +#define SMMU_L1_PER_PT_SIZE \ + (0x100) // 32个pte,共占用32 * 8Byte = 256Byte内存空间,映射32G +#define SMMU_L1_PT_ALIGN_SIZE (0x100) // 0x100 = 256 +#define SMMU_L1_PT_NUM (32) // 32套页表 +#define SMMU_L1_PT_SIZE (SMMU_L1_PT_NUM * SMMU_L1_PER_PT_SIZE) // 8K = 0x2000 + +// 32个PF共用同一套L2,动态维护管理 +#define SMMU_L2_PER_PT_SIZE \ + (0x1000) // 4k = 0x1000, 每个块表示1G,包含 512个2M 共占用 512*8=4K 内存 +#define SMMU_L2_PT_ALIGN_SIZE (0x1000) // 4k = 0x1000 +#define SMMU_L2_PT_NUM (32) // 物理上只有32G,共需要32个 512*2M 就可以表示 +#define SMMU_L2_PT_SIZE (SMMU_L2_PT_NUM * SMMU_L2_PER_PT_SIZE) // 128k = 0x20000 + +// 32个PF共用同一套L3,动态维护管理 +#define SMMU_L3_PER_PT_SIZE \ + (0x1000) // 4k = 0x1000, 每个块表示2M,包含 512个4K 共占用 512*8=4K 内存 +#define SMMU_L3_PT_ALIGN_SIZE (0x1000) // 4k = 0x1000 +#define SMMU_L3_PT_NUM (0x3DE) // 0x3DE = 990 +#define SMMU_L3_PT_SIZE (SMMU_L3_PT_NUM * SMMU_L3_PER_PT_SIZE) // 3M + 896K + +#define SMMU_PT_TOTAL (SMMU_L1_PT_SIZE + SMMU_L2_PT_SIZE + SMMU_L3_PT_SIZE) + +// L1要求0x100对齐 +// #define PTE_L2D_START_PA (0x6200B2E800) +// 4k对齐 +#define PTE_L2D_START_PA (0x6200630000) + +//--------map manage struct------------------------------ +#define SMMU_L2_MAP_MANAGE_SIZE (SMMU_L2_PT_NUM * sizeof(struct t_Map_Manage)) +#define SMMU_L3_MAP_MANAGE_SIZE (SMMU_L3_PT_NUM * sizeof(struct t_Map_Manage)) + +/************************************************************************** + * Global Value * + **************************************************************************/ +struct ttbManage { + u32 udSid; + u32 udValid; + u64 uddPhyTTB; +}; + +static struct ttbManage *g_ptTtbMng; // manage l1 + +/************************************************************************** + * Functions * + 
**************************************************************************/ +static u64 zxdh_smmu_get_ttb(u32 sid, struct smmu_pte_address *pstPteAddress) +{ + return pstPteAddress->CmaPageMemBasePA + sid * SMMU_L1_PER_PT_SIZE; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_get_pte_size + * 功能描述: + * 判断是否可以使用块类型的页表项(优先使用块类型的页表项) + * + * 输入参数:udd_request_va 申请映射的虚拟地址 + * udd_request_size 申请映射的空间大小 + * ud_granule 使用的粒度 + * 输出参数: + * 返 回 值: ud_final_pte_size + * 其它说明: + * 端午节加班ing + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/06/23 V1.0 guoll + ***************************************************************************/ +static u32 zxdh_smmu_get_pte_size(u64 udd_request_va, u64 udd_request_pa, + u64 udd_request_size, u32 ud_granule) +{ + u32 ud_final_pte_size = PAGE_SIZE_4K; + + switch (ud_granule) { + case SMMU_CD_TG0_4K: { + if ((0 == (udd_request_va & REV_PAGE_MASK_1G)) && + (0 == (udd_request_pa & REV_PAGE_MASK_1G)) && + (udd_request_size >= PAGE_SIZE_1G)) { + ud_final_pte_size = PAGE_SIZE_1G; + } else { + if ((0 == (udd_request_va & REV_PAGE_MASK_2M)) && + (0 == (udd_request_pa & REV_PAGE_MASK_2M)) && + (udd_request_size >= PAGE_SIZE_2M)) { + ud_final_pte_size = PAGE_SIZE_2M; + } + } + break; + } + default: { + break; + } + } + return ud_final_pte_size; +} + +/* 把传下来的配置信息填入到 struct smmu_pte_cfg *ptTlbEntryCfg 中 */ +static u32 zxdh_smmu_request_to_pte_cfg(const u32 udPTESize, + const struct stPteRequest *ptPteRequest, + struct smmu_pte_cfg *ptTlbEntryCfg) +{ + u64 uddRequestPhyAddr = 0; + + /* param check */ + SMMU_POINTER_CHECK(ptTlbEntryCfg); + SMMU_POINTER_CHECK(ptPteRequest); + + uddRequestPhyAddr = ptPteRequest->uddPhyAddr; + + ptTlbEntryCfg->udExecuteNever = SMMU_PAGETABLE_EXECUTE; + ptTlbEntryCfg->udShareable = ptPteRequest->udShare; + ptTlbEntryCfg->udAccessPermission = ptPteRequest->udRWFlag; + ptTlbEntryCfg->udMemoryAttribute = ptPteRequest->udMemAttr; + + /* 默认设为0 */ + ptTlbEntryCfg->udRACFG = 0; + ptTlbEntryCfg->udWACFG = 0; + if (READ_NOALLOCATE == + (READ_NOALLOCATE & ptTlbEntryCfg->udMemoryAttribute)) { + ptTlbEntryCfg->udRACFG = 3; + } + if (WRITE_NOALLOCATE == + (WRITE_NOALLOCATE & ptTlbEntryCfg->udMemoryAttribute)) { + ptTlbEntryCfg->udWACFG = 3; + } + + switch (udPTESize) { + case PAGE_SIZE_4K: { + ptTlbEntryCfg->uddPABaseAddr = uddRequestPhyAddr & PAGE_MASK_4K; + ptTlbEntryCfg->udPageType = SMMU_PAGETABLE_PAGESIZE_4KB; /* */ + break; + } + case PAGE_SIZE_2M: { + ptTlbEntryCfg->uddPABaseAddr = uddRequestPhyAddr & PAGE_MASK_2M; + ptTlbEntryCfg->udPageType = SMMU_PAGETABLE_PAGESIZE_2MB; /* */ + break; + } + case PAGE_SIZE_1G: { + ptTlbEntryCfg->uddPABaseAddr = uddRequestPhyAddr & PAGE_MASK_1G; + ptTlbEntryCfg->udPageType = SMMU_PAGETABLE_PAGESIZE_1G; /* */ + break; + } + default: /* 默认按4k处理 */ + { + ptTlbEntryCfg->uddPABaseAddr = uddRequestPhyAddr & PAGE_MASK_4K; + ptTlbEntryCfg->udPageType = SMMU_PAGETABLE_PAGESIZE_4KB; /* */ + break; + } + } + + return CMDK_OK; +} + +static u64 zxdh_smmu_sram_pagetable_v2p(u64 uddVa, + struct smmu_pte_address *pstPteAddress) +{ + u64 uddPa = 0; + + if ((pstPteAddress->uddPageTableVirBaseAddr == 0) || + (pstPteAddress->tPageTableCfg.uddPageTablePhyAddr == 0)) { + return CMDK_ERROR; + } + + uddPa = pstPteAddress->tPageTableCfg.uddPageTablePhyAddr + uddVa - + pstPteAddress->uddPageTableVirBaseAddr; + + return uddPa; +} + +static u64 zxdh_smmu_sram_pagetable_p2v(u64 uddPa, + struct smmu_pte_address *pstPteAddress) +{ + u64 uddVa = 0; + + if 
((pstPteAddress->uddPageTableVirBaseAddr == 0) || + (pstPteAddress->tPageTableCfg.uddPageTablePhyAddr == 0)) { + return CMDK_ERROR; + } + + uddVa = pstPteAddress->uddPageTableVirBaseAddr + uddPa - + pstPteAddress->tPageTableCfg.uddPageTablePhyAddr; + + return uddVa; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_get_l1_page_base_addr + * 功能描述: 获取L1 pte base address + * 输入参数: + * u64 uddPgTblAddr : sid对应的页表基地址 + * u64 uddVa : VA + * 输出参数: + * 返 回 值:L1 PTE 地址 + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +static u64 zxdh_smmu_get_l1_descriptor_va(u64 udd_l1_ttb_va, u64 udd_request_va) +{ + return (udd_l1_ttb_va + ((udd_request_va & 0xffc0000000ULL) >> 27)); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_get_l1_page_base_addr + * 功能描述: 获取L2 pte base address,即获取L2 descriptor + * 输入参数: + * u64 uddPgTblAddr : sid对应的页表基地址 + * u64 udd_request_va : VA + * 输出参数: + * 返 回 值:L1 PTE 地址 (VA) + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +static u64 +zxdh_smmu_get_l2_descriptor_va(u32 sid, u64 udd_request_va, + struct smmu_pte_address *pstPteAddress) +{ + u32 i = 0; + u64 uddLevelMask = 0; + u32 udLevelOffset = 0; + u64 udd_l2_nth_ttb_va = 0; + u64 udd_l2_start_ttb_va = 0; + struct t_Map_Manage *ptL2MapManage = NULL; + u32 *pud_used_l2_ttb_num = NULL; + /* 记录 l2 已申请使用的 ttb 数量 */ + static u32 s_udV8NumL2Pta; + + /* check param */ + SMMU_POINTER_CHECK(pstPteAddress); + SMMU_POINTER_CHECK(pstPteAddress->uddSmmuMapManageAddr); + + // 1G-1: 11 1111 1111 1111 1111 1111 1111 1111 + // 2M-1: 1 1111 1111 1111 1111 1111 + // ~(2M-1): 11 1111 1110 0000 0000 0000 0000 0000 + uddLevelMask = 0x3fe00000ull; /* (1G-1)&(~(2M-1)) */ + udLevelOffset = 18; /* div by 2M, mul 8 */ + + /* l2 map manage struct */ + ptL2MapManage = + (struct t_Map_Manage *)(pstPteAddress->uddSmmuMapManageAddr); + /* l2 start ttb 的内存起始地址 */ + pr_info("l2d_smmu_l2_offset:%d\n", pstPteAddress->l2d_smmu_l2_offset); + udd_l2_start_ttb_va = pstPteAddress->uddPageTableVirBaseAddr + + SMMU_L1_PT_SIZE + + pstPteAddress->l2d_smmu_l2_offset; + + pud_used_l2_ttb_num = &s_udV8NumL2Pta; + + // 先在已用的L2页表中找,是否在已存在的页表中,如果有,就不用再申请新的页表了,直接返回对应页表项的地址 + // L1 的每一个页表项能够映射1G的空间 + /* if the 1G which this va corresponds to has been allocated, find the existing address */ + for (i = 0; i < *pud_used_l2_ttb_num; i++) { + if (((udd_request_va & PAGE_MASK_1G) == + ptL2MapManage[i].uddMaskedVa) && + ptL2MapManage[i].udMapValid && + (sid == ptL2MapManage[i].udSteamIndex)) { + break; + } + } + + /* if not, allocate 4K space used for L2 page table for this 1G */ + if (i == *pud_used_l2_ttb_num) { + /* 使用一个新的 L2 ttb */ + if (*pud_used_l2_ttb_num < SMMU_L2_PT_NUM) { + /* 得到第 n 个L2页表的起始地址 即得到该1G对应的2M页表的基地址 */ + udd_l2_nth_ttb_va = + udd_l2_start_ttb_va + i * SMMU_L2_PER_PT_SIZE; + } else { + return 0; + } + + ptL2MapManage[i].udMapValid = 1; + ptL2MapManage[i].udSteamIndex = sid; + ptL2MapManage[i].uddTTBaseAddr = udd_l2_nth_ttb_va; + ptL2MapManage[i].uddMaskedVa = udd_request_va & PAGE_MASK_1G; + + *pud_used_l2_ttb_num += 1; + pstPteAddress->udL2PageTableNum = *pud_used_l2_ttb_num; + } + + /* 返回第 n 张 l2 ttb 的 pte base address,即获取 l2 descriptor */ + return (ptL2MapManage[i].uddTTBaseAddr + + 
(u64)((udd_request_va & uddLevelMask) >> udLevelOffset)); +} + +static u64 zxdh_smmu_get_l3_descriptor(u32 sid, u64 udd_request_va, + struct smmu_pte_address *pstPteAddress) +{ + u32 i = 0; + u64 uddLevelMask = 0; + u32 udLevelOffset = 0; + u64 udd_l3_nth_ttb_va = 0; + u64 udd_l3_start_ttb_va = 0; + struct t_Map_Manage *ptL3ManageMap = NULL; + u32 *pud_used_l3_ttb_num = NULL; + /* 记录 l3 已申请使用的 ttb 数量 */ + static u32 s_udV8NumL3Pta; + + /* check param */ + SMMU_POINTER_CHECK(pstPteAddress); + SMMU_POINTER_CHECK(pstPteAddress->uddSmmuMapManageAddr); + + // 2M-1: 1 1111 1111 1111 1111 1111 + // 4K-1: 1111 1111 1111 + //~(4K-1): 1 1111 1111 0000 0000 0000 + uddLevelMask = 0x001ff000ull; /* (2M-1)&(~(4K-1)) */ + udLevelOffset = 9; /* div 4K, mul 8 */ + + /* l3 map manage struct */ + ptL3ManageMap = + (struct t_Map_Manage *)(pstPteAddress->uddSmmuMapManageAddr + + SMMU_L2_MAP_MANAGE_SIZE); + /* l3 start ttb 的内存起始地址 */ + udd_l3_start_ttb_va = pstPteAddress->uddPageTableVirBaseAddr + + SMMU_L1_PT_SIZE + SMMU_L2_PT_SIZE; + + pud_used_l3_ttb_num = &s_udV8NumL3Pta; + + /* the same logic as get L2 */ + for (i = 0; i < *pud_used_l3_ttb_num; i++) { + // 此 L3 页表(每块4K)是给哪个 L2 的 2M 使用的 + if (((udd_request_va & PAGE_MASK_2M) == + ptL3ManageMap[i].uddMaskedVa) && + ptL3ManageMap[i].udMapValid && + (sid == ptL3ManageMap[i].udSteamIndex)) { + break; + } + } + + if (i == *pud_used_l3_ttb_num) { + /* 使用一个新的 l3 ttb */ + if (*pud_used_l3_ttb_num < SMMU_L3_PT_NUM) { + /* 得到第 n 个 l3 页表的起始地址 即得到该 2M 对应的 4k 页表的基地址 */ + udd_l3_nth_ttb_va = + udd_l3_start_ttb_va + i * SMMU_L3_PER_PT_SIZE; + } else { + return 0; + } + + ptL3ManageMap[i].udMapValid = 1; + ptL3ManageMap[i].udSteamIndex = sid; + ptL3ManageMap[i].uddTTBaseAddr = udd_l3_nth_ttb_va; + ptL3ManageMap[i].uddMaskedVa = udd_request_va & PAGE_MASK_2M; + + *pud_used_l3_ttb_num += 1; + pstPteAddress->udL3PageTableNum = *pud_used_l3_ttb_num; + } + /* 返回第 n 张 l3 ttb 的 pte base address,即获取 l3 descriptor */ + return (ptL3ManageMap[i].uddTTBaseAddr + + (u64)((udd_request_va & uddLevelMask) >> udLevelOffset)); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_host_pa_to_l2d_pa + * 功能描述: + * 根据偏移,转换成risc_v l2d 上的 pa + * 输入参数: + * udd_host_pa : host上的pa + * dev + * 输出参数: + * 返 回 值: + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +u64 zxdh_smmu_host_pa_to_l2d_pa(u64 udd_host_pa, struct zxdh_sc_dev *dev) +{ + u64 udd_offset = 0; + u64 udd_l2d_pa = 0; + + /* check param */ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(dev->pte_address); + SMMU_POINTER_CHECK(dev->pte_address->CmaPageMemBasePA); + + if (udd_host_pa < dev->pte_address->CmaPageMemBasePA) + return -1; + + udd_offset = udd_host_pa - dev->pte_address->CmaPageMemBasePA; + udd_l2d_pa = dev->pte_l2d_startpa + udd_offset; + return udd_l2d_pa; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_write_l1_page_table_entry + * 功能描述: 配置 L1 PTE表项 + * 如果是块类型页表项: + * 配置最终的输出地址的高位; + * 配置高位属性 + * 配置低位属性 + * 如果是页表类型的页表项: + * 配置下一级页表的地址; + * 配置为页表类型的页表项 + * 输入参数: + * + * 输出参数: + * 返 回 值: + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +static u32 zxdh_smmu_write_l1_pagetable_entry( + const u64 udd_l1_descriptor_va, + const struct smmu_pte_cfg *const 
ptMmuPageTableEntryCfg, + struct zxdh_sc_dev *dev) +{ + u64 udd_l2d_pa = 0; + u64 uddPysicalAddress = 0; + u64 udd_l1_pte_offset = 0; + u64 *pull_l1_descriptor_va = NULL; + u64 *pull_tmp_descriptor_va = NULL; + u64 udd_tmp_l1_descriptor_value = 0; + u64 udd_l2d_tmp_l1_descriptor_value = 0; + + struct zxdh_src_copy_dest src_dest = {}; + + /* check param */ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptMmuPageTableEntryCfg); + + if (ptMmuPageTableEntryCfg->udPageFormat != PAGE_FORMAT_V8) + return CMDK_ERROR; + + /* pte base address */ + pull_l1_descriptor_va = (u64 *)udd_l1_descriptor_va; + *pull_l1_descriptor_va = 0; + + /* physical block base address or next level page table address */ + uddPysicalAddress = ptMmuPageTableEntryCfg->uddPABaseAddr; + + /* block descriptor */ + if (ptMmuPageTableEntryCfg->udPageType == SMMU_PAGETABLE_PAGESIZE_1G) { + udd_tmp_l1_descriptor_value = + ((uddPysicalAddress & + L1_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((ptMmuPageTableEntryCfg->udExecuteNever + << L1_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L1_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((ptMmuPageTableEntryCfg->udAccessPermission) + << L1_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L1_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L1_LONG_DESCRIPTOR_BLOCK_AF_POS) & + L1_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((ptMmuPageTableEntryCfg->udShareable) + << L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((ptMmuPageTableEntryCfg->udMemoryAttribute) + << L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L1_LONG_DESCRIPTOR_FOR_BLOCK) | + (((ptMmuPageTableEntryCfg->udRACFG) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((ptMmuPageTableEntryCfg->udWACFG) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + + udd_l2d_tmp_l1_descriptor_value = udd_tmp_l1_descriptor_value; + + } + /* page table */ + else if (SMMU_PAGETABLE_PAGESIZE_2MB == + ptMmuPageTableEntryCfg->udPageType || + SMMU_PAGETABLE_PAGESIZE_4KB == + ptMmuPageTableEntryCfg->udPageType) { + udd_tmp_l1_descriptor_value = + ((uddPysicalAddress & + L1_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L1_LONG_DESCRIPTOR_FOR_TABLE)); + + udd_l2d_pa = + zxdh_smmu_host_pa_to_l2d_pa(uddPysicalAddress, dev); + udd_l2d_tmp_l1_descriptor_value = + ((udd_l2d_pa & L1_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L1_LONG_DESCRIPTOR_FOR_TABLE)); + } + + /* default little endian */ + if (ptMmuPageTableEntryCfg->udEndian == SMMU_TT_BIGENDIAN) { + udd_tmp_l1_descriptor_value = + uswap_64(udd_tmp_l1_descriptor_value); + udd_l2d_tmp_l1_descriptor_value = + uswap_64(udd_l2d_tmp_l1_descriptor_value); + } + + *pull_l1_descriptor_va = udd_tmp_l1_descriptor_value; + + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + pull_tmp_descriptor_va = (u64 *)dev->pte_address->uddPTETempVirAddr; + *pull_tmp_descriptor_va = udd_l2d_tmp_l1_descriptor_value; + + udd_l1_pte_offset = + udd_l1_descriptor_va - dev->pte_address->CmaPageMemBaseVA; + + /* cpy data from host to l2d */ + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + udd_l1_pte_offset; + + dev->cqp->process_config_pte_table(dev, src_dest); + pr_info("%s pte_addr:0x%llx\n", __func__, src_dest.dest); + + return CMDK_OK; +} + +static u32 zxdh_smmu_write_l2_pagetable_entry( + u32 sid, const u64 udd_l2_descriptor_va, + const struct smmu_pte_cfg *const ptMmuPageTableEntryCfg, + struct zxdh_sc_dev *dev) +{ + u64 uddPysicalAddress = 0; + u64 udd_l2_descriptor_value = 0; + u64 l2d_l2_descriptor_offset = 
0; + u64 *pull_tmp_l2_descriptor_va = NULL; + u64 *pull_to_l2d_descriptor_va = NULL; + u64 udd_l2d_l2_descriptor_value = 0; + + static u64 dma_to_l2d_count; + + struct zxdh_src_copy_dest src_dest = {}; + + /* param check */ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptMmuPageTableEntryCfg); + SMMU_POINTER_CHECK(dev->pte_address->uddPTETempVirAddr); + + if (ptMmuPageTableEntryCfg->udPageFormat != PAGE_FORMAT_V8) + return CMDK_ERROR; + + /* page table base address */ + pull_tmp_l2_descriptor_va = (u64 *)udd_l2_descriptor_va; + *pull_tmp_l2_descriptor_va = 0; + + /* block base physical address, or next level page table base address */ + uddPysicalAddress = ptMmuPageTableEntryCfg->uddPABaseAddr; + + /* block descriptor */ + if (ptMmuPageTableEntryCfg->udPageType == SMMU_PAGETABLE_PAGESIZE_2MB) { + udd_l2_descriptor_value = + ((uddPysicalAddress & + L2_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((ptMmuPageTableEntryCfg->udExecuteNever + << L2_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L2_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((ptMmuPageTableEntryCfg->udAccessPermission) + << L2_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L2_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L2_LONG_DESCRIPTOR_BLOCK_AF_POS) & + L2_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((ptMmuPageTableEntryCfg->udShareable) + << L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((ptMmuPageTableEntryCfg->udMemoryAttribute) + << L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L2_LONG_DESCRIPTOR_FOR_BLOCK) | + (((ptMmuPageTableEntryCfg->udRACFG) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((ptMmuPageTableEntryCfg->udWACFG) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + + udd_l2d_l2_descriptor_value = udd_l2_descriptor_value; + } + /* page table */ + else if (SMMU_PAGETABLE_PAGESIZE_4KB == + ptMmuPageTableEntryCfg->udPageType) { + udd_l2_descriptor_value = ((uddPysicalAddress & + L2_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L2_LONG_DESCRIPTOR_FOR_TABLE)); + + udd_l2d_l2_descriptor_value = ( + // 新版本方案 + (uddPysicalAddress & 0x3FFFFFFFFF) // bit[37:0] + | ((sid & 0xFULL) << 42) // bit[46:42] + | (1ULL << 47) // bit[51:47] + | (L2_LONG_DESCRIPTOR_FOR_TABLE)); + } + + /* default little endian */ + if (ptMmuPageTableEntryCfg->udEndian == SMMU_TT_BIGENDIAN) { + udd_l2_descriptor_value = uswap_64(udd_l2_descriptor_value); + udd_l2d_l2_descriptor_value = + uswap_64(udd_l2d_l2_descriptor_value); + } + + *pull_tmp_l2_descriptor_va = udd_l2_descriptor_value; + + memset((void *)dev->pte_address->uddPTETempVirAddr, 0, 8); + pull_to_l2d_descriptor_va = (u64 *)dev->pte_address->uddPTETempVirAddr; + *pull_to_l2d_descriptor_va = udd_l2d_l2_descriptor_value; + + dma_to_l2d_count++; + + // ======================================================================= + // 计算偏移量 + // ======================================================================= + l2d_l2_descriptor_offset = + udd_l2_descriptor_va - dev->pte_address->CmaPageMemBaseVA; + + /* cpy data from host to l2d */ + src_dest.src = dev->pte_address->uddPTETempPhyAddr; + src_dest.len = 8; + src_dest.dest = dev->pte_l2d_startpa + l2d_l2_descriptor_offset; + dev->cqp->process_config_pte_table(dev, src_dest); + pr_info("%s pte_addr:0x%llx\n", __func__, src_dest.dest); + + return CMDK_OK; +} + +static u32 zxdh_smmu_write_l3_pagetable_entry( + const u64 udd_l3_descriptor_va, + const struct smmu_pte_cfg *const ptMmuPageTableEntryCfg, + struct smmu_pte_address *pstPteAddress) +{ + u64 uddPysicalAddress = 0; + u64 
*pull_l3_descriptor_va = NULL; + u64 udd_tmp_l3_descriptor_value = 0; + + /* param check */ + SMMU_POINTER_CHECK(pstPteAddress); + SMMU_POINTER_CHECK(ptMmuPageTableEntryCfg); + + if (ptMmuPageTableEntryCfg->udPageFormat != PAGE_FORMAT_V8) + return CMDK_ERROR; + + /* pte address */ + pull_l3_descriptor_va = (u64 *)udd_l3_descriptor_va; + *pull_l3_descriptor_va = 0; + + uddPysicalAddress = ptMmuPageTableEntryCfg->uddPABaseAddr; + + if (ptMmuPageTableEntryCfg->udPageType == SMMU_PAGETABLE_PAGESIZE_4KB) { + udd_tmp_l3_descriptor_value = + ((uddPysicalAddress & + L3_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((ptMmuPageTableEntryCfg->udExecuteNever + << L3_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L3_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((ptMmuPageTableEntryCfg->udAccessPermission) + << L3_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L3_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L3_LONG_DESCRIPTOR_BLOCK_AF_POS) & + L3_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((ptMmuPageTableEntryCfg->udShareable) + << L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((ptMmuPageTableEntryCfg->udMemoryAttribute) + << L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L3_LONG_DESCRIPTOR_FOR_PAGE) | + (((ptMmuPageTableEntryCfg->udRACFG) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((ptMmuPageTableEntryCfg->udWACFG) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + } + + /* default little endian */ + if (ptMmuPageTableEntryCfg->udEndian == SMMU_TT_BIGENDIAN) { + udd_tmp_l3_descriptor_value = + uswap_64(udd_tmp_l3_descriptor_value); + } + + *pull_l3_descriptor_va = udd_tmp_l3_descriptor_value; + + return CMDK_OK; +} + +static u32 zxdh_smmu_set_l1_pte_entry(u64 udd_l1_descriptor_va, + struct smmu_pte_cfg *ptTlbEntryCfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptTlbEntryCfg); + + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, ptTlbEntryCfg, + dev); + + return CMDK_OK; +} + +static u32 zxdh_smmu_set_l2_pte_entry(u64 udd_l1_descriptor_va, + u64 udd_l2_descriptor_va, u32 sid, + struct smmu_pte_cfg *ptTlbEntryCfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(ptTlbEntryCfg); + + /* write L2 block descriptor */ + zxdh_smmu_write_l2_pagetable_entry(sid, udd_l2_descriptor_va, + ptTlbEntryCfg, dev); + + /* create Level1 page table config struct, get L2 pagetable base phyaddr */ + ptTlbEntryCfg->uddPABaseAddr = zxdh_smmu_sram_pagetable_v2p( + udd_l2_descriptor_va, dev->pte_address); + if (ptTlbEntryCfg->uddPABaseAddr == 0) + return CMDK_ERROR; + + /* write L1 page table entry */ + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, ptTlbEntryCfg, + dev); + + return CMDK_OK; +} + +static u32 zxdh_smmu_set_l3_pte_entry(u64 udd_l1_descriptor_va, + u64 udd_l2_descriptor_va, + u64 udd_l3_descriptor_va, u64 sid, + u64 udd_request_va, + struct smmu_pte_cfg *ptTlbEntryCfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptTlbEntryCfg); + + /* write L3 page table descriptor */ + zxdh_smmu_write_l3_pagetable_entry(udd_l3_descriptor_va, ptTlbEntryCfg, + dev->pte_address); + + /* structure L2 page table descriptor config */ + ptTlbEntryCfg->uddPABaseAddr = zxdh_smmu_sram_pagetable_v2p( + udd_l3_descriptor_va, dev->pte_address); + + /* write L2 page table descriptor */ + // 因为L2 PTE中要写L3页表的基地址,所以,这里应该拿L3页表地址算L2 PTE偏移 + zxdh_smmu_write_l2_pagetable_entry(sid, udd_l2_descriptor_va, + ptTlbEntryCfg, dev); + + /* structure L1 page table descriptor config */ + 
ptTlbEntryCfg->uddPABaseAddr = zxdh_smmu_sram_pagetable_v2p( + udd_l2_descriptor_va, dev->pte_address); + + /* write L1 page table descriptor */ + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, ptTlbEntryCfg, + dev); + + return CMDK_OK; +} + +static u32 zxdh_smmu_set_pte_entry(u64 udd_l1_ttb_va, u64 udd_request_va, + u64 udd_request_pa, u32 sid, + struct smmu_pte_cfg *ptTlbEntryCfg, + struct zxdh_sc_dev *dev) +{ + u64 udd_l1_descriptor_va = 0; + u64 udd_l2_descriptor_va = 0; + u64 udd_l3_descriptor_va = 0; + + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptTlbEntryCfg); + + if (dev->pte_address->uddPageTableVirBaseAddr == 0) + return CMDK_ERROR; + + switch (ptTlbEntryCfg->udPageType) { + case SMMU_PAGETABLE_PAGESIZE_4KB: { + udd_l3_descriptor_va = zxdh_smmu_get_l3_descriptor( + sid, udd_request_va, dev->pte_address); + udd_l2_descriptor_va = zxdh_smmu_get_l2_descriptor_va( + sid, udd_request_va, dev->pte_address); + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, udd_request_va); + + if (!udd_l3_descriptor_va || !udd_l2_descriptor_va || + !udd_l1_descriptor_va) { + return CMDK_ERROR; + } + + zxdh_smmu_set_l3_pte_entry(udd_l1_descriptor_va, + udd_l2_descriptor_va, + udd_l3_descriptor_va, sid, + udd_request_va, ptTlbEntryCfg, dev); + break; + } + case SMMU_PAGETABLE_PAGESIZE_2MB: { + udd_l3_descriptor_va = 0; + udd_l2_descriptor_va = zxdh_smmu_get_l2_descriptor_va( + sid, udd_request_va, dev->pte_address); + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, udd_request_va); + + if (!udd_l2_descriptor_va || !udd_l1_descriptor_va) + return CMDK_ERROR; + + zxdh_smmu_set_l2_pte_entry(udd_l1_descriptor_va, + udd_l2_descriptor_va, sid, + ptTlbEntryCfg, dev); + break; + } + case SMMU_PAGETABLE_PAGESIZE_1G: { + udd_l3_descriptor_va = 0; + udd_l2_descriptor_va = 0; + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, udd_request_va); + + if (!udd_l1_descriptor_va) + return CMDK_ERROR; + + zxdh_smmu_set_l1_pte_entry(udd_l1_descriptor_va, ptTlbEntryCfg, + dev); + break; + } + default: { + return CMDK_ERROR; + } + } + + return CMDK_OK; +} + +u32 smmuShowPagetableInfo(struct smmu_pte_address *pstPteAddress) +{ + SMMU_PRINT( + PM_INFO, + "pagetable info: -------------------------------------------------------------------\n"); + SMMU_PRINT(PM_INFO, "pagetable config.uddPageTablePhyAddr = 0x%llx\n", + pstPteAddress->tPageTableCfg.uddPageTablePhyAddr); + SMMU_PRINT(PM_INFO, "pagetable config.uddPageTableVirAddr = 0x%llx\n", + pstPteAddress->tPageTableCfg.uddPageTableVirAddr); + SMMU_PRINT(PM_INFO, "pagetable config.udPageTableSize = 0x%x\n", + pstPteAddress->tPageTableCfg.udPageTableSize); + SMMU_PRINT(PM_INFO, + "pagetable config.uddExPageTablePhyAddr = 0x%llx\n", + pstPteAddress->tPageTableCfg.uddExPageTablePhyAddr); + SMMU_PRINT(PM_INFO, "pagetable config.udPExPableSize = 0x%x\n", + pstPteAddress->tPageTableCfg.udPExPableSize); + SMMU_PRINT(PM_INFO, "max L1 pagetable num = %d, used = %d\n", + SMMU_L1_PT_NUM, pstPteAddress->udL1PageTableNum); + SMMU_PRINT(PM_INFO, "max L2 pagetable num = %d, used = %d\n", + SMMU_L2_PT_NUM, pstPteAddress->udL2PageTableNum); + SMMU_PRINT(PM_INFO, "max L3 pagetable num = %d, used = %d\n", + SMMU_L3_PT_NUM, pstPteAddress->udL3PageTableNum); + SMMU_PRINT( + PM_INFO, + "pte records num = %d, fail record = %d, max capacity = %d\n", + pstPteAddress->udPteRecordNum, + pstPteAddress->udPteFailRecordNum, MAX_PTE_RECORDS_NUM); + + return CMDK_OK; +} +EXPORT_SYMBOL(smmuShowPagetableInfo); + +u32 
CmdkSysMmuShowPteRecord(u32 udSid, u64 uddVa, + struct smmu_pte_address *pstPteAddress) +{ + u32 i = 0; + u64 uddVaTmp; + u64 uddTTBAddr; + + for (; i < pstPteAddress->udPteRecordNum; i++) { + if (pstPteAddress->ptPteRecords[i].udValid) { + // print all records + if (uddVa == 0xffffffffffffffff) { + uddVaTmp = pstPteAddress->ptPteRecords[i].uddVa; + } else if (uddVa >= pstPteAddress->ptPteRecords[i] + .uddVa && + uddVa < (pstPteAddress->ptPteRecords[i].uddVa + + pstPteAddress->ptPteRecords[i] + .uddSize)) { + uddVaTmp = uddVa; + } else { + continue; + } + + uddTTBAddr = zxdh_smmu_get_ttb(udSid, pstPteAddress); + if (uddTTBAddr == CMDK_ERROR || uddTTBAddr == 0) + return CMDK_ERROR; + //zxdh_smmu_get_l1_descriptor_va( + // zxdh_smmu_sram_pagetable_p2v(uddTTBAddr, + // pstPteAddress), + // uddVaTmp); + if (pstPteAddress->ptPteRecords[i].uddSize == + PAGE_SIZE_2M) { + zxdh_smmu_get_l2_descriptor_va(udSid, uddVaTmp, + pstPteAddress); + } + + if (pstPteAddress->ptPteRecords[i].uddSize == + PAGE_SIZE_4K) { + zxdh_smmu_get_l2_descriptor_va(udSid, uddVaTmp, + pstPteAddress); + + zxdh_smmu_get_l3_descriptor(udSid, uddVaTmp, + pstPteAddress); + } + } + } + return CMDK_OK; +} +EXPORT_SYMBOL(CmdkSysMmuShowPteRecord); + +struct zxdh_smmu_host_risc_msgs { + u32 sid; + u32 va; +}; + +/************************************************************************** + * 函数名称: zxdh_smmu_mmap + * 功能描述: 在host上 + * 写入pte,实现smmu虚实地址映射 + * 输入参数:ptPteRequest 地址映射信息 + * dev 设备信息 + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +u32 zxdh_smmu_mmap(struct stPteRequest *ptPteRequest, struct zxdh_sc_dev *dev) +{ + u32 ud_pte_size = 0; + u64 udd_l1_ttb_pa = 0; + u64 udd_request_va = 0; + u64 udd_request_pa = 0; + u64 udd_request_size = 0; + struct smmu_pte_cfg tTlbEntryCfg = { 0 }; + u32 ud_mmap_cnt = 0; + + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(ptPteRequest); + + udd_request_va = ptPteRequest->uddVirAddr; + udd_request_pa = ptPteRequest->uddPhyAddr; + udd_request_size = ptPteRequest->uddSize; + + if ((udd_request_pa & REV_PAGE_MASK_4K) || + (udd_request_va & REV_PAGE_MASK_4K) || + (udd_request_size & REV_PAGE_MASK_4K)) { + return CMDK_ERROR; + } + + tTlbEntryCfg.udEndian = SMMU_TT_LITTLEENDIAN; /* endian cfg */ + tTlbEntryCfg.udPageFormat = PAGE_FORMAT_V8; + + /* pa */ + udd_l1_ttb_pa = + zxdh_smmu_get_ttb(ptPteRequest->udStreamid, dev->pte_address); + if (udd_l1_ttb_pa == CMDK_ERROR) + return CMDK_ERROR; + + while (udd_request_size > 0) { + ud_mmap_cnt++; + + // if (10 == ud_mmap_cnt) + // { + // g_ucMmu600PrintModuleId = 8; + // } + + // 判断是否可以使用块类型的页表项(优先使用块类型的页表项) + // 1G 2M 4k + ud_pte_size = zxdh_smmu_get_pte_size(udd_request_va, + udd_request_pa, + udd_request_size, + SMMU_CD_TG0_4K); + + // ud_pte_size = PAGE_SIZE_4K; + + zxdh_smmu_request_to_pte_cfg(ud_pte_size, ptPteRequest, + &tTlbEntryCfg); + + zxdh_smmu_set_pte_entry( + zxdh_smmu_sram_pagetable_p2v(udd_l1_ttb_pa, + dev->pte_address), + udd_request_va, udd_request_pa, + ptPteRequest->udStreamid, &tTlbEntryCfg, dev); + + udd_request_va += ud_pte_size; + udd_request_pa += ud_pte_size; + ptPteRequest->uddPhyAddr = udd_request_pa; + if (udd_request_size < ud_pte_size) { + /* avoid negative value */ + udd_request_size = 0; + } else { + udd_request_size -= ud_pte_size; + } + } +#ifndef BSP_IS_PC_UT + wmb(); +#endif + + return CMDK_OK; +} +EXPORT_SYMBOL(zxdh_smmu_mmap); + 
+/************************************************************************** + * 函数名称: zxdh_smmu_struct_init + * 功能描述: 初始化mmu600页表相关数据结构 + * + * 输入参数: struct stPagetableParam *ptPgtPara : 页表初始化参数 + * struct zxdh_sc_dev * + * 输出参数: + * 返 回 值: CMDK_OK / CMDK_ERROR + * 其它说明: + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +u32 zxdh_smmu_struct_init(const struct stPagetableParam *ptPgtPara, + struct smmu_pte_address *pstPteAddress) +{ + void *pddr = NULL; + u32 udSize = 0; + u32 udL1PtIndex = 0; + u32 udPageTableSize = 0; + + SMMU_POINTER_CHECK(ptPgtPara); + SMMU_POINTER_CHECK(pstPteAddress); + + // ========== ========== ========== ========== + // 页表初始化参数值校验 + // ========== ========== ========== ========== + if (ptPgtPara->udPageTableSize == 0 || + ptPgtPara->udL1PageTableNum == 0 || + ptPgtPara->udL2PageTableNum == 0 || + ptPgtPara->udL3PageTableNum == 0) { + return CMDK_ERROR; + } + + udPageTableSize = ptPgtPara->udL1PageTableNum * SMMU_L1_PER_PT_SIZE + + ptPgtPara->udL2PageTableNum * SMMU_L2_PER_PT_SIZE + + ptPgtPara->udL3PageTableNum * SMMU_L3_PER_PT_SIZE; + if (udPageTableSize > ptPgtPara->udPageTableSize) + return CMDK_ERROR; + + MEMCPY(&(pstPteAddress->tPageTableCfg), sizeof(struct stPagetableParam), + ptPgtPara, sizeof(struct stPagetableParam)); + + if (pstPteAddress->CmaPageMemBaseVA == 0) { + // ========== ========== ========== ========== + // use reserve mem + // ========== ========== ========== ========== + + // self: 我的理解,这个是自己打桩测试,正是代码不需要走这个分支 + SMMU_POINTER_CHECK(ptPgtPara->uddPageTablePhyAddr); + + // Mmap page table space + pddr = (void *)ioremap(ptPgtPara->uddPageTablePhyAddr, + ptPgtPara->udPageTableSize); + + memset_8byte(pddr, 0, ptPgtPara->udPageTableSize); + + pstPteAddress->uddPageTableVirBaseAddr = (u64)pddr; + } else { + // ========== ========== ========== ========== + // use cma mem + // ========== ========== ========== ========== + + // ========== ========== ========== ========== + // 对齐待定 self: 怎么对齐?这里暂时先注销 + // ========== ========== ========== ========== + if (ptPgtPara->uddPageTablePhyAddr & + (SMMU_L1_PT_ALIGN_SIZE - 1)) { + return CMDK_ERROR; + } + + pstPteAddress->uddPageTableVirBaseAddr = + pstPteAddress->CmaPageMemBaseVA; + } + + // ========== ========== ========== ========== + // allocate g_uddSmmuMapManageAddr + // T_MAP_MANGE共有1+512个,分别用来记录1个同一L1下的L2的首地址,512个同一L2下的L3的首地址。 + // ========== ========== ========== ========== + udSize = SMMU_L2_MAP_MANAGE_SIZE + SMMU_L3_MAP_MANAGE_SIZE; + pstPteAddress->uddSmmuMapManageAddr = (u64)kmalloc(udSize, GFP_KERNEL); + SMMU_POINTER_CHECK(pstPteAddress->uddSmmuMapManageAddr); + MEMSET((void *)pstPteAddress->uddSmmuMapManageAddr, udSize, 0, udSize); + + // ========== ========== ========== ========== + // allocate g_ptPteRecords + // ========== ========== ========== ========== + udSize = sizeof(struct pteRecord) * MAX_PTE_RECORDS_NUM; + pstPteAddress->ptPteRecords = + (struct pteRecord *)kmalloc(udSize, GFP_KERNEL); + SMMU_POINTER_CHECK(pstPteAddress->ptPteRecords); + MEMSET(pstPteAddress->ptPteRecords, udSize, 0, udSize); + + // ========== ========== ========== ========== ===== + // 分配8字节空间存储每一次下发PTE的数据,作为中转 + // ========== ========== ========== ========== ===== + // dma对源地址有对齐要求,必须32byte对齐 + // kmalloc申请到的va是根据传入的申请大小决定对齐的 + pstPteAddress->uddPTETempVirAddr = + (u64)kmalloc(SMMU_L1_PER_PT_SIZE * 4, GFP_KERNEL); + SMMU_POINTER_CHECK(pstPteAddress->uddPTETempVirAddr); + MEMSET((void 
*)pstPteAddress->uddPTETempVirAddr, + SMMU_L1_PER_PT_SIZE * 4, 0, SMMU_L1_PER_PT_SIZE * 4); + pstPteAddress->uddPTETempPhyAddr = + __pa(pstPteAddress->uddPTETempVirAddr); + + // ====================================================================== + // init g_ptTtbMng + // self: TtbMmg用来管理L1基地址 + // TtbMng用来管理L1的信息,包括L1的基地址、该L1表是否有效、对应的SID + // ====================================================================== + udSize = sizeof(struct ttbManage) * SMMU_L1_PT_NUM; + g_ptTtbMng = (struct ttbManage *)kmalloc(udSize, GFP_KERNEL); + SMMU_POINTER_CHECK(g_ptTtbMng); + MEMSET(g_ptTtbMng, udSize, 0, udSize); + + // 这里只负责把用到的TTB配置好,具体哪个sid使用,在cmdk进行配置,即由用户自己根据业务需求自己配置 + // 只需要把L1的TTB配置了就可以了,因为L1是确定的,L2 L3共用一份 + for (udL1PtIndex = 0; udL1PtIndex < SMMU_L1_PT_NUM; udL1PtIndex++) { + g_ptTtbMng[udL1PtIndex].uddPhyTTB = + (pstPteAddress->tPageTableCfg.uddPageTablePhyAddr + + udL1PtIndex * SMMU_L1_PER_PT_SIZE); + } + + return CMDK_OK; +} diff --git a/src/rdma/src/smmu/kernel/common_define.h b/src/rdma/src/smmu/kernel/common_define.h new file mode 100644 index 0000000000000000000000000000000000000000..3cbc626ad2fc18875be2d67e7966f8c99174fa34 --- /dev/null +++ b/src/rdma/src/smmu/kernel/common_define.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef COMMON_DEFINE_H +#define COMMON_DEFINE_H + +#define SMMU_DRIVER_IN_KERNEL + +#include "cmdk.h" +#include "pub_return.h" + +/************************************************************************** + * Macro * + **************************************************************************/ +#define MEMCPY(a, b, c, d) memcpy(a, c, d) +#define MEMSET(a, b, c, d) memset(a, c, d) + +//#define BSP_ADS4 +#ifndef SMMU_BIG_ENDIAN +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define SMMU_BIG_ENDIAN +#endif +#endif + +extern u8 g_ucMmu600PrintModuleId; + +#define SMMU_PRINT(level, format, ...) \ + do { \ + if (level >= g_ucMmu600PrintModuleId) { \ + printk(format, ##__VA_ARGS__); \ + } else { \ + ; \ + } \ + } while (0) + +#define SMMU_POINTER_CHECK(ptr) PUB_CHECK_NULL_PTR_RET_ERR(ptr) + +extern u32 uswap_32(u32 v); +extern u64 uswap_64(u64 v); + +#ifdef SMMU_BIG_ENDIAN + +#define SMMU_SWAP_32(x) uswap_32(x) +#define SMMU_SWAP_64(x) uswap_64(x) + +#else + +#define SMMU_SWAP_32(x) (x) +#define SMMU_SWAP_64(x) (x) + +#endif + +#endif diff --git a/src/rdma/src/smmu/kernel/hal_smmu.h b/src/rdma/src/smmu/kernel/hal_smmu.h new file mode 100644 index 0000000000000000000000000000000000000000..369727862b8443f095f21ea5c783385d9683ebe8 --- /dev/null +++ b/src/rdma/src/smmu/kernel/hal_smmu.h @@ -0,0 +1,634 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +/** + * @file hal_smmu.h + * @brief mmu600 ARMV8页表格式头文件 + * @details + * @author 陈港文 + * @date 2021-04-27 + * @version V1.0 + * @copyright Copyright (c) 2018-2020 中兴通讯有限公司 + ********************************************************************************** + * @attention + * - 硬件平台:msc4.0 + * - 架构支持:arm64 + * @warning + * - + * @bug + * - + * @par 修改日志: + * + *
Date Version Author Description + *
2021/04/27 1.0 陈港文 Initial version created + *
+ * + ********************************************************************************** + */ + +#ifndef HAL_SMMU_H +#define HAL_SMMU_H + +/************************************************************************** + * struct * + **************************************************************************/ +/*udMemoryAttribute use + * + * SO:strongly-ordered memory + * DE:device memory + * NM:nomal memory, + * IWT: inner cache, write-through, + * OWT:outer cache, write-through, + * INC:inner non-cacheable + * ONC:outer non-cacheable + * IWB:inner cache,write-back, + * OWB:outer cache,write-back, + */ + +/** bit wide */ +#ifdef BW1 +#undef BW1 +#endif +#define BW1 ((u64)0x00000001) + +#ifdef BW2 +#undef BW2 +#endif +#define BW2 ((u64)0x00000003) + +#ifdef BW3 +#undef BW3 +#endif +#define BW3 ((u64)0x00000007) + +#ifdef BW4 +#undef BW4 +#endif +#define BW4 ((u64)0x0000000f) + +#ifdef BW5 +#undef BW5 +#endif +#define BW5 ((u64)0x0000001f) + +#ifdef BW6 +#undef BW6 +#endif +#define BW6 ((u64)0x0000003f) + +#ifdef BW7 +#undef BW7 +#endif +#define BW7 ((u64)0x0000007f) + +#ifdef BW8 +#undef BW8 +#endif +#define BW8 ((u64)0x000000ff) + +#ifdef BW9 +#undef BW9 +#endif +#define BW9 ((u64)0x000001ff) + +#ifdef BW10 +#undef BW10 +#endif +#define BW10 ((u64)0x000003ff) + +#ifdef BW11 +#undef BW11 +#endif +#define BW11 ((u64)0x000007ff) + +#ifdef BW12 +#undef BW12 +#endif +#define BW12 ((u64)0x00000fff) + +#ifdef BW13 +#undef BW13 +#endif +#define BW13 ((u64)0x00001fff) + +#ifdef BW14 +#undef BW14 +#endif +#define BW14 ((u64)0x00003fff) + +#ifdef BW15 +#undef BW15 +#endif +#define BW15 ((u64)0x00007fff) + +#ifdef BW16 +#undef BW16 +#endif +#define BW16 ((u64)0x0000ffff) + +#ifdef BW17 +#undef BW17 +#endif +#define BW17 ((u64)0x0001ffff) + +#ifdef BW18 +#undef BW18 +#endif +#define BW18 ((u64)0x0003ffff) + +#ifdef BW19 +#undef BW19 +#endif +#define BW19 ((u64)0x0007ffff) + +#ifdef BW20 +#undef BW20 +#endif +#define BW20 ((u64)0x000fffff) + +#ifdef BW21 +#undef BW21 +#endif +#define BW21 ((u64)0x001fffff) + +#ifdef BW22 +#undef BW22 +#endif +#define BW22 ((u64)0x003fffff) + +#ifdef BW23 +#undef BW23 +#endif +#define BW23 ((u64)0x007fffff) + +#ifdef BW24 +#undef BW24 +#endif +#define BW24 ((u64)0x00ffffff) + +#ifdef BW25 +#undef BW25 +#endif +#define BW25 ((u64)0x01ffffff) + +#ifdef BW26 +#undef BW26 +#endif +#define BW26 ((u64)0x03ffffff) + +#ifdef BW27 +#undef BW27 +#endif +#define BW27 ((u64)0x07ffffff) + +#ifdef BW28 +#undef BW28 +#endif +#define BW28 ((u64)0x0fffffff) + +#ifdef BW29 +#undef BW29 +#endif +#define BW29 ((u64)0x1fffffff) + +#ifdef BW30 +#undef BW30 +#endif +#define BW30 ((u64)0x3fffffff) + +#ifdef BW31 +#undef BW31 +#endif +#define BW31 ((u64)0x7fffffff) + +#ifdef BW32 +#undef BW32 +#endif +#define BW32 ((u64)0xffffffff) + +#define BW33 ((u64)0x00000001ffffffff) +#define BW34 ((u64)0x00000003ffffffff) +#define BW35 ((u64)0x00000007ffffffff) +#define BW36 ((u64)0x0000000fffffffff) +#define BW37 ((u64)0x0000001fffffffff) +#define BW38 ((u64)0x0000003fffffffff) +#define BW39 ((u64)0x0000007fffffffff) +#define BW40 ((u64)0x000000ffffffffff) +#define BW41 ((u64)0x000001ffffffffff) +#define BW42 ((u64)0x000003ffffffffff) +#define BW43 ((u64)0x000007ffffffffff) +#define BW44 ((u64)0x00000fffffffffff) +#define BW45 ((u64)0x00001fffffffffff) +#define BW46 ((u64)0x00003fffffffffff) +#define BW47 ((u64)0x00007fffffffffff) +#ifdef BW48 +#undef BW48 +#endif +#define BW48 ((u64)0x0000ffffffffffff) +#define BW49 ((u64)0x0001ffffffffffff) +#define BW50 ((u64)0x0003ffffffffffff) +#define BW51 
((u64)0x0007ffffffffffff) +#define BW52 ((u64)0x000fffffffffffff) +#define BW53 ((u64)0x001fffffffffffff) +#define BW54 ((u64)0x003fffffffffffff) +#define BW55 ((u64)0x007fffffffffffff) +#define BW56 ((u64)0x00ffffffffffffff) +#define BW57 ((u64)0x01ffffffffffffff) +#define BW58 ((u64)0x03ffffffffffffff) +#define BW59 ((u64)0x07ffffffffffffff) +#define BW60 ((u64)0x0fffffffffffffff) +#define BW61 ((u64)0x1fffffffffffffff) +#define BW62 ((u64)0x3fffffffffffffff) +#define BW63 ((u64)0x7fffffffffffffff) +#define BW64 ((u64)0xffffffffffffffff) + +/* ------------------------------------------------------------------------------------------------------------------ + * + * V8描述符定义 + * 1)一级页表block 1G 2b01---block,2b11---table + * 2)二级页表block 2M 2b01---block,2b11---table + * 3)三级页表page 4K 2b11---page + * + *------------------------------------------------------------------------------------------------------------------- + */ + +#define LONG_DESCRIPTOR_WACFG_POS (55) +#define LONG_DESCRIPTOR_WACFG_MASK (((u64)BW2) << LONG_DESCRIPTOR_WACFG_POS) +#define LONG_DESCRIPTOR_RACFG_POS (57) +#define LONG_DESCRIPTOR_RACFG_MASK (((u64)BW2) << LONG_DESCRIPTOR_RACFG_POS) + +/* stage1 */ +/* 一级页表的描述符定义 + * 1) block 型的,1G的页表 + * +-----------------+-------------+-----------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54 | 53 | 52 | 51 | 50 | 49-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |PXN |contiguous | DBM |GP-S1only | 0 | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----------+ + * | 31 --- 30 | 29--12 | 11 | 10 | 9--8 | 7--6 | 5 | 4--2 | 1 | 0 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + * | output address | 0 | nG | AF | SH[1:0] | AP[2:1] | NS | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + */ + +/* stage2 */ +/* 一级页表的描述符定义 + * 1) block 型的,1G的页表 + * +-----------------+-------------+-----------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54--53 | 52 | 51 | 50 | 49-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |contiguous | DBM |GP-S1only | 0 | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----------+ + * | 31 --- 21 | 29--12 | 11 | 10 | 9--8 | 7--6 | 5--2 | 1 | 0 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + * | output address | 0 | FnXS | AF | SH[1:0] | AP[1:0] | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + */ + +/*BIT54 XN + * 字段作用:设置execute never,memory的区域属性设置,如果为execute never,不能存放指令 + */ +#define L1_LONG_DESCRIPTOR_BLOCK_XN_POS (53) +#define L1_LONG_DESCRIPTOR_BLOCK_XN_MASK \ + (((u64)BW2) << L1_LONG_DESCRIPTOR_BLOCK_XN_POS) + +/*BIT 39-30 + *字段作用:设置PA地址 + */ +#define L1_LONG_DESCRIPTOR_BLOCK_PA_POS (30) +#define L1_LONG_DESCRIPTOR_BLOCK_PA_MASK \ + (((u64)BW18) << 
L1_LONG_DESCRIPTOR_BLOCK_PA_POS) + +/*BIT10 AF -- Access flag + * 字段作用:访问标志,为0不能读入TLB + */ +#define L1_LONG_DESCRIPTOR_BLOCK_AF_POS (10) +#define L1_LONG_DESCRIPTOR_BLOCK_AF_MASK \ + (BW1 << L1_LONG_DESCRIPTOR_BLOCK_AF_POS) + +#define L1_LONG_DESCRIPTOR_BLOCK_NG_POS (11) +#define L1_LONG_DESCRIPTOR_BLOCK_NG_MASK \ + (BW1 << L1_LONG_DESCRIPTOR_BLOCK_NG_POS) + +/*BIT[9-8]: SH[1:0] + * 字段作用:共享标志 + */ +#define L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS (8) +#define L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK \ + (BW2 << L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) + +/*BIT[7-6]: AP[2:1] + * 字段作用:访问权限AP + */ +#define L1_LONG_DESCRIPTOR_BLOCK_S2AP_POS (6) +#define L1_LONG_DESCRIPTOR_BLOCK_S2AP_MASK \ + (BW2 << L1_LONG_DESCRIPTOR_BLOCK_S2AP_POS) + +/*BIT[5:2] MEMATTR[3:0] + * 字段作用:设置内存属性索引 + */ +#define L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS (2) +#define L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK \ + (BW4 << L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) + +#define L1_LONG_DESCRIPTOR_FOR_BLOCK (1) +#define L1_LONG_DESCRIPTOR_FOR_TABLE (3) + +/* L1 page table , point to L2 page table base address + * 1) table 型的,存放2级页表地址 + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * | 63 | 62--61 | 60 | 59 | 58--52 | 51-48 | 47--32 | + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * | SBZ | ignored | 0 | output address | + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * +----------------------------------------------+-------------------------------------+-----+-----+ + * | 31--12 | 11--2 | 1 | 0 | + * +----------------------------------------------+-------------------------------------+-----+-----+ + * | output address | ignored | 1 | 1 | + * +----------------------------------------------+-------------------------------------+-----+-----+ + */ + +/*BIT 39-12 + *字段作用:设置PA地址 + */ +#define L1_LONG_DESCRIPTOR_TABLE_PA_POS (12) +#define L1_LONG_DESCRIPTOR_TABLE_PA_MASK \ + (((u64)BW36) << L1_LONG_DESCRIPTOR_TABLE_PA_POS) + +/* stage1 */ +/* 二级页表的描述符定义 + * 1) block 型的,2M的页表 + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54 | 53 | 52 | 51 | 50-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |PXN |contiguous | DBM | res | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + * | 31 --- 12 | 11 | 10 | 9--8 | 7--6 | 5 | 4--2 | 1 | 0 | + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + * | output address | nG | AF | SH[1:0] | AP[2:1] | NS | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + */ + +/* stage2 */ +/* 二级页表的描述符定义 + * 1) block 型的,2M的页表 + * +-----------------+-------------+-----------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54 53 | 52 | 51 | 50 | 49-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |contiguous | DBM |GP-S1only | 0 | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * 
+-------------------+------------+------+------+---------+----------+------+---------+-----+----------+ + * | 31 --- 21 | 29--12 | 11 | 10 | 9--8 | 7--6 | 5--2 | 1 | 0 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + * | output address | 0 | FnXS | AF | SH[1:0] | AP[1:0] | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + */ + +/*BIT54 XN + * 字段作用:设置execute never,memory的区域属性设置,如果为execute never,不能存放指令 + */ +#define L2_LONG_DESCRIPTOR_BLOCK_XN_POS (53) +#define L2_LONG_DESCRIPTOR_BLOCK_XN_MASK \ + (((u64)BW2) << L2_LONG_DESCRIPTOR_BLOCK_XN_POS) + +/*BIT 39-21 + *字段作用:设置PA地址 + */ +#define L2_LONG_DESCRIPTOR_BLOCK_PA_POS (21) +#define L2_LONG_DESCRIPTOR_BLOCK_PA_MASK \ + (((u64)BW27) << L2_LONG_DESCRIPTOR_BLOCK_PA_POS) + +/*BIT10 AF -- Access flag + * 字段作用:访问标志,为0不能读入TLB + */ +#define L2_LONG_DESCRIPTOR_BLOCK_AF_POS (10) +#define L2_LONG_DESCRIPTOR_BLOCK_AF_MASK \ + (BW1 << L2_LONG_DESCRIPTOR_BLOCK_AF_POS) + +#define L2_LONG_DESCRIPTOR_BLOCK_NG_POS (11) +#define L2_LONG_DESCRIPTOR_BLOCK_NG_MASK \ + (BW1 << L2_LONG_DESCRIPTOR_BLOCK_NG_POS) + +/*BIT[9-8]: SH[1:0] + * 字段作用:共享标志 + */ +#define L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS (8) +#define L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK \ + (BW2 << L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) + +/*BIT[7-6]: AP[2:1] + * 字段作用:访问权限AP + */ +#define L2_LONG_DESCRIPTOR_BLOCK_S2AP_POS (6) +#define L2_LONG_DESCRIPTOR_BLOCK_S2AP_MASK \ + (BW2 << L2_LONG_DESCRIPTOR_BLOCK_S2AP_POS) + +/*BIT[5:2] MemAttr[3:0] + * 字段作用:设置内存属性索引 + */ +#define L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS (2) +#define L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK \ + (BW4 << L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) + +#define L2_LONG_DESCRIPTOR_FOR_BLOCK (1) +#define L2_LONG_DESCRIPTOR_FOR_TABLE (3) + +/* 二级页表的描述符定义 + * 1) table 型的,存放3级页表地址 + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * | 63 | 62--61 | 60 | 59 | 58--52 | 51-40 | 47--32 | + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * | SBZ | ignored | 0 | output address | + * +----------+--------------+----------+-----------+----------------+-------------+----------------+ + * +----------------------------------------------+-------------------------------------+-----+-----+ + * | 31--12 | 11--2 | 1 | 0 | + * +----------------------------------------------+-------------------------------------+-----+-----+ + * | output address | ignored | 1 | 1 | + * +----------------------------------------------+-------------------------------------+-----+-----+ + */ + +/*BIT 39-12 + *字段作用:设置PA地址 + */ +#define L2_LONG_DESCRIPTOR_TABLE_PA_POS (12) +#define L2_LONG_DESCRIPTOR_TABLE_PA_MASK \ + (((u64)BW36) << L2_LONG_DESCRIPTOR_TABLE_PA_POS) + +/* stage1 */ +/* 三级页表的描述符定义 + * 1) page 型的,4K的页表 + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54 | 53 | 52 | 51 | 50-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |PXN |contiguous | DBM | res | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + * | 31 --- 12 | 11 | 10 | 9--8 | 7--6 | 5 | 4--2 | 1 | 0 | + * 
+-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + * | output address | nG | AF | SH[1:0] | AP[2:1] | NS | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----+-----+ + */ + +/* stage2 */ +/* 三级页表的描述符定义 + * 1) page 型的,4K的页表 + * +-----------------+-------------+-----------+---------+----------+-------------+-----------------+ + * | 63 |59-62 | 58--55 | 54 53 | 52 | 51 | 50 | 49-48 | 47--32 | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * | RES |PBHA | ignored| XN |contiguous | DBM |GP-S1only | 0 | output address | + * +-----------------+----------------+--------+---------+----------+-------------+-----------------+ + * +-------------------+------------+------+------+---------+----------+------+---------+-----+----------+ + * | 31 --- 21 | 29--12 | 11 | 10 | 9--8 | 7--6 | 5--2 | 1 | 0 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + * | output address | 0 | FnXS | AF | SH[1:0] | AP[1:0] | MEMATTR[3:0] | 0 | 1 | + * +-------------------+------------+------+------+---------+----------+------+--------------+-----+-----+ + */ + +/*BIT54 XN + * 字段作用:设置execute never,memory的区域属性设置,如果为execute never,不能存放指令 + */ +#define L3_LONG_DESCRIPTOR_BLOCK_XN_POS (53) +#define L3_LONG_DESCRIPTOR_BLOCK_XN_MASK \ + (((u64)BW2) << L3_LONG_DESCRIPTOR_BLOCK_XN_POS) + +/*BIT 39-12 + *字段作用:设置PA地址 + */ +#define L3_LONG_DESCRIPTOR_BLOCK_PA_POS (12) +#define L3_LONG_DESCRIPTOR_BLOCK_PA_MASK \ + (((u64)BW36) << L3_LONG_DESCRIPTOR_BLOCK_PA_POS) + +/*BIT10 AF -- Access flag + * 字段作用:访问标志,为0不能读入TLB + */ +#define L3_LONG_DESCRIPTOR_BLOCK_AF_POS (10) +#define L3_LONG_DESCRIPTOR_BLOCK_AF_MASK \ + (BW1 << L3_LONG_DESCRIPTOR_BLOCK_AF_POS) + +#define L3_LONG_DESCRIPTOR_BLOCK_NG_POS (11) +#define L3_LONG_DESCRIPTOR_BLOCK_NG_MASK \ + (BW1 << L3_LONG_DESCRIPTOR_BLOCK_NG_POS) + +/*BIT[9-8]: SH[1:0] + * 字段作用:共享标志 + */ +#define L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS (8) +#define L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK \ + (BW2 << L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) + +/*BIT[7-6]: AP[2:1] + * 字段作用:访问权限AP + */ +#define L3_LONG_DESCRIPTOR_BLOCK_S2AP_POS (6) +#define L3_LONG_DESCRIPTOR_BLOCK_S2AP_MASK \ + (BW2 << L3_LONG_DESCRIPTOR_BLOCK_S2AP_POS) + +/* + * BIT[5:2] MemAttr[3:0] + * 字段作用:设置内存属性索引 + */ +#define L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS (2) +#define L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK \ + (BW4 << L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) + +#define L3_LONG_DESCRIPTOR_FOR_PAGE (3) + +#define SMMU_PAGETABLE_SO (0) /**< strongly-ordered memory*/ +#define SMMU_PAGETABLE_DE (1) /**< device memory*/ +#define SMMU_PAGETABLE_NM_ONC_INC (5) /**< nomal memory, non-cache*/ +#define SMMU_PAGETABLE_NM_ONC_IWT (6) +#define SMMU_PAGETABLE_NM_ONC_IWB (7) +#define SMMU_PAGETABLE_NM_OWT_INC \ + (9) /**< nomal memory, L1 non-cache, L2 cache WT*/ +#define SMMU_PAGETABLE_NM_OWT_IWT (10) /**< nomal memory, WT*/ +#define SMMU_PAGETABLE_NM_OWT_IWB (11) +#define SMMU_PAGETABLE_NM_OWB_INC \ + (13) /**< nomal memory, L1 non-cache,L2 cache WB*/ +#define SMMU_PAGETABLE_NM_OWB_IWT \ + (14) /**< nomal memory,L1 cache WT,L2 cache WB*/ +#define SMMU_PAGETABLE_NM_OWB_IWB (15) +/* Allocate default */ +#define READ_NOALLOCATE (3 << 4) +#define WRITE_NOALLOCATE (3 << 6) + +#define SMMU_PAGETABLE_NM_ONC_IWT_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_ONC_IWT) +#define SMMU_PAGETABLE_NM_ONC_IWB_RNA \ + (READ_NOALLOCATE | 
SMMU_PAGETABLE_NM_ONC_IWB) +#define SMMU_PAGETABLE_NM_OWT_INC_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_INC) +#define SMMU_PAGETABLE_NM_OWT_IWT_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWT) +#define SMMU_PAGETABLE_NM_OWT_IWB_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWB) +#define SMMU_PAGETABLE_NM_OWB_INC_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_INC) +#define SMMU_PAGETABLE_NM_OWB_IWT_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWT) +#define SMMU_PAGETABLE_NM_OWB_IWB_RNA \ + (READ_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWB) + +#define SMMU_PAGETABLE_NM_ONC_IWT_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_ONC_IWT) +#define SMMU_PAGETABLE_NM_ONC_IWB_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_ONC_IWB) +#define SMMU_PAGETABLE_NM_OWT_INC_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_INC) +#define SMMU_PAGETABLE_NM_OWT_IWT_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWT) +#define SMMU_PAGETABLE_NM_OWT_IWB_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWB) +#define SMMU_PAGETABLE_NM_OWB_INC_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_INC) +#define SMMU_PAGETABLE_NM_OWB_IWT_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWT) +#define SMMU_PAGETABLE_NM_OWB_IWB_WNA \ + (WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWB) + +#define SMMU_PAGETABLE_NM_ONC_IWT_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_ONC_IWT) +#define SMMU_PAGETABLE_NM_ONC_IWB_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_ONC_IWB) +#define SMMU_PAGETABLE_NM_OWT_INC_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_INC) +#define SMMU_PAGETABLE_NM_OWT_IWT_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWT) +#define SMMU_PAGETABLE_NM_OWT_IWB_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWT_IWB) +#define SMMU_PAGETABLE_NM_OWB_INC_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_INC) +#define SMMU_PAGETABLE_NM_OWB_IWT_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWT) +#define SMMU_PAGETABLE_NM_OWB_IWB_RNA_WNA \ + (READ_NOALLOCATE | WRITE_NOALLOCATE | SMMU_PAGETABLE_NM_OWB_IWB) + +// udExecuteNever use +#define SMMU_PAGETABLE_EXECUTE_NEVER (1) /* XN, can not prefetch */ +#define SMMU_PAGETABLE_EXECUTE (0) + +// udPageType use +#define SMMU_PAGETABLE_PAGESIZE_4KB (0) /* 4KB, small page*/ +#define SMMU_PAGETABLE_PAGESIZE_64KB (1) /* 64KB,large page*/ +#define SMMU_PAGETABLE_PAGESIZE_1MB (2) /* 1MB,section*/ +#define SMMU_PAGETABLE_PAGESIZE_2MB (3) /* 2MB,block*/ +#define SMMU_PAGETABLE_PAGESIZE_16MB (4) /* 16MB,surper-section*/ +#define SMMU_PAGETABLE_PAGESIZE_512MB (5) /* 16MB,surper-section*/ +#define SMMU_PAGETABLE_PAGESIZE_1G (6) /* 1G,block*/ + +struct smmu_pte_cfg { + u64 uddPABaseAddr; /* Block base address */ + u64 udExecuteNever; /* Executable code memory */ + u32 udShareable; /* SH */ + u32 udAccessPermission; /* AP */ + u32 udMemoryAttribute; /* MemAttr */ + u32 udPageType; /* Page size */ + u64 udWACFG; /* Write-Allocate */ + u64 udRACFG; /* Read-Allocate */ + u32 udEndian; /* Endian */ + u32 udPageFormat; /* page table format */ +}; + +#endif diff --git a/src/rdma/src/smmu/kernel/ioctl_mmu600.c b/src/rdma/src/smmu/kernel/ioctl_mmu600.c new file mode 100644 index 0000000000000000000000000000000000000000..d0498e3b2c12cd98d0fb6562a6c45dec67812321 --- /dev/null +++ b/src/rdma/src/smmu/kernel/ioctl_mmu600.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE 
Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cmdk_mmu600.h" +#include "adk_mmu600.h" +#include "ioctl_mmu600.h" +#include "../../main.h" +/************************************************************************** + * Global Value * + **************************************************************************/ +static struct dentry *mmu600_dbg_root; + +static long mmu600test_module_ioctl(struct file *file, unsigned int cmd, + unsigned long param); + +static const struct file_operations zxdh_smmu_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = mmu600test_module_ioctl, +}; + +/************************************************************************** + * Extern Value * + **************************************************************************/ +extern u32 smmuShowPagetableInfo(struct smmu_pte_address *pstPteAddress); +extern u32 CmdkSysMmuShowPteRecord(u32 udSid, u64 uddVa, + struct smmu_pte_address *pstPteAddress); + +/************************************************************************** + * Local Function * + **************************************************************************/ +static int mmu600test_ioctl_example(unsigned long arg) +{ + int ret = CMDK_OK; + u64 addr = 0x20000000; + + if (copy_to_user((void *)arg, (void *)&addr, sizeof(u64))) + ret = CMDK_ERROR; + + pr_info("[mmu600test] %s finished.\n", __func__); + return ret; +} + +static int mmu600test_show_all_info(unsigned long arg, + struct smmu_pte_address *pstPteAddress) +{ + struct mmu600test_para para; + + if (copy_from_user((void *)¶, (void *)arg, + sizeof(struct mmu600test_para))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + smmuShowPagetableInfo(pstPteAddress); + pr_info("[mmu600test] %s finished.\n", __func__); + return CMDK_OK; +} + +static int mmu600test_show_pte_info(unsigned long arg, + struct smmu_pte_address *pstPteAddress) +{ + struct mmu600test_para para; + + if (copy_from_user((void *)¶, (void *)arg, + sizeof(struct mmu600test_para))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + CmdkSysMmuShowPteRecord(para.udSid, para.uddVa, pstPteAddress); + pr_info("[mmu600test] %s for sid%d va(0x%llx) finished.\n", __func__, + para.udSid, para.uddVa); + return CMDK_OK; +} + +static int mmu600test_set_pte(unsigned long arg, struct zxdh_sc_dev *dev) +{ + struct stPteRequest tMmuMmapCfg; + + if (copy_from_user((void *)&tMmuMmapCfg, (void *)arg, + sizeof(struct stPteRequest))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + /* dev not init */ + zxdh_smmu_set_pte(&tMmuMmapCfg, dev); + pr_info("[mmu600test] %s for sid%d finished.\n", __func__, + tMmuMmapCfg.udStreamid); + return CMDK_OK; +} + +static int mmu600test_delete_pte(unsigned long arg, struct zxdh_sc_dev *dev) +{ + struct mmu600test_para para; + + if (copy_from_user((void *)¶, (void *)arg, + sizeof(struct mmu600test_para))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + /* dev not init */ + pr_info("[mmu600test] %s for sid%d finished.\n", __func__, para.udSid); + return CMDK_OK; +} + +static int mmu600test_clean_tlb_by_ipa(unsigned long arg) +{ + struct mmu600test_para para; + + if (copy_from_user((void *)¶, (void *)arg, + sizeof(struct mmu600test_para))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + CmdkSysMmuCmdTlbCleanByIpa(para.udSid, 
para.uddVa, para.udPageLvl); + pr_info("[mmu600test] %s for sid%d finished.\n", __func__, para.udSid); + return CMDK_OK; +} + +static int mmu600test_sync_tlb(unsigned long arg) +{ + struct mmu600test_para para; + + if (copy_from_user((void *)¶, (void *)arg, + sizeof(struct mmu600test_para))) { + pr_info("[mmu600test] %s copy_from_user fail.\n", __func__); + return CMDK_ERROR; + } + + CmdkSysMmuCmdTlbSync(); + pr_info("[mmu600test] %s finished.\n", __func__); + return CMDK_OK; +} + +static long mmu600test_module_ioctl(struct file *filp, unsigned int cmd, + unsigned long param) +{ + int ret = CMDK_OK; + struct zxdh_sc_dev *dev = filp->private_data; + + switch (cmd) { + case MMU600TEST_API_CMD_EXAMPLE: { + ret = mmu600test_ioctl_example(param); + break; + } + case MMU600TEST_API_CMD_SHOW_ALL_INFO: { + ret = mmu600test_show_all_info(param, dev->pte_address); + break; + } + case MMU600TEST_API_CMD_SHOW_PTE_INFO: { + ret = mmu600test_show_pte_info(param, dev->pte_address); + break; + } + case MMU600TEST_API_CMD_SET_PTE: { + ret = mmu600test_set_pte(param, dev); + break; + } + case MMU600TEST_API_CMD_DELETE_PTE: { + ret = mmu600test_delete_pte(param, dev); + break; + } + case MMU600TEST_API_CMD_CLEAN_TLB_BY_VA: { + ret = mmu600test_clean_tlb_by_ipa(param); + break; + } + case MMU600TEST_API_CMD_SYNC_TLB: { + ret = mmu600test_sync_tlb(param); + break; + } + default: { + pr_info("[mmu600test] Unknown ioctl cmd!\n"); + ret = CMDK_ERROR; + } + } + return ret; +} + +void zxdh_smmu_test_dbg_init(struct zxdh_sc_dev *dev) +{ +#ifdef Z_CONFIG_RDMA_HOST + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + const char *name = pci_name(rf->pcidev); +#else + const char *name = "mmu600test"; +#endif + struct dentry *pfile __attribute__((unused)); + + dev->pte_address->mmu600_dbg_dentry = + debugfs_create_dir(name, mmu600_dbg_root); + if (dev->pte_address->mmu600_dbg_dentry) + pfile = debugfs_create_file("mmu600_test", 0600, + dev->pte_address->mmu600_dbg_dentry, + dev, &zxdh_smmu_test_fops); + else + pr_err("%s: debugfs entry for %s failed\n", __func__, name); +} +EXPORT_SYMBOL(zxdh_smmu_test_dbg_init); + +void mmu600test_dbg_exit(struct zxdh_sc_dev *dev) +{ + if (dev->pte_address) { + pr_err("%s: removing debugfs entries\n", __func__); + debugfs_remove_recursive(dev->pte_address->mmu600_dbg_dentry); + dev->pte_address->mmu600_dbg_dentry = NULL; + } +} +EXPORT_SYMBOL(mmu600test_dbg_exit); diff --git a/src/rdma/src/smmu/kernel/ioctl_mmu600.h b/src/rdma/src/smmu/kernel/ioctl_mmu600.h new file mode 100644 index 0000000000000000000000000000000000000000..574ada242b0864599a213b7a49db85e3ef18b039 --- /dev/null +++ b/src/rdma/src/smmu/kernel/ioctl_mmu600.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef IOCLT_MMU600_H +#define IOCLT_MMU600_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define MMU600TEST_DEV_NAME "mmu600test_device" +#define MMU600TEST_CLASS_NAME "mmu600test_class" +#define MMU600TEST_NAME "mmu600test" +#define MMU600TEST_DEV_COUNT 1 + +#define MMU600TEST_IOCTL_BASE 'M' +#define MMU600TEST_API_CMD_EXAMPLE _IOWR(MMU600TEST_IOCTL_BASE, 1, int) +#define MMU600TEST_API_CMD_SHOW_ALL_INFO _IOWR(MMU600TEST_IOCTL_BASE, 2, int) +#define MMU600TEST_API_CMD_SHOW_PTE_INFO _IOWR(MMU600TEST_IOCTL_BASE, 5, int) + +#define MMU600TEST_API_CMD_SET_STAGE1_ENABLE \ + _IOWR(MMU600TEST_IOCTL_BASE, 20, int) +#define MMU600TEST_API_CMD_SET_STAGE1_BYPASS \ + _IOWR(MMU600TEST_IOCTL_BASE, 21, int) 
+#define MMU600TEST_API_CMD_SET_PTE _IOWR(MMU600TEST_IOCTL_BASE, 22, int) +#define MMU600TEST_API_CMD_DELETE_PTE _IOWR(MMU600TEST_IOCTL_BASE, 23, int) +#define MMU600TEST_API_CMD_CLEAN_TLB_BY_VA _IOWR(MMU600TEST_IOCTL_BASE, 24, int) +#define MMU600TEST_API_CMD_SYNC_TLB _IOWR(MMU600TEST_IOCTL_BASE, 26, int) + +struct mmu600test_para { + u32 udSid; + u32 udSsid; + u64 uddVa; + u64 uddSize; + u32 udPageLvl; + u32 udNum; + u32 udRegOffset; + u32 udRegVal32; + u64 uddRegVal64; +}; + +void zxdh_smmu_test_dbg_init(struct zxdh_sc_dev *dev); +void mmu600test_dbg_exit(struct zxdh_sc_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif /* IOCLT_MMU600_H */ diff --git a/src/rdma/src/smmu/kernel/pub_print.h b/src/rdma/src/smmu/kernel/pub_print.h new file mode 100644 index 0000000000000000000000000000000000000000..3c7590844e3daf6095fd5ea077e3c545a722c686 --- /dev/null +++ b/src/rdma/src/smmu/kernel/pub_print.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef PUB_PRINT_H +#define PUB_PRINT_H + +#if defined(__KERNEL__) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#endif +#include "cmdk.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define PM_DEBUG ((u8)0x01) /**< 默认仅在debug版本显示 */ +#define PM_INFO ((u8)0x02) +#define PM_WARN ((u8)0x04) +#define PM_ERROR ((u8)0x08) +#define PM_FATAL ((u8)0x10) +#define DEFAULT_LEVEL ((u8)0x1E) /**< 默认不显示debug信息 */ +/** @} 输出控制级别 */ + +#define MAX_LEVEL_MASK ((u8)0x1F) /**< 全级别掩码 */ + +#define MAX_LEVEL_TYPE ((u8)0x05) /**< 定义5级打印 */ +#define INVALID_MODULE_ID 0xFF /**< 无效的模块id */ + +#define MAX_MDL_NAME_LEN 24 /**< 打印模块名称最大长度 */ +#define MAX_MODULE_ID ((u8)0x80) /**< 最大模块号,目前定义了128个模块 */ +#define MAX_MDL_PRINT_BUF_LEN 512 /**< 打印最大buffer长度 */ + +#define PM_FLAG_ON 1 /**< 打印flag打开 */ +#define PM_FLAG_OFF 0 /**< 打印flag关闭 */ + +extern u8 g_ucBySelfId; /**< 默认打印模块id */ + +/************************************************************************** + * 宏定义 * + **************************************************************************/ +/** 通用打印封装 */ +#define PUB_PRINTF printk + +#ifdef __cplusplus +} +#endif + +#endif /* PUB_PRINT_H */ diff --git a/src/rdma/src/smmu/kernel/pub_return.h b/src/rdma/src/smmu/kernel/pub_return.h new file mode 100644 index 0000000000000000000000000000000000000000..05362f6a1c7e2cf2dcf0cacbaaff8e7b973739e9 --- /dev/null +++ b/src/rdma/src/smmu/kernel/pub_return.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _PUB_RETURN_H_ +#define _PUB_RETURN_H_ +#include "pub_print.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/************************************************************************** + * 宏定义 * + **************************************************************************/ +#ifdef PUB_ERROR +#undef PUB_ERROR +#define PUB_ERROR (0xffffffff) /*直接定义为0xffffffff*/ +#else +#define PUB_ERROR (0xffffffff) /*0xffffffff*/ +#endif + +/** 检查空指针,返回错误 */ +#define PUB_CHECK_NULL_PTR_RET_ERR(ptr) \ + do { \ + if (!ptr) { \ + pr_info("Null Ptr Err! 
Fuc:%s,Line:%d,File:%s\n", \ + __func__, __LINE__, __FILE__); \ + return PUB_ERROR; \ + } \ + } while (0) + +/************************************************************************** + * 数据类型 * + **************************************************************************/ + +/************************************************************************** + * 全局函数原型 * + **************************************************************************/ + +#ifdef __cplusplus +} +#endif + +#endif /* _PUB_RETURN_H_ */ diff --git a/src/rdma/src/srq.c b/src/rdma/src/srq.c new file mode 100644 index 0000000000000000000000000000000000000000..0a06077429abc0b1858ac7559b7af8cbaeb0b3c9 --- /dev/null +++ b/src/rdma/src/srq.c @@ -0,0 +1,1372 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "ws.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" +#include "srq.h" +#include "linux_kcompat.h" + +static unsigned int ft_debug_srq_msg; +module_param(ft_debug_srq_msg, uint, 0444); +MODULE_PARM_DESC(ft_debug_srq_msg, "ft_debug_srq_msg =1, printk srq info"); + +/** + * zxdh_get_srq_wqe_shift - get shift count for maximum srq wqe size + * @uk_attrs: srq HW attributes + * @sge: Maximum Scatter Gather Elements wqe + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the srq wqe size based on number of SGEs. + * For 1 SGE, shift = 1 (wqe size of 2*16 bytes). + * For 2 or 3 SGEs, shift = 2 (wqe size of 4*16 bytes). + * For 4-7 SGE's Shift of 3. + * For 8-15 SGE's Shift of 4 otherwise (wqe size of 512 bytes). + */ +static void zxdh_get_srq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, + u8 *shift) +{ + *shift = 0; //16bytes RQE, need to confirm configuration + if (sge < 2) + *shift = 1; + else if (sge < 4) + *shift = 2; + else if (sge < 8) + *shift = 3; + else if (sge < 16) + *shift = 4; + else + *shift = 5; +} + +/** + * zxdh_srq_round_up - return round up srq wq depth + * @wqdepth: wq depth in quanta to round up + */ +static int zxdh_srq_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +/* + * zxdh_get_srqdepth - get SRQ depth (quanta) + * @max_hw_rq_quanta: HW SRQ size limit + * @srq_size: SRQ size + * @shift: shift which determines size of WQE + * @srqdepth: depth of SRQ + */ +static int zxdh_get_srqdepth(u32 max_hw_srq_quanta, u32 srq_size, u8 shift, + u32 *srqdepth) +{ + *srqdepth = zxdh_srq_round_up((srq_size << shift) + ZXDH_SRQ_RSVD); + + if (*srqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift)) + *srqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift; + else if ((*srqdepth >> shift) > max_hw_srq_quanta) + return -EINVAL; + + return 0; +} + +static __le64 *zxdh_get_srq_wqe(struct zxdh_srq *srq, int wqe_index) +{ + struct zxdh_srq_uk *srq_uk; + __le64 *wqe; + + srq_uk = &srq->sc_srq.srq_uk; + wqe = srq_uk->srq_base[wqe_index * srq_uk->srq_wqe_size_multiplier].elem; + return wqe; +} + +//each srq index occupies 2 Bytes +static __le16 *zxdh_get_srq_list_wqe(struct zxdh_srq *srq, u16 *idx) +{ + struct zxdh_srq_uk *srq_uk; + __le16 *wqe; + u16 wqe_idx; + + srq_uk = &srq->sc_srq.srq_uk; + wqe_idx = ZXDH_RING_CURRENT_TAIL(srq_uk->srq_list_ring); + dma_wmb(); /* make sure shadow area is updated before moving tail */ + ZXDH_RING_MOVE_TAIL(srq_uk->srq_list_ring); + *idx = 
ZXDH_RING_CURRENT_TAIL(srq_uk->srq_list_ring); + + if (!(*idx)) + srq_uk->srq_list_polarity = !srq_uk->srq_list_polarity; + + wqe = &srq->sc_srq.srq_uk.srq_list_base[wqe_idx]; + + return wqe; +} + +void zxdh_free_srq_wqe(struct zxdh_srq_uk *srq, int wqe_index) +{ + struct zxdh_srq *iwsrq; + struct zxdh_sc_srq *sc_srq; + unsigned long flags; + __le64 *wqe; + u64 hdr; + + sc_srq = container_of(srq, struct zxdh_sc_srq, srq_uk); + iwsrq = container_of(sc_srq, struct zxdh_srq, sc_srq); + /* always called with interrupts disabled. */ + spin_lock_irqsave(&iwsrq->lock, flags); + wqe = zxdh_get_srq_wqe(iwsrq, srq->srq_ring.tail); + + srq->srq_ring.tail = wqe_index; + hdr = FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, wqe_index); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + spin_unlock_irqrestore(&iwsrq->lock, flags); +} + +/** + * zxdh_setup_kmode_srq - setup initialization for kernel mode srq + * @iwdev: iwarp device + * @iwsrq: srq ptr (user or kernel) + * @info: initialize info to return + * @init_attr: Initial SRQ create attributes + */ +static int zxdh_setup_kmode_srq(struct zxdh_device *iwdev, + struct zxdh_srq *iwsrq, + struct zxdh_srq_init_info *info, + struct ib_srq_init_attr *init_attr) +{ + struct zxdh_dma_mem *mem = &iwsrq->kmem; + struct zxdh_dma_mem *mem_list = &iwsrq->kmem_list; + struct zxdh_dma_mem *mem_db = &iwsrq->kmem_db; + u32 srqdepth; + u8 srqshift; + u32 srq_size; + u32 srq_list_size; + u32 db_size; + u32 log2_srq_size; + int status; + struct zxdh_srq_uk_init_info *ukinfo = &info->srq_uk_init_info; + struct zxdh_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + + //get shift count for maximum wqe size + zxdh_get_srq_wqe_shift(uk_attrs, ukinfo->max_srq_frag_cnt, &srqshift); + + //get SRQ depth (quanta) + status = zxdh_get_srqdepth(uk_attrs->max_hw_srq_quanta, + ukinfo->srq_size, srqshift, &srqdepth); + if (status) + return status; + + iwsrq->ksrq.srq_wrid_mem = kcalloc(ukinfo->srq_size, + sizeof(*iwsrq->ksrq.srq_wrid_mem), + GFP_KERNEL); + if (!iwsrq->ksrq.srq_wrid_mem) + return -ENOMEM; + + ukinfo->srq_wrid_array = iwsrq->ksrq.srq_wrid_mem; + srq_size = srqdepth * ZXDH_SRQ_WQE_MIN_SIZE; + ukinfo->srq_size = srqdepth >> srqshift; + log2_srq_size = roundup_pow_of_two(ukinfo->srq_size); + log2_srq_size = order_base_2(log2_srq_size); + ukinfo->log2_srq_size = log2_srq_size; + + mem->size = ALIGN(srq_size, ZXDH_HW_PAGE_SIZE); + mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, &mem->pa, + GFP_KERNEL); + if (!mem->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + return -ENOMEM; + } + + srq_list_size = ukinfo->srq_size * sizeof(u16); + ukinfo->srq_list_size = ukinfo->srq_size; + mem_list->size = ALIGN(srq_list_size, 64); + mem_list->va = dma_alloc_coherent(iwdev->rf->hw.device, mem_list->size, + &mem_list->pa, GFP_KERNEL); + if (!mem_list->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + return -ENOMEM; + } + + db_size = 8; + mem_db->size = ALIGN(db_size, 8); + mem_db->va = dma_alloc_coherent(iwdev->rf->hw.device, mem_db->size, + &mem_db->pa, GFP_KERNEL); + if (!mem_db->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem_list->size, + mem_list->va, mem_list->pa); + mem_list->va = NULL; + return 
-ENOMEM; + } + + *(u64 *)mem_db->va = ZXDH_SRQ_DB_INIT_VALUE; + ukinfo->srq_base = mem->va; + info->srq_pa = mem->pa; + ukinfo->srq_list_base = mem_list->va; + info->srq_list_pa = mem_list->pa; + ukinfo->srq_db_base = mem_db->va; + info->srq_db_pa = mem_db->pa; + init_attr->attr.max_wr = (srqdepth - ZXDH_SRQ_RSVD) >> srqshift; + + return 0; +} + +static void zxdh_srq_wqe_init(struct zxdh_srq *srq) +{ + int i; + struct zxdh_srq_uk *srq_uk; + __le64 *wqe; + u64 hdr; + + srq_uk = &srq->sc_srq.srq_uk; + + for (i = srq_uk->srq_ring.head; i < srq_uk->srq_ring.tail; i++) { + wqe = zxdh_get_srq_wqe(srq, i); + hdr = FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, (i + 1)); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + } +} + +static int zxdh_validate_srq_attrs(struct ib_srq_init_attr *init_attr, + struct zxdh_device *iwdev) +{ + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + + if (init_attr->attr.max_sge > uk_attrs->max_hw_wq_frags) + return -EINVAL; + + if (init_attr->attr.max_wr > uk_attrs->max_hw_srq_wr) + return -EINVAL; + + if (init_attr->attr.srq_limit > init_attr->attr.max_wr) + return -EINVAL; + + if (init_attr->srq_type != IB_SRQT_BASIC) + return -EOPNOTSUPP; + + return 0; +} + +/** + * zxdh_free_srq_rsrc - free up memory resources for srq + * @iwsrq: srq ptr (user or kernel) + */ +static void zxdh_free_srq_rsrc(struct zxdh_srq *iwsrq) +{ + struct zxdh_device *iwdev = iwsrq->iwdev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_dev *dev; + u32 srq_num; + + dev = &rf->sc_dev; + srq_num = iwsrq->ibsrq.ext.xrc.srq_num - dev->base_srqn; + zxdh_free_rsrc(rf, rf->allocated_srqs, srq_num); + + if (!iwsrq->user_mode) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem.size, + iwsrq->kmem.va, iwsrq->kmem.pa); + iwsrq->kmem.va = NULL; + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem_list.size, + iwsrq->kmem_list.va, iwsrq->kmem_list.pa); + iwsrq->kmem_list.va = NULL; + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem_db.size, + iwsrq->kmem_db.va, iwsrq->kmem_db.pa); + iwsrq->kmem_db.va = NULL; + } +} + +/** + * zxdh_uk_srq_init - initialize srq + * @srq: hw srq (user and kernel) + * @info: srq initialization info + * + * initializes the vars used in both user and kernel mode. + * size of the wqe depends on numbers of max. fragements + * allowed. Then size of wqe * the number of wqes should be the + * amount of memory allocated for srq. 
+ */ +static int zxdh_uk_srq_init(struct zxdh_srq_uk *srq, + struct zxdh_srq_uk_init_info *info) +{ + u32 srq_ring_size; + u8 srqshift; + + srq->uk_attrs = info->uk_attrs; + if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags) + return -EINVAL; + + zxdh_get_srq_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, + &srqshift); + srq->srq_base = info->srq_base; + srq->srq_list_base = info->srq_list_base; + srq->srq_db_base = info->srq_db_base; + srq->srq_wrid_array = info->srq_wrid_array; + srq->srq_id = info->srq_id; + srq->srq_size = info->srq_size; + srq->log2_srq_size = info->log2_srq_size; + srq->srq_list_size = info->srq_list_size; + srq->max_srq_frag_cnt = info->max_srq_frag_cnt; + srq_ring_size = srq->srq_size; + srq->srq_wqe_size = srqshift; + srq->srq_wqe_size_multiplier = 1 << srqshift; + ZXDH_RING_INIT(srq->srq_ring, srq_ring_size); + ZXDH_RING_INIT(srq->srq_list_ring, srq->srq_list_size); + srq->srq_ring.tail = srq->srq_size - 1; + //initial value is 0, initial use is 1 + srq->srq_list_polarity = 1; + + return 0; +} + +/** + * zxdh_sc_srq_init - initialize srq + * @srq: sc srq + * @info: initialization srq info + */ +static int zxdh_sc_srq_init(struct zxdh_sc_srq *srq, + struct zxdh_srq_init_info *info) +{ + int ret_code; + u32 pble_obj_cnt; + + if (info->srq_uk_init_info.max_srq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + + srq->srq_pa = info->srq_pa; + srq->srq_list_pa = info->srq_list_pa; + srq->srq_db_pa = info->srq_db_pa; + srq->pd = info->pd; + srq->virtual_map = info->virtual_map; + srq->list_virtual_map = info->list_virtual_map; + srq->pbl_chunk_size = info->pbl_chunk_size; + srq->list_pbl_chunk_size = info->list_pbl_chunk_size; + srq->first_pm_pbl_idx = info->first_pm_pbl_idx; + srq->list_first_pm_pbl_idx = info->list_first_pm_pbl_idx; + srq->srq_limit = info->srq_limit; + ret_code = zxdh_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info); + if (ret_code) + return ret_code; + + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if ((info->virtual_map && info->srq_pa >= pble_obj_cnt) || + (info->list_virtual_map && info->srq_list_pa >= pble_obj_cnt)) + return -EINVAL; + srq->hw_srq_size = zxdh_get_encoded_wqe_size(srq->srq_uk.srq_ring.size, + ZXDH_QUEUE_TYPE_SQ_RQ); + + return 0; +} + +static int zxdh_cqp_create_srq_cmd(struct zxdh_srq *iwsrq) +{ + struct zxdh_pci_f *rf = iwsrq->iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_create_srq_info *srq_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_CREATE; + srq_info = &cqp_request->info.in.u.srq_create.info; + memset(srq_info, 0, sizeof(*srq_info)); + srq_info->state = ZXDH_SRQ_STATE_GOOD; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq; + cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +#ifdef ZRDMA_CREATE_SRQ_VER_1 +/** + * zxdh_create_srq - create srq + * @ibpd: ptr of ibpd + * @init_attr: attributes for srq + * @udata: user data for create srq + */ +struct ib_srq *zxdh_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_srq *iwsrq; + struct zxdh_pci_f *rf = 
iwdev->rf; + struct zxdh_create_srq_req req; + struct zxdh_create_srq_resp uresp = {}; + u32 srq_num = 0; + int ret; + int err_code; + int srq_size; + u32 log2_srq_size; + struct zxdh_sc_srq *srq; + struct zxdh_srq_init_info init_info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_ucontext *ucontext; + unsigned long flags; + struct zxdh_srq_mr *srqmr; + + err_code = zxdh_validate_srq_attrs(init_attr, iwdev); + if (err_code) + return ERR_PTR(err_code); + + iwsrq = kzalloc(sizeof(*iwsrq), GFP_KERNEL); + if (!iwsrq) + return ERR_PTR(-ENOMEM); + + srq_size = init_attr->attr.max_wr; + log2_srq_size = order_base_2(srq_size); + + init_info.srq_uk_init_info.srq_size = srq_size; + init_info.srq_uk_init_info.log2_srq_size = log2_srq_size; + init_info.srq_uk_init_info.max_srq_frag_cnt = init_attr->attr.max_sge; + init_info.srq_uk_init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_uk_init_info.uk_attrs = uk_attrs; + + err_code = zxdh_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq, + &srq_num, &rf->next_srq); + if (err_code) + goto error; + iwsrq->iwdev = iwdev; + iwsrq->ibsrq.ext.xrc.srq_num = dev->base_srqn + srq_num; + srq = &iwsrq->sc_srq; + srq->dev = dev; + srq->back_srq = iwsrq; + init_info.pd = &iwpd->sc_pd; + init_info.srq_uk_init_info.srq_id = dev->base_srqn + srq_num; + iwsrq->max_wr = srq_size; + iwsrq->max_sge = init_attr->attr.max_sge; + iwsrq->srq_limit = init_attr->attr.srq_limit; + iwsrq->srq_compl_ctx = (uintptr_t)srq; + iwsrq->sc_srq.srq_compl_ctx = iwsrq->srq_compl_ctx; + refcount_set(&iwsrq->refcnt, 1); + spin_lock_init(&iwsrq->lock); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto free_rsrc; + } + iwsrq->user_mode = 1; + init_info.srq_uk_init_info.srq_base = + (void *)((unsigned long)req.user_wqe_bufs); + + ucontext = kc_rdma_udata_to_drv_context(ibpd, udata); + + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + iwsrq->iwpbl = zxdh_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->srq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + + if (!iwsrq->iwpbl) { + err_code = -ENODATA; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: no pbl info\n"); + goto free_rsrc; + } + srqmr = &iwsrq->iwpbl->srq_mr; + + //srq wqe addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.virtual_map = true; + init_info.pbl_chunk_size = 1; + init_info.first_pm_pbl_idx = srqmr->srq_pbl.idx; + init_info.srq_pa = srqmr->srq_pbl.idx; + } else { + init_info.srq_pa = srqmr->srq_pbl.addr; + init_info.virtual_map = false; + init_info.pbl_chunk_size = 0; + } + + //srq wqe idx addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.list_virtual_map = true; + init_info.list_pbl_chunk_size = 1; + init_info.list_first_pm_pbl_idx = + srqmr->srq_list_pbl.idx; + init_info.srq_list_pa = srqmr->srq_list_pbl.idx; + } else { + init_info.srq_list_pa = srqmr->srq_list_pbl.addr; + init_info.list_virtual_map = false; + init_info.list_pbl_chunk_size = 0; + } + + //srq wqe db addr configuration + init_info.srq_db_pa = srqmr->db_addr; + init_info.db_virtual_map = false; + init_info.db_pbl_chunk_size = 0; + + } else { + err_code = zxdh_setup_kmode_srq(iwdev, iwsrq, &init_info, + init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup srq failed\n"); + goto 
free_rsrc; + } + + ret = zxdh_sc_srq_init(srq, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: srq_init fail\n"); + goto free_rsrc; + } + + if (!udata) + zxdh_srq_wqe_init(iwsrq); + + err_code = zxdh_cqp_create_srq_cmd(iwsrq); + if (err_code) + goto free_rsrc; + + if (udata) { + uresp.srq_size = srq_size; + uresp.srq_list_size = srq_size; + uresp.srq_id = dev->base_srqn + srq_num; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); +#ifdef ZRDMA_DESTROY_SRQ_VER_1 + zxdh_destroy_srq(&iwsrq->ibsrq); +#else + zxdh_destroy_srq(&iwsrq->ibsrq, udata); +#endif + goto free_rsrc; + } + } + iwsrq->state = ZXDH_SRQ_STATE_GOOD; + rf->srq_table[srq_num] = iwsrq; + init_completion(&iwsrq->free_srq); + return &iwsrq->ibsrq; +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); +error: + kfree(iwsrq); + return ERR_PTR(err_code); +} +#endif + +#ifdef ZRDMA_CREATE_SRQ_VER_2 +/** + * zxdh_create_srq - create srq + * @ibsrq: ptr of srq + * @init_attr: attributes for srq + * @udata: user data for create srq + */ +int zxdh_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ib_pd *ibpd = ibsrq->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_create_srq_req req; + struct zxdh_create_srq_resp uresp = {}; + u32 srq_num = 0; + int ret; + int err_code; + int srq_size; + u32 log2_srq_size; + struct zxdh_sc_srq *srq; + struct zxdh_srq_init_info init_info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_ucontext *ucontext; + unsigned long flags; + struct zxdh_srq_mr *srqmr; + + err_code = zxdh_validate_srq_attrs(init_attr, iwdev); + if (err_code) + return err_code; + + srq_size = init_attr->attr.max_wr; + log2_srq_size = order_base_2(srq_size); + + init_info.srq_uk_init_info.srq_size = srq_size; + init_info.srq_uk_init_info.log2_srq_size = log2_srq_size; + init_info.srq_uk_init_info.max_srq_frag_cnt = init_attr->attr.max_sge; + init_info.srq_uk_init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_uk_init_info.uk_attrs = uk_attrs; + + err_code = zxdh_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq, + &srq_num, &rf->next_srq); + if (err_code) + goto error; + iwsrq->iwdev = iwdev; + iwsrq->ibsrq.ext.xrc.srq_num = dev->base_srqn + srq_num; + srq = &iwsrq->sc_srq; + srq->dev = dev; + srq->back_srq = iwsrq; + init_info.pd = &iwpd->sc_pd; + init_info.srq_uk_init_info.srq_id = dev->base_srqn + srq_num; + iwsrq->max_wr = srq_size; + iwsrq->max_sge = init_attr->attr.max_sge; + iwsrq->srq_limit = init_attr->attr.srq_limit; + iwsrq->srq_compl_ctx = (uintptr_t)srq; + iwsrq->sc_srq.srq_compl_ctx = iwsrq->srq_compl_ctx; + refcount_set(&iwsrq->refcnt, 1); + spin_lock_init(&iwsrq->lock); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto free_rsrc; + } + iwsrq->user_mode = 1; + init_info.srq_uk_init_info.srq_base = + (void *)((unsigned long)req.user_wqe_bufs); + + ucontext = kc_rdma_udata_to_drv_context(ibpd, udata); + + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + iwsrq->iwpbl = zxdh_get_pbl((unsigned 
long)req.user_wqe_bufs, + &ucontext->srq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + + if (!iwsrq->iwpbl) { + err_code = -ENODATA; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: no pbl info\n"); + goto free_rsrc; + } + srqmr = &iwsrq->iwpbl->srq_mr; + + //srq wqe addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.virtual_map = true; + init_info.pbl_chunk_size = 1; + init_info.first_pm_pbl_idx = srqmr->srq_pbl.idx; + init_info.srq_pa = srqmr->srq_pbl.idx; + } else { + init_info.srq_pa = srqmr->srq_pbl.addr; + init_info.virtual_map = false; + init_info.pbl_chunk_size = 0; + } + + //srq wqe idx addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.list_virtual_map = true; + init_info.list_pbl_chunk_size = 1; + init_info.list_first_pm_pbl_idx = + srqmr->srq_list_pbl.idx; + init_info.srq_list_pa = srqmr->srq_list_pbl.idx; + } else { + init_info.srq_list_pa = srqmr->srq_list_pbl.addr; + init_info.list_virtual_map = false; + init_info.list_pbl_chunk_size = 0; + } + + //srq wqe db addr configuration + init_info.srq_db_pa = srqmr->db_addr; + init_info.db_virtual_map = false; + init_info.db_pbl_chunk_size = 0; + + } else { + err_code = zxdh_setup_kmode_srq(iwdev, iwsrq, &init_info, + init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup srq failed\n"); + goto free_rsrc; + } + + ret = zxdh_sc_srq_init(srq, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: srq_init fail\n"); + goto free_rsrc; + } + + if (!udata) + zxdh_srq_wqe_init(iwsrq); + + err_code = zxdh_cqp_create_srq_cmd(iwsrq); + if (err_code) + goto free_rsrc; + + if (udata) { + uresp.srq_size = srq_size; + uresp.srq_list_size = srq_size; + uresp.srq_id = dev->base_srqn + srq_num; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); +#ifdef ZRDMA_DESTROY_SRQ_VER_1 + zxdh_destroy_srq(&iwsrq->ibsrq); +#else + zxdh_destroy_srq(&iwsrq->ibsrq, udata); +#endif + goto free_rsrc; + } + } + iwsrq->state = ZXDH_SRQ_STATE_GOOD; + rf->srq_table[srq_num] = iwsrq; + init_completion(&iwsrq->free_srq); + + return 0; +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); +error: + return err_code; +} +#endif + +void zxdh_srq_add_ref(struct ib_srq *ibsrq) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + + refcount_inc(&iwsrq->refcnt); +} + +void zxdh_srq_rem_ref(struct ib_srq *ibsrq) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + unsigned long flags; + + spin_lock_irqsave(&iwdev->rf->srqtable_lock, flags); + if (!refcount_dec_and_test(&iwsrq->refcnt)) { + spin_unlock_irqrestore(&iwdev->rf->srqtable_lock, flags); + return; + } + + iwdev->rf->srq_table[iwsrq->ibsrq.ext.xrc.srq_num - + iwdev->rf->sc_dev.base_srqn] = NULL; + spin_unlock_irqrestore(&iwdev->rf->srqtable_lock, flags); + complete(&iwsrq->free_srq); +} + +/** + * zxdh_srq_wq_destroy - send srq destroy cqp + * @rf: RDMA PCI function + * @srq: hardware control srq + */ +static void zxdh_srq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_srq *srq) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_destroy_srq_info *srq_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + srq_info = &cqp_request->info.in.u.srq_destroy.info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_DESTROY; + srq_info->state = 
ZXDH_SRQ_STATE_ERROR; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_destroy.srq = srq; + cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); +} + +#ifdef ZRDMA_DESTROY_SRQ_VER_3 +/** + * zxdh_destroy_srq - destroy + * @ibsrq: ptr of srq + * @udata: user data for destroy srq + */ +int zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + if (iwsrq->sc_srq.srq_uk.destroy_pending) + goto free_rsrc; + iwsrq->sc_srq.srq_uk.destroy_pending = true; + + zxdh_srq_rem_ref(&iwsrq->ibsrq); + wait_for_completion(&iwsrq->free_srq); + zxdh_srq_wq_destroy(iwdev->rf, &iwsrq->sc_srq); + +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); + + return 0; +} +#endif +#ifdef ZRDMA_DESTROY_SRQ_VER_2 +/** + * zxdh_destroy_srq - destroy + * @ibsrq: ptr of srq + * @udata: user data for destroy srq + */ +void zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + if (iwsrq->sc_srq.srq_uk.destroy_pending) + goto free_rsrc; + iwsrq->sc_srq.srq_uk.destroy_pending = true; + + zxdh_srq_rem_ref(&iwsrq->ibsrq); + wait_for_completion(&iwsrq->free_srq); + zxdh_srq_wq_destroy(iwdev->rf, &iwsrq->sc_srq); + +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); + + return 0; +} +#endif + +#ifdef ZRDMA_DESTROY_SRQ_VER_1 +/** + * zxdh_destroy_srq - destroy + * @ibsrq: ptr of srq + */ +int zxdh_destroy_srq(struct ib_srq *ibsrq) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + if (iwsrq->sc_srq.srq_uk.destroy_pending) + goto free_rsrc; + iwsrq->sc_srq.srq_uk.destroy_pending = true; + + zxdh_srq_rem_ref(&iwsrq->ibsrq); + wait_for_completion(&iwsrq->free_srq); + zxdh_srq_wq_destroy(iwdev->rf, &iwsrq->sc_srq); + +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); + kfree(iwsrq); + return 0; +} +#endif + +/** + * zxdh_modify_srq - modify srq + * @ibsrq: ptr of srq + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int zxdh_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = iwdev->rf; +#ifdef Z_DH_DEBUG + //int err_code; +#endif + /* We don't support resizing SRQs yet */ + if (attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= iwsrq->max_wr) + return -EINVAL; + } + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + iwsrq->srq_limit = attr->srq_limit; + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq; + cqp_info->in.u.srq_modify.info.limit = attr->srq_limit; + cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); +#ifdef Z_DH_DEBUG + //err_code = zxdh_query_srqc(&iwsrq->sc_srq, NULL); +#endif + return 0; +} + +/** + * zxdh_query_srq - query srq + * @ibsrq: ptr of srq + * @srq_attr: srq attributes to return + */ +int zxdh_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + u32 limit; + + 
zxdh_query_srqc(&iwsrq->sc_srq, &limit); + + srq_attr->max_wr = (iwsrq->max_wr - 1); + srq_attr->max_sge = iwsrq->max_sge; + srq_attr->srq_limit = limit; + + return 0; +} + +/** + * zxdh_post_srq_recv - post srq recv + * @ibsrq: ptr of srq + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +int zxdh_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_srq_uk *srq_uk; + __le16 *wqe_16; + __le64 *wqe_64; + u64 temp_val; + unsigned long flags; + int err = 0; + int nreq; + int i; + u16 *buf; + u32 buf_size; + u16 idx = 0; + u64 hdr; + __u32 byte_off; + + srq_uk = &iwsrq->sc_srq.srq_uk; + spin_lock_irqsave(&iwsrq->lock, flags); + buf_size = (iwsrq->max_wr * sizeof(u16)); + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + *bad_wr = ib_wr; + goto out; + } + + if (iwsrq->state == ZXDH_SRQ_STATE_ERROR) { + err = -EIO; + *bad_wr = ib_wr; + goto out; + } + + for (nreq = 0; ib_wr; nreq++, ib_wr = ib_wr->next) { + if (unlikely(ib_wr->num_sge > iwsrq->max_sge)) { + err = -EINVAL; + *bad_wr = ib_wr; + break; + } + + if (unlikely(srq_uk->srq_ring.head == srq_uk->srq_ring.tail)) { + err = -ENOMEM; + *bad_wr = ib_wr; + break; + } + + srq_uk->srq_wrid_array[srq_uk->srq_ring.head] = ib_wr->wr_id; + buf[nreq] = srq_uk->srq_ring.head; + + wqe_64 = zxdh_get_srq_wqe(iwsrq, srq_uk->srq_ring.head); + get_64bit_val(wqe_64, 0, &temp_val); + srq_uk->srq_ring.head = + (__u16)FIELD_GET(IRDMAQPSRQ_NEXT_WQE_INDEX, temp_val); + + for (i = 0, byte_off = ZXDH_SRQ_FRAG_BYTESIZE; + i < ib_wr->num_sge; i++) { + set_64bit_val(wqe_64, byte_off, ib_wr->sg_list[i].addr); + set_64bit_val( + wqe_64, byte_off + 8, + FIELD_PREP(IRDMAQPSRQ_FRAG_LEN, + ib_wr->sg_list[i].length) | + FIELD_PREP(IRDMAQPSRQ_FRAG_STAG, + ib_wr->sg_list[i].lkey)); + byte_off += ZXDH_SRQ_FRAG_BYTESIZE; + } + + if ((ib_wr->num_sge < iwsrq->max_sge) || + (ib_wr->num_sge == 0)) { + set_64bit_val(wqe_64, byte_off, 0); + set_64bit_val( + wqe_64, byte_off + 8, + FIELD_PREP(IRDMAQPSRQ_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSRQ_FRAG_STAG, + ZXDH_SRQ_INVALID_LKEY)); + } + set_64bit_val(wqe_64, 8, ((u64)srq_uk->srq_id) << 32); + + hdr = FIELD_PREP(IRDMAQPSRQ_RSV, 0) | + FIELD_PREP(IRDMAQPSRQ_VALID_SGE_NUM, ib_wr->num_sge) | + FIELD_PREP(IRDMAQPSRQ_SIGNATURE, 0) | + FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, + srq_uk->srq_ring.head); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe_64, 0, hdr); + } + + if (err == 0) { + for (i = 0; i < nreq; i++) { + wqe_16 = zxdh_get_srq_list_wqe(iwsrq, &idx); + set_16bit_val(wqe_16, 0, buf[i]); + } + + hdr = FIELD_PREP(ZXDH_SRQ_PARITY_SIGN, + iwsrq->sc_srq.srq_uk.srq_list_polarity) | + FIELD_PREP(ZXDH_SRQ_SW_SRQ_HEAD, idx); + dma_wmb(); + set_64bit_val(iwsrq->sc_srq.srq_uk.srq_db_base, 0, hdr); + } +out: + spin_unlock_irqrestore(&iwsrq->lock, flags); + if (err) + *bad_wr = ib_wr; + if (buf) + kfree(buf); + return err; +} + +/** + * zxdh_sc_srq_create - create srq + * @srq: sc srq + * @info: srq create info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_srq_create(struct zxdh_sc_srq *srq, + struct zxdh_create_srq_info *info, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + struct zxdh_sc_dev *dev; + + dev = srq->dev; + cqp = srq->dev->cqp; + if ((srq->srq_uk.srq_id - dev->base_srqn) < ZXDH_MIN_ROCE_SRQ_ID || + (srq->srq_uk.srq_id - 
dev->base_srqn) > + (cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].max_cnt - 1)) + return -EINVAL; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_SWWQECNT, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LISTVIRTMAP, srq->list_virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE, + srq->list_pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE, + srq->srq_uk.srq_wqe_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQAXIERRSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_REVERSEDLKEY, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQVIRTMAP, srq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_CONTSRQ, ZXDH_SRQ_WQE_NOT_CONT) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_WQSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_PD_INDEX, srq->pd->pd_id) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSIZE, srq->srq_uk.log2_srq_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LEAFPBLSIZE, srq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_STATE, ZXDH_SRQ_STATE_GOOD); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 16, hdr); + + set_64bit_val(wqe, 24, + srq->virtual_map ? srq->first_pm_pbl_idx : srq->srq_pa); + set_64bit_val(wqe, 32, + srq->list_virtual_map ? srq->list_first_pm_pbl_idx : + srq->srq_list_pa); + + set_64bit_val(wqe, 40, srq->srq_db_pa); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, srq->srq_limit); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 48, hdr); + + set_64bit_val(wqe, 56, srq->srq_compl_ctx); + + //bit0 bit1 bit2 set to 1 + set_64bit_val(wqe, 8, + (RDMA_SRQC_MASK_GENERAL_CFG | + RDMA_SRQC_MASK_LIMIT_WATER_CFG | + RDMA_SRQC_MASK_DEBUG_SET_CFG)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + // print_hex_dump(KERN_DEBUG, "srq create ", DUMP_PREFIX_OFFSET, 16, 8, wqe, 9*8, false); + return 0; +} + +/** + * zxdh_sc_srq_modify - modify srq cqp wqe + * @srq: sc srq + * @info: modify srq info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_srq_modify(struct zxdh_sc_srq *srq, + struct zxdh_modify_srq_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = srq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, RDMA_SRQC_MASK_LIMIT_WATER_CFG); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, info->limit); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 48, hdr); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + // print_hex_dump(KERN_DEBUG, "srq modify ", DUMP_PREFIX_OFFSET, 16, 8, wqe, 9*8, false); + + return 0; +} + +/** + * zxdh_sc_srq_destroy - cqp destroy srq + * @srq: sc srq + * @scratch: u64 saved to be used during cqp completion + * @post_sq: 
flag for cqp db to ring + */ +int zxdh_sc_srq_destroy(struct zxdh_sc_srq *srq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = srq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_SWWQECNT, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LISTVIRTMAP, srq->list_virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE, + srq->list_pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE, + ZXDH_SRQ_WQE_MIN_LEN_32_BYTE) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQAXIERRSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_REVERSEDLKEY, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQVIRTMAP, srq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_CONTSRQ, ZXDH_SRQ_WQE_NOT_CONT) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_WQSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_PD_INDEX, srq->pd->pd_id) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSIZE, srq->srq_uk.log2_srq_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LEAFPBLSIZE, srq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_STATE, ZXDH_SRQ_STATE_ERROR); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 16, hdr); + + //bit0 set to 1 + set_64bit_val(wqe, 8, RDMA_SRQC_MASK_GENERAL_CFG); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +#ifdef Z_DH_DEBUG +static void zxdh_print_hw_srqc(__le64 *srq_ctx) +{ + u64 temp; + + get_64bit_val(srq_ctx, 0, &temp); + pr_info("Sw Wqe cnt:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("list virtually mapped:0x%llx\n", FIELD_GET(BIT_ULL(21), temp)); + pr_info("list leaf pbl size:0x%llx\n", + FIELD_GET(GENMASK_ULL(23, 22), temp)); + pr_info("log srq stride:0x%llx\n", + FIELD_GET(GENMASK_ULL(26, 24), temp)); + pr_info("srq axi err sig:0x%llx\n", FIELD_GET(BIT_ULL(27), temp)); + pr_info("srq virtually mapped:0x%llx\n", FIELD_GET(BIT_ULL(29), temp)); + pr_info("cont srq:0x%llx\n", FIELD_GET(BIT_ULL(30), temp)); + pr_info("pd index:0x%llx\n", FIELD_GET(GENMASK_ULL(51, 32), temp)); + pr_info("log srq size:0x%llx\n", FIELD_GET(GENMASK_ULL(59, 56), temp)); + pr_info("leaf pbl size:0x%llx\n", FIELD_GET(GENMASK_ULL(61, 60), temp)); + pr_info("state:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 62), temp)); + + get_64bit_val(srq_ctx, 8, &temp); + pr_info("srq address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(srq_ctx, 16, &temp); + pr_info("srq list address:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(srq_ctx, 24, &temp); + pr_info("dbr address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(srq_ctx, 32, &temp); + pr_info("hw wqe cnt:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("limit water mark:0x%llx\n", + FIELD_GET(GENMASK_ULL(31, 16), temp)); + pr_info("debug set:0x%llx\n", FIELD_GET(GENMASK_ULL(41, 32), temp)); + pr_info("valid wqe index point:0x%llx\n", + FIELD_GET(GENMASK_ULL(43, 42), temp)); + + get_64bit_val(srq_ctx, 40, &temp); + pr_info("compl context value:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(srq_ctx, 48, &temp); + pr_info("srq wqe index:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(srq_ctx, 56, &temp); + 
pr_info("srq1 first pble index:0x%llx\n", + FIELD_GET(GENMASK_ULL(51, 0), temp)); +} +#endif + +int zxdh_query_srqc(struct zxdh_sc_srq *srq, u32 *limit) +{ + struct zxdh_sc_dev *dev = srq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_dma_mem srqc_buf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + u64 temp; + + srqc_buf.va = NULL; + srqc_buf.size = ALIGN(ZXDH_SRQ_CTX_SIZE, ZXDH_SRQC_ALIGNMENT); + srqc_buf.va = dma_alloc_coherent(dev->hw->device, srqc_buf.size, + &srqc_buf.pa, GFP_KERNEL); + if (!srqc_buf.va) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_SRQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_srqc.srq = srq; + cqp_info->in.u.query_srqc.srqc_buf_pa = srqc_buf.pa; + cqp_info->in.u.query_srqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } +#ifdef Z_DH_DEBUG + pr_info("***SRQ %d HW SRQC info print start***\n", srq->srq_uk.srq_id); + zxdh_print_hw_srqc(srqc_buf.va); + pr_info("****SRQ %d HW SRQC info print end****\n", srq->srq_uk.srq_id); +#endif + if (limit) { + get_64bit_val(srqc_buf.va, 32, &temp); + *limit = FIELD_GET(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, temp); + } +free_rsrc: + dma_free_coherent(dev->hw->device, srqc_buf.size, srqc_buf.va, + srqc_buf.pa); + srqc_buf.va = NULL; + return err_code; +} + +int zxdh_sc_query_srqc(struct zxdh_sc_srq *srq, u64 srqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = srq->dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_SRQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_SRQC_ID, srq->srq_uk.srq_id); + set_64bit_val(wqe, 8, srqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} diff --git a/src/rdma/src/srq.h b/src/rdma/src/srq.h new file mode 100644 index 0000000000000000000000000000000000000000..a941b4528b68e691d213fa48e64ee8bf2d84f6c8 --- /dev/null +++ b/src/rdma/src/srq.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef RDMA_SRQ_H +#define RDMA_SRQ_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "verbs.h" +#include "linux_kcompat.h" + +//SRQC_FIELD_MASK +#define RDMA_SRQC_MASK_GENERAL_CFG (0x01UL << 0) +#define RDMA_SRQC_MASK_LIMIT_WATER_CFG (0x01UL << 1) +#define RDMA_SRQC_MASK_DEBUG_SET_CFG (0x01UL << 2) + +#define ZXDH_CQPSQ_SRQ_ID_S 0 +#define ZXDH_CQPSQ_SRQ_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_SRQ_FILED_VALID_S 0 +#define ZXDH_CQPSQ_SRQ_FILEDVALID GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_SRQ_STATE_S 62 +#define ZXDH_CQPSQ_SRQ_STATE GENMASK_ULL(63, 62) +#define ZXDH_CQPSQ_SRQ_LEAFPBLSIZE_S 62 +#define ZXDH_CQPSQ_SRQ_LEAFPBLSIZE GENMASK_ULL(61, 60) +#define ZXDH_CQPSQ_SRQ_LOGSRQSIZE_S 56 +#define ZXDH_CQPSQ_SRQ_LOGSRQSIZE GENMASK_ULL(59, 56) +#define ZXDH_CQPSQ_SRQ_PD_INDEX_S 32 +#define ZXDH_CQPSQ_SRQ_PD_INDEX GENMASK_ULL(51, 32) +#define ZXDH_CQPSQ_SRQ_WQSIG_S 31 +#define 
ZXDH_CQPSQ_SRQ_WQSIG BIT_ULL(31) +#define ZXDH_CQPSQ_SRQ_CONTSRQ_S 30 +#define ZXDH_CQPSQ_SRQ_CONTSRQ BIT_ULL(30) +#define ZXDH_CQPSQ_SRQ_SRQVIRTMAP_S 29 +#define ZXDH_CQPSQ_SRQ_SRQVIRTMAP BIT_ULL(29) +#define ZXDH_CQPSQ_SRQ_REVERSEDLKEY_S 28 +#define ZXDH_CQPSQ_SRQ_REVERSEDLKEY BIT_ULL(28) +#define ZXDH_CQPSQ_SRQ_SRQAXIERRSIG_S 27 +#define ZXDH_CQPSQ_SRQ_SRQAXIERRSIG BIT_ULL(27) +#define ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE_S 24 +#define ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE GENMASK_ULL(26, 24) +#define ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE_S 22 +#define ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE GENMASK_ULL(23, 22) +#define ZXDH_CQPSQ_SRQ_LISTVIRTMAP_S 21 +#define ZXDH_CQPSQ_SRQ_LISTVIRTMAP BIT_ULL(21) +#define ZXDH_CQPSQ_SRQ_SWWQECNT_S 0 +#define ZXDH_CQPSQ_SRQ_SWWQECNT GENMASK_ULL(15, 0) + +#define ZXDH_CQPSQ_SRQ_VALIDWQEINDEXPOINT_S 42 +#define ZXDH_CQPSQ_SRQ_VALIDWQEINDEXPOINT GENMASK_ULL(43, 42) +#define ZXDH_CQPSQ_SRQ_DEBUGSET_S 32 +#define ZXDH_CQPSQ_SRQ_DEBUGSET GENMASK_ULL(41, 32) +#define ZXDH_CQPSQ_SRQ_LIMITWATERMARK_S 16 +#define ZXDH_CQPSQ_SRQ_LIMITWATERMARK GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_SRQ_HWWQECNT_S 0 +#define ZXDH_CQPSQ_SRQ_HWWQECNT GENMASK_ULL(15, 0) + +#define ZXDH_SRQ_PARITY_SIGN_S 15 +#define ZXDH_SRQ_PARITY_SIGN BIT_ULL(15) +#define ZXDH_SRQ_SW_SRQ_HEAD_S 0 +#define ZXDH_SRQ_SW_SRQ_HEAD GENMASK_ULL(14, 0) + +#define ZXDH_SRQ_DB_CACHE_ID_S 0 +#define ZXDH_SRQ_DB_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_SRQ_DB_INDICATE_ID_S 2 +#define ZXDH_SRQ_DB_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_SRQ_DB_AXI_ID_S 4 +#define ZXDH_SRQ_DB_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_SRQ_DB_WAY_PATION_S 7 +#define ZXDH_SRQ_DB_WAY_PATION GENMASK_ULL(9, 7) + +#define ZXDH_SRQ_SRQL_CACHE_ID_S 0 +#define ZXDH_SRQ_DSRQL_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_SRQ_SRQL_INDICATE_ID_S 2 +#define ZXDH_SRQ_SRQL_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_SRQ_SRQL_AXI_ID_S 4 +#define ZXDH_SRQ_SRQL_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_SRQ_SRQL_WAY_PATION_S 7 +#define ZXDH_SRQ_SRQL_WAY_PATION GENMASK_ULL(9, 7) + +#define ZXDH_SRQ_SW_MIN_WQSIZE 32u /* in WRs*/ +#define ZXDH_SRQ_WQE_MIN_SIZE 16 +#define ZXDH_SRQ_WQE_MAX_SIZE 512 +#define ZXDH_SRQ_FRAG_BYTESIZE 16 +#define ZXDH_SRQ_WQE_BYTESIZE 32 + +#define ZXDH_SRQE_SIZE 2 + +#define ZXDH_SRQ_STATE_ERROR 0 +#define ZXDH_SRQ_STATE_GOOD 1 + +#define ZXDH_SRQ_WQE_NOT_CONT 0 +#define ZXDH_SRQ_WQE_CONT 1 + +#define ZXDH_SRQ_WQE_MIN_LEN_32_BYTE 1 +#define ZXDH_SRQ_WQE_MIN_LEN_64_BYTE 2 + +#define ZXDH_SRQ_INVALID_LKEY 0x100 +#define ZXDH_SRQ_DB_INIT_VALUE 0x8000 + +struct zxdh_wqe_srq_next_sge { + __le16 next_wqe_index; + __le16 signature; + u8 valid_sge_num; + u8 rsvd[11]; +}; + +struct zxdh_srq_sge { + __le64 addr; + __le32 length; + __le32 lkey; +}; + +struct zxdh_srq_wqe { + __le64 elem[ZXDH_SRQE_SIZE]; +}; + +struct zxdh_srq_uk { + struct zxdh_srq_wqe *srq_base; + struct zxdh_uk_attrs *uk_attrs; + __le16 *srq_list_base; + __le64 *srq_db_base; + u32 srq_id; + u32 srq_size; + u32 log2_srq_size; + u32 srq_list_size; + struct zxdh_ring srq_ring; + struct zxdh_ring srq_list_ring; + u8 srq_list_polarity; + u64 *srq_wrid_array; + u8 srq_wqe_size; + u8 srq_wqe_size_multiplier; + u32 srq_caps; + u32 max_srq_frag_cnt; + u32 srq_type; + spinlock_t *lock; + u8 srq_flush_complete : 1; /* Indicates flush was seen and SRQ was empty after the flush */ + u8 destroy_pending : 1; /* Indicates the SRQ is being destroyed */ + u8 srq_flush_seen; +}; + +struct zxdh_sc_srq { + struct zxdh_srq_uk srq_uk; + struct zxdh_sc_dev *dev; + struct zxdh_sc_pd *pd; + u64 srq_pa; + u64 srq_list_pa; + u64 
srq_db_pa; + u32 srq_limit; + u64 srq_compl_ctx; + void *back_srq; + u8 srq_state; + u8 hw_srq_size; + u8 flush_srq; + u8 virtual_map : 1; + u8 list_virtual_map : 1; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 list_pbl_chunk_size; + u32 list_first_pm_pbl_idx; +}; + +struct zxdh_srq { + struct ib_srq ibsrq; + struct zxdh_sc_srq sc_srq; + struct zxdh_device *iwdev; + spinlock_t lock; /* serialize posting WRs to SQ/RQ */ + + u32 max_wr; + u32 max_sge; + u32 srq_limit; + refcount_t refcnt; + struct ib_umem *umem; + int wq_sig; + u8 state; + u8 user_mode; + enum ib_srq_type srq_type; + struct zxdh_dma_mem kmem; + struct zxdh_dma_mem kmem_list; + struct zxdh_dma_mem kmem_db; + struct zxdh_pbl *iwpbl; + struct completion free_srq; + int limit; + struct zxdh_srq_kmode ksrq; + + u64 srq_compl_ctx; +}; + +struct zxdh_srq_attr { + u32 type; + u32 flags; + u32 log_size; + u32 wqe_shift; + u32 log_page_size; + u32 wqe_cnt; + u32 srqn; + u32 page_offset; + u32 user_index; + struct ib_umem *umem; +}; + +struct zxdh_srq_uk_init_info { + struct zxdh_srq_wqe *srq_base; + struct zxdh_uk_attrs *uk_attrs; + __le16 *srq_list_base; + __le64 *srq_db_base; + u64 *srq_wrid_array; + u32 srq_id; + u32 srq_caps; + u32 srq_size; + u32 log2_srq_size; + u32 srq_list_size; + u32 max_srq_frag_cnt; + u32 srq_limit; +}; + +struct zxdh_srq_init_info { + struct zxdh_srq_uk_init_info srq_uk_init_info; + struct zxdh_sc_dev *dev; + struct zxdh_sc_pd *pd; + u8 virtual_map : 1; + u8 list_virtual_map : 1; + u8 db_virtual_map : 1; + u64 srq_pa; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 list_pbl_chunk_size; + u32 list_first_pm_pbl_idx; + u8 db_pbl_chunk_size; + u32 db_first_pm_pbl_idx; + u64 srq_list_pa; + u64 srq_db_pa; + u32 srq_limit; +}; + +struct zxdh_create_srq_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; + __aligned_u64 user_wqe_list; + __aligned_u64 user_wqe_db; +}; + +struct zxdh_create_srq_resp { + __u32 srq_id; + __u32 srq_size; + __u32 srq_list_size; + __u32 srq_caps; +}; + +static inline struct zxdh_srq *to_iwsrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct zxdh_srq, ibsrq); +} + +void zxdh_free_srq_wqe(struct zxdh_srq_uk *srq, int wqe_index); +#ifdef ZRDMA_CREATE_SRQ_VER_2 +int zxdh_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#else +struct ib_srq *zxdh_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#endif +void zxdh_srq_add_ref(struct ib_srq *ibsrq); +void zxdh_srq_rem_ref(struct ib_srq *ibsrq); +#ifdef ZRDMA_DESTROY_SRQ_VER_3 +int zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +#endif + +#ifdef ZRDMA_DESTROY_SRQ_VER_2 +void zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +#endif + +#ifdef ZRDMA_DESTROY_SRQ_VER_1 +int zxdh_destroy_srq(struct ib_srq *ibsrq); +#endif +int zxdh_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); +int zxdh_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); +int zxdh_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr); +int zxdh_sc_srq_create(struct zxdh_sc_srq *srq, + struct zxdh_create_srq_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_srq_modify(struct zxdh_sc_srq *srq, + struct zxdh_modify_srq_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_srq_destroy(struct zxdh_sc_srq *srq, u64 scratch, bool post_sq); + +int zxdh_query_srqc(struct zxdh_sc_srq *srq, u32 
*limit); +int zxdh_sc_query_srqc(struct zxdh_sc_srq *srq, u64 srqc_buf_pa, u64 scratch, + bool post_sq); + +#endif diff --git a/src/rdma/src/status.h b/src/rdma/src/status.h new file mode 100644 index 0000000000000000000000000000000000000000..c3e6ef199d6273a2ccddad33524967a038d0c600 --- /dev/null +++ b/src/rdma/src/status.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_STATUS_H +#define ZXDH_STATUS_H +#endif /* ZXDH_STATUS_H */ diff --git a/src/rdma/src/suse_kcompat.h b/src/rdma/src/suse_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..07126b975de52bc08a3ccb9a359d596fe20b4673 --- /dev/null +++ b/src/rdma/src/suse_kcompat.h @@ -0,0 +1,534 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef SUSE_KCOMPAT_H +#define SUSE_KCOMPAT_H + +#ifdef SLES_15_SP_3 +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_3 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V3 +#define IN_IFADDR +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define ZXDH_ALLOC_MR_VER_0 +#define IW_PORT_IMMUTABLE_V1 +#define HAS_IB_SET_DEVICE_OP +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define kc_typeq_ib_wr const +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* SLES_15_SP23 */ + +#ifdef SLES_15_SP_2 +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 
port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_2 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_3 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_UMEM_GET_V1 +#define IB_IW_PKEY +#define IN_IFADDR +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define ZXDH_ALLOC_MR_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_BEST_PAGE_SZ_V2 +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define kc_typeq_ib_wr const +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#endif /* SLES_15_SP2 */ + +#ifdef SLES_15_SP_1 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_4 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_DESTROY_CQ_VER_1 +#define ZXDH_SET_DRIVER_ID +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 
+#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_typeq_ib_wr const +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, NULL) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* SLES_15_SP_1 */ + +#ifdef SLES_15 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEALLOC_PD_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_PKEY +#define IB_IW_MANDATORY_AH_OP +#define IB_UMEM_GET_V1 +#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_typeq_ib_wr +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr).ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define 
kc_get_ucontext(udata) to_ucontext(context) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ibdev_dbg(ibdev, ...) dev_dbg(&((ibdev)->dev), __VA_ARGS__) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* SLES_15 */ + +#ifdef SLES_12_SP_4 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA + +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr).ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define 
kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* SLES_12_SP_4 */ + +#ifdef SLES_12_SP_5 +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define FOR_IFA +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V1 +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_2 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +#define kc_typeq_ib_wr +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define ib_device_put(dev) +#define kc_get_ucontext(udata) to_ucontext(context) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) + +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) + +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) + +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) + +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) + +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) + +#define ib_umem_get(udata, addr, size, access, dmasync) \ + 
ib_umem_get(pd->uobject->context, addr, size, access, dmasync) +#endif /* SLES_12_SP_5 */ + +#endif /* SUSE_KCOMPAT_H */ diff --git a/src/rdma/src/tc_hmcdma.c b/src/rdma/src/tc_hmcdma.c new file mode 100644 index 0000000000000000000000000000000000000000..bac72483e646220d0c9375c75dd3b014a8f6fe7b --- /dev/null +++ b/src/rdma/src/tc_hmcdma.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "tc_hmcdma.h" +#include "icrdma_hw.h" +#include "type.h" +#include "protos.h" + +#define L2D_BASE_PA 0x6200900000 + +int host_test_dma_write32(struct zxdh_pci_f *rf) +{ + int i = 0, status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_dma_write32_date dma_data = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + u64 hmcreg = 0; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 100; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0, ddrsrc.size); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 4; + for (i = 0; i < dma_data.num; i++) { + dma_data.addrbuf[i] = ddrsrc.pa + 0x04 * i; + dma_data.databuf[i] = 0x55 + i; + } + + zxdh_sc_dma_write32(cqp, 0, &dpath_index, &dma_data, true); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_REGISTER; + dpath_index.obj_id = ZXDH_REG_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 4; + + hmcreg = 0x6204c00010; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x55; + + hmcreg = 0x6204c00010 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x56; + + hmcreg = 0x6204c00010 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x57; + + hmcreg = 0x6204c00010 + 4096 * 3; + dma_data.addrbuf[3] = hmcreg; + dma_data.databuf[3] = 0x58; + + zxdh_sc_dma_write32(cqp, 0, &dpath_index, &dma_data, true); + + return status; +} + +int host_test_dma_write64(struct zxdh_pci_f *rf) +{ + int i = 0, status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_mem ddrsrc = {}; // externally attached DDR + struct zxdh_dma_write64_date dma_data = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + u64 hmcreg = 0; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 100; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0, ddrsrc.size); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // bypass the cache + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 3; + for (i = 0; i < dma_data.num; i++) { + dma_data.addrbuf[i] = ddrsrc.pa + 0x08 * i; + dma_data.databuf[i] = 0x66 + i; + } + + zxdh_sc_dma_write64(cqp, 0, &dpath_index, &dma_data, true); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // bypass the cache + dpath_index.path_select = ZXDH_INDICATE_REGISTER; // L2D + dpath_index.obj_id = ZXDH_REG_OBJ_ID; // L2D + dpath_index.vhca_id =
dev->vhca_id; + + dma_data.num = 3; + hmcreg = 0x6204c00008; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x155; + + hmcreg = 0x6204c00008 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x156; + + hmcreg = 0x6204c00008 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x157; + zxdh_sc_dma_write64(cqp, 0, &dpath_index, &dma_data, true); + return status; +} + +int host_test_dma_write(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_dma_mem ddrdest = {}; + + struct zxdh_src_copy_dest src_dest = {}; + + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 1024; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + + memset(ddrsrc.va, 0x00, ddrsrc.size); + + ddrdest.size = 1024; + ddrdest.va = dma_alloc_coherent(rf->hw.device, ddrdest.size, + &ddrdest.pa, GFP_KERNEL); + + if (!ddrdest.va) { + status = -ENOMEM; + return status; + } + memset(ddrdest.va, 0, ddrdest.size); + + addr = (u8 *)(uintptr_t)ddrsrc.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + sizeof(val); + } + + src_dest.src = ddrsrc.pa; + src_dest.dest = ddrdest.pa; + src_dest.len = 5 * 4; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + if (rf->sc_dev.cache_id != 0) { + dpath_index.inter_select = ZXDH_INTERFACE_CACHE; + dpath_index.path_select = rf->sc_dev.cache_id; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + } else { + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + } + + zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, &dpath_index, true); + + src_dest.src = ddrsrc.pa; + src_dest.dest = L2D_BASE_PA; + src_dest.len = 5 * 4; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_L2D; + dpath_index.obj_id = ZXDH_L2D_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, &dpath_index, true); + + return status; +} + +int host_test_dma_write_bysmmu(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_src_copy_dest src_dest = {}; + + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 1024; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0x00, ddrsrc.size); + + addr = (u8 *)(uintptr_t)ddrsrc.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + 
sizeof(val); + } + + src_dest.src = ddrsrc.pa; + src_dest.dest = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_QP].base; + src_dest.len = 512; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_SMMU; + dpath_index.obj_id = ZXDH_QPC_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + + status = zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, + &dpath_index, true); + return status; +} + +int zxdh_sc_dma_wr32_auto(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0, len = 0x20; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddr1 = {}; + struct zxdh_dma_mem ddr2 = {}; + struct zxdh_dma_mem ddr3 = {}; + + struct zxdh_src_copy_dest src_dest = {}; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddr1.size = 1024; + ddr1.va = dma_alloc_coherent(rf->hw.device, ddr1.size, &ddr1.pa, + GFP_KERNEL); + + if (!ddr1.va) { + status = -ENOMEM; + return status; + } + + ddr2.size = 1024; + ddr2.va = dma_alloc_coherent(rf->hw.device, ddr2.size, &ddr2.pa, + GFP_KERNEL); + + if (!ddr2.va) { + status = -ENOMEM; + return status; + } + memset(ddr2.va, 0x00, ddr2.size); + + ddr3.size = 1024; + ddr3.va = dma_alloc_coherent(rf->hw.device, ddr3.size, &ddr3.pa, + GFP_KERNEL); + + if (!ddr3.va) { + status = -ENOMEM; + return status; + } + memset(ddr3.va, 0x00, ddr3.size); + + addr = (u8 *)(uintptr_t)ddr1.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + sizeof(val); + } + + src_dest.src = ddr1.pa; + src_dest.dest = ddr2.pa; + src_dest.len = len; + zxdh_cqp_rdma_write_cmd(&rf->sc_dev, &src_dest, + ZXDH_INDICATE_HOST_NOSMMU, + ZXDH_INDICATE_HOST_NOSMMU); + src_dest.src = ddr2.pa; + src_dest.dest = ddr3.pa; + src_dest.len = len; + zxdh_cqp_rdma_read_cmd(&rf->sc_dev, &src_dest, + ZXDH_INDICATE_HOST_NOSMMU, + ZXDH_INDICATE_HOST_NOSMMU); + + if (!memcmp(ddr1.va, ddr3.va, len)) { + status = 0; + pr_info("CQP Write Read is normal!!!\n"); + } + return status; +} + +int zxdh_sc_dma_w32r32_auto(struct zxdh_pci_f *rf) +{ + int status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_write32_date dma_data = {}; + u64 rarry[5], hmcreg = 0; + struct zxdh_dam_read_bycqe rdmadata = {}; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + rdmadata.num = 4; + rdmadata.bitwidth = 1; + rdmadata.valuetype = 1; + rdmadata.addrbuf[0] = 0x6204c00010; + rdmadata.addrbuf[1] = 0x6204c00010 + 4096 * 1; + rdmadata.addrbuf[2] = 0x6204c00010 + 4096 * 2; + rdmadata.addrbuf[3] = 0x6204c00010 + 4096 * 3; + + dma_data.num = 4; + + hmcreg = 0x6204c00010; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x55; + + hmcreg = 0x6204c00010 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x56; + + hmcreg = 0x6204c00010 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x57; + + hmcreg = 0x6204c00010 + 4096 * 3; + dma_data.addrbuf[3] = hmcreg; + dma_data.databuf[3] = 0x58; + + dpath_index.vhca_id = dev->vhca_id; + dpath_index.obj_id = ZXDH_REG_OBJ_ID; + dpath_index.path_select = ZXDH_INDICATE_REGISTER; + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + + zxdh_cqp_damreadbycqe_cmd(dev, &rdmadata, &dpath_index, rarry); + + if 
(rarry[0] == 0x55 && rarry[1] == 0x56 && rarry[2] == 0x57 && + rarry[3] == 0x58) { + pr_info("CQP Write32 ReadbyCqe is normal!!!\n"); + status = 0; + } + + return status; +} diff --git a/src/rdma/src/tc_hmcdma.h b/src/rdma/src/tc_hmcdma.h new file mode 100644 index 0000000000000000000000000000000000000000..6056da47c04853078537a121aa6fcc89f0eb909e --- /dev/null +++ b/src/rdma/src/tc_hmcdma.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_TCHMCDMA_H +#define ZXDH_TCHMCDMA_H + +#include "main.h" +#include "protos.h" + +int host_test_dma_write32(struct zxdh_pci_f *rf); +int host_test_dma_write64(struct zxdh_pci_f *rf); +int host_test_dma_write(struct zxdh_pci_f *rf); +int host_test_dma_write_bysmmu(struct zxdh_pci_f *rf); +int zxdh_sc_dma_wr32_auto(struct zxdh_pci_f *rf); +int zxdh_sc_dma_w32r32_auto(struct zxdh_pci_f *rf); + +#endif /* ZXDH_TCHMCDMA_H */ diff --git a/src/rdma/src/trace.c b/src/rdma/src/trace.c new file mode 100644 index 0000000000000000000000000000000000000000..76dab9ae1a5be7ba4db12901b3d83b34bf80f860 --- /dev/null +++ b/src/rdma/src/trace.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#define CREATE_TRACE_POINTS +#include "trace.h" + +const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (ipv4) { + __be32 myaddr = htonl(*addr); + + trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port)); + } else { + trace_seq_printf(p, "%pI6:%d", addr, htons(port)); + } + trace_seq_putc(p, 0); + + return ret; +} + +const char *parse_iw_event_type(enum iw_cm_event_type iw_type) +{ + switch (iw_type) { + case IW_CM_EVENT_CONNECT_REQUEST: + return "IwRequest"; + case IW_CM_EVENT_CONNECT_REPLY: + return "IwReply"; + case IW_CM_EVENT_ESTABLISHED: + return "IwEstablished"; + case IW_CM_EVENT_DISCONNECT: + return "IwDisconnect"; + case IW_CM_EVENT_CLOSE: + return "IwClose"; + } + + return "Unknown"; +} + +const char *parse_cm_event_type(enum zxdh_cm_event_type cm_type) +{ + switch (cm_type) { + case ZXDH_CM_EVENT_ESTABLISHED: + return "CmEstablished"; + case ZXDH_CM_EVENT_MPA_REQ: + return "CmMPA_REQ"; + case ZXDH_CM_EVENT_MPA_CONNECT: + return "CmMPA_CONNECT"; + case ZXDH_CM_EVENT_MPA_ACCEPT: + return "CmMPA_ACCEPT"; + case ZXDH_CM_EVENT_MPA_REJECT: + return "CmMPA_REJECT"; + case ZXDH_CM_EVENT_MPA_ESTABLISHED: + return "CmMPA_ESTABLISHED"; + case ZXDH_CM_EVENT_CONNECTED: + return "CmConnected"; + case ZXDH_CM_EVENT_RESET: + return "CmReset"; + case ZXDH_CM_EVENT_ABORTED: + return "CmAborted"; + case ZXDH_CM_EVENT_UNKNOWN: + return "none"; + } + return "Unknown"; +} + +const char *parse_cm_state(enum zxdh_cm_node_state state) +{ + switch (state) { + case ZXDH_CM_STATE_UNKNOWN: + return "UNKNOWN"; + case ZXDH_CM_STATE_INITED: + return "INITED"; + case ZXDH_CM_STATE_LISTENING: + return "LISTENING"; + case ZXDH_CM_STATE_SYN_RCVD: + return "SYN_RCVD"; + case ZXDH_CM_STATE_SYN_SENT: + return "SYN_SENT"; + case ZXDH_CM_STATE_ONE_SIDE_ESTABLISHED: + return "ONE_SIDE_ESTABLISHED"; + case ZXDH_CM_STATE_ESTABLISHED: + return "ESTABLISHED"; + case ZXDH_CM_STATE_ACCEPTING: + return "ACCEPTING"; + case ZXDH_CM_STATE_MPAREQ_SENT: + return "MPAREQ_SENT"; + case ZXDH_CM_STATE_MPAREQ_RCVD: + return "MPAREQ_RCVD"; + case ZXDH_CM_STATE_MPAREJ_RCVD: + return "MPAREJ_RECVD"; + case ZXDH_CM_STATE_OFFLOADED: + return "OFFLOADED"; + case ZXDH_CM_STATE_FIN_WAIT1: + return 
"FIN_WAIT1"; + case ZXDH_CM_STATE_FIN_WAIT2: + return "FIN_WAIT2"; + case ZXDH_CM_STATE_CLOSE_WAIT: + return "CLOSE_WAIT"; + case ZXDH_CM_STATE_TIME_WAIT: + return "TIME_WAIT"; + case ZXDH_CM_STATE_LAST_ACK: + return "LAST_ACK"; + case ZXDH_CM_STATE_CLOSING: + return "CLOSING"; + case ZXDH_CM_STATE_LISTENER_DESTROYED: + return "LISTENER_DESTROYED"; + case ZXDH_CM_STATE_CLOSED: + return "CLOSED"; + } + return ("Bad state"); +} diff --git a/src/rdma/src/trace.h b/src/rdma/src/trace.h new file mode 100644 index 0000000000000000000000000000000000000000..32015f71ab40049576e26711f75915a7fd61b9fa --- /dev/null +++ b/src/rdma/src/trace.h @@ -0,0 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "trace_cm.h" diff --git a/src/rdma/src/trace_cm.h b/src/rdma/src/trace_cm.h new file mode 100644 index 0000000000000000000000000000000000000000..0063213850784452f29a2dcb561ba0d53120b3fc --- /dev/null +++ b/src/rdma/src/trace_cm.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ) +#define __TRACE_CM_H + +#include +#include + +#include "main.h" + +const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ivp4); +const char *parse_iw_event_type(enum iw_cm_event_type iw_type); +const char *parse_cm_event_type(enum zxdh_cm_event_type cm_type); +const char *parse_cm_state(enum zxdh_cm_node_state); +#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4) + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM zxdh_cm + +DECLARE_EVENT_CLASS( + cm_node_ah_template, TP_PROTO(struct zxdh_cm_node *cm_node), + TP_ARGS(cm_node), + TP_STRUCT__entry( + __field(struct zxdh_device *, + iwdev) __field(struct zxdh_cm_node *, + cm_node) __field(struct zxdh_sc_ah *, ah) + __field(u32, refcount) __field(u16, lport) __field( + u16, rport) __field(enum zxdh_cm_node_state, + state) __field(bool, ipv4) + __field(u16, vlan_id) __field(int, accel) + __dynamic_array(u32, laddr, 4) + __dynamic_array(u32, raddr, 4)), + TP_fast_assign(__entry->iwdev = cm_node->iwdev; + __entry->cm_node = cm_node; __entry->ah = cm_node->ah; + __entry->refcount = refcount_read(&cm_node->refcnt); + __entry->lport = cm_node->loc_port; + __entry->rport = cm_node->rem_port; + __entry->state = cm_node->state; + __entry->ipv4 = cm_node->ipv4; + __entry->vlan_id = cm_node->vlan_id; + __entry->accel = cm_node->accelerated; + memcpy(__get_dynamic_array(laddr), cm_node->loc_addr, 4); + memcpy(__get_dynamic_array(raddr), cm_node->rem_addr, + 4);), + TP_printk( + "iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s", + __entry->iwdev, __entry->cm_node, __entry->ah, + __entry->refcount, __entry->vlan_id, __entry->accel, + parse_cm_state(__entry->state), + __print_ip_addr(__get_dynamic_array(laddr), __entry->lport, + __entry->ipv4), + __print_ip_addr(__get_dynamic_array(raddr), __entry->rport, + __entry->ipv4))); + +DEFINE_EVENT(cm_node_ah_template, zxdh_create_ah, + TP_PROTO(struct zxdh_cm_node *cm_node), TP_ARGS(cm_node)); + +#endif /* __TRACE_CM_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE trace_cm +#include <trace/define_trace.h> diff --git a/src/rdma/src/type.h b/src/rdma/src/type.h new file mode 100644 index 0000000000000000000000000000000000000000..5ecdb8f4cd68329b49ada974957b149c6756e847 --- /dev/null +++ b/src/rdma/src/type.h @@ -0,0 +1,1785 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_TYPE_H +#define ZXDH_TYPE_H +#include "status.h" +#include "osdep.h" +#include "zrdma.h" +#include "user.h" +#include "hmc.h" +#include "uda.h" +#include "vf.h" +#include "ws.h" +#include "virtchnl.h" +#include "private_verbs_cmd.h" + +enum zxdh_page_size { + ZXDH_PAGE_SIZE_4K = 0, + ZXDH_PAGE_SIZE_2M = 9, + ZXDH_PAGE_SIZE_1G = 18, +}; + +enum zxdh_hdrct_flags { + DDP_LEN_FLAG = 0x80, + DDP_HDR_FLAG = 0x40, + RDMA_HDR_FLAG = 0x20, +}; + +enum zxdh_term_layers { + LAYER_RDMA = 0, + LAYER_DDP = 1, + LAYER_MPA = 2, +}; + +enum zxdh_pble_type { + PBLE_QUEUE = 0, + PBLE_MR = 1, +}; + +enum zxdh_term_error_types { + RDMAP_REMOTE_PROT = 1, + RDMAP_REMOTE_OP = 2, + DDP_CATASTROPHIC = 0, + DDP_TAGGED_BUF = 1, + DDP_UNTAGGED_BUF = 2, + DDP_LLP = 3, +}; + +enum zxdh_term_rdma_errors { + RDMAP_INV_STAG = 0x00, + RDMAP_INV_BOUNDS = 0x01, + RDMAP_ACCESS = 0x02, + RDMAP_UNASSOC_STAG = 0x03, + RDMAP_TO_WRAP = 0x04, + RDMAP_INV_RDMAP_VER = 0x05, + RDMAP_UNEXPECTED_OP = 0x06, + RDMAP_CATASTROPHIC_LOCAL = 0x07, + RDMAP_CATASTROPHIC_GLOBAL = 0x08, + RDMAP_CANT_INV_STAG = 0x09, + RDMAP_UNSPECIFIED = 0xff, +}; + +enum zxdh_term_ddp_errors { + DDP_CATASTROPHIC_LOCAL = 0x00, + DDP_TAGGED_INV_STAG = 0x00, + DDP_TAGGED_BOUNDS = 0x01, + DDP_TAGGED_UNASSOC_STAG = 0x02, + DDP_TAGGED_TO_WRAP = 0x03, + DDP_TAGGED_INV_DDP_VER = 0x04, + DDP_UNTAGGED_INV_QN = 0x01, + DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02, + DDP_UNTAGGED_INV_MSN_RANGE = 0x03, + DDP_UNTAGGED_INV_MO = 0x04, + DDP_UNTAGGED_INV_TOO_LONG = 0x05, + DDP_UNTAGGED_INV_DDP_VER = 0x06, +}; + +enum zxdh_term_mpa_errors { + MPA_CLOSED = 0x01, + MPA_CRC = 0x02, + MPA_MARKER = 0x03, + MPA_REQ_RSP = 0x04, +}; + +enum zxdh_qp_event_type { + ZXDH_QP_EVENT_CATASTROPHIC, + ZXDH_QP_EVENT_ACCESS_ERR, + ZXDH_QP_EVENT_REQ_ERR, +}; + +enum zxdh_hw_stats_index { + /* 32-bit */ + HW_STAT_DUPLICATE_REQUEST = 0, + HW_STAT_NP_CNP_SENT, + HW_STAT_NP_ECN_MARKED_ROCE_PACKETS, + HW_STAT_OUT_OF_SEQUENCE, + HW_STAT_PACKET_SEQ_ERR, + HW_STAT_REQ_CQE_ERROR, + HW_STAT_REQ_REMOTE_ACCESS_ERRORS, + HW_STAT_REQ_REMOTE_INVALID_REQUEST, + HW_STAT_REQ_REMOTE_OPERATION_ERRORS, + HW_STAT_REQ_LOCAL_LENGTH_ERROR, + HW_STAT_RESP_CQE_ERROR, + HW_STAT_RESP_REMOTE_ACCESS_ERRORS, + HW_STAT_RESP_REMOTE_INVALID_REQUEST, + HW_STAT_RESP_REMOTE_OPERATION_ERRORS, + HW_STAT_RESP_RNR_NAK, + HW_STAT_RNR_NAK_RETRY_ERR, + HW_STAT_RP_CNP_HANDLED, + HW_STAT_RX_READ_REQUESTS, + HW_STAT_RX_WRITE_REQUESTS, + HW_STAT_RX_ICRC_ENCAPSULATED, + HW_STAT_ROCE_SLOW_RESTART_CNPS, + HW_STAT_RDMA_TX_PKTS, + HW_STAT_RDMA_TX_BYTES, + HW_STAT_RDMA_RX_PKTS, + HW_STAT_RDMA_RX_BYTES, + ZXDH_HW_STAT_INDEX_MAX, +}; + +enum zxdh_ib_hw_stats_index { + IB_STAT_SYMBOL_ERROR = 0, + IB_STAT_LINK_ERROR_RECOVERY, + IB_STAT_LINK_DOWNED, + IB_STAT_PORT_RCV_ERRORS, + IB_STAT_PORT_RCV_REMPHYS_ERRORS, + IB_STAT_PORT_RCV_SWITCH_RELAY_ERRORS, + IB_STAT_PORT_XMIT_DISCARDS, + IB_STAT_PORT_XMIT_CONTRAINT_ERRORS, + IB_STAT_PORT_XMIT_WAIT, + IB_STAT_PORT_RCV_CONSTRAINT_ERRORS, + IB_STAT_LINK_OVERRUN_ERRORS, + IB_STAT_VL15_DROPPED, + IB_STAT_PORT_XMIT_DATA, + IB_STAT_PORT_RCV_DATA, + IB_STAT_PORT_XMIT_PACKETS, + IB_STAT_PORT_RCV_PACKETS, + IB_STAT_PORT_UNICAST_XMIT_PACKETS, +
IB_STAT_PORT_UNICAST_RCV_PACKETS, + IB_STAT_PORT_MULTICAST_XMIT_PACKETS, + IB_STAT_PORT_MULTICAST_RCV_PACKETS, + IB_STAT_LOCAL_LINK_INTEGRITY_ERRORS, + IB_STAT_INDEX_MAX, +}; + +enum zxdh_module_type { + ZXDH_IB_STAT = 0, + ZXDH_RDMA_STAT, +}; + +#define ZXDH_MIN_FEATURES 2 + +enum zxdh_feature_type { + ZXDH_FEATURE_FW_INFO = 0, + ZXDH_HW_VERSION_INFO = 1, + ZXDH_QSETS_MAX = 26, + ZXDH_MAX_FEATURES, /* Must be last entry */ +}; + +enum zxdh_sched_prio_type { + ZXDH_PRIO_WEIGHTED_RR = 1, + ZXDH_PRIO_STRICT = 2, + ZXDH_PRIO_WEIGHTED_STRICT = 3, +}; + +enum zxdh_vm_vf_type { + ZXDH_VF_TYPE = 0, + ZXDH_VM_TYPE, + ZXDH_PF_TYPE, +}; + +enum zxdh_cqp_hmc_profile { + ZXDH_HMC_PROFILE_DEFAULT = 1, + ZXDH_HMC_PROFILE_FAVOR_VF = 2, + ZXDH_HMC_PROFILE_EQUAL = 3, +}; + +enum zxdh_quad_entry_type { + ZXDH_QHASH_TYPE_TCP_ESTABLISHED = 1, + ZXDH_QHASH_TYPE_TCP_SYN, + ZXDH_QHASH_TYPE_UDP_UNICAST, + ZXDH_QHASH_TYPE_UDP_MCAST, + ZXDH_QHASH_TYPE_ROCE_MCAST, + ZXDH_QHASH_TYPE_ROCEV2_HW, +}; + +enum zxdh_quad_hash_manage_type { + ZXDH_QHASH_MANAGE_TYPE_DELETE = 0, + ZXDH_QHASH_MANAGE_TYPE_ADD, + ZXDH_QHASH_MANAGE_TYPE_MODIFY, +}; + +enum zxdh_syn_rst_handling { + ZXDH_SYN_RST_HANDLING_HW_TCP_SECURE = 0, + ZXDH_SYN_RST_HANDLING_HW_TCP, + ZXDH_SYN_RST_HANDLING_FW_TCP_SECURE, + ZXDH_SYN_RST_HANDLING_FW_TCP, +}; + +enum zxdh_queue_type { + ZXDH_QUEUE_TYPE_SQ_RQ = 0, + ZXDH_QUEUE_TYPE_CQP, +}; + +enum zxdh_cqe_source_type { + ZXDH_CQE_SOURCE_OTHERQP = 0, + ZXDH_CQE_SOURCE_CQP, +}; + +struct zxdh_sc_dev; +struct zxdh_vsi_pestat; +struct zxdh_src_copy_dest; + +struct zxdh_dcqcn_cc_params { + u8 cc_cfg_valid; + u8 min_dec_factor; + u8 min_rate; + u8 dcqcn_f; + u16 rai_factor; + u16 hai_factor; + u16 dcqcn_t; + u32 dcqcn_b; + u32 rreduce_mperiod; +}; + +struct zxdh_cqp_init_info { + u64 cqp_compl_ctx; + u64 host_ctx_pa; + u64 sq_pa; + struct zxdh_sc_dev *dev; + struct zxdh_cqp_quanta *sq; + struct zxdh_dcqcn_cc_params dcqcn_params; + __le64 *host_ctx; + u64 *scratch_array; + u32 sq_size; + u16 hw_maj_ver; + u16 hw_min_ver; + u8 struct_ver; + u8 hmc_profile; + u8 ena_vf_count; + u8 ceqs_per_vf; + u8 en_datacenter_tcp : 1; + u8 disable_packed : 1; + u8 rocev2_rto_policy : 1; + u8 en_rem_endpoint_trk : 1; + enum zxdh_protocol_used protocol_used; +}; + +struct zxdh_terminate_hdr { + u8 layer_etype; + u8 error_code; + u8 hdrct; + u8 rsvd; +}; + +struct zxdh_cqp_sq_wqe { + __le64 buf[ZXDH_CQP_WQE_SIZE]; +}; + +struct zxdh_sc_aeqe { + __le64 buf[ZXDH_AEQE_SIZE]; +}; + +struct zxdh_ceqe { + __le64 buf[ZXDH_CEQE_SIZE]; +}; + +struct zxdh_cqp_ctx { + __le64 buf[ZXDH_CQP_CTX_SIZE]; +}; + +struct zxdh_cq_shadow_area { + __le64 buf[ZXDH_SHADOW_AREA_SIZE]; +}; + +struct zxdh_dev_hw_stats_offsets { + u32 stats_offset[ZXDH_HW_STAT_INDEX_MAX]; +}; + +struct zxdh_dev_hw_stats { + u64 stats_val[ZXDH_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct zxdh_gather_stats { + u64 val[ZXDH_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct zxdh_hw_stat_map { + u16 byteoff; + u8 bitoff; + u64 bitmask; +}; + +struct zxdh_stats_gather_info { + u8 use_hmc_fcn_index : 1; + u8 use_stats_inst : 1; + u16 hmc_fcn_index; + u8 stats_inst_index; + struct zxdh_dma_mem stats_buff_mem; + void *gather_stats_va; + void *last_gather_stats_va; +}; + +struct zxdh_vsi_pestat { + struct zxdh_hw *hw; + struct zxdh_dev_hw_stats hw_stats; + struct zxdh_stats_gather_info gather_info; + struct timer_list stats_timer; + struct zxdh_sc_vsi *vsi; + spinlock_t lock; /* rdma stats lock */ +}; + +struct zxdh_hw { + u8 __iomem *hw_addr; + u8 __iomem *priv_hw_addr; + u8 
__iomem *pci_hw_addr; + struct device *device; + struct zxdh_hmc_info hmc; +}; + +struct zxdh_pfpdu { + struct list_head rxlist; + u32 rcv_nxt; + u32 fps; + u32 max_fpdu_data; + u32 nextseqnum; + u32 rcv_start_seq; + u8 mode : 1; + u8 mpa_crc_err : 1; + u8 marker_len; + u64 total_ieq_bufs; + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 no_tx_bufs; + u64 tx_err; + u64 out_of_order; + u64 pmode_count; + struct zxdh_sc_ah *ah; + struct zxdh_puda_buf *ah_buf; + spinlock_t lock; /* fpdu processing lock */ + struct zxdh_puda_buf *lastrcv_buf; +}; + +struct zxdh_sc_pd { + struct zxdh_sc_dev *dev; + u32 pd_id; + int abi_ver; +}; + +struct zxdh_cqp_quanta { + __le64 elem[ZXDH_CQP_WQE_SIZE]; +}; + +struct zxdh_sc_cqp { + u32 size; + u64 sq_pa; + u64 host_ctx_pa; + void *back_cqp; + struct zxdh_sc_dev *dev; + int (*process_cqp_sds)(struct zxdh_sc_dev *dev, + struct zxdh_update_sds_info *info); + int (*process_config_pte_table)(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest src_dest); + struct zxdh_dma_mem sdbuf; + struct zxdh_ring sq_ring; + struct zxdh_cqp_quanta *sq_base; + struct zxdh_dcqcn_cc_params dcqcn_params; + __le64 *host_ctx; + u64 *scratch_array; + u32 cqp_id; + u32 sq_size; + u32 hw_sq_size; + u16 hw_maj_ver; + u16 hw_min_ver; + u8 struct_ver; + u8 polarity; + u8 hmc_profile; + u8 ena_vf_count; + u8 timeout_count; + u8 ceqs_per_vf; + u8 en_datacenter_tcp : 1; + u8 disable_packed : 1; + u8 rocev2_rto_policy : 1; + u8 en_rem_endpoint_trk : 1; + u8 state_cfg : 1; // C_RDMA_CQP_CONTEXT_0 [31] + enum zxdh_protocol_used protocol_used; +}; + +struct zxdh_sc_aeq { + u32 size; + u64 aeq_elem_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_aeqe *aeqe_base; + void *pbl_list; + u32 elem_cnt; + struct zxdh_ring aeq_ring; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u32 msix_idx; + u8 polarity; + u8 get_polarity_flag; + u8 virtual_map : 1; +}; + +struct zxdh_sc_ceq { + u32 size; + u64 ceq_elem_pa; + struct zxdh_sc_dev *dev; + struct zxdh_ceqe *ceqe_base; + void *pbl_list; + u32 ceq_id; + u32 ceq_index; + u32 elem_cnt; + u32 log2_elem_size; + struct zxdh_ring ceq_ring; + u8 pbl_chunk_size; + u8 tph_val; + u32 first_pm_pbl_idx; + u32 msix_idx; + u8 polarity; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_cq **reg_cq; + u32 reg_cq_size; + spinlock_t req_cq_lock; /* protect access to reg_cq array */ + u8 virtual_map : 1; + u8 tph_en : 1; + u8 itr_no_expire : 1; +}; + +struct zxdh_sc_cq { + struct zxdh_cq_uk cq_uk; + u64 cq_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + void *pbl_list; + void *back_cq; + u32 ceq_id; + u32 ceq_index; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u8 cq_type; + u8 tph_val; + u32 first_pm_pbl_idx; + u8 ceqe_mask : 1; + u8 virtual_map : 1; + u8 ceq_id_valid : 1; + u8 tph_en; + u8 cq_st; + u16 is_in_list_cnt; + u16 cq_max; + u16 cq_period; + u8 scqe_break_moderation_en : 1; + u8 cq_overflow_locked_flag : 1; +}; + +struct zxdh_sc_qp { + struct zxdh_qp_uk qp_uk; + u64 sq_pa; + u64 rq_pa; + u64 hw_host_ctx_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_pd *pd; + struct zxdh_sc_srq *srq; + __le64 *hw_host_ctx; + void *llp_stream_handle; + struct zxdh_pfpdu pfpdu; + u32 ieq_qp; + u8 *q2_buf; + u64 qp_compl_ctx; + u32 qp_ctx_num; + u16 qs_handle; + u16 push_offset; + u8 flush_wqes_count; + u8 sq_tph_val; + u8 rq_tph_val; + u8 qp_state; + u8 hw_sq_size; + u8 hw_rq_size; + u8 src_mac_addr_idx; + u8 on_qoslist : 1; + u8 ieq_pass_thru : 1; + u8 sq_tph_en : 1; + u8 rq_tph_en : 1; 
+ u8 rcv_tph_en : 1; + u8 xmit_tph_en : 1; + u8 virtual_map : 1; + u8 flush_sq : 1; + u8 flush_rq : 1; + u8 sq_flush_code : 1; + u8 rq_flush_code : 1; + u8 is_nvmeof_ioq : 1; + u8 is_nvmeof_tgt : 1; + u32 nvmeof_qid; + enum zxdh_flush_opcode flush_code; + enum zxdh_qp_event_type event_type; + u8 term_flags; + u8 user_pri; + struct list_head list; + u8 is_srq; + u32 tx_last_ack_psn; + u32 aeq_entry_err_last_psn; + u32 aeq_retry_err_last_psn; + u8 entry_err_cnt; + u8 retry_err_cnt; +}; + +struct zxdh_stats_inst_info { + bool use_hmc_fcn_index; + u8 hmc_fn_id; + u8 stats_idx; +}; + +struct zxdh_up_info { + u8 map[8]; + u8 cnp_up_override; + u8 hmc_fcn_idx; + u8 use_vlan : 1; + u8 use_cnp_up_override : 1; +}; + +#define ZXDH_MAX_WS_NODES 0x3FF +#define ZXDH_WS_NODE_INVALID 0xFFFF + +struct zxdh_ws_node_info { + u16 id; + u16 vsi; + u16 parent_id; + u16 qs_handle; + u8 type_leaf : 1; + u8 enable : 1; + u8 prio_type; + u8 tc; + u8 weight; +}; + +#define ZXDH_VCHNL_MAX_VF_MSG_SIZE 512 +#define ZXDH_LEAF_DEFAULT_REL_BW 64 +#define ZXDH_PARENT_DEFAULT_REL_BW 1 + +struct zxdh_qos { + struct list_head qplist; + struct mutex qos_mutex; /* protect QoS attributes per QoS level */ + u64 lan_qos_handle; + u32 l2_sched_node_id; + u16 qs_handle; + u8 traffic_class; + u8 rel_bw; + u8 prio_type; + bool valid; +}; + +struct zxdh_config_check { + u8 config_ok : 1; + u8 lfc_set : 1; + u8 pfc_set : 1; + u8 traffic_class; + u16 qs_handle; +}; + +struct zxdh_vfdev { + struct zxdh_sc_dev *pf_dev; + struct zxdh_sc_vsi *vf_vsi; + u8 *hmc_info_mem; + u8 vf_msg_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + struct zxdh_hmc_info hmc_info; + u32 max_ceqs; + u32 pbleq_unallocated_pble; + u64 pbleq_fpm_base_addr; + u64 pbleq_next_fpm_addr; + u32 pblemr_unallocated_pble; + u64 pblemr_fpm_base_addr; + u64 pblemr_next_fpm_addr; + + refcount_t refcnt; + u16 pmf_index; + u16 vf_id; + u16 vhca_id; + u16 iw_vf_idx; + u8 stats_initialized : 1; + u8 pf_hmc_initialized : 1; + u8 reset_en : 1; + u8 port_vlan_en : 1; +}; + +#define ZXDH_INVALID_STATS_IDX 0xff +struct zxdh_sc_vsi { + u16 vsi_idx; + struct zxdh_sc_dev *dev; + struct zxdh_vfdev *vf_dev; + void *back_vsi; + u32 ilq_count; + struct zxdh_virt_mem ilq_mem; + struct zxdh_puda_rsrc *ilq; + u32 ieq_count; + struct zxdh_virt_mem ieq_mem; + struct zxdh_puda_rsrc *ieq; + u32 exception_lan_q; + u16 mtu; + u16 vf_id; + enum zxdh_vm_vf_type vm_vf_type; + u8 stats_inst_alloc : 1; + u8 tc_change_pending : 1; + struct zxdh_vsi_pestat *pestat; + atomic_t qp_suspend_reqs; + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + struct zxdh_config_check cfg_check[ZXDH_MAX_USER_PRIORITY]; + bool tc_print_warning[IEEE_8021QAZ_MAX_TCS]; + u8 qos_rel_bw; + u8 qos_prio_type; + u8 stats_idx; + u8 dscp_map[ZXDH_DSCP_NUM_VAL]; + struct zxdh_qos qos[ZXDH_MAX_USER_PRIORITY]; + u64 hw_stats_regs[ZXDH_HW_STAT_INDEX_MAX]; + u8 dscp_mode : 1; +}; +struct zxdh_srq_axi_ram { + u32 __iomem *db; + u32 __iomem *srql; +}; + +struct zxdh_ceq_axi { + u32 __iomem *ceqe_axi_info; + u32 __iomem *rpble_axi_info; + u32 __iomem *lpble_axi_info; + u32 __iomem *int_info; +}; + +struct zxdh_aeq_vhca_pfvf { + u32 __iomem *aeq_msix_data; + u32 __iomem *aeq_msix_config; + u32 __iomem *aeq_root_axi_data; + u32 __iomem *aeq_leaf_axi_data; + u32 __iomem *aeq_wr_axi_data; + u32 __iomem *aeq_aee_flag; +}; + +struct zxdh_hw_stats { + u64 rdma_stats_entry[ZXDH_HW_STAT_INDEX_MAX]; +}; + +struct zxdh_rdma_stats_get { + u64 
rdma_stats_entry[ZXDH_HW_STAT_INDEX_MAX]; + u8 rdma_stats_entry_sta[ZXDH_HW_STAT_INDEX_MAX]; +}; + +struct zxdh_sc_dev { + struct list_head cqp_cmd_head; /* head of the CQP command list */ + spinlock_t cqp_lock; /* protect CQP list access */ + bool stats_idx_array[ZXDH_MAX_STATS_COUNT_GEN1]; + struct zxdh_dma_mem vf_fpm_query_buf[ZXDH_MAX_PE_ENA_VF_COUNT]; + struct zxdh_dma_mem clear_dpu_mem; + struct zxdh_dma_mem nof_clear_dpu_mem; + + u64 pte_l2d_startpa; // PTE L2D PA + u32 pte_l2d_len; // PTE L2D LEN + struct zxdh_hw *hw; + u8 __iomem *db_addr; + u32 __iomem *wqe_alloc_db; + u32 __iomem *cq_arm_db; + u32 __iomem *aeq_alloc_db; + u32 __iomem *cqp_db; + u32 __iomem *cq_ack_db; + u32 __iomem *ceq_itr_mask_db; + u32 __iomem *aeq_itr_mask_db; + u32 __iomem *hw_regs[ZXDH_MAX_REGS]; + u32 __iomem *ceq_itr_enable; + // u32 __iomem *ceq_ep_addr[ZXDH_MAX_EP_NUM]; + // struct zxdh_ep_addr ceq_ep_addr[ZXDH_MAX_EP_NUM]; + struct zxdh_ceq_axi ceq_axi; + u32 __iomem *aeq_itr_enable; + u32 __iomem *aeq_tail_pointer; + // struct zxdh_ep_addr aeq_ep_addr[ZXDH_MAX_EP_NUM]; + struct zxdh_aeq_vhca_pfvf aeq_vhca_pfvf; + // struct zxdh_cm_aeq_axi aeq_axi; + u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */ + struct zxdh_srq_axi_ram srq_axi_ram; + u64 hw_masks[ZXDH_MAX_MASKS]; + u8 hw_shifts[ZXDH_MAX_SHIFTS]; + struct zxdh_hw_stats stats_entry; + u64 hw_stats_regs[ZXDH_HW_STAT_INDEX_MAX]; + u64 hw_stats_vf_regs[ZXDH_HW_STAT_INDEX_MAX]; + u64 feature_info[ZXDH_MAX_FEATURES]; + u64 cqp_cmd_stats[ZXDH_MAX_CQP_OPS]; + struct zxdh_hw_attrs hw_attrs; + struct zxdh_hmc_info *hmc_info; + struct zxdh_vfdev *vf_dev[ZXDH_MAX_PE_ENA_VF_COUNT]; + u8 vf_recv_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + u16 vf_recv_len; + + spinlock_t + vf_dev_lock; /* sync vf_dev usage with async events like reset */ + struct workqueue_struct *vchnl_wq; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_aeq *aeq; + struct zxdh_sc_ceq *ceq[ZXDH_CEQ_MAX_COUNT]; + struct zxdh_sc_cq *ccq; + const struct zxdh_irq_ops *irq_ops; + u32 max_ceqs; + u32 base_qpn; + u32 base_cqn; + u32 base_srqn; + u32 base_ceqn; + struct zxdh_ws_node *ws_tree_root; + struct mutex ws_mutex; /* ws tree mutex */ + struct zxdh_qos qos[ZXDH_MAX_USER_PRIORITY]; + u16 num_vfs; + u8 hmc_fn_id; + u16 vf_id; + u16 vhca_id; + u16 vhca_id_pf; + u16 cache_id; + u8 ep_id; + u8 hmc_epid; + u8 soc_tx_rx_cqp_ind; + u8 soc_tx_rx_cqp_axid; + u8 soc_rdma_io_ind; + u16 ird_size; + u16 ws_offset; + u32 total_vhca; + u64 nof_ioq_ddr_addr; + u64 l2d_smmu_addr; + u32 l2d_smmu_l2_offset; + u8 vchnl_up : 1; + u8 ceq_valid : 1; + u8 privileged : 1; + u8 double_vlan_en : 1; + u8 hmc_use_dpu_ddr : 1; + u8 np_mode_low_lat : 1; + u8 vf_mb_init : 1; + struct mutex vchnl_mutex; + u8 pci_rev; + int (*ws_add)(struct zxdh_sc_vsi *vsi, u8 user_pri); + void (*ws_remove)(struct zxdh_sc_vsi *vsi, u8 user_pri); + void (*ws_reset)(struct zxdh_sc_vsi *vsi); + struct zxdh_hmc_obj_manage hmc_pf_manager_info; + struct smmu_pte_address *pte_address; + struct zxdh_vf_hmc_obj_info vf_hmcobjinfo[256]; + u8 ceq_0_ok; + u8 ceq_interrupt; + u8 flag1 : 1; + u8 flag2 : 1; + u8 flag3 : 1; + u8 flag4 : 1; + u8 flag5 : 1; +}; + +struct zxdh_modify_cq_info { + u64 cq_pa; + struct zxdh_cqe *cq_base; + u32 cq_size; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 virtual_map : 1; + u8 cq_resize : 1; +}; + +struct zxdh_create_qp_info { + u8 ord_valid : 1; + u8 tcp_ctx_valid : 1; + u8 cq_num_valid : 1; + u8 arp_cache_idx_valid : 1; + u8 mac_valid : 1; + bool force_lpb; + u8 
next_iwarp_state; +}; + +struct zxdh_modify_qp_info { + u64 rx_win0; + u64 rx_win1; + u64 qpc_tx_mask_low; + u64 qpc_tx_mask_high; + u64 qpc_rx_mask_low; + u64 qpc_rx_mask_high; + u16 new_mss; + u8 next_iwarp_state; + u8 curr_iwarp_state; + u8 termlen; + u16 udp_sport; + u8 ord_valid : 1; + u8 tcp_ctx_valid : 1; + u8 udp_ctx_valid : 1; + u8 cq_num_valid : 1; + u8 arp_cache_idx_valid : 1; + u8 reset_tcp_conn : 1; + u8 remove_hash_idx : 1; + u8 dont_send_term : 1; + u8 dont_send_fin : 1; + u8 cached_var_valid : 1; + u8 mss_change : 1; + u8 force_lpb : 1; + u8 mac_valid : 1; +}; + +struct zxdh_modify_srq_info { + int limit; +}; + +struct zxdh_create_srq_info { + u8 state; +}; + +struct zxdh_destroy_srq_info { + u8 state; +}; + +struct zxdh_ccq_cqe_info { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u64 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + u8 op_code; + u8 mailbox_cqe; + __le64 addrbuf[5]; + bool error; +}; + +struct zxdh_qos_tc_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +struct zxdh_l2params { + struct zxdh_qos_tc_info tc_info[ZXDH_MAX_USER_PRIORITY]; + u32 num_apps; + u16 qs_handle_list[ZXDH_MAX_USER_PRIORITY]; + u16 mtu; + u8 up2tc[ZXDH_MAX_USER_PRIORITY]; + u8 dscp_map[ZXDH_DSCP_NUM_VAL]; + u8 num_tc; + u8 vsi_rel_bw; + u8 vsi_prio_type; + u8 mtu_changed : 1; + u8 tc_changed : 1; + u8 dscp_mode : 1; +}; + +struct zxdh_vsi_init_info { + struct zxdh_sc_dev *dev; + void *back_vsi; + struct zxdh_l2params *params; + u16 exception_lan_q; + u16 pf_data_vsi_num; + enum zxdh_vm_vf_type vm_vf_type; + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); +}; + +struct zxdh_vsi_stats_info { + struct zxdh_vsi_pestat *pestat; + u8 fcn_id; + bool alloc_stats_inst; +}; + +struct zxdh_device_init_info { + struct zxdh_hw *hw; + void __iomem *bar0; + struct workqueue_struct *vchnl_wq; + u16 max_vfs; + u8 hmc_fn_id; + bool privileged; +}; + +struct zxdh_ceq_init_info { + u64 ceqe_pa; + struct zxdh_sc_dev *dev; + u64 *ceqe_base; + void *pbl_list; + u32 elem_cnt; + u32 log2_elem_size; + u32 ceq_id; + u32 ceq_index; + u8 virtual_map : 1; + u8 tph_en : 1; + u8 itr_no_expire : 1; + u8 pbl_chunk_size; + u8 tph_val; + u32 first_pm_pbl_idx; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_cq **reg_cq; + u32 reg_cq_idx; + u32 msix_idx; +}; + +struct zxdh_aeq_init_info { + u64 aeq_elem_pa; + struct zxdh_sc_dev *dev; + u32 *aeqe_base; + void *pbl_list; + u32 elem_cnt; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u32 msix_idx; +}; + +struct zxdh_ccq_init_info { + u64 cq_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_cqe *cq_base; + __le64 *shadow_area; + void *pbl_list; + u32 num_elem; + u32 ceq_id; + u32 ceq_index; + u32 cq_num; + u32 shadow_read_threshold; + u8 ceqe_mask : 1; + u8 ceq_id_valid : 1; + u8 cqe_size; + u8 virtual_map : 1; + u8 tph_en : 1; + u8 tph_val; + u8 pbl_chunk_size; + u16 cq_max; + u16 cq_period; + u8 scqe_break_moderation_en : 1; + u8 cq_st; + u16 is_in_list_cnt; + u32 first_pm_pbl_idx; + struct zxdh_sc_vsi *vsi; +}; + +struct zxdh_udp_offload_info { + u8 ipv4 : 1; + u8 insert_vlan_tag : 1; + u8 ttl; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr[4]; + u16 pmtu; + u16 vlan_tag; + u8 dest_mac[ETH_ALEN]; + u32 flow_label; + u8 udp_state; + u32 psn_nxt; + u32 lsn; + u32 epsn; + u32 psn_max; + u32 psn_una; + u32 local_ipaddr[4]; + u32 cwnd; + u8 rexmit_thresh; + u8 
rnr_nak_thresh; + u8 timeout; + u8 min_rnr_timer; +}; + +struct zxdh_roce_offload_info { + u16 p_key; + u16 err_rq_idx; + u32 qkey; + u32 dest_qp; + u32 local_qp; + u8 roce_tver; + u8 ack_credits; + u8 err_rq_idx_valid; + u32 pd_id; + u16 ord_size; + u16 ird_size; + u8 is_qp1 : 1; + u8 udprivcq_en : 1; + u8 dcqcn_en : 1; + u8 ecn_en : 1; + u8 rcv_no_icrc : 1; + u8 wr_rdresp_en : 1; + u8 bind_en : 1; + u8 fast_reg_en : 1; + u8 priv_mode_en : 1; + u8 rd_en : 1; + u8 timely_en : 1; + u8 dctcp_en : 1; + u8 fw_cc_enable : 1; + u8 use_stats_inst : 1; + u16 t_high; + u16 t_low; + u8 last_byte_sent; + u8 mac_addr[ETH_ALEN]; + u8 rtomin; +}; + +struct zxdh_iwarp_offload_info { + u16 rcv_mark_offset; + u16 snd_mark_offset; + u8 ddp_ver; + u8 rdmap_ver; + u8 iwarp_mode; + u16 err_rq_idx; + u32 pd_id; + u16 ord_size; + u16 ird_size; + u8 ib_rd_en : 1; + u8 align_hdrs : 1; + u8 rcv_no_mpa_crc : 1; + u8 err_rq_idx_valid : 1; + u8 snd_mark_en : 1; + u8 rcv_mark_en : 1; + u8 wr_rdresp_en : 1; + u8 bind_en : 1; + u8 fast_reg_en : 1; + u8 priv_mode_en : 1; + u8 rd_en : 1; + u8 timely_en : 1; + u8 use_stats_inst : 1; + u8 ecn_en : 1; + u8 dctcp_en : 1; + u16 t_high; + u16 t_low; + u8 last_byte_sent; + u8 mac_addr[ETH_ALEN]; + u8 rtomin; +}; + +struct zxdh_tcp_offload_info { + u8 ipv4 : 1; + u8 no_nagle : 1; + u8 insert_vlan_tag : 1; + u8 time_stamp : 1; + u8 drop_ooo_seg : 1; + u8 avoid_stretch_ack : 1; + u8 wscale : 1; + u8 ignore_tcp_opt : 1; + u8 ignore_tcp_uns_opt : 1; + u8 cwnd_inc_limit; + u8 dup_ack_thresh; + u8 ttl; + u8 src_mac_addr_idx; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr[4]; + //u32 dest_ip_addr0; + //u32 dest_ip_addr1; + //u32 dest_ip_addr2; + //u32 dest_ip_addr3; + u32 snd_mss; + u16 syn_rst_handling; + u16 vlan_tag; + u16 arp_idx; + u32 flow_label; + u8 tcp_state; + u8 snd_wscale; + u8 rcv_wscale; + u32 time_stamp_recent; + u32 time_stamp_age; + u32 snd_nxt; + u32 snd_wnd; + u32 rcv_nxt; + u32 rcv_wnd; + u32 snd_max; + u32 snd_una; + u32 srtt; + u32 rtt_var; + u32 ss_thresh; + u32 cwnd; + u32 snd_wl1; + u32 snd_wl2; + u32 max_snd_window; + u8 rexmit_thresh; + u32 local_ipaddr[4]; +}; + +struct zxdh_qp_host_ctx_info { + u64 qp_compl_ctx; + union { + struct zxdh_tcp_offload_info *tcp_info; + struct zxdh_udp_offload_info *udp_info; + }; + union { + struct zxdh_iwarp_offload_info *iwarp_info; + struct zxdh_roce_offload_info *roce_info; + }; + u32 send_cq_num; + u32 rcv_cq_num; + u32 rem_endpoint_idx; + u8 stats_idx; + u8 srq_valid : 1; + u8 tcp_info_valid : 1; + u8 iwarp_info_valid : 1; + u8 stats_idx_valid : 1; + u8 user_pri; + u8 next_qp_state; + u8 use_srq : 1; +}; + +struct zxdh_aeqe_info { + u64 compl_ctx; + u32 qp_cq_id; + u16 ae_id; + u16 wqe_idx; + u8 tcp_state; + u8 iwarp_state; + u8 qp : 1; + u8 cq : 1; + u8 sq : 1; + u8 rq : 1; + u8 srq : 1; + u8 in_rdrsp_wr : 1; + u8 out_rdrsp : 1; + u8 aeqe_overflow : 1; + u8 q2_data_written; + u8 ae_src; + u32 vhca_id; +}; + +struct zxdh_allocate_stag_info { + u64 total_len; + u64 first_pm_pbl_idx; + u32 chunk_size; + u32 stag_idx; + u32 page_size; + u32 pd_id; + u16 access_rights; + u8 remote_access : 1; + u8 use_hmc_fcn_index : 1; + u8 use_pf_rid : 1; + u16 hmc_fcn_index; +}; + +struct zxdh_mw_alloc_info { + u32 mw_stag_index; + u32 page_size; + u32 pd_id; + u8 remote_access : 1; + u8 mw_wide : 1; + u8 mw1_bind_dont_vldt_key : 1; +}; + +struct zxdh_reg_ns_stag_info { + u64 reg_addr_pa; + u64 va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum zxdh_addressing_type addr_type; + 
zxdh_stag_index stag_idx; + u16 access_rights; + u32 pd_id; + zxdh_stag_key stag_key; + u8 use_hmc_fcn_index : 1; + u16 hmc_fcn_index; + u8 use_pf_rid : 1; +}; + +struct zxdh_fast_reg_stag_info { + u64 wr_id; + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum zxdh_addressing_type addr_type; + zxdh_stag_index stag_idx; + u16 access_rights; + u32 pd_id; + zxdh_stag_key stag_key; + u8 local_fence : 1; + u8 read_fence : 1; + u8 signaled : 1; + u8 push_wqe : 1; + u8 use_hmc_fcn_index : 1; + u16 hmc_fcn_index; + u8 use_pf_rid : 1; + u8 defer_flag : 1; +}; + +struct zxdh_dealloc_stag_info { + u32 stag_idx; + u32 pd_id; + u8 mr : 1; + u8 dealloc_pbl : 1; +}; + +struct zxdh_register_shared_stag { + u64 va; + enum zxdh_addressing_type addr_type; + zxdh_stag_index new_stag_idx; + zxdh_stag_index parent_stag_idx; + u32 access_rights; + u32 pd_id; + u32 page_size; + zxdh_stag_key new_stag_key; +}; + +struct zxdh_qp_init_info { + struct zxdh_qp_uk_init_info qp_uk_init_info; + struct zxdh_sc_pd *pd; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_dev *dev; + __le64 *host_ctx; + u8 *q2; + u64 sq_pa; + u64 rq_pa; + u64 host_ctx_pa; + u64 q2_pa; + u64 shadow_area_pa; + u8 sq_tph_val; + u8 rq_tph_val; + u8 sq_tph_en : 1; + u8 rq_tph_en : 1; + u8 rcv_tph_en : 1; + u8 xmit_tph_en : 1; + u8 virtual_map : 1; +}; + +struct zxdh_cq_init_info { + struct zxdh_sc_dev *dev; + u64 cq_base_pa; + u64 shadow_area_pa; + u32 ceq_id; + u32 ceq_index; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 virtual_map : 1; + u8 ceqe_mask : 1; + u8 ceq_id_valid : 1; + u8 tph_en : 1; + u8 tph_val; + u8 type; + struct zxdh_cq_uk_init_info cq_uk_init_info; + struct zxdh_sc_vsi *vsi; +}; + +struct zxdh_upload_context_info { + u64 buf_pa; + u32 qp_id; + u8 qp_type; + u8 freeze_qp : 1; + u8 raw_format : 1; +}; + +struct zxdh_local_mac_entry_info { + u8 mac_addr[6]; + u16 entry_idx; +}; + +struct zxdh_add_arp_cache_entry_info { + u8 mac_addr[ETH_ALEN]; + u32 reach_max; + u16 arp_index; + bool permanent; +}; + +struct zxdh_apbvt_info { + u16 port; + bool add; +}; + +struct zxdh_qhash_table_info { + struct zxdh_sc_vsi *vsi; + enum zxdh_quad_hash_manage_type manage; + enum zxdh_quad_entry_type entry_type; + u8 vlan_valid : 1; + u8 ipv4_valid : 1; + u8 mac_addr[ETH_ALEN]; + u16 vlan_id; + u8 user_pri; + u32 qp_num; + u32 dest_ip[4]; + u32 src_ip[4]; + u16 dest_port; + u16 src_port; +}; + +struct zxdh_cqp_manage_push_page_info { + u32 push_idx; + u16 qs_handle; + u8 free_page; + u8 push_page_type; +}; + +struct zxdh_qp_flush_info { + u16 sq_minor_code; + u16 sq_major_code; + u16 rq_minor_code; + u16 rq_major_code; + u16 ae_code; + u8 ae_src; + u8 sq : 1; + u8 rq : 1; + u8 userflushcode : 1; + u8 generate_ae : 1; +}; + +struct zxdh_gen_ae_info { + u16 ae_code; + u8 ae_src; +}; + +struct zxdh_cqp_timeout { + u64 compl_cqp_cmds; + u32 count; +}; + +struct zxdh_src_copy_dest { + u64 src; + u32 len; + u64 dest; +}; + +struct zxdh_dam_read_bycqe { + u8 num; + u8 bitwidth; // 0:64 1:32 + u8 valuetype; + __le64 addrbuf[5]; +}; + +struct zxdh_dma_write64_date { + u8 num; + __le64 addrbuf[3]; + __le64 databuf[3]; +}; + +struct zxdh_dma_write32_date { + u8 num; + u8 inter_sour_sel; + u8 need_inter; + __le64 addrbuf[4]; + __le64 databuf[4]; +}; + +struct zxdh_path_index { + u16 vhca_id; + u8 obj_id; + u8 waypartion; + u8 path_select; + u8 inter_select; +}; + +struct zxdh_mailboxhead_data { + u64 msg0; + u64 msg1; + u64 msg2; + u64 msg3; + u64 msg4; +}; + 
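+/* HW interrupt hooks published through zxdh_sc_dev::irq_ops: AEQ MSI-X configuration plus CEQ/AEQ interrupt enable/disable callbacks supplied by the platform layer. */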
+struct zxdh_irq_ops { + void (*zxdh_cfg_aeq)(struct zxdh_sc_dev *dev, u32 irq_idx); + void (*zxdh_ceq_en_irq)(struct zxdh_sc_dev *dev, u32 idx); + void (*zxdh_aeq_en_irq)(struct zxdh_sc_dev *dev, bool enable); +}; + +u32 zxdh_num_to_log(u32 size_num); + +void zxdh_sc_ccq_arm(struct zxdh_sc_cq *ccq); +int zxdh_sc_ccq_create(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq); +int zxdh_sc_ccq_destroy(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq); +int zxdh_sc_ccq_get_cqe_info(struct zxdh_sc_cq *ccq, + struct zxdh_ccq_cqe_info *info); +int zxdh_sc_ccq_init(struct zxdh_sc_cq *ccq, struct zxdh_ccq_init_info *info); + +int zxdh_sc_cceq_create(struct zxdh_sc_ceq *ceq, u64 scratch); +int zxdh_sc_cceq_destroy_done(struct zxdh_sc_ceq *ceq); + +int zxdh_sc_ceq_destroy(struct zxdh_sc_ceq *ceq, u64 scratch, bool post_sq); +int zxdh_sc_ceq_init(struct zxdh_sc_ceq *ceq, struct zxdh_ceq_init_info *info); +void zxdh_sc_cleanup_ceqes(struct zxdh_sc_cq *cq, struct zxdh_sc_ceq *ceq); +void *zxdh_sc_process_ceq(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *ceq); + +int zxdh_sc_aeq_init(struct zxdh_sc_aeq *aeq, struct zxdh_aeq_init_info *info); +int zxdh_sc_get_next_aeqe(struct zxdh_sc_aeq *aeq, struct zxdh_aeqe_info *info); +int zxdh_sc_repost_aeq_tail(struct zxdh_sc_dev *dev, u32 idx); + +void zxdh_sc_pd_init(struct zxdh_sc_dev *dev, struct zxdh_sc_pd *pd, u32 pd_id, + int abi_ver); +void zxdh_cfg_aeq(struct zxdh_sc_dev *dev, u32 irq_idx); +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +void zxdh_set_irq_rate_limit(struct zxdh_sc_dev *dev, u32 idx, u32 interval); +#endif +void zxdh_check_cqp_progress(struct zxdh_cqp_timeout *cqp_timeout, + struct zxdh_sc_dev *dev); +int zxdh_sc_cqp_create(struct zxdh_sc_cqp *cqp, u16 *maj_err, u16 *min_err); +int zxdh_sc_cqp_destroy(struct zxdh_sc_cqp *cqp, bool free_hwcqp); +int zxdh_sc_cqp_init(struct zxdh_sc_cqp *cqp, struct zxdh_cqp_init_info *info); +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp); +int zxdh_sc_poll_for_cqp_op_done(struct zxdh_sc_cqp *cqp, u8 opcode, + struct zxdh_ccq_cqe_info *cmpl_info); +int zxdh_sc_qp_create(struct zxdh_sc_qp *qp, u64 scratch, bool post_sq); +int zxdh_sc_qp_destroy(struct zxdh_sc_qp *qp, u64 scratch, bool ignore_mw_bnd, + bool post_sq); +int zxdh_sc_qp_flush_wqes(struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_qp_init(struct zxdh_sc_qp *qp, struct zxdh_qp_init_info *info); +int zxdh_sc_qp_modify(struct zxdh_sc_qp *qp, struct zxdh_modify_qp_info *info, + u64 scratch, bool post_sq); +void zxdh_sc_qp_setctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info); +void zxdh_sc_qp_resetctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx); +u16 zxdh_get_rc_gqp_id(u16 ws_index, u16 vhca_id, u32 total_vhca); +u16 get_ud_gqp_id(u16 vhca_id, u32 total_vhca); +int zxdh_sc_cq_destroy(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq); +int zxdh_sc_cq_init(struct zxdh_sc_cq *cq, struct zxdh_cq_init_info *info); +void zxdh_sc_cq_resize(struct zxdh_sc_cq *cq, struct zxdh_modify_cq_info *info); +int zxdh_sc_aeq_destroy(struct zxdh_sc_aeq *aeq, u64 scratch, bool post_sq); + +void sc_vsi_update_stats(struct zxdh_sc_vsi *vsi); +void zxdh_sc_qp_modify_ctx_udp_sport(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info); +void zxdh_sc_qp_modify_private_cmd_qpc(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_modify_qpc_item *info); +struct cqp_info { + union { + struct { + struct zxdh_sc_qp *qp; + struct zxdh_create_qp_info info; + u64 scratch; + } qp_create; 
+ + struct { + struct zxdh_sc_qp *qp; + struct zxdh_modify_qp_info info; + u64 scratch; + } qp_modify; + + struct { + struct zxdh_sc_qp *qp; + u64 scratch; + bool remove_hash_idx; + bool ignore_mw_bnd; + } qp_destroy; + + struct { + struct zxdh_sc_srq *srq; + struct zxdh_create_srq_info info; + u64 scratch; + } srq_create; + + struct { + struct zxdh_sc_srq *srq; + struct zxdh_modify_srq_info info; + u64 scratch; + } srq_modify; + + struct { + struct zxdh_sc_srq *srq; + u64 scratch; + struct zxdh_destroy_srq_info info; + // bool remove_hash_idx; + } srq_destroy; + + struct { + struct zxdh_sc_cq *cq; + u64 scratch; + } cq_create; + + struct { + struct zxdh_sc_cq *cq; + struct zxdh_modify_cq_info info; + u64 scratch; + } cq_modify; + + struct { + struct zxdh_sc_cq *cq; + u64 scratch; + } cq_destroy; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_allocate_stag_info info; + u64 scratch; + } alloc_stag; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_mw_alloc_info info; + u64 scratch; + } mw_alloc; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_reg_ns_stag_info info; + u64 scratch; + } mr_reg_non_shared; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_dealloc_stag_info info; + u64 scratch; + } dealloc_stag; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_add_arp_cache_entry_info info; + u64 scratch; + } add_arp_cache_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u16 arp_index; + } del_arp_cache_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_local_mac_entry_info info; + u64 scratch; + } add_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u8 entry_idx; + u8 ignore_ref_count; + } del_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + } alloc_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_manage_vf_pble_info info; + u64 scratch; + } manage_vf_pble_bp; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_cqp_manage_push_page_info info; + u64 scratch; + } manage_push_page; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_upload_context_info info; + u64 scratch; + } qp_upload_context; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_hmc_fcn_info info; + u64 scratch; + } manage_hmc_pm; + + struct { + struct zxdh_sc_ceq *ceq; + u64 scratch; + } ceq_create; + + struct { + struct zxdh_sc_ceq *ceq; + u64 scratch; + } ceq_destroy; + + struct { + struct zxdh_sc_aeq *aeq; + u64 scratch; + } aeq_create; + + struct { + struct zxdh_sc_aeq *aeq; + u64 scratch; + } aeq_destroy; + + struct { + struct zxdh_sc_qp *qp; + struct zxdh_qp_flush_info info; + u64 scratch; + } qp_flush_wqes; + + struct { + struct zxdh_sc_qp *qp; + struct zxdh_gen_ae_info info; + u64 scratch; + } gen_ae; + + struct { + struct zxdh_sc_cqp *cqp; + void *fpm_val_va; + u64 fpm_val_pa; + u8 hmc_fn_id; + u64 scratch; + } query_fpm_val; + + struct { + struct zxdh_sc_cqp *cqp; + void *fpm_val_va; + u64 fpm_val_pa; + u8 hmc_fn_id; + u64 scratch; + } commit_fpm_val; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_apbvt_info info; + u64 scratch; + } manage_apbvt_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_qhash_table_info info; + u64 scratch; + } manage_qhash_table_entry; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_update_sds_info info; + u64 scratch; + } update_pe_sds; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_qp *qp; + u64 scratch; + } suspend_resume; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_ah_info info; + u64 scratch; + } ah_create; + + struct 
{ + struct zxdh_sc_cqp *cqp; + struct zxdh_ah_info info; + u64 scratch; + } ah_destroy; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_create; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_destroy; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_modify; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_stats_inst_info info; + u64 scratch; + } stats_manage; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_stats_gather_info info; + u64 scratch; + } stats_gather; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_ws_node_info info; + u64 scratch; + } ws_node; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_up_info info; + u64 scratch; + } up_map; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_dma_mem query_buff_mem; + u64 scratch; + } query_rdma; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_src_copy_dest src_dest; + struct zxdh_path_index src_path_index; + struct zxdh_path_index dest_path_index; + bool host; + u64 scratch; + } dma_writeread; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mailboxhead_data mbhead_data; + u64 scratch; + u32 dst_vf_id; + } hmc_mb; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_path_index dest_path_index; + struct zxdh_dma_write32_date dma_data; + u64 scratch; + } dma_write32data; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_path_index dest_path_index; + struct zxdh_dma_write64_date dma_data; + u64 scratch; + } dma_write64data; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_dam_read_bycqe dma_rcqe; + struct zxdh_path_index src_path_index; + u64 scratch; + } dma_read_cqe; + struct { + struct zxdh_sc_qp *qp; + u64 qpc_buf_pa; + u64 scratch; + } query_qpc; + struct { + struct zxdh_sc_cq *cq; + u64 cqc_buf_pa; + u64 scratch; + } query_cqc; + struct { + struct zxdh_sc_ceq *ceq; + u64 ceqc_buf_pa; + u64 scratch; + } query_ceqc; + struct { + struct zxdh_sc_aeq *aeq; + u64 aeqc_buf_pa; + u64 scratch; + } query_aeqc; + struct { + struct zxdh_sc_srq *srq; + u64 srqc_buf_pa; + u64 scratch; + } query_srqc; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u32 mkeyindex; + } query_mkey; + + } u; +}; + +struct cqp_cmds_info { + struct list_head cqp_cmd_entry; + u8 cqp_cmd; + u8 post_sq; + struct cqp_info in; +}; + +struct zxdh_virtchnl_work { + struct work_struct work; + u8 vf_msg_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + struct zxdh_sc_dev *dev; + u16 vf_id; + u16 len; +}; + +__le64 *zxdh_sc_cqp_get_next_send_wqe_idx(struct zxdh_sc_cqp *cqp, u64 scratch, + u32 *wqe_idx); + +/** + * zxdh_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @scratch: private data for CQP WQE + */ +static inline __le64 *zxdh_sc_cqp_get_next_send_wqe(struct zxdh_sc_cqp *cqp, + u64 scratch) +{ + u32 wqe_idx; + + return zxdh_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); +} +#endif /* ZXDH_TYPE_H */ diff --git a/src/rdma/src/ubuntu_kcompat.h b/src/rdma/src/ubuntu_kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..11c5e0947ebdf6cfc33989fc85a5a1fd2923e2bb --- /dev/null +++ b/src/rdma/src/ubuntu_kcompat.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef UBUNTU_KCOMPAT_H +#define UBUNTU_KCOMPAT_H + +#ifdef UBUNTU_220404 +/* Ubuntu 22.04 */ +#define COPY_USER_PGADDR_VER_4 +#define PROCESS_MAD_VER_3 +#define ALLOC_HW_STATS_V3 +#define 
ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define CREATE_AH_VER_5 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_2 +#define GLOBAL_QP_MEM +#define DEALLOC_PD_VER_4 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_4 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V2 +#define GET_HW_STATS_V2 +#define GET_LINK_LAYER_V2 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_UMEM_GET_V3 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_2 +#define ZXDH_DESTROY_CQ_VER_4 +#define ZXDH_DESTROY_SRQ_VER_3 +#define ZXDH_IRQ_UPDATE_AFFINITY +#define ZXDH_AUX_GET_SET_DRV_DATA +#define IN_IFADDR +#define IW_PORT_IMMUTABLE_V2 +#define MODIFY_PORT_V2 +#define REREG_MR_VER_2 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V2 +#define QUERY_GID_ROCE_V2 +#define QUERY_PKEY_V2 +#define QUERY_PORT_V2 +#define ROCE_PORT_IMMUTABLE_V2 +#define SET_BEST_PAGE_SZ_V2 +#define RDMA_MMAP_DB_SUPPORT +#define SET_ROCE_CM_INFO_VER_3 + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_typeq_ib_wr const +#endif /* UBUNTU_220404 */ + +#ifdef UBUNTU_200404 +/* Ubuntu 20.04.4 */ +#define ALLOC_HW_STATS_V2 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_4 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_4 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V2 +#define GET_HW_STATS_V2 +#define GET_LINK_LAYER_V2 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_UMEM_GET_V3 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_2 +#define ZXDH_DESTROY_CQ_VER_4 +#define IN_IFADDR +#define IW_PORT_IMMUTABLE_V2 +#define MODIFY_PORT_V2 +#define REREG_MR_VER_2 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V2 +#define QUERY_GID_ROCE_V2 +#define QUERY_PKEY_V2 +#define QUERY_PORT_V2 +#define ROCE_PORT_IMMUTABLE_V2 +#define RDMA_MMAP_DB_SUPPORT +#define SET_ROCE_CM_INFO_VER_3 + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct 
zxdh_ucontext, ibucontext) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_typeq_ib_wr const +#endif /* UBUNTU_200404 */ + +#ifdef UBUNTU_200403 +/* Ubuntu 20.04.3 */ +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_4 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_4 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V2 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V3 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_2 +#define ZXDH_DESTROY_CQ_VER_4 +#define IN_IFADDR +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define REREG_MR_VER_2 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_ROCE_CM_INFO_VER_3 + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_typeq_ib_wr const +#endif /* UBUNTU_200403 */ + +#ifdef UBUNTU_200402 +/* Ubuntu 20.04.2 */ +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_5 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_3 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V3 +#define ZXDH_ALLOC_MR_VER_1 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_4 +#define IN_IFADDR +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define 
QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_typeq_ib_wr const +#endif /* UBUNTU_200402 */ + +#ifdef UBUNTU_2004 +/* Ubuntu 20.04 backport from > 5.4 kernel */ +#define ALLOC_HW_STATS_V1 +#define ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_3 +#define ALLOC_UCONTEXT_VER_2 +#define COPY_USER_PGADDR_VER_3 +#define CREATE_AH_VER_2 +#define CREATE_CQ_VER_3 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_3 +#define DEALLOC_UCONTEXT_VER_2 +#define DEREG_MR_VER_2 +#define DESTROY_AH_VER_3 +#define DESTROY_QP_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define HAS_IB_SET_DEVICE_OP +#define IB_DEALLOC_DRIVER_SUPPORT +#define IB_IW_PKEY +#define IB_UMEM_GET_V2 +#define ZXDH_ALLOC_MR_VER_1 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_3 +#define IN_IFADDR +#define IW_PORT_IMMUTABLE_V1 +#define MODIFY_PORT_V1 +#define NETDEV_TO_IBDEV_SUPPORT +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define RDMA_MMAP_DB_SUPPORT +#define SET_ROCE_CM_INFO_VER_3 +#define UVERBS_CMD_MASK +#define USE_KMAP + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + rdma_gid_attr_network_type(sgid_attr) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_typeq_ib_wr const +#endif /* UBUNTU_2004 */ + +#ifdef UBUNTU_1804 +#define ALLOC_HW_STATS_V1 +#define 
ALLOC_HW_STATS_STRUCT_V1 +#define ALLOC_PD_VER_1 +#define ALLOC_UCONTEXT_VER_1 +#define COPY_USER_PGADDR_VER_1 +#define CREATE_AH_VER_1_2 +#define CREATE_CQ_VER_1 +#define CREATE_QP_VER_1 +#define DEALLOC_PD_VER_1 +#define DEALLOC_UCONTEXT_VER_1 +#define DEREG_MR_VER_1 +#define DESTROY_AH_VER_1 +#define DESTROY_QP_VER_1 +#define ETHER_COPY_VER_2 +#define GET_ETH_SPEED_AND_WIDTH_V1 +#define GET_HW_STATS_V1 +#define GET_LINK_LAYER_V1 +#define IB_GET_CACHED_GID +#define IB_IW_MANDATORY_AH_OP +#define IB_IW_PKEY +#define IB_UMEM_GET_V0 +#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION +#define ZXDH_ADD_DEL_GID +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_ALLOC_MW_VER_1 +#define ZXDH_DESTROY_CQ_VER_1 +#define IW_PORT_IMMUTABLE_V1 +#define FOR_IFA +#define MODIFY_PORT_V1 +#define QUERY_GID_V1 +#define QUERY_GID_ROCE_V1 +#define QUERY_PKEY_V1 +#define QUERY_PORT_V1 +#define REREG_MR_VER_1 +#define ROCE_PORT_IMMUTABLE_V1 +#define SET_BEST_PAGE_SZ_V1 +#define SET_ROCE_CM_INFO_VER_1 +#define UVERBS_CMD_MASK +#define VMA_DATA +#define USE_KMAP + +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#define kc_set_ibdev_add_del_gid(ibdev) \ + do { \ + ibdev->add_gid = zxdh_add_gid; \ + ibdev->del_gid = zxdh_del_gid; \ + } while (0) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define set_ibdev_dma_device(ibdev, dev) +#define set_max_sge(props, rf) \ + ((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags) +#define kc_set_props_ip_gid_caps(props) \ + ((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + ib_gid_to_network_type(gid_type, gid) +#define kc_deref_sgid_attr(sgid_attr) (sgid_attr.ndev) +#define rdma_query_gid(ibdev, port, index, gid) \ + ib_get_cached_gid(ibdev, port, index, gid, NULL) + +#define kc_get_ucontext(udata) to_ucontext(context) +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, NULL) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + to_ucontext(ibpd->uobject->context) +#define kc_typeq_ib_wr +#define ib_device_put(dev) +#define ib_alloc_device(zxdh_device, ibdev) \ + ((struct zxdh_device *)ib_alloc_device(sizeof(struct zxdh_device))) +#endif /* UBUNTU_1804 */ + +#endif /* UBUNTU_KCOMPAT_H */ diff --git a/src/rdma/src/uda.c b/src/rdma/src/uda.c new file mode 100644 index 0000000000000000000000000000000000000000..1f3846753a1ce7a9ff62fc4bcc076ba2600d5f3a --- /dev/null +++ b/src/rdma/src/uda.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "uda.h" +#include "uda_d.h" +#include "vf.h" +#include "virtchnl.h" + +/** + * zxdh_sc_access_ah() - Create, modify or delete AH + * @cqp: struct for cqp hw + * @info: ah 
information + * @op: Operation + * @scratch: u64 saved to be used during cqp completion + */ +int zxdh_sc_access_ah(struct zxdh_sc_cqp *cqp, struct zxdh_ah_info *info, + u32 op, u64 scratch) +{ + __le64 *wqe; + u64 qw1, qw2; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + info->tc_tos &= ~ECN_CODE_PT_MASK; + info->tc_tos |= ECN_CODE_PT_VAL; + + qw1 = FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_PDINDEX, info->pd_idx) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_TC, info->tc_tos) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG, + info->insert_vlan_tag) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_OPCODE, op); + + qw2 = FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl); + set_64bit_val(wqe, 8, qw2); + + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_UDAQPC_VLANTAG, info->vlan_tag) | + LS_64_1(info->mac_addr[5], 16) | + LS_64_1(info->mac_addr[4], 24) | + LS_64_1(info->mac_addr[3], 32) | + LS_64_1(info->mac_addr[2], 40) | + LS_64_1(info->mac_addr[1], 48) | + LS_64_1(info->mac_addr[0], 56)); + + set_64bit_val(wqe, 24, + LS_64_1(info->dmac[5], 16) | LS_64_1(info->dmac[4], 24) | + LS_64_1(info->dmac[3], 32) | + LS_64_1(info->dmac[2], 40) | + LS_64_1(info->dmac[1], 48) | + LS_64_1(info->dmac[0], 56)); + + if (!info->ipv4_valid) { + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1, + info->dest_ip_addr[1]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->dest_ip_addr[0])); + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->dest_ip_addr[3]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2, + info->dest_ip_addr[2])); + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1, + info->src_ip_addr[1]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->src_ip_addr[0])); + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->src_ip_addr[3]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2, + info->src_ip_addr[2])); + + } else { + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->dest_ip_addr[0])); + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->src_ip_addr[0])); + } + + dma_wmb(); /* need write block before writing WQE header */ + + set_64bit_val(wqe, 0, qw1); + + print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_create_mg_ctx() - create a mcg context + * @info: multicast group context info + */ +static int zxdh_create_mg_ctx(struct zxdh_mcast_grp_info *info) +{ + struct zxdh_mcast_grp_ctx_entry_info *entry_info = NULL; + u32 idx = 0; /* index in the array */ + u32 ctx_idx = 0; /* index in the MG context */ + + memset(info->dma_mem_mc.va, 0, + ZXDH_MAX_MGS_PER_CTX * sizeof(u32) + sizeof(u64)); + + for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) { + entry_info = &info->mg_ctx_info[idx]; + if (entry_info->valid_entry) { + set_32bit_val((__le32 *)info->dma_mem_mc.va, + sizeof(u64) + ctx_idx * sizeof(u32), + FIELD_PREP(ZXDH_UDA_MGCTX_QPID, + entry_info->qp_id)); + ctx_idx++; + } + } + set_64bit_val((__le64 *)info->dma_mem_mc.va, 0, ctx_idx); + + return 0; +} + +/** + * zxdh_access_mcast_grp() - Access mcast group based on op + * @cqp: Control QP + * @info: multicast group context info + * @op: operation to perform + * @scratch: u64 saved to be used during cqp completion + */ 
+int zxdh_access_mcast_grp(struct zxdh_sc_cqp *cqp,
+			  struct zxdh_mcast_grp_info *info, u32 op, u64 scratch)
+{
+	__le64 *wqe;
+	u64 dmac;
+
+	if (info->mg_id >= ZXDH_UDA_MAX_FSI_MGS) {
+		pr_err("WQE: mg_id out of range\n");
+		return -EINVAL;
+	}
+
+	wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch);
+	if (!wqe) {
+		pr_err("WQE: ring full\n");
+		return -ENOSPC;
+	}
+
+	zxdh_create_mg_ctx(info);
+
+	dmac = LS_64_1(info->dest_mac_addr[5], 0) |
+	       LS_64_1(info->dest_mac_addr[4], 8) |
+	       LS_64_1(info->dest_mac_addr[3], 16) |
+	       LS_64_1(info->dest_mac_addr[2], 24) |
+	       LS_64_1(info->dest_mac_addr[1], 32) |
+	       LS_64_1(info->dest_mac_addr[0], 40);
+
+	set_64bit_val(wqe, 8,
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_DMAC, dmac) |
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_VLANID, info->vlan_id));
+
+	if (!info->ipv4_valid) {
+		set_64bit_val(wqe, 24,
+			      FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0,
+					 info->dest_ip_addr[0]) |
+			      FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1,
+					 info->dest_ip_addr[1]));
+		set_64bit_val(wqe, 16,
+			      FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2,
+					 info->dest_ip_addr[2]) |
+			      FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3,
+					 info->dest_ip_addr[3]));
+	} else {
+		set_64bit_val(wqe, 24,
+			      FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0,
+					 info->dest_ip_addr[0]));
+	}
+
+	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
+
+	dma_wmb(); /* need write memory block before writing the WQE header. */
+
+	set_64bit_val(wqe, 0,
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid) |
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
+		      FIELD_PREP(ZXDH_UDA_CQPSQ_MG_OPCODE, op));
+
+	zxdh_sc_cqp_post_sq(cqp);
+
+	return 0;
+}
+
+/**
+ * zxdh_compare_mgs - Compares two multicast group structures
+ * @entry1: Multicast group info
+ * @entry2: Multicast group info in context
+ */
+static bool zxdh_compare_mgs(struct zxdh_mcast_grp_ctx_entry_info *entry1,
+			     struct zxdh_mcast_grp_ctx_entry_info *entry2)
+{
+	if (entry1->dest_port == entry2->dest_port &&
+	    entry1->qp_id == entry2->qp_id)
+		return true;
+
+	return false;
+}
+
+/**
+ * zxdh_sc_add_mcast_grp - Allocates mcast group entry in ctx
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ */
+int zxdh_sc_add_mcast_grp(struct zxdh_mcast_grp_info *ctx,
+			  struct zxdh_mcast_grp_ctx_entry_info *mg)
+{
+	u32 idx;
+	bool free_entry_found = false;
+	u32 free_entry_idx = 0;
+
+	/* find either an identical or a free entry for a multicast group */
+	for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) {
+		if (ctx->mg_ctx_info[idx].valid_entry) {
+			if (zxdh_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
+				ctx->mg_ctx_info[idx].use_cnt++;
+				return 0;
+			}
+			continue;
+		}
+		if (!free_entry_found) {
+			free_entry_found = true;
+			free_entry_idx = idx;
+		}
+	}
+
+	if (free_entry_found) {
+		ctx->mg_ctx_info[free_entry_idx] = *mg;
+		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
+		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
+		ctx->no_of_mgs++;
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * zxdh_sc_del_mcast_grp - Delete mcast group
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ *
+ * Finds and removes a specific multicast group from context; all
+ * parameters must match to remove a multicast group.
+ */ +int zxdh_sc_del_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg) +{ + u32 idx; + + /* find an entry in multicast group context */ + for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) { + if (!ctx->mg_ctx_info[idx].valid_entry) + continue; + + if (zxdh_compare_mgs(mg, &ctx->mg_ctx_info[idx])) { + ctx->mg_ctx_info[idx].use_cnt--; + + if (!ctx->mg_ctx_info[idx].use_cnt) { + ctx->mg_ctx_info[idx].valid_entry = false; + ctx->no_of_mgs--; + /* Remove gap if element was not the last */ + if (idx != ctx->no_of_mgs && + ctx->no_of_mgs > 0) { + memcpy(&ctx->mg_ctx_info[idx], + &ctx->mg_ctx_info[ctx->no_of_mgs - + 1], + sizeof(ctx->mg_ctx_info[idx])); + ctx->mg_ctx_info[ctx->no_of_mgs - 1] + .valid_entry = false; + } + } + + return 0; + } + } + + return -EINVAL; +} diff --git a/src/rdma/src/uda.h b/src/rdma/src/uda.h new file mode 100644 index 0000000000000000000000000000000000000000..3e921f2acbbe1f8b8e812cc464141276f45ebc04 --- /dev/null +++ b/src/rdma/src/uda.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_UDA_H +#define ZXDH_UDA_H + +#define ZXDH_UDA_MAX_FSI_MGS 8192 +#define ZXDH_UDA_MAX_PFS 16 +#define ZXDH_UDA_MAX_VFS 128 + +struct zxdh_sc_cqp; + +struct zxdh_ah_info { + struct zxdh_sc_vsi *vsi; + u32 pd_idx; + u32 dest_ip_addr[4]; + u32 src_ip_addr[4]; + u32 flow_label; + u32 ah_idx; + u16 vlan_tag; + u8 insert_vlan_tag; + u8 tc_tos; + u8 hop_ttl; + u8 mac_addr[ETH_ALEN]; + u8 dmac[ETH_ALEN]; + u8 ah_valid : 1; + u8 ipv4_valid : 1; + u8 do_lpbk : 1; +}; + +struct zxdh_sc_ah { + struct zxdh_sc_dev *dev; + struct zxdh_ah_info ah_info; +}; + +int zxdh_sc_add_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg); +int zxdh_sc_del_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg); +int zxdh_sc_access_ah(struct zxdh_sc_cqp *cqp, struct zxdh_ah_info *info, + u32 op, u64 scratch); +int zxdh_access_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, u32 op, + u64 scratch); + +static inline void zxdh_sc_init_ah(struct zxdh_sc_dev *dev, + struct zxdh_sc_ah *ah) +{ + ah->dev = dev; +} + +static inline int zxdh_sc_create_ah(struct zxdh_sc_cqp *cqp, + struct zxdh_ah_info *info, u64 scratch) +{ + return zxdh_sc_access_ah(cqp, info, ZXDH_CQP_OP_CREATE_AH, scratch); +} + +static inline int zxdh_sc_destroy_ah(struct zxdh_sc_cqp *cqp, + struct zxdh_ah_info *info, u64 scratch) +{ + return zxdh_sc_access_ah(cqp, info, ZXDH_CQP_OP_DESTROY_AH, scratch); +} + +static inline int zxdh_sc_create_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_CREATE_MCAST_GRP, + scratch); +} + +static inline int zxdh_sc_modify_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_MODIFY_MCAST_GRP, + scratch); +} + +static inline int zxdh_sc_destroy_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_DESTROY_MCAST_GRP, + scratch); +} +#endif /* ZXDH_UDA_H */ diff --git a/src/rdma/src/uda_d.h b/src/rdma/src/uda_d.h new file mode 100644 index 0000000000000000000000000000000000000000..9458a148fc6e60f3c52c304fd4138d853927f2e6 --- /dev/null +++ b/src/rdma/src/uda_d.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright 
(c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_UDA_D_H +#define ZXDH_UDA_D_H +/* L4 packet type */ +#define ZXDH_E_UDA_SQ_L4T_UNKNOWN 0 +#define ZXDH_E_UDA_SQ_L4T_TCP 1 +#define ZXDH_E_UDA_SQ_L4T_SCTP 2 +#define ZXDH_E_UDA_SQ_L4T_UDP 3 +/* Inner IP header type */ +#define ZXDH_E_UDA_SQ_IIPT_UNKNOWN 0 +#define ZXDH_E_UDA_SQ_IIPT_IPV6 1 +#define ZXDH_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2 +#define ZXDH_E_UDA_SQ_IIPT_IPV4_CSUM 3 +#define ZXDH_UDA_QPSQ_PUSHWQE_S 56 +#define ZXDH_UDA_QPSQ_PUSHWQE BIT_ULL(56) +#define ZXDH_UDA_QPSQ_INLINEDATAFLAG_S 57 +#define ZXDH_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57) +#define ZXDH_UDA_QPSQ_INLINEDATALEN_S 48 +#define ZXDH_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48) +#define ZXDH_UDA_QPSQ_ADDFRAGCNT_S 38 +#define ZXDH_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38) +#define ZXDH_UDA_QPSQ_IPFRAGFLAGS_S 42 +#define ZXDH_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42) +#define ZXDH_UDA_QPSQ_NOCHECKSUM_S 45 +#define ZXDH_UDA_QPSQ_NOCHECKSUM BIT_ULL(45) +#define ZXDH_UDA_QPSQ_AHIDXVALID_S 46 +#define ZXDH_UDA_QPSQ_AHIDXVALID BIT_ULL(46) +#define ZXDH_UDA_QPSQ_LOCAL_FENCE_S 61 +#define ZXDH_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61) +#define ZXDH_UDA_QPSQ_AHIDX_S 0 +#define ZXDH_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0) +#define ZXDH_UDA_QPSQ_PROTOCOL_S 16 +#define ZXDH_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16) +#define ZXDH_UDA_QPSQ_EXTHDRLEN_S 32 +#define ZXDH_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32) +#define ZXDH_UDA_QPSQ_MULTICAST_S 63 +#define ZXDH_UDA_QPSQ_MULTICAST BIT_ULL(63) +#define ZXDH_UDA_QPSQ_MACLEN_S 56 +#define ZXDH_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56) +#define ZXDH_UDA_QPSQ_MACLEN_LINE 2 +#define ZXDH_UDA_QPSQ_IPLEN_S 48 +#define ZXDH_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48) +#define ZXDH_UDA_QPSQ_IPLEN_LINE 2 +#define ZXDH_UDA_QPSQ_L4T_S 30 +#define ZXDH_UDA_QPSQ_L4T GENMASK_ULL(31, 30) +#define ZXDH_UDA_QPSQ_L4T_LINE 2 +#define ZXDH_UDA_QPSQ_IIPT_S 28 +#define ZXDH_UDA_QPSQ_IIPT GENMASK_ULL(29, 28) +#define ZXDH_UDA_QPSQ_IIPT_LINE 2 +#define ZXDH_UDA_QPSQ_DO_LPB_LINE 3 +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM_S 45 +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45) +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3 +#define ZXDH_UDA_QPSQ_IMMDATA_S 0 +#define ZXDH_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0) +/* Byte Offset 0 */ +#define ZXDH_UDAQPC_IPV4_S 3 +#define ZXDH_UDAQPC_IPV4 BIT_ULL(3) +#define ZXDH_UDAQPC_INSERTVLANTAG_S 5 +#define ZXDH_UDAQPC_INSERTVLANTAG BIT_ULL(5) +#define ZXDH_UDAQPC_ISQP1_S 6 +#define ZXDH_UDAQPC_ISQP1 BIT_ULL(6) +#define ZXDH_UDAQPC_RQWQESIZE_S IRDMAQPC_RQWQESIZE_S +#define ZXDH_UDAQPC_RQWQESIZE IRDMAQPC_RQWQESIZE +#define ZXDH_UDAQPC_ECNENABLE_S 14 +#define ZXDH_UDAQPC_ECNENABLE BIT_ULL(14) +#define ZXDH_UDAQPC_PDINDEXHI_S 20 +#define ZXDH_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20) +#define ZXDH_UDAQPC_DCTCPENABLE_S 25 +#define ZXDH_UDAQPC_DCTCPENABLE BIT_ULL(25) +#define ZXDH_UDAQPC_RCVTPHEN_S IRDMAQPC_RCVTPHEN_S +#define ZXDH_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN +#define ZXDH_UDAQPC_XMITTPHEN_S IRDMAQPC_XMITTPHEN_S +#define ZXDH_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN +#define ZXDH_UDAQPC_RQTPHEN_S IRDMAQPC_RQTPHEN_S +#define ZXDH_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN +#define ZXDH_UDAQPC_SQTPHEN_S IRDMAQPC_SQTPHEN_S +#define ZXDH_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN +#define ZXDH_UDAQPC_PPIDX_S IRDMAQPC_PPIDX_S +#define ZXDH_UDAQPC_PPIDX IRDMAQPC_PPIDX +#define ZXDH_UDAQPC_PMENA_S IRDMAQPC_PMENA_S +#define ZXDH_UDAQPC_PMENA IRDMAQPC_PMENA +#define ZXDH_UDAQPC_INSERTTAG2_S 11 +#define ZXDH_UDAQPC_INSERTTAG2 BIT_ULL(11) +#define ZXDH_UDAQPC_INSERTTAG3_S 14 +#define ZXDH_UDAQPC_INSERTTAG3 BIT_ULL(14) 
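+/*
+ * Illustrative note, not part of the original register map: each field in
+ * this header is described by a *_S shift macro plus a matching
+ * BIT_ULL()/GENMASK_ULL() mask, and the driver composes WQE quad-words from
+ * these masks with FIELD_PREP() from <linux/bitfield.h>, as uda.c does above.
+ * For example, FIELD_PREP(ZXDH_UDA_QPSQ_ADDFRAGCNT, 3) places the value 3
+ * into bits 41:38 of a 64-bit word.
+ */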
+#define ZXDH_UDAQPC_RQSIZE_S IRDMAQPC_RQSIZE_S +#define ZXDH_UDAQPC_RQSIZE IRDMAQPC_RQSIZE +#define ZXDH_UDAQPC_SQSIZE_S IRDMAQPC_SQSIZE_S +#define ZXDH_UDAQPC_SQSIZE IRDMAQPC_SQSIZE +#define ZXDH_UDAQPC_TXCQNUM_S IRDMAQPC_TXCQNUM_S +#define ZXDH_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM +#define ZXDH_UDAQPC_RXCQNUM_S IRDMAQPC_RXCQNUM_S +#define ZXDH_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM +#define ZXDH_UDAQPC_QPCOMPCTX_S IRDMAQPC_QPCOMPCTX_S +#define ZXDH_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX +#define ZXDH_UDAQPC_SQTPHVAL_S IRDMAQPC_SQTPHVAL_S +#define ZXDH_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL +#define ZXDH_UDAQPC_RQTPHVAL_S IRDMAQPC_RQTPHVAL_S +#define ZXDH_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL +#define ZXDH_UDAQPC_QSHANDLE_S IRDMAQPC_QSHANDLE_S +#define ZXDH_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE +#define ZXDH_UDAQPC_RQHDRRINGBUFSIZE_S 48 +#define ZXDH_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48) +#define ZXDH_UDAQPC_SQHDRRINGBUFSIZE_S 32 +#define ZXDH_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32) +#define ZXDH_UDAQPC_PRIVILEGEENABLE_S 25 +#define ZXDH_UDAQPC_PRIVILEGEENABLE BIT_ULL(25) +#define ZXDH_UDAQPC_USE_STATISTICS_INSTANCE_S 26 +#define ZXDH_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26) +#define ZXDH_UDAQPC_STATISTICS_INSTANCE_INDEX_S 0 +#define ZXDH_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0) +#define ZXDH_UDAQPC_PRIVHDRGENENABLE_S 0 +#define ZXDH_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0) +#define ZXDH_UDAQPC_RQHDRSPLITENABLE_S 3 +#define ZXDH_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3) +#define ZXDH_UDAQPC_RQHDRRINGBUFENABLE_S 2 +#define ZXDH_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2) +#define ZXDH_UDAQPC_SQHDRRINGBUFENABLE_S 1 +#define ZXDH_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1) +#define ZXDH_UDAQPC_IPID_S 32 +#define ZXDH_UDAQPC_IPID GENMASK_ULL(47, 32) +#define ZXDH_UDAQPC_SNDMSS_S 16 +#define ZXDH_UDAQPC_SNDMSS GENMASK_ULL(29, 16) +#define ZXDH_UDAQPC_VLANTAG_S 0 +#define ZXDH_UDAQPC_VLANTAG GENMASK_ULL(15, 0) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXHI_S 20 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXLO_S 48 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEX_S 0 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEX GENMASK_ULL(19, 0) +#define ZXDH_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S 24 +#define ZXDH_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24) +#define ZXDH_UDA_CQPSQ_MAV_ARPINDEX_S 48 +#define ZXDH_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48) +#define ZXDH_UDA_CQPSQ_MAV_TC_S 47 +#define ZXDH_UDA_CQPSQ_MAV_TC GENMASK_ULL(54, 47) +#define ZXDH_UDA_CQPSQ_MAV_HOPLIMIT_S 32 +#define ZXDH_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32) +#define ZXDH_UDA_CQPSQ_MAV_FLOWLABEL_S 0 +#define ZXDH_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR3_S 0 +#define ZXDH_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR2_S 32 +#define ZXDH_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_MAV_ADDR1_S 0 +#define ZXDH_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR0_S 32 +#define ZXDH_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_MAV_WQEVALID_S 57 +#define ZXDH_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(57) +#define ZXDH_UDA_CQPSQ_MAV_OPCODE_S 58 +#define ZXDH_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62 +#define ZXDH_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62) +#define ZXDH_UDA_CQPSQ_MAV_IPV4VALID_S 56 +#define ZXDH_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(56) + +#define ZXDH_UDA_CQPSQ_MAV_AVIDX_S 24 +#define ZXDH_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(42, 24) +#define 
ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG_S 55 +#define ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(55) +#define ZXDH_UDA_MGCTX_VFFLAG_S 29 +#define ZXDH_UDA_MGCTX_VFFLAG BIT_ULL(29) +#define ZXDH_UDA_MGCTX_DESTPORT_S 32 +#define ZXDH_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32) +#define ZXDH_UDA_MGCTX_VFID_S 22 +#define ZXDH_UDA_MGCTX_VFID GENMASK_ULL(28, 22) +#define ZXDH_UDA_MGCTX_VALIDENT_S 31 +#define ZXDH_UDA_MGCTX_VALIDENT BIT_ULL(31) +#define ZXDH_UDA_MGCTX_PFID_S 18 +#define ZXDH_UDA_MGCTX_PFID GENMASK_ULL(21, 18) +#define ZXDH_UDA_MGCTX_FLAGIGNOREDPORT_S 30 +#define ZXDH_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30) +#define ZXDH_UDA_MGCTX_QPID_S 0 +#define ZXDH_UDA_MGCTX_QPID GENMASK_ULL(23, 0) + +#define ZXDH_UDA_CQPSQ_MG_WQEVALID_S 57 +#define ZXDH_UDA_CQPSQ_MG_WQEVALID BIT_ULL(57) +#define ZXDH_UDA_CQPSQ_MG_OPCODE_S 58 +#define ZXDH_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_UDA_CQPSQ_MG_MGIDX_S 0 +#define ZXDH_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(20, 0) +#define ZXDH_UDA_CQPSQ_MG_IPV4VALID_S 56 +#define ZXDH_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(56) +#define ZXDH_UDA_CQPSQ_MG_VLANVALID_S 55 +#define ZXDH_UDA_CQPSQ_MG_VLANVALID BIT_ULL(55) + +#define ZXDH_UDA_CQPSQ_MG_DMAC_S 0 +#define ZXDH_UDA_CQPSQ_MG_DMAC GENMASK_ULL(47, 0) +#define ZXDH_UDA_CQPSQ_MG_VLANID_S 48 +#define ZXDH_UDA_CQPSQ_MG_VLANID GENMASK_ULL(63, 48) + +#define ZXDH_UDA_CQPSQ_MG_HMC_FCN_ID_S 0 +#define ZXDH_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0) + +#define ZXDH_UDA_CQPSQ_QS_HANDLE_S 0 +#define ZXDH_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_UDA_CQPSQ_QHASH_QPN_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32) +#define ZXDH_UDA_CQPSQ_QHASH__S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ BIT_ULL(0) +#define ZXDH_UDA_CQPSQ_QHASH_SRC_PORT_S 16 +#define ZXDH_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16) +#define ZXDH_UDA_CQPSQ_QHASH_DEST_PORT_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR0_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR1_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR2_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR3_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_QHASH_WQEVALID_S 63 +#define ZXDH_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63) +#define ZXDH_UDA_CQPSQ_QHASH_OPCODE_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_UDA_CQPSQ_QHASH_MANAGE_S 61 +#define ZXDH_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61) +#define ZXDH_UDA_CQPSQ_QHASH_IPV4VALID_S 60 +#define ZXDH_UDA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60) +#define ZXDH_UDA_CQPSQ_QHASH_LANFWD_S 59 +#define ZXDH_UDA_CQPSQ_QHASH_LANFWD BIT_ULL(59) +#define ZXDH_UDA_CQPSQ_QHASH_ENTRYTYPE_S 42 +#define ZXDH_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42) +#endif /* ZXDH_UDA_D_H */ diff --git a/src/rdma/src/uk.c b/src/rdma/src/uk.c new file mode 100644 index 0000000000000000000000000000000000000000..dfc3cc389b0f7bb832d6fa9a63f12ff7ca14ffa7 --- /dev/null +++ b/src/rdma/src/uk.c @@ -0,0 +1,1914 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "defs.h" +#include "user.h" +#include "zrdma.h" +#include "type.h" +#include "srq.h" + +/** + * zxdh_set_fragment - set fragment in wqe + * @wqe: wqe for setting fragment + * @offset: offset value + * @sge: sge length and stag + * @valid: The wqe valid + */ +static void 
zxdh_set_fragment(__le64 *wqe, u32 offset, struct zxdh_sge *sge, + u8 valid) +{ + if (sge) { + set_64bit_val(wqe, offset + 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off)); + set_64bit_val(wqe, offset, + FIELD_PREP(IRDMAQPSQ_VALID, valid) | + FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) | + FIELD_PREP(IRDMAQPSQ_FRAG_STAG, + sge->stag)); + } else { + set_64bit_val(wqe, offset + 8, 0); + set_64bit_val(wqe, offset, FIELD_PREP(IRDMAQPSQ_VALID, valid)); + } +} + +/** + * zxdh_nop_1 - insert a NOP wqe + * @qp: hw qp ptr + */ +static int zxdh_nop_1(struct zxdh_qp_uk *qp) +{ + u64 hdr; + __le64 *wqe; + u32 wqe_idx; + bool signaled = false; + + if (!qp->sq_ring.head) + return -EINVAL; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + wqe = qp->sq_base[wqe_idx].elem; + + qp->sq_wrtrk_array[wqe_idx].quanta = ZXDH_QP_WQE_MIN_QUANTA; + + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, 0); + + hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_NOP) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + /* make sure WQE is written before valid bit is set */ + dma_wmb(); + + set_64bit_val(wqe, 0, hdr); + + return 0; +} + +/** + * zxdh_clr_wqes - clear next 128 sq entries + * @qp: hw qp ptr + * @qp_wqe_idx: wqe_idx + */ +void zxdh_clr_wqes(struct zxdh_qp_uk *qp, u32 qp_wqe_idx) +{ + __le64 *wqe; + u32 wqe_idx; + + if (!(qp_wqe_idx & 0x7F)) { + wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; + wqe = qp->sq_base[wqe_idx].elem; + if (wqe_idx) + memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000); + else + memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000); + } +} + +/** + * zxdh_uk_qp_post_wr - ring doorbell + * @qp: hw qp ptr + */ +void zxdh_uk_qp_post_wr(struct zxdh_qp_uk *qp) +{ + dma_wmb(); + + writel(qp->qp_id, qp->wqe_alloc_db); + qp->initial_ring.head = qp->sq_ring.head; +} + +/** + * zxdh_uk_qp_set_shadow_area - fill SW_RQ_Head + * @qp: hw qp ptr + */ +void zxdh_uk_qp_set_shadow_area(struct zxdh_qp_uk *qp) +{ + set_64bit_val(qp->shadow_area, 0, + FIELD_PREP(IRDMAQPDBSA_RQ_POLARITY, qp->rwqe_polarity) | + FIELD_PREP(IRDMAQPDBSA_RQ_SW_HEAD, + ZXDH_RING_CURRENT_HEAD(qp->rq_ring))); +} + +#ifdef Z_CONFIG_RDMA_PUSH_MODE +/** + * zxdh_qp_ring_push_db - ring qp doorbell + * @qp: hw qp ptr + * @wqe_idx: wqe index + */ +static void zxdh_qp_ring_push_db(struct zxdh_qp_uk *qp, u32 wqe_idx) +{ + set_32bit_val(qp->push_db, 0, + FIELD_PREP(ZXDH_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | + qp->qp_id); + qp->initial_ring.head = qp->sq_ring.head; + qp->push_mode = true; + qp->push_dropped = false; +} + +void zxdh_qp_push_wqe(struct zxdh_qp_uk *qp, __le64 *wqe, u16 quanta, + u32 wqe_idx, bool post_sq) +{ + __le64 *push; + + if (ZXDH_RING_CURRENT_HEAD(qp->initial_ring) != + ZXDH_RING_CURRENT_TAIL(qp->sq_ring) && + !qp->push_mode) { + if (post_sq) + zxdh_uk_qp_post_wr(qp); + } else { + push = (__le64 *)((uintptr_t)qp->push_wqe + + (wqe_idx & 0x7) * 0x20); + memcpy(push, wqe, quanta * ZXDH_QP_WQE_MIN_SIZE); + zxdh_qp_ring_push_db(qp, wqe_idx); + } +} +#endif +/** + * zxdh_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go + * @qp: hw qp ptr + * @wqe_idx: return wqe index + * @quanta: size of WR in quanta + * @total_size: size of WR in bytes + * @info: info on WR + */ +__le64 *zxdh_qp_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx, + u16 quanta, u32 total_size, + struct zxdh_post_sq_info *info) +{ + __le64 *wqe; + u16 avail_quanta; + u16 i; + + avail_quanta = ZXDH_MAX_SQ_WQES_PER_PAGE - + 
(ZXDH_RING_CURRENT_HEAD(qp->sq_ring) % + ZXDH_MAX_SQ_WQES_PER_PAGE); + + if (quanta <= avail_quanta) { + /* WR fits in current chunk */ + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + } else { + /* Need to pad with NOP */ + if (quanta + avail_quanta > + ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + + for (i = 0; i < avail_quanta; i++) { + zxdh_nop_1(qp); + ZXDH_RING_MOVE_HEAD_NOCHECK(qp->sq_ring); + } + } + + *wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe = qp->sq_base[*wqe_idx].elem; + + qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[*wqe_idx].quanta = quanta; + + return wqe; +} + +/** + * zxdh_qp_get_next_recv_wqe - get next qp's rcv wqe + * @qp: hw qp ptr + * @wqe_idx: return wqe index + */ +__le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx) +{ + __le64 *wqe; + int ret_code; + + if (ZXDH_RING_FULL_ERR(qp->rq_ring)) + return NULL; + + ZXDH_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + if (!*wqe_idx) + qp->rwqe_polarity = !qp->rwqe_polarity; + /* rq_wqe_size_multiplier is no of 16 byte quanta in one rq wqe */ + wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem; + + return wqe; +} + +/** + * zxdh_uk_rdma_write - rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rdma_write(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + u64 hdr; + __le64 *wqe; + struct zxdh_rdma_write *op_info; + u32 i, wqe_idx; + u32 total_size = 0, byte_off; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.rdma_write; + if (op_info->num_lo_sges > qp->max_sq_frag_cnt) + return -EINVAL; + + for (i = 0; i < op_info->num_lo_sges; i++) { + total_size += op_info->lo_sg_list[i].len; + if (0 != i && 0 == op_info->lo_sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + read_fence |= info->read_fence; + + if (imm_data_flag) + frag_cnt = op_info->num_lo_sges ? (op_info->num_lo_sges + 1) : + 2; + else + frag_cnt = op_info->num_lo_sges; + addl_frag_cnt = op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : + 0; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + if (op_info->num_lo_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->lo_sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->lo_sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->lo_sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->lo_sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + set_64bit_val( + wqe, ZXDH_SQ_WQE_BYTESIZE, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE + ZXDH_QP_FRAG_BYTESIZE; + i < op_info->num_lo_sges; i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } else { + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_lo_sges; + i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && frag_cnt) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rdma_read - rdma read command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rdma_read(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + struct zxdh_rdma_read *op_info; + int ret_code; + u32 i, byte_off, total_size = 0; + bool local_fence = false; + bool ord_fence = false; + u32 addl_frag_cnt; + __le64 *wqe; + u32 wqe_idx; + u16 quanta; + u64 hdr; + + op_info = &info->op.rdma_read; + if (qp->max_sq_frag_cnt < op_info->num_lo_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_lo_sges; i++) { + total_size += op_info->lo_sg_list[i].len; + if (0 != i && 0 == op_info->lo_sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + ret_code = zxdh_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) { + ord_fence = true; + qp->ord_cnt = 0; + } + + zxdh_clr_wqes(qp, wqe_idx); + + addl_frag_cnt = op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : + 0; + local_fence |= info->local_fence; + + if (op_info->num_lo_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->lo_sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->lo_sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->lo_sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->lo_sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_lo_sges; i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + + /* if not an odd number set valid bit in next fragment */ + if (!(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_READ) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, + info->read_fence || ord_fence ? 1 : 0) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rc_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rc_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_post_send *op_info; + u64 hdr; + u32 i, wqe_idx, total_size = 0, byte_off; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) { + total_size += op_info->sg_list[i].len; + if (0 != i && 0 == op_info->sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + if (imm_data_flag) + frag_cnt = op_info->num_sges ? (op_info->num_sges + 1) : 2; + else + frag_cnt = op_info->num_sges; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0; + if (op_info->num_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + set_64bit_val( + wqe, ZXDH_SQ_WQE_BYTESIZE, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE + ZXDH_QP_FRAG_BYTESIZE; + i < op_info->num_sges; i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } else { + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_sges; + i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && frag_cnt) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 0) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, 0)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_ud_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_ud_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe_base; + __le64 *wqe_ex = NULL; + struct zxdh_post_send *op_info; + u64 hdr; + u32 i, wqe_idx, total_size = 0, byte_off; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) { + total_size += op_info->sg_list[i].len; + if (0 != i && 0 == op_info->sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + if (imm_data_flag) + frag_cnt = op_info->num_sges ? 
(op_info->num_sges + 1) : 2; + else + frag_cnt = op_info->num_sges; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return -ENOSPC; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe_base = qp->sq_base[wqe_idx].elem; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[wqe_idx].quanta = quanta; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATAFLAG, 0) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATALEN, 0) | + FIELD_PREP(IRDMAQPSQ_UD_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id); + + if (op_info->num_sges) { + set_64bit_val( + wqe_base, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? + 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->sg_list->stag)); + set_64bit_val(wqe_base, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe_base, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe_base, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + set_64bit_val( + wqe_ex, 0, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + i = 1; + for (byte_off = ZXDH_QP_FRAG_BYTESIZE; i < op_info->num_sges; + i++) { + if (!(i & 0x1)) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + } + qp->wqe_ops.iw_set_fragment( + wqe_ex, byte_off % ZXDH_SQ_WQE_BYTESIZE, + &op_info->sg_list[i], qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } else { + i = 1; + for (byte_off = 0; i < op_info->num_sges; i++) { + if (i & 0x1) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + } + qp->wqe_ops.iw_set_fragment( + wqe_ex, byte_off % ZXDH_SQ_WQE_BYTESIZE, + &op_info->sg_list[i], qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && frag_cnt && wqe_ex) { + qp->wqe_ops.iw_set_fragment(wqe_ex, ZXDH_QP_FRAG_BYTESIZE, NULL, + qp->swqe_polarity); + } + + set_64bit_val(wqe_base, 24, + FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp) | + FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe_base, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_set_mw_bind_wqe - set mw 
bind in wqe + * @wqe: wqe for setting mw bind + * @op_info: info for setting wqe values + */ +static void zxdh_set_mw_bind_wqe(__le64 *wqe, struct zxdh_bind_window *op_info) +{ + set_64bit_val(wqe, 8, (uintptr_t)op_info->va); + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag)); + set_64bit_val(wqe, 24, op_info->bind_len); +} + +/** + * zxdh_copy_inline_data - Copy inline data to wqe + * @dest: pointer to wqe + * @src: pointer to inline data + * @len: length of inline data to copy + * @polarity: polarity of wqe valid bit + * @imm_data_flag: flag to imm_data + */ +static void zxdh_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity, + bool imm_data_flag) +{ + u8 inline_valid = polarity << ZXDH_INLINE_VALID_S; + u32 copy_size; + u8 *inline_valid_addr; + + dest += ZXDH_WQE_SIZE_32; /* point to additional 32 byte quanta */ + + if (len) { + inline_valid_addr = dest + WQE_OFFSET_7BYTES; + if (imm_data_flag) { + copy_size = len < INLINE_DATASIZE_24BYTES ? + len : + INLINE_DATASIZE_24BYTES; + dest += WQE_OFFSET_8BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } else { + if (len <= INLINE_DATASIZE_7BYTES) { + copy_size = len; + memcpy(dest, src, copy_size); + *inline_valid_addr = inline_valid; + return; + } + memcpy(dest, src, INLINE_DATASIZE_7BYTES); + len -= INLINE_DATASIZE_7BYTES; + dest += WQE_OFFSET_8BYTES; + src += INLINE_DATA_OFFSET_7BYTES; + copy_size = len < INLINE_DATASIZE_24BYTES ? + len : + INLINE_DATASIZE_24BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } + *inline_valid_addr = inline_valid; + } + + while (len) { + inline_valid_addr = dest + WQE_OFFSET_7BYTES; + if (len <= INLINE_DATASIZE_7BYTES) { + copy_size = len; + memcpy(dest, src, copy_size); + *inline_valid_addr = inline_valid; + return; + } else { + memcpy(dest, src, INLINE_DATASIZE_7BYTES); + len -= INLINE_DATASIZE_7BYTES; + dest += WQE_OFFSET_8BYTES; + src += INLINE_DATA_OFFSET_7BYTES; + copy_size = len < INLINE_DATASIZE_24BYTES ? + len : + INLINE_DATASIZE_24BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } + *inline_valid_addr = inline_valid; + } +} + +/** + * zxdh_inline_data_size_to_quanta - based on inline data, quanta + * @data_size: data size for inline + * @imm_data_flag: flag to imm_data + * @imm_data_flag: flag for immediate data + * + * Gets the quanta based on inline and immediate data. + */ +static u16 zxdh_inline_data_size_to_quanta(u32 data_size, bool imm_data_flag) +{ + if (imm_data_flag) + data_size += INLINE_DATASIZE_7BYTES; + + return data_size % 31 ? data_size / 31 + 2 : data_size / 31 + 1; +} + +/** + * zxdh_uk_inline_rdma_write - inline rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_inline_rdma_write(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + u8 imm_valid; + struct zxdh_inline_rdma_write *op_info; + u64 hdr = 0; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 
1 : 0; + + op_info = &info->op.inline_rdma_write; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_WRITE_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_WRITE_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + if (imm_data_flag) { + /* if inline exist, not update imm valid */ + imm_valid = (op_info->len == 0) ? qp->swqe_polarity : + (!qp->swqe_polarity); + set_64bit_val(wqe, 32, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, imm_valid) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, + info->imm_data)); + } + + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, + qp->swqe_polarity, imm_data_flag); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rc_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rc_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + u8 imm_valid; + struct zxdh_post_inline_send *op_info; + u64 hdr; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.inline_send; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, + op_info->len)); + + if (imm_data_flag) { + /* if inline exist, not update imm valid */ + imm_valid = (op_info->len == 0) ? 
qp->swqe_polarity : + (!qp->swqe_polarity); + set_64bit_val(wqe, 32, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, imm_valid) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, + info->imm_data)); + } + + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, + qp->swqe_polarity, imm_data_flag); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_ud_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_ud_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe_base; + __le64 *wqe_ex; + struct zxdh_post_inline_send *op_info; + u64 hdr; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + u8 *inline_dest; + u8 *inline_src; + u32 inline_len; + u32 copy_size; + u8 *inline_valid_addr; + + op_info = &info->op.inline_send; + inline_len = op_info->len; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return -ENOSPC; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe_base = qp->sq_base[wqe_idx].elem; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len; + qp->sq_wrtrk_array[wqe_idx].quanta = quanta; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_UD_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id); + set_64bit_val(wqe_base, 24, + FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp) | + FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey)); + + if (imm_data_flag) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + + if (inline_len) { + /* imm and inline use the same valid, valid set after inline data updated*/ + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? 
+ inline_len : + INLINE_DATASIZE_24BYTES; + inline_dest = (u8 *)wqe_ex + WQE_OFFSET_8BYTES; + inline_src = (u8 *)op_info->data; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + set_64bit_val( + wqe_ex, 0, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + + } else if (inline_len) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + inline_dest = (u8 *)wqe_ex; + inline_src = (u8 *)op_info->data; + + if (inline_len <= INLINE_DATASIZE_7BYTES) { + copy_size = inline_len; + memcpy(inline_dest, inline_src, copy_size); + inline_len = 0; + } else { + copy_size = INLINE_DATASIZE_7BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + inline_dest += WQE_OFFSET_8BYTES; + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? + inline_len : + INLINE_DATASIZE_24BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + inline_valid_addr = (u8 *)wqe_ex + WQE_OFFSET_7BYTES; + *inline_valid_addr = qp->swqe_polarity << ZXDH_INLINE_VALID_S; + } + + while (inline_len) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + inline_dest = (u8 *)wqe_ex; + + if (inline_len <= INLINE_DATASIZE_7BYTES) { + copy_size = inline_len; + memcpy(inline_dest, inline_src, copy_size); + inline_len = 0; + } else { + copy_size = INLINE_DATASIZE_7BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + inline_dest += WQE_OFFSET_8BYTES; + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? 
+ inline_len : + INLINE_DATASIZE_24BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + inline_valid_addr = (u8 *)wqe_ex + WQE_OFFSET_7BYTES; + *inline_valid_addr = qp->swqe_polarity << ZXDH_INLINE_VALID_S; + } + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe_base, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_stag_local_invalidate - stag invalidate operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_stag_local_invalidate(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + struct zxdh_inv_local_stag *op_info; + u64 hdr; + u32 wqe_idx; + bool local_fence = true; + + op_info = &info->op.inv_local_stag; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_LOCAL_INV) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->target_stag); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_mw_bind - bind Memory Window + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_mw_bind(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_bind_window *op_info; + u64 hdr; + u32 wqe_idx; + bool local_fence; + + info->push_wqe = qp->push_db ? true : false; + op_info = &info->op.bind_window; + local_fence = info->local_fence; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info); + + hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_BIND_MW) | + FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag) | + FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, ((op_info->ena_reads << 2) | + (op_info->ena_writes << 3))) | + FIELD_PREP(IRDMAQPSQ_VABASEDTO, + (op_info->addressing_type == ZXDH_ADDR_TYPE_VA_BASED ? + 1 : + 0)) | + FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE, + (op_info->mem_window_type_1 ? 
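+/*
+ * Note on the bind header assembled here (as read from the field
+ * preparation above): IRDMAQPSQ_STAGRIGHTS carries the remote-read enable
+ * in bit 2 and the remote-write enable in bit 3 (ena_reads << 2 |
+ * ena_writes << 3), IRDMAQPSQ_VABASEDTO selects VA-based versus zero-based
+ * addressing, and IRDMAQPSQ_MEMWINDOWTYPE flags a type-1 memory window.
+ */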
1 : 0)) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_post_receive - post receive wqe + * @qp: hw qp ptr + * @info: post rq information + */ +int zxdh_uk_post_receive(struct zxdh_qp_uk *qp, struct zxdh_post_rq_info *info) +{ + u32 wqe_idx, i, byte_off; + __le64 *wqe; + struct zxdh_sge *sge; + + if (qp->max_rq_frag_cnt < info->num_sges) + return -EINVAL; + + wqe = zxdh_qp_get_next_recv_wqe(qp, &wqe_idx); + if (!wqe) + return -ENOSPC; + + qp->rq_wrid_array[wqe_idx] = info->wr_id; + + for (i = 0, byte_off = ZXDH_QP_FRAG_BYTESIZE; i < info->num_sges; i++) { + sge = &info->sg_list[i]; + set_64bit_val(wqe, byte_off, sge->tag_off); + set_64bit_val(wqe, byte_off + 8, + FIELD_PREP(IRDMAQPRQ_FRAG_LEN, sge->len) | + FIELD_PREP(IRDMAQPRQ_STAG, sge->stag)); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + + /* + * while info->num_sges < qp->max_rq_frag_cnt, or 0 == info->num_sges, + * fill next fragment with FRAG_LEN=0, FRAG_STAG=0x00000100, + * witch indicates a invalid fragment + */ + if (info->num_sges < qp->max_rq_frag_cnt || 0 == info->num_sges) { + set_64bit_val(wqe, byte_off, 0); + set_64bit_val(wqe, byte_off + 8, + FIELD_PREP(IRDMAQPRQ_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPRQ_STAG, 0x00000100)); + } + + set_64bit_val(wqe, 0, + FIELD_PREP(IRDMAQPRQ_ADDFRAGCNT, info->num_sges) | + FIELD_PREP(IRDMAQPRQ_SIGNATURE, + qp->rwqe_signature)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPRQ_VALID, qp->rwqe_polarity)); + + return 0; +} + +/** + * zxdh_uk_cq_resize - reset the cq buffer info + * @cq: cq to resize + * @cq_base: new cq buffer addr + * @cq_size: number of cqes + */ +void zxdh_uk_cq_resize(struct zxdh_cq_uk *cq, void *cq_base, int cq_size) +{ + cq->cq_base = cq_base; + cq->cq_size = cq_size; + cq->cq_log_size = zxdh_num_to_log(cq_size); + ZXDH_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; +} + +/** + * zxdh_uk_cq_set_resized_cnt - record the count of the resized buffers + * @cq: cq to resize + * @cq_cnt: the count of the resized cq buffers + */ +void zxdh_uk_cq_set_resized_cnt(struct zxdh_cq_uk *cq, u16 cq_cnt) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next; + u8 arm_seq_num; + + get_64bit_val(cq->shadow_area, 0, &temp_val); + + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + sw_cq_sel += cq_cnt; + + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, temp_val); + arm_next = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_NEXT, temp_val); + cq->cqe_rd_cnt = 0; + + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, arm_next) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cq->cqe_rd_cnt); + + set_64bit_val(cq->shadow_area, 0, temp_val); +} + +/** + * zxdh_uk_cq_request_notification - cq notification request (door bell) + * @cq: hw cq + * @cq_notify: notification type + */ +void zxdh_uk_cq_request_notification(struct zxdh_cq_uk *cq, + enum zxdh_cmpl_notify cq_notify) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next = 0; + u8 arm_seq_num; + u32 cqe_index; + u32 hdr; + + cq->armed = true; + get_64bit_val(cq->shadow_area, 0, &temp_val); + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, 
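+/*
+ * Sketch of the re-arm sequence, based on the shadow-area fields used in
+ * this file: the 64-bit shadow word packs ARM_SEQ_NUM, SW_CQ_SELECT,
+ * ARM_NEXT and CQEIDX.  Re-arming increments ARM_SEQ_NUM, sets ARM_NEXT
+ * only for solicited-only notification, rewrites the shadow word, and then
+ * rings cqe_alloc_db with the CQ id so hardware re-reads the shadow area.
+ */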
temp_val); + arm_seq_num++; + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + cqe_index = (u32)FIELD_GET(ZXDH_CQ_DBSA_CQEIDX, temp_val); + + if (cq_notify == ZXDH_CQ_COMPL_SOLICITED) + arm_next = 1; + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, arm_next) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cqe_index); + + set_64bit_val(cq->shadow_area, 0, temp_val); + + hdr = FIELD_PREP(ZXDH_CQ_ARM_DBSA_VLD, 0) | + FIELD_PREP(ZXDH_CQ_ARM_CQ_ID, cq->cq_id); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + writel(hdr, cq->cqe_alloc_db); +} + +/** + * zxdh_uk_cq_poll_cmpl - get cq completion info + * @cq: hw cq + * @info: cq poll information returned + */ +int zxdh_uk_cq_poll_cmpl(struct zxdh_cq_uk *cq, struct zxdh_cq_poll_info *info) +{ + u64 comp_ctx, qword0, qword2, qword3; + __le64 *cqe; + struct zxdh_qp_uk *qp; + struct zxdh_sc_qp *sc_qp; + struct zxdh_sc_srq *sc_srq; + struct zxdh_srq_uk *srq_uk = NULL; + struct zxdh_ring *pring = NULL; + u32 wqe_idx, q_type; + int ret_code; + bool move_cq_head = true; + u8 polarity; + u8 qp_type; + u8 pring_handle = true; + + cqe = ZXDH_GET_CURRENT_EXTENDED_CQ_ELEM(cq); + + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + if (polarity != cq->polarity) + return -ENOENT; + + /* Ensure CQE contents are read after valid bit is checked */ + dma_rmb(); + get_64bit_val(cqe, 8, &comp_ctx); + get_64bit_val(cqe, 16, &qword2); + get_64bit_val(cqe, 24, &qword3); + + qp = (struct zxdh_qp_uk *)(unsigned long)comp_ctx; + if (!qp || qp->destroy_pending) { + ret_code = -EFAULT; + goto exit; + } + info->qp_handle = (zxdh_qp_handle)(unsigned long)qp; + qp_type = qp->qp_type; + q_type = (u8)FIELD_GET(ZXDH_CQ_SQ, qword0); + info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword0); + wqe_idx = (u32)FIELD_GET(ZXDH_CQ_WQEIDX, qword0); + info->error = (bool)FIELD_GET(ZXDH_CQ_ERROR, qword0); + + if (info->error) { + info->major_err = FIELD_GET(ZXDH_CQ_MAJERR, qword0); + info->minor_err = FIELD_GET(ZXDH_CQ_MINERR, qword0); + if (info->major_err == ZXDH_FLUSH_MAJOR_ERR) { + info->comp_status = ZXDH_COMPL_STATUS_FLUSHED; + /* Set the min error to standard flush error code for remaining cqes */ + if (info->minor_err != FLUSH_GENERAL_ERR) { + qword0 &= ~ZXDH_CQ_MINERR; + qword0 |= FIELD_PREP(ZXDH_CQ_MINERR, + FLUSH_GENERAL_ERR); + set_64bit_val(cqe, 0, qword0); + } + } else { + info->comp_status = ZXDH_COMPL_STATUS_UNKNOWN; + } + } else { + info->comp_status = ZXDH_COMPL_STATUS_SUCCESS; + } + + info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); + info->imm_valid = false; + info->ud_smac_valid = false; + info->ud_vlan_valid = false; + + info->qp_handle = (zxdh_qp_handle)(unsigned long)qp; + + if (q_type == ZXDH_CQE_QTYPE_RQ) { + u64 qword4; + + if (qp->is_srq == true) { + sc_qp = container_of(qp, struct zxdh_sc_qp, qp_uk); + sc_srq = sc_qp->srq; + srq_uk = &sc_srq->srq_uk; + pring_handle = false; + zxdh_free_srq_wqe(srq_uk, wqe_idx); + } + + if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED || + info->comp_status == ZXDH_COMPL_STATUS_UNKNOWN) { + if (qp->is_srq == false) { + if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) { + ret_code = -ENOENT; + goto exit; + } + + info->wr_id = + qp->rq_wrid_array[qp->rq_ring.tail]; + wqe_idx = qp->rq_ring.tail; + } else { + info->wr_id = srq_uk->srq_wrid_array[wqe_idx]; + } + } else { + if (qp->is_srq == false) + info->wr_id = qp->rq_wrid_array[wqe_idx]; + else + info->wr_id = 
srq_uk->srq_wrid_array[wqe_idx]; + } + + info->imm_valid = (bool)FIELD_GET(ZXDH_CQ_IMMVALID, qword2); + if (info->imm_valid) { + info->imm_data = + (u32)FIELD_GET(ZXDH_CQ_IMMDATA, qword3); + } + + info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword3); + + if (info->imm_valid) + info->op_type = ZXDH_OP_TYPE_REC_IMM; + else + info->op_type = ZXDH_OP_TYPE_REC; + + if (qp_type == ZXDH_QP_TYPE_ROCE_RC) { + if (qword2 & IRDMACQ_STAG) { + info->stag_invalid_set = true; + info->inv_stag = + (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2); + } else { + info->stag_invalid_set = false; + } + } else if (qp_type == ZXDH_QP_TYPE_ROCE_UD) { + info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword2); + info->ud_src_qpn = + (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2); + + info->ud_smac_valid = + (bool)FIELD_GET(ZXDH_CQ_UDSMACVALID, qword2); + info->ud_vlan_valid = + (bool)FIELD_GET(ZXDH_CQ_UDVLANVALID, qword2); + if (info->ud_smac_valid || info->ud_vlan_valid) { + get_64bit_val(cqe, 32, &qword4); + if (info->ud_vlan_valid) + info->ud_vlan = (u16)FIELD_GET( + ZXDH_CQ_UDVLAN, qword4); + if (info->ud_smac_valid) { + info->ud_smac[5] = qword4 & 0xFF; + info->ud_smac[4] = (qword4 >> 8) & 0xFF; + info->ud_smac[3] = (qword4 >> 16) & + 0xFF; + info->ud_smac[2] = (qword4 >> 24) & + 0xFF; + info->ud_smac[1] = (qword4 >> 32) & + 0xFF; + info->ud_smac[0] = (qword4 >> 40) & + 0xFF; + } + } + } + if (qp->is_srq == false) { + ZXDH_RING_SET_TAIL(qp->rq_ring, wqe_idx + 1); + if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) { + qp->rq_flush_seen = true; + if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) + qp->rq_flush_complete = true; + else + move_cq_head = false; + } + pring = &qp->rq_ring; + } + } else { /* q_type is ZXDH_CQE_QTYPE_SQ */ + if (info->comp_status != ZXDH_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + if (!info->comp_status) + info->bytes_xfered = + qp->sq_wrtrk_array[wqe_idx].wr_len; + info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword0); + ZXDH_RING_SET_TAIL( + qp->sq_ring, + wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); + } else { + if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) { + ret_code = -ENOENT; + goto exit; + } + + do { + __le64 *sw_wqe; + u64 wqe_qword; + u8 op_type; + u32 tail; + + tail = qp->sq_ring.tail; + sw_wqe = qp->sq_base[tail].elem; + get_64bit_val(sw_wqe, 0, &wqe_qword); + op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, + wqe_qword); + info->op_type = op_type; + ZXDH_RING_SET_TAIL( + qp->sq_ring, + tail + qp->sq_wrtrk_array[tail].quanta); + if (op_type != ZXDH_OP_TYPE_NOP) { + info->wr_id = + qp->sq_wrtrk_array[tail].wrid; + info->bytes_xfered = + qp->sq_wrtrk_array[tail].wr_len; + break; + } + } while (1); + qp->sq_flush_seen = true; + if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) + qp->sq_flush_complete = true; + } + pring = &qp->sq_ring; + } + + ret_code = 0; + +exit: + if (pring_handle == true) { + if (!ret_code && info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) + if (pring && ZXDH_RING_MORE_WORK(*pring)) + move_cq_head = false; + } + + if (move_cq_head) { + u64 cq_shadow_temp; + + ZXDH_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); + if (!ZXDH_RING_CURRENT_HEAD(cq->cq_ring)) + cq->polarity ^= 1; + + ZXDH_RING_MOVE_TAIL(cq->cq_ring); + cq->cqe_rd_cnt++; + get_64bit_val(cq->shadow_area, 0, &cq_shadow_temp); + cq_shadow_temp &= ~ZXDH_CQ_DBSA_CQEIDX; + cq_shadow_temp |= + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cq->cqe_rd_cnt); + set_64bit_val(cq->shadow_area, 0, cq_shadow_temp); + } else { + qword0 &= ~ZXDH_CQ_WQEIDX; + qword0 |= FIELD_PREP(ZXDH_CQ_WQEIDX, pring->tail); + set_64bit_val(cqe, 0, qword0); + } + + 
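+/*
+ * Summary of the two exit paths above: on a normal completion the CQ ring
+ * head advances, cqe_rd_cnt is folded back into the shadow-area CQEIDX
+ * field for the doorbell, and the polarity flips when the ring wraps.
+ * For a flushed QP that still has WQEs outstanding the CQE is deliberately
+ * left in place and only its WQEIDX field is rewritten to the ring tail,
+ * so the next poll reports the next flushed WQE from the same CQE.
+ */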
return ret_code; +} + +/** + * zxdh_qp_round_up - return round up qp wq depth + * @wqdepth: wq depth in quanta to round up + */ +static int zxdh_qp_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +/** + * zxdh_get_rq_wqe_shift - get shift count for maximum rq wqe size + * @uk_attrs: qp HW attributes + * @sge: Maximum Scatter Gather Elements wqe + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the rq wqe size based on number of SGEs. + * For 1 SGE, shift = 1 (wqe size of 2*16 bytes). + * For 2 or 3 SGEs, shift = 2 (wqe size of 4*16 bytes). + * For 4-7 SGE's Shift of 3. + * For 8-15 SGE's Shift of 4 otherwise (wqe size of 512 bytes). + */ +void zxdh_get_rq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, u8 *shift) +{ + *shift = 0; //16bytes RQE, need to confirm configuration + if (sge < 2) + *shift = 1; + else if (sge < 4) + *shift = 2; + else if (sge < 8) + *shift = 3; + else if (sge < 16) + *shift = 4; + else + *shift = 5; +} + +/** + * zxdh_get_sq_wqe_shift - get shift count for maximum wqe size + * @uk_attrs: qp HW attributes + * @sge: Maximum Scatter Gather Elements wqe + * @inline_data: Maximum inline data size + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the wqe size based on number of SGEs and inlind data size. + * To surport WR with imm_data,shift = 1 (wqe size of 2*32 bytes). + * For 2-7 SGEs or 24 < inline data <= 86, shift = 2 (wqe size of 4*32 bytes). + * Otherwise (wqe size of 256 bytes). + */ +void zxdh_get_sq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, + u32 inline_data, u8 *shift) +{ + *shift = 1; + + if (sge > 1 || inline_data > 24) { + if (sge < 8 && inline_data <= 86) + *shift = 2; + else + *shift = 3; + } +} + +/* + * zxdh_get_sqdepth - get SQ depth (quanta) + * @max_hw_wq_quanta: HW SQ size limit + * @sq_size: SQ size + * @shift: shift which determines size of WQE + * @sqdepth: depth of SQ + */ +int zxdh_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *sqdepth) +{ + if (sq_size > ZXDH_MAX_SQ_DEPTH) + return -EINVAL; + *sqdepth = zxdh_qp_round_up((sq_size << shift) + ZXDH_SQ_RSVD); + + if (*sqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift)) + *sqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift; + else if (*sqdepth > max_hw_wq_quanta) + return -EINVAL; + + return 0; +} + +/* + * zxdh_get_rqdepth - get RQ/SRQ depth (quanta) + * @max_hw_rq_quanta: HW RQ/SRQ size limit + * @rq_size: RQ/SRQ size + * @shift: shift which determines size of WQE + * @rqdepth: depth of RQ/SRQ + */ +int zxdh_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *rqdepth) +{ + *rqdepth = zxdh_qp_round_up((rq_size << shift) + ZXDH_RQ_RSVD); + + if (*rqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift)) + *rqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift; + else if (*rqdepth > max_hw_rq_quanta) + return -EINVAL; + + return 0; +} + +static const struct zxdh_wqe_uk_ops iw_wqe_uk_ops = { + .iw_copy_inline_data = zxdh_copy_inline_data, + .iw_inline_data_size_to_quanta = zxdh_inline_data_size_to_quanta, + .iw_set_fragment = zxdh_set_fragment, + .iw_set_mw_bind_wqe = zxdh_set_mw_bind_wqe, +}; + +/** + * zxdh_uk_qp_init - initialize shared qp + * @qp: hw qp (user and kernel) + * @info: qp initialization info + * + * initializes the vars used in both user and kernel mode. + * size of the wqe depends on numbers of max. fragements + * allowed. 
Then size of wqe * the number of wqes should be the + * amount of memory allocated for sq and rq. + */ +int zxdh_uk_qp_init(struct zxdh_qp_uk *qp, struct zxdh_qp_uk_init_info *info) +{ + int ret_code = 0; + u32 sq_ring_size; + u8 sqshift, rqshift; + + qp->uk_attrs = info->uk_attrs; + if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || + info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) + return -EINVAL; + + zxdh_get_sq_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt, + info->max_inline_data, &sqshift); + zxdh_get_rq_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, &rqshift); + qp->qp_caps = info->qp_caps; + qp->sq_base = info->sq; + qp->rq_base = info->rq; + qp->qp_type = info->type; + qp->shadow_area = info->shadow_area; + qp->sq_wrtrk_array = info->sq_wrtrk_array; + + qp->rq_wrid_array = info->rq_wrid_array; + qp->wqe_alloc_db = info->wqe_alloc_db; + qp->rd_fence_rate = info->rd_fence_rate; + qp->qp_id = info->qp_id; + qp->sq_size = info->sq_size; + qp->max_sq_frag_cnt = info->max_sq_frag_cnt; + sq_ring_size = qp->sq_size << sqshift; + ZXDH_RING_INIT(qp->sq_ring, sq_ring_size); + ZXDH_RING_INIT(qp->initial_ring, sq_ring_size); + qp->swqe_polarity = 0; + + qp->swqe_polarity_deferred = 1; + qp->rwqe_polarity = 0; + qp->rwqe_signature = 0; + qp->rq_size = info->rq_size; + qp->max_rq_frag_cnt = info->max_rq_frag_cnt; + qp->max_inline_data = (info->max_inline_data == 0) ? + ZXDH_MAX_INLINE_DATA_SIZE : + info->max_inline_data; + qp->rq_wqe_size = rqshift; + ZXDH_RING_INIT(qp->rq_ring, qp->rq_size); + qp->rq_wqe_size_multiplier = 1 << rqshift; + + qp->wqe_ops = iw_wqe_uk_ops; + return ret_code; +} + +/** + * zxdh_uk_cq_init - initialize shared cq (user and kernel) + * @cq: hw cq + * @info: hw cq initialization info + */ +void zxdh_uk_cq_init(struct zxdh_cq_uk *cq, struct zxdh_cq_uk_init_info *info) +{ + cq->cq_base = info->cq_base; + cq->cq_id = info->cq_id; + cq->cq_size = info->cq_size; + cq->cq_log_size = info->cq_log_size; + cq->cqe_alloc_db = info->cqe_alloc_db; + cq->shadow_area = info->shadow_area; + cq->cqe_size = info->cqe_size; + ZXDH_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; + cq->cqe_rd_cnt = 0; +} + +/** + * zxdh_uk_clean_cq - clean cq entries + * @q: completion context + * @cq: cq to clean + */ +void zxdh_uk_clean_cq(void *q, struct zxdh_cq_uk *cq) +{ + __le64 *cqe; + u64 qword0, comp_ctx; + u32 cq_head; + u8 polarity, temp; + + cq_head = cq->cq_ring.head; + temp = cq->polarity; + do { + if (cq->cqe_size) + cqe = ((struct zxdh_extended_cqe + *)(cq->cq_base))[cq_head] + .buf; + else + cqe = cq->cq_base[cq_head].buf; + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + + if (polarity != temp) + break; + + get_64bit_val(cqe, 8, &comp_ctx); + if ((void *)(unsigned long)comp_ctx == q) + set_64bit_val(cqe, 8, 0); + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + temp ^= 1; + } while (true); +} + +/** + * zxdh_nop - post a nop + * @qp: hw qp ptr + * @wr_id: work request id + * @signaled: signaled for completion + * @post_sq: ring doorbell + */ +int zxdh_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u32 wqe_idx; + struct zxdh_post_sq_info info = {}; + + info.push_wqe = false; + info.wr_id = wr_id; + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + &info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + hdr = 
FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_NOP) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ + * @frag_cnt: number of fragments + * @quanta: quanta for frag_cnt + */ +int zxdh_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) +{ + if (frag_cnt > ZXDH_MAX_SQ_FRAG) + return -EINVAL; + *quanta = frag_cnt / 2 + 1; + return 0; +} + +/** + * zxdh_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ + * @frag_cnt: number of fragments + * @wqe_size: size in bytes given frag_cnt + */ +int zxdh_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) +{ + if (frag_cnt < 2) + *wqe_size = 32; + else if (frag_cnt < 4) + *wqe_size = 64; + else if (frag_cnt < 8) + *wqe_size = 128; + else if (frag_cnt < 16) + *wqe_size = 256; + else if (frag_cnt < 32) + *wqe_size = 512; + else + return -EINVAL; + + return 0; +} diff --git a/src/rdma/src/user.h b/src/rdma/src/user.h new file mode 100644 index 0000000000000000000000000000000000000000..0cf4bc245f368cb99cdfa32935ddf10789d8b5ae --- /dev/null +++ b/src/rdma/src/user.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_H +#define ZXDH_USER_H + +#define zxdh_handle void * +#define zxdh_adapter_handle zxdh_handle +#define zxdh_qp_handle zxdh_handle +#define zxdh_cq_handle zxdh_handle +#define zxdh_pd_id zxdh_handle +#define zxdh_stag_handle zxdh_handle +#define zxdh_stag_index u32 +#define zxdh_stag u32 +#define zxdh_stag_key u8 +#define zxdh_tagged_offset u64 +#define zxdh_access_privileges u32 +#define zxdh_physical_fragment u64 +#define zxdh_address_list u64 * +#define zxdh_sgl struct zxdh_sge * + +#define ZXDH_MAX_MR_SIZE 0x200000000000ULL + +#define ZXDH_ACCESS_FLAGS_LOCALREAD 0x01 +#define ZXDH_ACCESS_FLAGS_LOCALWRITE 0x02 +#define ZXDH_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04 +#define ZXDH_ACCESS_FLAGS_REMOTEREAD 0x05 +#define ZXDH_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08 +#define ZXDH_ACCESS_FLAGS_REMOTEWRITE 0x0a +#define ZXDH_ACCESS_FLAGS_BIND_WINDOW 0x10 +#define ZXDH_ACCESS_FLAGS_ZERO_BASED 0x20 +#define ZXDH_ACCESS_FLAGS_ALL 0x3f + +#define ZXDH_OP_TYPE_NOP 0x00 +#define ZXDH_OP_TYPE_SEND 0x01 +#define ZXDH_OP_TYPE_SEND_WITH_IMM 0x02 +#define ZXDH_OP_TYPE_SEND_INV 0x03 +#define ZXDH_OP_TYPE_WRITE 0x04 +#define ZXDH_OP_TYPE_WRITE_WITH_IMM 0x05 +#define ZXDH_OP_TYPE_READ 0x06 +#define ZXDH_OP_TYPE_BIND_MW 0x07 +#define ZXDH_OP_TYPE_FAST_REG_MR 0x08 +#define ZXDH_OP_TYPE_LOCAL_INV 0x09 +#define ZXDH_OP_TYPE_UD_SEND 0x0a +#define ZXDH_OP_TYPE_UD_SEND_WITH_IMM 0x0b + +#define ZXDH_OP_TYPE_REC 0x3e +#define ZXDH_OP_TYPE_REC_IMM 0x3f + +#define ZXDH_FLUSH_MAJOR_ERR 1 + +#define ZXDH_MAX_MSIX_INTERRUPT_SIZE 24 + +#define ZXDH_MIN_ROCE_QP_ID 1 +#define ZXDH_MIN_ROCE_SRQ_ID 1 + +#define ZXDH_SQE_SIZE 4 +#define ZXDH_RQE_SIZE 2 +#define IRDMARX_RD_TIME_LIMIT_VALUE 0x20 + +enum zxdh_hw_stats_state { + ZXDH_HW_STATS_INVALID = 0, + ZXDH_HW_STATS_VALID, +}; + +enum zxdh_cfg_ram_state { + ZXDH_CFG_RAM_FREE = 0, + ZXDH_CFG_RAM_BUSY, +}; + +enum zxdh_stat_rd_clr_mode { + ZXDH_STAT_RD_MODE_UNCLR = 0, //Not reading clearly + ZXDH_STAT_RD_MODE_CLR, // Read Clearly +}; + +enum zxdh_device_caps_const { + ZXDH_WQE_SIZE = 4, + ZXDH_CQP_WQE_SIZE = 8, + ZXDH_CQE_SIZE = 8, + 
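+/*
+ * These *_SIZE values appear to count 64-bit words rather than bytes (see
+ * the __le64 buf[] arrays further down): ZXDH_SQE_SIZE = 4 and
+ * ZXDH_RQE_SIZE = 2 give 32-byte SQ and 16-byte RQ quanta, and
+ * ZXDH_CQE_SIZE = 8 gives a 64-byte CQE.
+ */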
ZXDH_EXTENDED_CQE_SIZE = 8, + ZXDH_AEQE_SIZE = 4, + ZXDH_CEQE_SIZE = 2, + ZXDH_CQP_CTX_SIZE = 8, + ZXDH_SHADOW_AREA_SIZE = 1, + ZXDH_GATHER_STATS_BUF_SIZE = 1024, + ZXDH_MIN_IW_QP_ID = 0, + ZXDH_QUERY_FPM_BUF_SIZE = 176, + ZXDH_COMMIT_FPM_BUF_SIZE = 176, + ZXDH_MAX_IW_QP_ID = 262143, + ZXDH_MIN_CEQID = 0, + ZXDH_MAX_CEQID = 4095, + ZXDH_CEQ_MAX_COUNT = ZXDH_MAX_CEQID + 1, + ZXDH_MIN_CQID = 0, + ZXDH_MAX_CQID = 524287, + ZXDH_MIN_AEQ_ENTRIES = 1, + ZXDH_MAX_AEQ_ENTRIES = 131072, // 64k QP + 32k CQ + 32k SRQ + ZXDH_MIN_CEQ_ENTRIES = 1, + ZXDH_MAX_CEQ_ENTRIES = 32768, // 32k CQ + ZXDH_MIN_CQ_SIZE = 1, + ZXDH_MAX_CQ_SIZE = 4194304, // 4M + ZXDH_DB_ID_ZERO = 0, + ZXDH_MAX_OUTBOUND_MSG_SIZE = 2147483647, + ZXDH_MAX_INBOUND_MSG_SIZE = 2147483647, + ZXDH_MAX_PUSH_PAGE_COUNT = 1024, + ZXDH_MAX_PE_ENA_VF_COUNT = 32, + ZXDH_MAX_VF_FPM_ID = 47, + ZXDH_MAX_SQ_PAYLOAD_SIZE = 2147483648, + ZXDH_MAX_INLINE_DATA_SIZE = 217, + ZXDH_MAX_WQ_ENTRIES = 32768, + ZXDH_Q2_BUF_SIZE = 256, + ZXDH_QP_CTX_SIZE = 512, + ZXDH_CQ_CTX_SIZE = 64, + ZXDH_CEQ_CTX_SIZE = 32, + ZXDH_AEQ_CTX_SIZE = 32, + ZXDH_SRQ_CTX_SIZE = 64, + ZXDH_MAX_PDS = 1048576, // 1M +}; + +enum zxdh_host_epid { + ZXDH_HOST_EP0_ID = 5, + ZXDH_HOST_EP1_ID = 6, + ZXDH_HOST_EP2_ID = 7, + ZXDH_HOST_EP3_ID = 8, + ZXDH_HOST_EP4_ID = 9, +}; + +enum zxdh_addressing_type { + ZXDH_ADDR_TYPE_ZERO_BASED = 0, + ZXDH_ADDR_TYPE_VA_BASED = 1, +}; + +enum zxdh_queue_status { + ZXDH_QUEUE_STATE_INVALID = 0, + ZXDH_QUEUE_STATE_OK, +}; + +enum zxdh_ceqe_size { + ZXDH_CEQE_SIZE_16_BYTE = 0, + ZXDH_CEQE_SIZE_32_BYTE, + ZXDH_CEQE_SIZE_64_BYTE, + ZXDH_CEQE_SIZE_128_BYTE, +}; + +enum zxdh_irq_type { + ZXDH_IRQ_TYPE_MSIX = 0, + ZXDH_IRQ_TYPE_PIN, +}; + +enum zxdh_ceq_aggregation_cnt { + IRMDA_CEQ_AGGREGATION_CNT_0, + IRMDA_CEQ_AGGREGATION_CNT_1 = 1, + ZXDH_CEQ_AGGREGATION_CNT_2 = 2, +}; + +enum zxdh_vf_active_state { + IRMDA_VF_STATE_INVALID = 0, + ZXDH_VF_STATE_VALID, +}; + +enum zxdh_flush_opcode { + FLUSH_INVALID = 0, + FLUSH_GENERAL_ERR, + FLUSH_PROT_ERR, + FLUSH_REM_ACCESS_ERR, + FLUSH_LOC_QP_OP_ERR, + FLUSH_REM_OP_ERR, + FLUSH_LOC_LEN_ERR, + FLUSH_FATAL_ERR, + FLUSH_RETRY_EXC_ERR, + FLUSH_MW_BIND_ERR, + FLUSH_REM_INV_REQ_ERR, + FLUSH_MR_FASTREG_ERR, +}; + +enum zxdh_cmpl_status { + ZXDH_COMPL_STATUS_SUCCESS = 0, + ZXDH_COMPL_STATUS_FLUSHED, + ZXDH_COMPL_STATUS_INVALID_WQE, + ZXDH_COMPL_STATUS_QP_CATASTROPHIC, + ZXDH_COMPL_STATUS_REMOTE_TERMINATION, + ZXDH_COMPL_STATUS_INVALID_STAG, + ZXDH_COMPL_STATUS_BASE_BOUND_VIOLATION, + ZXDH_COMPL_STATUS_ACCESS_VIOLATION, + ZXDH_COMPL_STATUS_INVALID_PD_ID, + ZXDH_COMPL_STATUS_WRAP_ERROR, + ZXDH_COMPL_STATUS_STAG_INVALID_PDID, + ZXDH_COMPL_STATUS_RDMA_READ_ZERO_ORD, + ZXDH_COMPL_STATUS_QP_NOT_PRIVLEDGED, + ZXDH_COMPL_STATUS_STAG_NOT_INVALID, + ZXDH_COMPL_STATUS_INVALID_PHYS_BUF_SIZE, + ZXDH_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY, + ZXDH_COMPL_STATUS_INVALID_FBO, + ZXDH_COMPL_STATUS_INVALID_LEN, + ZXDH_COMPL_STATUS_INVALID_ACCESS, + ZXDH_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG, + ZXDH_COMPL_STATUS_INVALID_VIRT_ADDRESS, + ZXDH_COMPL_STATUS_INVALID_REGION, + ZXDH_COMPL_STATUS_INVALID_WINDOW, + ZXDH_COMPL_STATUS_INVALID_TOTAL_LEN, + ZXDH_COMPL_STATUS_UNKNOWN, +}; + +enum zxdh_cmpl_notify { + ZXDH_CQ_COMPL_EVENT = 0, + ZXDH_CQ_COMPL_SOLICITED = 1, +}; + +enum zxdh_qp_caps { + ZXDH_WRITE_WITH_IMM = 1, + ZXDH_SEND_WITH_IMM = 2, + ZXDH_ROCE = 4, + ZXDH_PUSH_MODE = 8, +}; + +struct zxdh_qp_uk; +struct zxdh_cq_uk; +struct zxdh_qp_uk_init_info; +struct zxdh_cq_uk_init_info; + +struct zxdh_sge { + zxdh_tagged_offset tag_off; + u32 len; + zxdh_stag stag; 
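+/*
+ * One scatter/gather element as consumed by the WQE builders: tag_off is
+ * the 64-bit tagged offset (typically the buffer address), len is the
+ * length in bytes, and stag is the local key written into the
+ * *_FRAG_STAG / *_STAG WQE fields.
+ */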
+}; + +struct zxdh_ring { + u32 head; + u32 tail; + u32 size; +}; + +struct zxdh_cqe { + __le64 buf[ZXDH_CQE_SIZE]; +}; + +struct zxdh_extended_cqe { + __le64 buf[ZXDH_EXTENDED_CQE_SIZE]; +}; + +struct zxdh_post_send { + zxdh_sgl sg_list; + u32 num_sges; + u32 qkey; + u32 dest_qp; + u32 ah_id; +}; + +struct zxdh_post_inline_send { + void *data; + u32 len; + u32 qkey; + u32 dest_qp; + u32 ah_id; +}; + +struct zxdh_post_rq_info { + u64 wr_id; + zxdh_sgl sg_list; + u32 num_sges; +}; + +struct zxdh_rdma_write { + zxdh_sgl lo_sg_list; + u32 num_lo_sges; + struct zxdh_sge rem_addr; +}; + +struct zxdh_inline_rdma_write { + void *data; + u32 len; + struct zxdh_sge rem_addr; +}; + +struct zxdh_rdma_read { + zxdh_sgl lo_sg_list; + u32 num_lo_sges; + struct zxdh_sge rem_addr; +}; + +struct zxdh_bind_window { + zxdh_stag mr_stag; + u64 bind_len; + void *va; + enum zxdh_addressing_type addressing_type; + u8 ena_reads : 1; + u8 ena_writes : 1; + zxdh_stag mw_stag; + u8 mem_window_type_1 : 1; +}; + +struct zxdh_inv_local_stag { + zxdh_stag target_stag; +}; + +struct zxdh_post_sq_info { + u64 wr_id; + u8 op_type; + u8 l4len; + u8 signaled : 1; + u8 solicited : 1; + u8 read_fence : 1; + u8 local_fence : 1; + u8 inline_data : 1; + u8 imm_data_valid : 1; + u8 push_wqe : 1; + u8 report_rtt : 1; + u8 udp_hdr : 1; + u8 defer_flag : 1; + u32 imm_data; + u32 stag_to_inv; + union { + struct zxdh_post_send send; + struct zxdh_rdma_write rdma_write; + struct zxdh_rdma_read rdma_read; + struct zxdh_bind_window bind_window; + struct zxdh_inv_local_stag inv_local_stag; + struct zxdh_inline_rdma_write inline_rdma_write; + struct zxdh_post_inline_send inline_send; + } op; +}; + +struct zxdh_cq_poll_info { + u64 wr_id; + zxdh_qp_handle qp_handle; + u32 bytes_xfered; + u32 tcp_seq_num_rtt; + u32 qp_id; + u32 ud_src_qpn; + u32 imm_data; + zxdh_stag inv_stag; /* or L_R_Key */ + enum zxdh_cmpl_status comp_status; + u16 major_err; + u16 minor_err; + u16 ud_vlan; + u8 ud_smac[6]; + u8 op_type; + u8 stag_invalid_set : 1; /* or L_R_Key set */ + u8 push_dropped : 1; + u8 error : 1; + u8 solicited_event : 1; + u8 ipv4 : 1; + u8 ud_vlan_valid : 1; + u8 ud_smac_valid : 1; + u8 imm_valid : 1; +}; + +int zxdh_uk_inline_rdma_write(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_rc_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_ud_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_mw_bind(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_post_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool signaled, + bool post_sq); +int zxdh_uk_post_receive(struct zxdh_qp_uk *qp, struct zxdh_post_rq_info *info); +void zxdh_uk_qp_post_wr(struct zxdh_qp_uk *qp); +void zxdh_uk_qp_set_shadow_area(struct zxdh_qp_uk *qp); +int zxdh_uk_rdma_read(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_rdma_write(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_rc_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_ud_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_stag_local_invalidate(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); + +struct zxdh_wqe_uk_ops { + void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity, + bool imm_data_flag); + u16 (*iw_inline_data_size_to_quanta)(u32 data_size, bool imm_data_flag); + void 
(*iw_set_fragment)(__le64 *wqe, u32 offset, struct zxdh_sge *sge, + u8 valid); + void (*iw_set_mw_bind_wqe)(__le64 *wqe, + struct zxdh_bind_window *op_info); +}; + +int zxdh_uk_cq_poll_cmpl(struct zxdh_cq_uk *cq, struct zxdh_cq_poll_info *info); +void zxdh_uk_cq_request_notification(struct zxdh_cq_uk *cq, + enum zxdh_cmpl_notify cq_notify); +void zxdh_uk_cq_resize(struct zxdh_cq_uk *cq, void *cq_base, int size); +void zxdh_uk_cq_set_resized_cnt(struct zxdh_cq_uk *qp, u16 cnt); +void zxdh_uk_cq_init(struct zxdh_cq_uk *cq, struct zxdh_cq_uk_init_info *info); +int zxdh_uk_qp_init(struct zxdh_qp_uk *qp, struct zxdh_qp_uk_init_info *info); +struct zxdh_sq_uk_wr_trk_info { + u64 wrid; + u32 wr_len; + u16 quanta; + u8 reserved[2]; +}; + +struct zxdh_qp_sq_quanta { + __le64 elem[ZXDH_SQE_SIZE]; +}; + +struct zxdh_qp_rq_quanta { + __le64 elem[ZXDH_RQE_SIZE]; +}; + +struct zxdh_qp_uk { + struct zxdh_qp_sq_quanta *sq_base; + struct zxdh_qp_rq_quanta *rq_base; + struct zxdh_uk_attrs *uk_attrs; + u32 __iomem *wqe_alloc_db; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + __le64 *shadow_area; + __le32 *push_db; + __le64 *push_wqe; + struct zxdh_ring sq_ring; + struct zxdh_ring rq_ring; + struct zxdh_ring initial_ring; + u32 qp_id; + u32 qp_caps; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + u32 max_inline_data; + struct zxdh_wqe_uk_ops wqe_ops; + u16 conn_wqes; + u8 qp_type; + u8 swqe_polarity; + u8 swqe_polarity_deferred; + u8 rwqe_polarity; + u8 rq_wqe_size; + u8 rq_wqe_size_multiplier; + u8 deferred_flag : 1; + u8 push_mode : 1; /* whether the last post wqe was pushed */ + u8 push_dropped : 1; + u8 first_sq_wq : 1; + u8 sq_flush_complete : 1; /* Indicates flush was seen and SQ was empty after the flush */ + u8 rq_flush_complete : 1; /* Indicates flush was seen and RQ was empty after the flush */ + u8 destroy_pending : 1; /* Indicates the QP is being destroyed */ + void *back_qp; + spinlock_t *lock; + u8 dbg_rq_flushed; + u16 ord_cnt; + u16 ws_index; + u16 rwqe_signature; + u8 sq_flush_seen; + u8 rq_flush_seen; + u8 rd_fence_rate; + u8 user_pri; + u8 pmtu; + u8 is_srq; +}; + +struct zxdh_cq_uk { + struct zxdh_cqe *cq_base; + u32 __iomem *cqe_alloc_db; + u32 __iomem *cq_ack_db; + __le64 *shadow_area; + u32 cq_id; + u32 cq_size; + u32 cq_log_size; + u32 cqe_rd_cnt; + struct zxdh_ring cq_ring; + u8 polarity; + u8 armed : 1; + u8 cqe_size; +}; + +struct zxdh_qp_uk_init_info { + struct zxdh_qp_sq_quanta *sq; + struct zxdh_qp_rq_quanta *rq; + struct zxdh_uk_attrs *uk_attrs; + u32 __iomem *wqe_alloc_db; + __le64 *shadow_area; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 qp_id; + u32 qp_caps; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + u32 max_inline_data; + u8 first_sq_wq; + u8 type; + u8 rd_fence_rate; + int abi_ver; + bool legacy_mode; +}; + +struct zxdh_cq_uk_init_info { + u32 __iomem *cqe_alloc_db; + u32 __iomem *cq_ack_db; + struct zxdh_cqe *cq_base; + __le64 *shadow_area; + u32 cq_size; + u32 cq_log_size; + u32 cq_id; + u8 cqe_size; +}; + +__le64 *zxdh_qp_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx, + u16 quanta, u32 total_size, + struct zxdh_post_sq_info *info); +__le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx); +void zxdh_uk_clean_cq(void *q, struct zxdh_cq_uk *cq); +int zxdh_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq); +int zxdh_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); +int zxdh_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 
*wqe_size); +void zxdh_get_sq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, + u32 inline_data, u8 *shift); +void zxdh_get_rq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, u8 *shift); +int zxdh_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth); +int zxdh_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth); +#ifdef Z_CONFIG_PUSH_MODE +void zxdh_qp_push_wqe(struct zxdh_qp_uk *qp, __le64 *wqe, u16 quanta, + u32 wqe_idx, bool post_sq); +#endif +void zxdh_clr_wqes(struct zxdh_qp_uk *qp, u32 qp_wqe_idx); +#endif /* ZXDH_USER_H */ diff --git a/src/rdma/src/utils.c b/src/rdma/src/utils.c new file mode 100644 index 0000000000000000000000000000000000000000..cf3e2d33285290bc0482da5609be2ddedf3c4337 --- /dev/null +++ b/src/rdma/src/utils.c @@ -0,0 +1,2701 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "main.h" +#include "icrdma_hw.h" +#include + +extern u32 dpp_stat_port_RDMA_packet_msg_tx_cnt_get(dpp_pf_info_t *pf_info, + u32 index, u32 mode, + u64 *p_pkB_cnt, + u64 *p_pk_cnt); + +LIST_HEAD(zxdh_handlers); +DEFINE_SPINLOCK(zxdh_handler_lock); + +/** + * wr32 - write 32 bits to hw register + * @hw: hardware information including registers + * @reg: register offset + * @val: value to write to register + */ +inline void wr32(struct zxdh_hw *hw, u32 reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +/** + * rd32 - read a 32 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u32 rd32(struct zxdh_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +/** + * rd64 - read a 64 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u64 rd64(struct zxdh_hw *hw, u32 reg) +{ + return readq(hw->hw_addr + reg); +} + +/** + * zxdh_add_handler - add a handler to the list + * @hdl: handler to be added to the handler list + */ +void zxdh_add_handler(struct zxdh_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_add(&hdl->list, &zxdh_handlers); + spin_unlock_irqrestore(&zxdh_handler_lock, flags); +} + +/** + * zxdh_del_handler - delete a handler from the list + * @hdl: handler to be deleted from the handler list + */ +void zxdh_del_handler(struct zxdh_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_del(&hdl->list); + spin_unlock_irqrestore(&zxdh_handler_lock, flags); +} + +/** + * zxdh_alloc_and_get_cqp_request - get cqp struct + * @cqp: device cqp ptr + * @wait: cqp to be used in wait mode + */ +struct zxdh_cqp_request *zxdh_alloc_and_get_cqp_request(struct zxdh_cqp *cqp, + bool wait) +{ + struct zxdh_cqp_request *cqp_request = NULL; + unsigned long flags; + + spin_lock_irqsave(&cqp->req_lock, flags); + if (!list_empty(&cqp->cqp_avail_reqs)) { + cqp_request = list_entry(cqp->cqp_avail_reqs.next, + struct zxdh_cqp_request, list); + list_del_init(&cqp_request->list); + } + spin_unlock_irqrestore(&cqp->req_lock, flags); + if (!cqp_request) { + cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); + if (cqp_request) { + cqp_request->dynamic = true; + if (wait) + init_waitqueue_head(&cqp_request->waitq); + } + } + if (!cqp_request) { + pr_err("ERR: CQP Request Fail: No Memory"); + return NULL; + } + + cqp_request->waiting = wait; + refcount_set(&cqp_request->refcnt, 1); + memset(&cqp_request->compl_info, 0, 
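+/*
+ * Allocation strategy recap: requests are taken from the pre-allocated
+ * cqp_avail_reqs pool when possible; otherwise a one-off request is
+ * kzalloc'd with GFP_ATOMIC and marked dynamic so zxdh_free_cqp_request()
+ * kfree()s it instead of returning it to the pool.  Each request starts
+ * with a refcount of 1 and is released through zxdh_put_cqp_request().
+ */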
sizeof(cqp_request->compl_info)); + + return cqp_request; +} + +/** + * zxdh_get_cqp_request - increase refcount for cqp_request + * @cqp_request: pointer to cqp_request instance + */ +static inline void zxdh_get_cqp_request(struct zxdh_cqp_request *cqp_request) +{ + refcount_inc(&cqp_request->refcnt); +} + +/** + * zxdh_free_cqp_request - free cqp request + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void zxdh_free_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + unsigned long flags; + + if (cqp_request->dynamic) { + kfree(cqp_request); + } else { + cqp_request->request_done = false; + cqp_request->callback_fcn = NULL; + cqp_request->waiting = false; + + spin_lock_irqsave(&cqp->req_lock, flags); + list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); + spin_unlock_irqrestore(&cqp->req_lock, flags); + } + wake_up(&cqp->remove_wq); +} + +/** + * zxdh_put_cqp_request - dec ref count and free if 0 + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void zxdh_put_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + if (refcount_dec_and_test(&cqp_request->refcnt)) + zxdh_free_cqp_request(cqp, cqp_request); +} + +/** + * zxdh_free_pending_cqp_request -free pending cqp request objs + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +static void zxdh_free_pending_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + if (cqp_request->waiting) { + cqp_request->compl_info.error = true; + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + } + wait_event_timeout(cqp->remove_wq, + refcount_read(&cqp_request->refcnt) == 1, 1000); + zxdh_put_cqp_request(cqp, cqp_request); +} + +/** + * zxdh_cleanup_pending_cqp_op - clean-up cqp with no + * completions + * @rf: RDMA PCI function + */ +void zxdh_cleanup_pending_cqp_op(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp *cqp = &rf->cqp; + struct zxdh_cqp_request *cqp_request = NULL; + struct cqp_cmds_info *pcmdinfo = NULL; + u32 i, pending_work, wqe_idx; + + pending_work = ZXDH_RING_USED_QUANTA(cqp->sc_cqp.sq_ring); + wqe_idx = ZXDH_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring); + for (i = 0; i < pending_work; i++) { + cqp_request = (struct zxdh_cqp_request *)(unsigned long) + cqp->scratch_array[wqe_idx]; + if (cqp_request) + zxdh_free_pending_cqp_request(cqp, cqp_request); + wqe_idx = (wqe_idx + 1) % ZXDH_RING_SIZE(cqp->sc_cqp.sq_ring); + } + + while (!list_empty(&dev->cqp_cmd_head)) { + pcmdinfo = zxdh_remove_cqp_head(dev); + cqp_request = + container_of(pcmdinfo, struct zxdh_cqp_request, info); + if (cqp_request) + zxdh_free_pending_cqp_request(cqp, cqp_request); + } +} + +/** + * zxdh_wait_event - wait for completion + * @rf: RDMA PCI function + * @cqp_request: cqp request to wait + */ +static int zxdh_wait_event(struct zxdh_pci_f *rf, + struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_cqp_timeout cqp_timeout = {}; + bool cqp_error = false; + int err_code = 0; + + cqp_timeout.compl_cqp_cmds = + rf->sc_dev.cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]; + do { + int wait_time_ms = + rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms; + + zxdh_cqp_ce_handler(rf, &rf->ccq.sc_cq); + if (wait_event_timeout(cqp_request->waitq, + cqp_request->request_done, + msecs_to_jiffies(wait_time_ms))) + break; + + zxdh_check_cqp_progress(&cqp_timeout, &rf->sc_dev); + + if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD) + continue; + + if (!rf->reset) { + // rf->reset = true; + rf->gen_ops.request_reset(rf); 
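+/*
+ * Timeout handling note: each pass of the loop above services the CCQ via
+ * zxdh_cqp_ce_handler(), waits up to max_cqp_compl_wait_time_ms for
+ * request_done, and tracks stalled progress with zxdh_check_cqp_progress();
+ * only after CQP_TIMEOUT_THRESHOLD stalled passes is a function reset
+ * requested and -ETIMEDOUT returned to the caller.
+ */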
+ } + return -ETIMEDOUT; + } while (1); + + cqp_error = cqp_request->compl_info.error; + if (cqp_error) { + err_code = -EIO; + if (cqp_request->compl_info.maj_err_code == 0xFFFF) { + if (cqp_request->compl_info.min_err_code == 0x8002) { + err_code = -EBUSY; + } else if (cqp_request->compl_info.min_err_code == + 0x8029) { + if (!rf->reset) { + // rf->reset = true; + //rf->gen_ops.request_reset(rf); + } + } + } + } + + return err_code; +} + +static const char *const zxdh_cqp_cmd_names[ZXDH_MAX_CQP_OPS] = { + [ZXDH_OP_CEQ_DESTROY] = "Destroy CEQ Cmd", + [ZXDH_OP_AEQ_DESTROY] = "Destroy AEQ Cmd", + [ZXDH_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd", + [ZXDH_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd", + [ZXDH_OP_CEQ_CREATE] = "CEQ Create Cmd", + [ZXDH_OP_AEQ_CREATE] = "AEQ Destroy Cmd", + [ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd", + [ZXDH_OP_QP_MODIFY] = "Modify QP Cmd", + [ZXDH_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd", + [ZXDH_OP_CQ_CREATE] = "Create CQ Cmd", + [ZXDH_OP_CQ_DESTROY] = "Destroy CQ Cmd", + [ZXDH_OP_QP_CREATE] = "Create QP Cmd", + [ZXDH_OP_QP_DESTROY] = "Destroy QP Cmd", + [ZXDH_OP_ALLOC_STAG] = "Allocate STag Cmd", + [ZXDH_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd", + [ZXDH_OP_DEALLOC_STAG] = "Deallocate STag Cmd", + [ZXDH_OP_MW_ALLOC] = "Allocate Memory Window Cmd", + [ZXDH_OP_QP_FLUSH_WQES] = "Flush QP Cmd", + [ZXDH_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd", + [ZXDH_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd", + [ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd", + [ZXDH_OP_SUSPEND] = "Suspend QP Cmd", + [ZXDH_OP_RESUME] = "Resume QP Cmd", + [ZXDH_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd", + [ZXDH_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd", + [ZXDH_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd", + [ZXDH_OP_AH_CREATE] = "Create Address Handle Cmd", + [ZXDH_OP_AH_MODIFY] = "Modify Address Handle Cmd", + [ZXDH_OP_AH_DESTROY] = "Destroy Address Handle Cmd", + [ZXDH_OP_MC_CREATE] = "Create Multicast Group Cmd", + [ZXDH_OP_MC_DESTROY] = "Destroy Multicast Group Cmd", + [ZXDH_OP_MC_MODIFY] = "Modify Multicast Group Cmd", + [ZXDH_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd", + [ZXDH_OP_STATS_FREE] = "Free Statistics Instance Cmd", + [ZXDH_OP_STATS_GATHER] = "Gather Statistics Cmd", + [ZXDH_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd", + [ZXDH_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd", + [ZXDH_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd", + [ZXDH_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd", + [ZXDH_OP_GEN_AE] = "Generate AE Cmd", + [ZXDH_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd", + [ZXDH_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd", + [ZXDH_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd", + [ZXDH_OP_CQ_MODIFY] = "CQ Modify Cmd", + [ZXDH_OP_CONFIG_PTE_TAB] = "Config PTE Tab Cmd", + [ZXDH_OP_QUERY_PTE_TAB] = "Query PTE Tab Cmd", + [ZXDH_OP_CONFIG_PBLE_TAB] = "Config PBLE Tab Cmd", + [ZXDH_OP_CONFIG_MAILBOX] = "Config Mailbox Cmd", + [ZXDH_OP_DMA_WRITE] = "Dma Write Cmd", + [ZXDH_OP_DMA_WRITE32] = "Dma Write32 Cmd", + [ZXDH_OP_DMA_WRITE64] = "Dma Write64 Cmd", + [ZXDH_OP_DMA_READ] = "Dma Read Cmd", + [ZXDH_OP_DMA_READ_USE_CQE] = "Dma Read Use Cqe Cmd", + [ZXDH_OP_QUERY_QPC] = "Query HW QPC Cmd", + [ZXDH_OP_QUERY_CQC] = "Query HW CQC Cmd", + [ZXDH_OP_QUERY_SRQC] = "Query HW SRQC Cmd", + [ZXDH_OP_QUERY_CEQC] = "Query HW CEQC Cmd", + [ZXDH_OP_QUERY_AEQC] = "Query HW AEQC Cmd", +}; + +static const struct zxdh_cqp_err_info 
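+/*
+ * Entries in the table below are matched on the major/minor completion
+ * codes and treated as expected conditions: zxdh_cqp_crit_err() logs them
+ * with their description and returns false, so callers skip the
+ * critical-error reporting path for these cases.
+ */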
zxdh_noncrit_err_list[] = { + { 0xffff, 0x8002, "Invalid State" }, + { 0xffff, 0x8006, "Flush No Wqe Pending" }, + { 0xffff, 0x8007, "Modify QP Bad Close" }, + { 0xffff, 0x8009, "LLP Closed" }, + { 0xffff, 0x800a, "Reset Not Sent" } +}; + +/** + * zxdh_cqp_crit_err - check if CQP error is critical + * @dev: pointer to dev structure + * @cqp_cmd: code for last CQP operation + * @maj_err_code: major error code + * @min_err_code: minot error code + */ +bool zxdh_cqp_crit_err(struct zxdh_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, + u16 min_err_code) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(zxdh_noncrit_err_list); ++i) { + if (maj_err_code == zxdh_noncrit_err_list[i].maj && + min_err_code == zxdh_noncrit_err_list[i].min) { + pr_err("CQP: [%s Error][%s] maj=0x%x min=0x%x\n", + zxdh_noncrit_err_list[i].desc, + zxdh_cqp_cmd_names[cqp_cmd], maj_err_code, + min_err_code); + return false; + } + } + return true; +} + +/** + * zxdh_handle_cqp_op - process cqp command + * @rf: RDMA PCI function + * @cqp_request: cqp request to process + */ +int zxdh_handle_cqp_op(struct zxdh_pci_f *rf, + struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct cqp_cmds_info *info = &cqp_request->info; + int status; + bool put_cqp_request = true; + + if (rf->reset) + return -EBUSY; + + zxdh_get_cqp_request(cqp_request); + status = zxdh_process_cqp_cmd(dev, info); + if (status) + goto err; + + if (cqp_request->waiting) { + put_cqp_request = false; + status = zxdh_wait_event(rf, cqp_request); + if (status) + goto err; + } + + return 0; + +err: + if (zxdh_cqp_crit_err(dev, info->cqp_cmd, + cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code)) + dev_err(idev_to_dev(dev), + "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n", + zxdh_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, + status, cqp_request->waiting, + cqp_request->compl_info.error, + cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code); + + if (put_cqp_request) + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_qp_add_ref(struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + + refcount_inc(&iwqp->refcnt); +} + +void zxdh_qp_rem_ref(struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + if (!refcount_dec_and_test(&iwqp->refcnt)) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + return; + } + + iwdev->rf->qp_table[iwqp->sc_qp.qp_ctx_num - iwqp->sc_qp.dev->base_qpn] = + NULL; + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + complete(&iwqp->free_qp); +} + +void zxdh_cq_add_ref(struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + + refcount_inc(&iwcq->refcnt); +} + +void zxdh_cq_rem_ref(struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_pci_f *rf = + container_of(iwcq->sc_cq.dev, struct zxdh_pci_f, sc_dev); + unsigned long flags; + + spin_lock_irqsave(&rf->cqtable_lock, flags); + if (!refcount_dec_and_test(&iwcq->refcnt)) { + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + return; + } + + rf->cq_table[iwcq->cq_num - rf->sc_dev.base_cqn] = NULL; + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + complete(&iwcq->free_cq); +} + +struct ib_device *zxdh_get_ibdev(struct zxdh_sc_dev *dev) +{ + return &(container_of(dev, struct zxdh_pci_f, sc_dev))->iwdev->ibdev; +} + +/** + * zxdh_remove_cqp_head - return head entry and 
remove + * @dev: device + */ +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev) +{ + struct list_head *entry; + struct list_head *list = &dev->cqp_cmd_head; + + if (list_empty(list)) + return NULL; + + entry = list->next; + list_del(entry); + + return entry; +} + +/** + * zxdh_terminate_del_timer - delete terminate timeout + * @qp: hardware control qp + */ +void zxdh_terminate_del_timer(struct zxdh_sc_qp *qp) +{ + struct zxdh_qp *iwqp; + int ret; + + iwqp = qp->qp_uk.back_qp; + ret = del_timer(&iwqp->terminate_timer); + if (ret) + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_cq_wq_destroy - send cq destroy cqp + * @rf: RDMA PCI function + * @cq: hardware control cq + */ +void zxdh_cq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_CQ_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_destroy.cq = cq; + cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); +} + +/** + * zxdh_hw_modify_qp - setup cqp for modify qp + * @iwdev: RDMA device + * @iwqp: qp ptr (user or kernel) + * @info: info for modify qp + * @wait: flag to wait or not for modify qp completion + */ +int zxdh_hw_modify_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_modify_qp_info *info, bool wait) +{ + int status; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_modify_qp_info *m_info; + + wait = true; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, wait); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + m_info = &cqp_info->in.u.qp_modify.info; + memcpy(m_info, info, sizeof(*m_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_cqp_qp_destroy_cmd - destroy the cqp + * @dev: device pointer + * @qp: pointer to qp + */ +int zxdh_cqp_qp_destroy_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_qp *qp) +{ + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_cqp *iwcqp = &rf->cqp; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(iwcqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + memset(cqp_info, 0, sizeof(*cqp_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_destroy.qp = qp; + cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +static void zxdh_set_rx_ram_reg(struct zxdh_sc_dev *dev, u32 ram_num, + u32 ram_width, u32 ram_addr, u32 ram_read_cnt) +{ + writel(ram_num, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + writel(ram_width, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_WIDTH)); + writel(ram_addr, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + writel(ram_read_cnt, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_READ_LENGTH)); + writel(0, (u32 __iomem *)(dev->hw->hw_addr + 
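+/*
+ * RAM maintenance access pattern used by the statistics helpers in this
+ * file (as inferred from the register sequence here): program RAM_NUM /
+ * RAM_WIDTH / RAM_ADDR / RAM_READ_LENGTH, clear READ_FLAG to start the
+ * read, poll until the flag reads 1, check the error/redundancy/valid
+ * flags, and then fetch the words from the MAINTENANCE_RAM data registers.
+ */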
RDMARX_RAM_READ_FLAG)); +} + +static int zxdh_read_rx_ram_flag(struct zxdh_sc_dev *dev) +{ + u32 val; + + udelay(1000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_READ_FLAG)); + if (val != 1) { + udelay(2000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_READ_FLAG)); + if (val != 1) + return -EIO; + } + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_READ_ERROR_FLAG)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_READ_CNT_ERROR)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_REDUN_FLAG)); + val |= readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_DOUBLE_VLD_FLAG)); + if (val != 0) + return -EIO; + + return val; +} + +static u32 zxdh_read_rx_ram_data(struct zxdh_sc_dev *dev, u32 offset_idx) +{ + u32 val; + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_MAINTENANCE_RAM(offset_idx))); + return val; +} + +static void zxdh_set_tx_ram_reg(struct zxdh_sc_dev *dev, u32 ram_num, + u32 ram_width, u32 ram_addr, u32 ram_read_cnt) +{ + writel(ram_num, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + writel(ram_width, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_WIDTH)); + writel(ram_addr, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + writel(ram_read_cnt, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_LENGTH)); + writel(0, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_FLAG)); +} +static int zxdh_read_tx_ram_flag(struct zxdh_sc_dev *dev) +{ + u32 val; + + udelay(1000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_FLAG)); + if (val != 1) { + udelay(2000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_RAM_READ_FLAG)); + if (val != 1) + return -EIO; + } + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_READ_ERROR_FLAG)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_READ_CNT_ERROR)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_REDUN_FLAG)); + val |= readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_DOUBLE_VLD_FLAG)); + if (val != 0) + return -EIO; + return val; +} + +static u32 zxdh_read_tx_ram_data(struct zxdh_sc_dev *dev, u32 offset_idx) +{ + u32 val; + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_RAM_MAINTENANCE_RAM(offset_idx))); + return val; +} + +static u32 zxdh_get_vhca_ram(u32 vhca_id) +{ + u32 ram_num; + + if (vhca_id < 255) + ram_num = ZXDH_RAM_H12; + else if (vhca_id < 511) + ram_num = ZXDH_RAM_H13; + else if (vhca_id < 767) + ram_num = ZXDH_RAM_H14; + else + ram_num = ZXDH_RAM_H15; + return ram_num; +} + +static u32 zxdh_get_vhca_ram_addr(u32 vhca_id) +{ + u32 ram_addr = 0; + + if (vhca_id < 255) + ram_addr = vhca_id; + else if (vhca_id < 511) + ram_addr = (vhca_id - 256); + else if (vhca_id < 767) + ram_addr = (vhca_id - 512); + else + ram_addr = (vhca_id - 768); + return ram_addr; +} +static int zxdh_get_ram_msg_h11(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + u32 rtt_cfg; + + rtt_cfg = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RTT_CFG)); + /* if rtt enabled, rp_cnp_handled not count */ + if (rtt_cfg != 0) + return 0; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H11, ZXDH_RAM_WIDTH_64_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem 
*)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H11) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_RP_CNP_HANDLED] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RP_CNP_HANDLED] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} +static void zxdh_get_ram_for_rx_stats(struct zxdh_sc_dev *dev, u64 *p_pkB_cnt, + u64 *p_pk_cnt) +{ + u64 temp_val = 0; + u64 stat_val1, stat_val2; + + // ipv6 unicast + stat_val1 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + stat_val1 = (stat_val1 << IRMDA_BIT_WIDTH_16); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + stat_val1 |= + ((temp_val & ZXDH_32_BIT_MASK_16_31) >> IRMDA_BIT_WIDTH_16); + // ipv4 unicast + stat_val2 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + stat_val2 = ((stat_val2 & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + stat_val2 |= temp_val; + *p_pk_cnt = stat_val1 + stat_val2; + + // ipv6 + stat_val1 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_12); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_13); + stat_val1 |= ((temp_val & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + // ipv4 + stat_val2 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_9); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_10); + stat_val2 |= ((temp_val & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + stat_val2 |= temp_val; + *p_pkB_cnt = stat_val1 + stat_val2; +} +static int zxdh_get_rx_stat(struct zxdh_sc_dev *dev, u64 *p_pkB_cnt, + u64 *p_pk_cnt) +{ + u32 check_ram_num, check_ram_addr; + u32 ram_num; + u32 ram_addr; + int ret; + int i; + + ram_num = zxdh_get_vhca_ram(dev->vhca_id); + ram_addr = zxdh_get_vhca_ram_addr(dev->vhca_id); + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_num, ZXDH_RAM_WIDTH_480_BIT, + ram_addr, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ram_num) || + (check_ram_addr != ram_addr)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + zxdh_get_ram_for_rx_stats(dev, p_pkB_cnt, p_pk_cnt); + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h12_to_h15(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 pkB_cnt, pkts_cnt; + u64 val = 0; + u64 temp_val = 0; + u32 check_ram_num, check_ram_addr; + u32 ram_num; + u32 ram_addr; + int ret; + int i; + + ram_num = zxdh_get_vhca_ram(dev->vhca_id); + ram_addr = zxdh_get_vhca_ram_addr(dev->vhca_id); + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_num, ZXDH_RAM_WIDTH_480_BIT, + ram_addr, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ram_num) || + (check_ram_addr != ram_addr)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + zxdh_get_ram_for_rx_stats(dev, &pkB_cnt, &pkts_cnt); + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_RX_BYTES] = pkB_cnt; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_RX_BYTES] = + ZXDH_HW_STATS_VALID; + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_RX_PKTS] = pkts_cnt; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_RX_PKTS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_4); + val |= ((temp_val & ZXDH_32_BIT_MASK_0_15) + << IRMDA_BIT_WIDTH_32); + rdma_stats->rdma_stats_entry[HW_STAT_RX_ICRC_ENCAPSULATED] = + val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_ICRC_ENCAPSULATED] = + ZXDH_HW_STATS_VALID; + + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h25(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H25, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H25) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats->rdma_stats_entry[HW_STAT_RNR_NAK_RETRY_ERR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RNR_NAK_RETRY_ERR] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h26(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H26, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H26) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_PACKET_SEQ_ERR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_PACKET_SEQ_ERR] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats + ->rdma_stats_entry[HW_STAT_REQ_REMOTE_INVALID_REQUEST] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_REQ_REMOTE_INVALID_REQUEST] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_REMOTE_ACCESS_ERRORS] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_REQ_REMOTE_ACCESS_ERRORS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + rdma_stats + ->rdma_stats_entry[HW_STAT_REQ_REMOTE_OPERATION_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_REQ_REMOTE_OPERATION_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h63(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H63, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H63) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_DUPLICATE_REQUEST] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_DUPLICATE_REQUEST] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h29(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H29, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H29) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_LOCAL_LENGTH_ERROR] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_REQ_LOCAL_LENGTH_ERROR] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} +static int zxdh_get_ram_msg_h61(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H61, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H61) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_RX_WRITE_REQUESTS] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_WRITE_REQUESTS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h62(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H62, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H62) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_RX_READ_REQUESTS] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_READ_REQUESTS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h64(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H64, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H64) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_ROCE_SLOW_RESTART_CNPS] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_ROCE_SLOW_RESTART_CNPS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h104(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H104, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H104) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_OUT_OF_SEQUENCE] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_OUT_OF_SEQUENCE] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_RNR_NAK] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RESP_RNR_NAK] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats + ->rdma_stats_entry[HW_STAT_RESP_REMOTE_INVALID_REQUEST] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_INVALID_REQUEST] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_REMOTE_ACCESS_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_ACCESS_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h105(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H105, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H105) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats + ->rdma_stats_entry[HW_STAT_RESP_REMOTE_OPERATION_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_OPERATION_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h106(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H106, ZXDH_RAM_WIDTH_64_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H106) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats + ->rdma_stats_entry[HW_STAT_NP_ECN_MARKED_ROCE_PACKETS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_NP_ECN_MARKED_ROCE_PACKETS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_NP_CNP_SENT] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_NP_CNP_SENT] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h19D(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u64 temp_val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H19D, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H19D) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + val = (val & ZXDH_32_BIT_MASK_0_15); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + temp_val = ((temp_val & ZXDH_32_BIT_MASK_16_31) >> + IRMDA_BIT_WIDTH_16); + if (val >= temp_val) + val = val - temp_val; + else if (val < temp_val) + val = val + (ZXDH_CQE_ERR_MAX - temp_val); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_CQE_ERROR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_REQ_CQE_ERROR] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + val = (val & ZXDH_32_BIT_MASK_0_15); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + temp_val = ((temp_val & ZXDH_32_BIT_MASK_16_31) >> + IRMDA_BIT_WIDTH_16); + if (val >= temp_val) + val = val - temp_val; + else if (val < temp_val) + val = val + (ZXDH_CQE_ERR_MAX - temp_val); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_CQE_ERROR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RESP_CQE_ERROR] = + ZXDH_HW_STATS_VALID; + + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} +static u16 zxdh_get_vport(struct zxdh_pci_f *rf) +{ + u16 vport; + u16 ftype; + + ftype = rf->ftype; + vport = (rf->vf_id & ZXDH_32_BIT_MASK_0_7) | + ((rf->pf_id & ZXDH_32_BIT_MASK_0_3) << IRMDA_BIT_WIDTH_8) | + (ftype << IRMDA_BIT_WIDTH_11) | + ((rf->ep_id & ZXDH_32_BIT_MASK_0_3) << IRMDA_BIT_WIDTH_12); + return vport; +} + +static void zxdh_get_np_tx_stats(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + struct zxdh_pci_f *rf = NULL; + u64 tx_pkts = 0; + u64 tx_bytes = 0; + u16 vport = 0; + dpp_pf_info_t pf_info = { 0 }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + pf_info.slot = rf->pcidev->bus->number; + pf_info.vport = zxdh_get_vport(rf); + + dpp_stat_port_RDMA_packet_msg_tx_cnt_get(&pf_info, dev->vhca_id, + ZXDH_STAT_RD_MODE_UNCLR, + &tx_bytes, &tx_pkts); + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_TX_PKTS] = tx_pkts; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_TX_PKTS] = + ZXDH_HW_STATS_VALID; + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_TX_BYTES] = 
tx_bytes; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_TX_BYTES] = + ZXDH_HW_STATS_VALID; + + pr_info("%s dev->vhca_id:%d vport:0x%x tx_pkts:%llu tx_bytes:%llu\n", + __func__, dev->vhca_id, vport, tx_pkts, tx_bytes); +} + +static void zxdh_rdma_stats_ram_num_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + zxdh_get_ram_msg_h11(dev, rdma_stats); + zxdh_get_ram_msg_h12_to_h15(dev, rdma_stats); + zxdh_get_ram_msg_h25(dev, rdma_stats); + zxdh_get_ram_msg_h26(dev, rdma_stats); + zxdh_get_ram_msg_h63(dev, rdma_stats); + zxdh_get_ram_msg_h29(dev, rdma_stats); + zxdh_get_ram_msg_h61(dev, rdma_stats); + zxdh_get_ram_msg_h62(dev, rdma_stats); + zxdh_get_ram_msg_h64(dev, rdma_stats); + zxdh_get_ram_msg_h104(dev, rdma_stats); + zxdh_get_ram_msg_h105(dev, rdma_stats); + zxdh_get_ram_msg_h106(dev, rdma_stats); + zxdh_get_ram_msg_h19D(dev, rdma_stats); + zxdh_get_np_tx_stats(dev, rdma_stats); +} +static void zxdh_rdma_stats_ram_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u32 ram_use_cnt; + + writel((dev->vhca_id + 1), + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_VHCA_ID)); + zxdh_rdma_stats_ram_num_read(dev, rdma_stats); + ram_use_cnt = + readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_CNT)); + ram_use_cnt++; + writel(ram_use_cnt, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_CNT)); + writel(ZXDH_CFG_RAM_FREE, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_VHCA_ID)); + writel(ZXDH_CFG_RAM_FREE, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_FLAG)); +} + +static int zxdh_rdma_stats_ram_read_cfg(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u32 ram_use_flag; + u32 ram_use_vhca_id; + u32 ram_use_start_cnt; + u32 ram_use_end_cnt; + int cnt = 10; + int i; + u32 delay_us_time; + + ram_use_flag = + readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_FLAG)); + ram_use_start_cnt = + readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_CNT)); + if (ram_use_flag == ZXDH_CFG_RAM_FREE) { + writel(ZXDH_CFG_RAM_BUSY, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_FLAG)); + ram_use_vhca_id = + readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_VHCA_ID)); + if (ram_use_vhca_id == ZXDH_CFG_RAM_FREE) { + delay_us_time = 100 + (dev->vhca_id * 100); + udelay(delay_us_time); + ram_use_vhca_id = + readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_VHCA_ID)); + if (ram_use_vhca_id == ZXDH_CFG_RAM_FREE) { + zxdh_rdma_stats_ram_read(dev, rdma_stats); + return 0; + } + } + } + + for (i = 0; i < cnt; i++) { + ram_use_flag = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_FLAG)); + ram_use_start_cnt = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_CNT)); + if (ram_use_flag == ZXDH_CFG_RAM_FREE) { + writel(ZXDH_CFG_RAM_BUSY, + (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_FLAG)); + ram_use_vhca_id = + readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_VHCA_ID)); + if (ram_use_vhca_id == ZXDH_CFG_RAM_FREE) { + delay_us_time = 100 + (dev->vhca_id * 100); + udelay(delay_us_time); + ram_use_vhca_id = readl(( + u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_USE_VHCA_ID)); + if (ram_use_vhca_id == ZXDH_CFG_RAM_FREE) { + zxdh_rdma_stats_ram_read(dev, + rdma_stats); + return 0; + } + pr_err("%s: ram data read failed!\n", __func__); + return -EBUSY; + } + pr_err("%s: ram data read failed!\n", __func__); + return -EBUSY; + } else { + delay_us_time = 100 + (dev->vhca_id * 100); + udelay(delay_us_time); + } + } + + ram_use_end_cnt = + readl((u32 __iomem 
*)(dev->hw->hw_addr + RDMARX_RAM_USE_CNT)); + if (ram_use_start_cnt == ram_use_end_cnt) { + writel(ZXDH_CFG_RAM_BUSY, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_USE_FLAG)); + zxdh_rdma_stats_ram_read(dev, rdma_stats); + return 0; + } + pr_err("%s: ram data read failed!\n", __func__); + return -EBUSY; +} + +void zxdh_rdma_stats_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + zxdh_rdma_stats_ram_read_cfg(dev, rdma_stats); +} + +static void zxdh_get_pma_cnt_ext(struct zxdh_sc_dev *dev, + struct ib_pma_portcounters_ext *pma_cnt_ext) +{ + u64 val; + u64 rx_pkts = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 tx_bytes = 0; + + struct zxdh_pci_f *rf = NULL; + dpp_pf_info_t pf_info = { 0 }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + pf_info.vport = zxdh_get_vport(rf); + pf_info.slot = rf->pcidev->bus->number; + + dpp_stat_port_RDMA_packet_msg_tx_cnt_get(&pf_info, dev->vhca_id, + ZXDH_STAT_RD_MODE_UNCLR, + &tx_bytes, &tx_pkts); + zxdh_get_rx_stat(dev, &rx_bytes, &rx_pkts); + + val = tx_bytes; + /* Total number of data octets, divided by 4 (lanes), transmitted on all VLs. This is 64 bit counter. */ + val = (val / 4); + val = cpu_to_be64(val); + pma_cnt_ext->port_xmit_data = val; + + val = rx_bytes; + /* Total number of data octets, divided by 4 (lanes), received on all VLs. This is 64 bit counter. */ + val = (val / 4); + val = cpu_to_be64(val); + pma_cnt_ext->port_rcv_data = val; + + val = tx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_xmit_packets = val; + + val = rx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_rcv_packets = val; + + val = tx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_unicast_xmit_packets = val; + + val = rx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_unicast_rcv_packets = val; + pma_cnt_ext->port_multicast_xmit_packets = 0; + pma_cnt_ext->port_multicast_rcv_packets = 0; +} + +static void zxdh_get_pma_cnt(struct zxdh_sc_dev *dev, + struct ib_pma_portcounters *pma_cnt) +{ + pma_cnt->symbol_error_counter = 0; + pma_cnt->link_error_recovery_counter = 0; + pma_cnt->link_downed_counter = 0; + pma_cnt->port_rcv_errors = 0; + pma_cnt->port_rcv_remphys_errors = 0; + pma_cnt->port_rcv_switch_relay_errors = 0; + pma_cnt->port_xmit_discards = 0; + pma_cnt->port_xmit_constraint_errors = 0; + pma_cnt->port_xmit_wait = 0; + pma_cnt->port_rcv_constraint_errors = 0; + pma_cnt->link_overrun_errors = 0; + pma_cnt->vl15_dropped = 0; +} +/** + * zxdh_process_pma_cmd - process pma cmd + * @dev: pointer to device structure + * @port: the port number this packet came in on + * @in_mad: the incoming MAD + * @out_mad: any outgoing MAD reply + */ +int zxdh_process_pma_cmd(struct zxdh_sc_dev *dev, u8 port, + const struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + // *out_mad = *in_mad; + pr_debug( + "%s %d vhca_id:%d attr_id:0x%x counters_ext:0x%x counter:0x%x\n", + __func__, __LINE__, dev->vhca_id, in_mad->mad_hdr.attr_id, + IB_PMA_PORT_COUNTERS_EXT, IB_PMA_PORT_COUNTERS); + /* Declaring support of extended counters */ + if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { + struct ib_class_port_info cpi = {}; + + cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; + memcpy((out_mad->data + 40), &cpi, sizeof(cpi)); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + } + + if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) { + struct ib_pma_portcounters_ext *pma_cnt_ext = + (struct ib_pma_portcounters_ext *)(out_mad->data + 40); + zxdh_get_pma_cnt_ext(dev, pma_cnt_ext); + + } else if (in_mad->mad_hdr.attr_id == 
IB_PMA_PORT_COUNTERS) { + struct ib_pma_portcounters *pma_cnt = + (struct ib_pma_portcounters *)(out_mad->data + 40); + zxdh_get_pma_cnt(dev, pma_cnt); + } + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +/** + * zxdh_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0 + * @dev: pointer to device info + * @sc_ceq: pointer to ceq structure + * @op: Create or Destroy + */ +int zxdh_cqp_ceq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *sc_ceq, u8 op) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = op; + cqp_info->in.u.ceq_create.ceq = sc_ceq; + cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_cqp_aeq_cmd - Create/Destroy AEQ + * @dev: pointer to device info + * @sc_aeq: pointer to aeq structure + * @op: Create or Destroy + */ +int zxdh_cqp_aeq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_aeq *sc_aeq, u8 op) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = op; + cqp_info->in.u.aeq_create.aeq = sc_aeq; + cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_cqp_up_map_cmd - Set the up-up mapping + * @dev: pointer to device structure + * @cmd: map command + * @map_info: pointer to up map info + */ +int zxdh_cqp_up_map_cmd(struct zxdh_sc_dev *dev, u8 cmd, + struct zxdh_up_info *map_info) +{ + return 0; +} + +/** + * zxdh_ah_cqp_op - perform an AH cqp operation + * @rf: RDMA PCI function + * @sc_ah: address handle + * @cmd: AH operation + * @wait: wait if true + * @callback_fcn: Callback function on CQP op completion + * @cb_param: parameter for callback function + * + * returns errno + */ +int zxdh_ah_cqp_op(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, u8 cmd, + bool wait, void (*callback_fcn)(struct zxdh_cqp_request *), + void *cb_param) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + wait = true; + + if (cmd != ZXDH_OP_AH_CREATE && cmd != ZXDH_OP_AH_DESTROY) + return -EINVAL; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, wait); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = cmd; + cqp_info->post_sq = 1; + if (cmd == ZXDH_OP_AH_CREATE) { + cqp_info->in.u.ah_create.info = sc_ah->ah_info; + cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; + } else if (cmd == ZXDH_OP_AH_DESTROY) { + cqp_info->in.u.ah_destroy.info = sc_ah->ah_info; + cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; + } + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + if (status) + return -ENOMEM; + + sc_ah->ah_info.ah_valid = (cmd == ZXDH_OP_AH_CREATE); + + return 0; +} + +/** + * zxdh_gsi_ud_qp_ah_cb - callback after creation of AH 
for GSI/ID QP + * @cqp_request: pointer to cqp_request of create AH + */ +void zxdh_gsi_ud_qp_ah_cb(struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_sc_ah *sc_ah = cqp_request->param; + + if (!cqp_request->compl_info.op_ret_val) + sc_ah->ah_info.ah_valid = true; + else + sc_ah->ah_info.ah_valid = false; +} + +/** + * zxdh_prm_add_pble_mem - add moemory to pble resources + * @pprm: pble resource manager + * @pchunk: chunk of memory to add + */ +int zxdh_prm_add_pble_mem(struct zxdh_pble_prm *pprm, struct zxdh_chunk *pchunk) +{ + u64 sizeofbitmap; + + if (pchunk->size & 0xfff) + return -EINVAL; + + sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; + + pchunk->bitmapmem.size = sizeofbitmap >> 3; + pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL); + + if (!pchunk->bitmapmem.va) + return -ENOMEM; + + pchunk->bitmapbuf = pchunk->bitmapmem.va; + bitmap_zero(pchunk->bitmapbuf, sizeofbitmap); + + pchunk->sizeofbitmap = sizeofbitmap; + /* each pble is 8 bytes hence shift by 3 */ + pprm->total_pble_alloc += pchunk->size >> 3; + pprm->free_pble_cnt += pchunk->size >> 3; + + return 0; +} + +/** + * zxdh_prm_get_pbles - get pble's from prm + * @pprm: pble resource manager + * @chunkinfo: nformation about chunk where pble's were acquired + * @mem_size: size of pble memory needed + * @vaddr: returns virtual address of pble memory + * @fpm_addr: returns fpm address of pble memory + * @paaddr: returns pa address of pble memory + */ +int zxdh_prm_get_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr, dma_addr_t *paaddr) +{ + u64 bits_needed; + u64 bit_idx = PBLE_INVALID_IDX; + struct zxdh_chunk *pchunk = NULL; + struct list_head *chunk_entry = pprm->clist.next; + u32 offset; + unsigned long flags; + *vaddr = NULL; + *fpm_addr = 0; + *paaddr = 0; + + bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift)); + + spin_lock_irqsave(&pprm->prm_lock, flags); + while (chunk_entry != &pprm->clist) { + pchunk = (struct zxdh_chunk *)chunk_entry; + bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf, + pchunk->sizeofbitmap, 0, + bits_needed, 0); + if (bit_idx < pchunk->sizeofbitmap) + break; + + /* list.next used macro */ + chunk_entry = pchunk->list.next; + } + + if (!pchunk || bit_idx >= pchunk->sizeofbitmap) { + spin_unlock_irqrestore(&pprm->prm_lock, flags); + return -ENOMEM; + } + + bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed); + offset = bit_idx << pprm->pble_shift; + *vaddr = pchunk->vaddr + offset; + *fpm_addr = pchunk->fpm_addr + offset; + *paaddr = pchunk->pa + offset; + chunkinfo->pchunk = pchunk; + chunkinfo->bit_idx = bit_idx; + chunkinfo->bits_used = bits_needed; + /* 3 is sizeof pble divide */ + pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3); + spin_unlock_irqrestore(&pprm->prm_lock, flags); + + return 0; +} + +/** + * zxdh_prm_return_pbles - return pbles back to prm + * @pprm: pble resource manager + * @chunkinfo: chunk where pble's were acquired and to be freed + */ +void zxdh_prm_return_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&pprm->prm_lock, flags); + pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3); + bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx, + chunkinfo->bits_used); + spin_unlock_irqrestore(&pprm->prm_lock, flags); +} + +int zxdh_map_vm_page_list(struct zxdh_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt) +{ + struct page *vm_page; + 
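+ /*
+  * Usage sketch (illustrative only; buf, dma, npages and
+  * use_dma_addresses() are placeholder names, not part of this driver):
+  * the helper walks a vmalloc()'ed buffer one PAGE_SIZE step at a time,
+  * resolves each virtual page with vmalloc_to_page() and DMA-maps it via
+  * dma_map_page(); on any failure the pages mapped so far are released
+  * with zxdh_unmap_vm_page_list(), so callers get an all-or-nothing
+  * result.
+  *
+  *	void *buf = vmalloc(npages * PAGE_SIZE);
+  *	dma_addr_t *dma = kcalloc(npages, sizeof(*dma), GFP_KERNEL);
+  *
+  *	if (buf && dma && !zxdh_map_vm_page_list(hw, buf, dma, npages))
+  *		use_dma_addresses(dma, npages);  // per-page bus addresses
+  */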
int i; + u8 *addr; + + addr = (u8 *)(uintptr_t)va; + for (i = 0; i < pg_cnt; i++) { + vm_page = vmalloc_to_page(addr); + if (!vm_page) + goto err; + + pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hw->device, pg_dma[i])) + goto err; + + addr += PAGE_SIZE; + } + + return 0; + +err: + zxdh_unmap_vm_page_list(hw, pg_dma, i); + return -ENOMEM; +} + +void zxdh_unmap_vm_page_list(struct zxdh_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt) +{ + int i; + + for (i = 0; i < pg_cnt; i++) + dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, + DMA_BIDIRECTIONAL); +} + +/** + * zxdh_pble_free_paged_mem - free virtual paged memory + * @chunk: chunk to free with paged memory + */ +void zxdh_pble_free_paged_mem(struct zxdh_chunk *chunk) +{ + if (!chunk->pg_cnt) + goto done; + + zxdh_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs, + chunk->pg_cnt); + +done: + kfree(chunk->dmainfo.dmaaddrs); + chunk->dmainfo.dmaaddrs = NULL; + vfree(chunk->vaddr); + chunk->vaddr = NULL; + chunk->type = 0; +} + +/** + * zxdh_modify_qp_to_err - Modify a QP to error + * @sc_qp: qp structure + */ +void zxdh_modify_qp_to_err(struct zxdh_sc_qp *sc_qp) +{ + struct zxdh_qp *qp = sc_qp->qp_uk.back_qp; + struct ib_qp_attr attr; + + if (qp->iwdev->rf->reset) + return; + attr.qp_state = IB_QPS_ERR; + + zxdh_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); +} + +void zxdh_ib_qp_event(struct zxdh_qp *iwqp, enum zxdh_qp_event_type event) +{ + struct ib_event ibevent; + + if (!iwqp->ibqp.event_handler) + return; + + switch (event) { + case ZXDH_QP_EVENT_CATASTROPHIC: + ibevent.event = IB_EVENT_QP_FATAL; + break; + case ZXDH_QP_EVENT_ACCESS_ERR: + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + break; + case ZXDH_QP_EVENT_REQ_ERR: + ibevent.event = IB_EVENT_QP_REQ_ERR; + break; + } + ibevent.device = iwqp->ibqp.device; + ibevent.element.qp = &iwqp->ibqp; + iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); +} + +/** + * zxdh_upload_qp_context - upload raw QP context + * @iwqp: QP pointer + * @freeze: freeze QP + * @raw: raw context flag + */ +int zxdh_upload_qp_context(struct zxdh_qp *iwqp, bool freeze, bool raw) +{ + return 0; +} + +int zxdh_cqp_rdma_read_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = src_dir; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + if (dev->cache_id == 0) { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dest_dir; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + } else { + 
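+ 		/*
+ 		 * Interpretation (inferred from the surrounding code, not from
+ 		 * a hardware spec): when dev->cache_id is non-zero the
+ 		 * destination of the DMA read is routed through the cache
+ 		 * interface and the cache_id value itself selects the path;
+ 		 * the branch above keeps the caller-supplied dest_dir on the
+ 		 * non-cached interface.
+ 		 */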
cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + } + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_damreadbycqe_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dam_read_bycqe *dmadata, + struct zxdh_path_index *src_path_index, u64 *arr) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status, i = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ_USE_CQE; + cqp_info->in.u.dma_read_cqe.cqp = dev->cqp; + cqp_info->in.u.dma_read_cqe.dma_rcqe.num = dmadata->num; + cqp_info->in.u.dma_read_cqe.dma_rcqe.bitwidth = dmadata->bitwidth; + cqp_info->in.u.dma_read_cqe.dma_rcqe.valuetype = dmadata->valuetype; + for (i = 0; i < dmadata->num; i++) { + cqp_info->in.u.dma_read_cqe.dma_rcqe.addrbuf[i] = + dmadata->addrbuf[i]; + } + + cqp_info->in.u.dma_read_cqe.src_path_index.vhca_id = + src_path_index->vhca_id; + cqp_info->in.u.dma_read_cqe.src_path_index.obj_id = + src_path_index->obj_id; + cqp_info->in.u.dma_read_cqe.src_path_index.path_select = + src_path_index->path_select; + cqp_info->in.u.dma_read_cqe.src_path_index.inter_select = + src_path_index->inter_select; + + cqp_info->in.u.dma_read_cqe.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + + for (i = 0; i < 5; i++) + arr[i] = cqp_request->compl_info.addrbuf[i]; + + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_write32_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dma_write32_date *dma_data) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status, i = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE32; + cqp_info->in.u.dma_write32data.cqp = dev->cqp; + cqp_info->in.u.dma_write32data.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_write32data.dma_data.num = dma_data->num; + cqp_info->in.u.dma_write32data.dma_data.inter_sour_sel = + dma_data->inter_sour_sel; + cqp_info->in.u.dma_write32data.dma_data.need_inter = + dma_data->need_inter; + for (i = 0; i < dma_data->num; i++) { + cqp_info->in.u.dma_write32data.dma_data.addrbuf[i] = + dma_data->addrbuf[i]; + cqp_info->in.u.dma_write32data.dma_data.databuf[i] = + dma_data->databuf[i]; + } + + cqp_info->in.u.dma_write32data.dest_path_index.obj_id = ZXDH_REG_OBJ_ID; + cqp_info->in.u.dma_write32data.dest_path_index.path_select = + ZXDH_INDICATE_REGISTER; + cqp_info->in.u.dma_write32data.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + cqp_info->in.u.dma_write32data.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_dpuddr_to_host_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if 
(!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_DPU_DDR; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_write_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = src_dir; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + if (dev->cache_id == 0) { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dest_dir; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + } else { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + } + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_readreg_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = 
src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_REG_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_REGISTER; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_read_mrte_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_MR_OBJ_ID; + cqp_info->in.u.dma_writeread.src_path_index.path_select = dev->cache_id; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_read_tx_window_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_TX_WINDOW_OBJ_ID; + cqp_info->in.u.dma_writeread.src_path_index.path_select = dev->cache_id; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + 
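+ 	/*
+ 	 * The path-index fields set around here are later folded into a
+ 	 * single u64 by zxdh_get_path_index() (defined further below):
+ 	 * inter_select occupies bits 0-7, path_select bits 8-11, waypartion
+ 	 * bits 12-15, obj_id bits 16-23 and vhca_id bit 24 upward, i.e.
+ 	 * vhca_id << 24 | obj_id << 16 | waypartion << 12 |
+ 	 * path_select << 8 | inter_select.
+ 	 */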
cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_config_pble_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_pble_info *pbleinfo, u32 len, + bool pbletype) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + u64 baseaddr = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_PBLE_TAB; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = pbleinfo->pa; + cqp_info->in.u.dma_writeread.src_dest.len = len; + cqp_info->in.u.dma_writeread.src_dest.dest = pbleinfo->smmu_fpm_addr; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_DMA_OBJ_ID; // 0 | 1 + cqp_info->in.u.dma_writeread.src_path_index.waypartion = 0; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; // 到host不经过SMMU + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = + (pbletype == true) ? ZXDH_PBLE_MR_OBJ_ID : + ZXDH_PBLE_QUEUE_OBJ_ID; // 0 | 1 + cqp_info->in.u.dma_writeread.dest_path_index.waypartion = 0; + + if (dev->cache_id == 0) { + if (dev->hmc_use_dpu_ddr == true) { + cqp_info->in.u.dma_writeread.dest_path_index + .path_select = ZXDH_INDICATE_DPU_DDR; // + } else { + cqp_info->in.u.dma_writeread.dest_path_index + .path_select = ZXDH_INDICATE_HOST_SMMU; + } + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 经过cache + cqp_info->in.u.dma_writeread.src_dest.dest = + pbleinfo->smmu_fpm_addr; + } else { + if (pbletype == true) { + baseaddr = + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base; + } else { + baseaddr = + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].base; + } + cqp_info->in.u.dma_writeread.src_dest.dest = + pbleinfo->smmu_fpm_addr - baseaddr; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; // + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; // 经过cache + } + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +bool zxdh_cq_empty(struct zxdh_cq *iwcq) +{ + struct zxdh_cq_uk *ukcq; + u64 qword0; + __le64 *cqe; + u8 polarity; + + ukcq = &iwcq->sc_cq.cq_uk; + cqe = ZXDH_GET_CURRENT_CQ_ELEM(ukcq); + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + + return polarity != ukcq->polarity; +} + +void zxdh_remove_cmpls_list(struct zxdh_cq *iwcq) +{ + struct zxdh_cmpl_gen *cmpl_node; + struct list_head *tmp_node, *list_node; + + list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) { + cmpl_node = list_entry(list_node, struct zxdh_cmpl_gen, list); + list_del(&cmpl_node->list); + kfree(cmpl_node); + } +} + +int zxdh_generated_cmpls(struct zxdh_cq *iwcq, + struct zxdh_cq_poll_info *cq_poll_info) +{ + struct zxdh_cmpl_gen *cmpl; + + if (!iwcq || 
list_empty(&iwcq->cmpl_generated)) + return -ENOENT; + cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, + struct zxdh_cmpl_gen, list); + list_del(&cmpl->list); + memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); + kfree(cmpl); + + return 0; +} + +/** + * zxdh_set_cpi_common_values - fill in values for polling info struct + * @cpi: resulting structure of cq_poll_info type + * @qp: QPair + * @qp_num: id of the QP + */ +static void zxdh_set_cpi_common_values(struct zxdh_cq_poll_info *cpi, + struct zxdh_qp_uk *qp, u32 qp_num) +{ + cpi->comp_status = ZXDH_COMPL_STATUS_FLUSHED; + cpi->error = 1; + cpi->major_err = ZXDH_FLUSH_MAJOR_ERR; + cpi->minor_err = FLUSH_GENERAL_ERR; + cpi->qp_handle = (zxdh_qp_handle)(uintptr_t)qp; + cpi->qp_id = qp_num; +} + +static inline void zxdh_comp_handler(struct zxdh_cq *cq) +{ + if (cq->sc_cq.cq_uk.armed && cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +/** + * zxdh_generate_flush_completions - generate completion from WRs + * @iwqp: pointer to QP + */ +void zxdh_generate_flush_completions(struct zxdh_qp *iwqp) +{ + struct zxdh_qp_uk *qp = &iwqp->sc_qp.qp_uk; + struct zxdh_ring *sq_ring = &qp->sq_ring; + struct zxdh_ring *rq_ring = &qp->rq_ring; + struct zxdh_cmpl_gen *cmpl; + __le64 *sw_wqe; + u64 wqe_qword; + u32 wqe_idx; + u8 compl_generated = 0; + unsigned long flags; + +#define SQ_COMPL_GENERATED (0x01) +#define RQ_COMPL_GENERATED (0x02) + + spin_lock_irqsave(&iwqp->iwscq->lock, flags); + if (zxdh_cq_empty(iwqp->iwscq)) { + while (ZXDH_RING_MORE_WORK(*sq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->iwscq->lock, + flags); + return; + } + + wqe_idx = sq_ring->tail; + zxdh_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + cmpl->cpi.op_type = + (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); + /* remove the SQ WR by moving SQ tail*/ + ZXDH_RING_SET_TAIL( + *sq_ring, + sq_ring->tail + + qp->sq_wrtrk_array[sq_ring->tail] + .quanta); + + list_add_tail(&cmpl->list, + &iwqp->iwscq->cmpl_generated); + compl_generated |= SQ_COMPL_GENERATED; + } + } else { + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS / 2); + } + spin_unlock_irqrestore(&iwqp->iwscq->lock, flags); + + spin_lock_irqsave(&iwqp->iwrcq->lock, flags); + if (zxdh_cq_empty(iwqp->iwrcq)) { + while (ZXDH_RING_MORE_WORK(*rq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->iwrcq->lock, + flags); + return; + } + + wqe_idx = rq_ring->tail; + zxdh_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; + cmpl->cpi.op_type = ZXDH_OP_TYPE_REC; + /* remove the RQ WR by moving RQ tail */ + ZXDH_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); + list_add_tail(&cmpl->list, + &iwqp->iwrcq->cmpl_generated); + + compl_generated |= RQ_COMPL_GENERATED; + } + } else { + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS / 2); + } + spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags); + + if (iwqp->iwscq == iwqp->iwrcq) { + if (compl_generated) + zxdh_comp_handler(iwqp->iwscq); + return; + } + if (compl_generated & SQ_COMPL_GENERATED) + zxdh_comp_handler(iwqp->iwscq); + if (compl_generated & RQ_COMPL_GENERATED) + zxdh_comp_handler(iwqp->iwrcq); + if (compl_generated) + pr_info("VERBS: 0x%X (SQ 0x1, RQ 0x2, both 0x3) completions 
generated for QP %d\n", + compl_generated, iwqp->ibqp.qp_num); +} + +u64 zxdh_get_path_index(struct zxdh_path_index *path_index) +{ + u64 path_index_result = 0, tmp = 0; + + tmp = path_index->inter_select; + path_index_result |= tmp; + + tmp = path_index->path_select; + tmp <<= 8; + path_index_result |= tmp; + + tmp = path_index->waypartion; + tmp <<= 12; + path_index_result |= tmp; + + tmp = path_index->obj_id; + tmp <<= 16; + path_index_result |= tmp; + + tmp = path_index->vhca_id; + tmp <<= 24; + path_index_result |= tmp; + + return path_index_result; +} + +int zxdh_cqp_config_pte_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_PTE_TAB; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = scr_dest.src; + cqp_info->in.u.dma_writeread.src_dest.len = scr_dest.len; + cqp_info->in.u.dma_writeread.src_dest.dest = scr_dest.dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_DMA_OBJ_ID; // DMA 搬移 使用宏定义 + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; // 到host不经过SMMU + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = + ZXDH_L2D_OBJ_ID; // L2D + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_L2D; // L2D + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_sc_send_mailbox_cmd(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, + u64 msg3, u64 msg4, u16 dst_vf_id) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + pr_info("cqp_head=%d,cqp_tail=%d,cqp_size=%d\n", dev->cqp->sq_ring.head, + dev->cqp->sq_ring.tail, dev->cqp->sq_ring.size); + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_MAILBOX; + cqp_info->in.u.hmc_mb.cqp = dev->cqp; + cqp_info->in.u.hmc_mb.dst_vf_id = dst_vf_id; + cqp_info->in.u.hmc_mb.mbhead_data.msg0 = opt; + cqp_info->in.u.hmc_mb.mbhead_data.msg1 = dev->vhca_id; + cqp_info->in.u.hmc_mb.mbhead_data.msg2 = msg2; + cqp_info->in.u.hmc_mb.mbhead_data.msg3 = msg3; + cqp_info->in.u.hmc_mb.mbhead_data.msg4 = msg4; + cqp_info->in.u.hmc_mb.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_sc_query_mkey_cmd(struct zxdh_sc_dev *dev, u32 mekyindex) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = 
&cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_MKEY; + cqp_info->in.u.query_mkey.cqp = dev->cqp; + cqp_info->in.u.query_mkey.mkeyindex = mekyindex; + cqp_info->in.u.query_mkey.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +static const char *const _zxdh_qp_state_to_string[ZXDH_QPS_RSV] = { + [ZXDH_QPS_RESET] = "RESET", [ZXDH_QPS_INIT] = "INIT", + [ZXDH_QPS_RTR] = "RTR", [ZXDH_QPS_RTS] = "RTS", + [ZXDH_QPS_SQE] = "SQE", [ZXDH_QPS_SQD] = "SQD", + [ZXDH_QPS_ERR] = "ERROR", +}; + +const char *zxdh_qp_state_to_string(enum ib_qp_state state) +{ + return _zxdh_qp_state_to_string[state]; +} + +void get_pci_board_bdf(char *pci_board_bdf, struct zxdh_pci_f *rf) +{ + int domain; + int bus; + int device; + domain = pci_domain_nr(rf->pcidev->bus); + bus = rf->pcidev->bus->number; + device = PCI_SLOT(rf->pcidev->devfn); + sprintf(pci_board_bdf, "%04d:%02x:%02x", domain, bus, device); + pr_info("get_pci_board_bdf succ:%s\n", pci_board_bdf); +} diff --git a/src/rdma/src/verbs.c b/src/rdma/src/verbs.c new file mode 100644 index 0000000000000000000000000000000000000000..451776adba46d872ee878b9ddc75d8ed34f08440 --- /dev/null +++ b/src/rdma/src/verbs.c @@ -0,0 +1,3746 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#include "main.h" +#include "icrdma_hw.h" +#include "srq.h" +#include "restrack.h" +#include "private_verbs_cmd.h" +/** + * zxdh_query_device - get device attributes + * @ibdev: device pointer from stack + * @props: returning device attributes + * @udata: user data + */ +static int zxdh_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_pci_f *rf = iwdev->rf; + struct pci_dev *pcidev = iwdev->rf->pcidev; + struct zxdh_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; + + struct ethtool_drvinfo info; + int major, sub_major, minor, sub_minor; + __u32 val; + __u16 unit_period; + + memset(&info, 0, sizeof(info)); + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + sscanf(info.fw_version, "%d.%d.%d.%d", &major, &sub_major, &minor, + &sub_minor); + + if (udata->inlen || udata->outlen) + return -EINVAL; + + memset(props, 0, sizeof(*props)); + ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + props->fw_ver = ((u64)major << 48 | (u64)sub_major << 32 | + (u64)minor << 16 | sub_minor); + props->device_cap_flags = iwdev->device_cap_flags; + props->vendor_id = pcidev->vendor; + props->vendor_part_id = pcidev->device; + props->hw_ver = pcidev->revision; + props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; + props->max_mr_size = hw_attrs->max_mr_size; + props->max_qp = rf->max_qp - rf->used_qps; + props->max_qp_wr = hw_attrs->max_qp_wr; + set_max_sge(props, rf); + props->max_cq = rf->max_cq - rf->used_cqs; + props->max_cqe = rf->max_cqe - 1; + props->max_mr = rf->max_mr - rf->used_mrs; + props->max_mw = props->max_mr; + props->max_pd = rf->max_pd - rf->used_pds; + props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; + props->max_qp_rd_atom = hw_attrs->max_hw_ird; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; + props->max_srq = rf->max_srq - rf->used_srqs; + props->max_srq_wr = hw_attrs->max_srq_wr; + props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags; + props->local_ca_ack_delay = 16; + 
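/* hca_core_clock is presumably reported in kHz (i.e. a 1 GHz core
+ * clock); the CQ moderation caps below are derived from the unit
+ * period programmed in RDMARX_CQ_PERIOD_CFG.
+ */
+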
props->hca_core_clock = 1000 * 1000UL; + props->max_wq_type_rq = props->max_qp; + if (rdma_protocol_roce(ibdev, 1)) { + props->max_pkeys = ZXDH_PKEY_TBL_SZ; + props->max_ah = rf->max_ah; + if (hw_attrs->uk_attrs.hw_rev == ZXDH_GEN_2) { + props->max_mcast_grp = 0; + props->max_mcast_qp_attach = 0; + props->max_total_mcast_qp_attach = 0; + } + } + props->max_fast_reg_page_list_len = ZXDH_MAX_PAGES_PER_FMR; + val = readl(rf->sc_dev.hw->hw_addr + RDMARX_CQ_PERIOD_CFG); + unit_period = (__u16)(val & 0xffff); + props->cq_caps.max_cq_moderation_count = ZXDH_MAX_CQ_COUNT; + props->cq_caps.max_cq_moderation_period = + NS_TO_US(unit_period * ZXDH_MAX_CQ_PERIOD); +#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff + if (hw_attrs->uk_attrs.hw_rev >= ZXDH_GEN_2) + props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK; + + return 0; +} + +static int zxdh_mmap_legacy(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma) +{ + u64 pfn; + + if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + vma->vm_private_data = ucontext; + pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev + .hw_regs[ZXDH_DB_ADDR_OFFSET] + + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> + PAGE_SHIFT; + +#ifdef RDMA_MMAP_DB_SUPPORT + return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot), NULL); +#else + return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); +#endif +} + +void *zxdh_zalloc_mapped(struct zxdh_device *dev, dma_addr_t *dma_addr, + size_t size, enum dma_data_direction dir) +{ + void *addr; + addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (!addr) + return NULL; + *dma_addr = dma_map_single(&dev->rf->pcidev->dev, addr, size, dir); + if (dma_mapping_error(&dev->rf->pcidev->dev, *dma_addr)) { + pr_err("failed to map DMA address\n"); + free_pages_exact(addr, size); + return NULL; + } + return addr; +} + +void zxdh_free_mapped(struct zxdh_device *dev, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + enum dma_data_direction dir) +{ + dma_unmap_single(&dev->rf->pcidev->dev, dma_addr, size, dir); + free_pages_exact(cpu_addr, size); +} + +static int zxdh_mmap_for_cap(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma, + struct zxdh_user_mmap_entry *entry) +{ + u64 pfn; + u64 start = vma->vm_start; // 虚拟内存区域的起始地址 + u64 size = vma->vm_end - vma->vm_start; // 虚拟内存区域的大小 + + pfn = entry->bar_offset >> ZXDH_HW_PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) { + pr_info("zxdh_mmap_for_cap error!\n"); + return -EAGAIN; + } + + pr_info("zxdh_mmap_for_cap remap_pfn_range end.start:%llx,size:%llx\n", + start, size); + return 0; +} + +#ifdef RDMA_MMAP_DB_SUPPORT +static void zxdh_mmap_free(struct rdma_user_mmap_entry *rdma_entry) +{ + struct zxdh_user_mmap_entry *entry = to_zxdh_mmap_entry(rdma_entry); + + kfree(entry); +} + +struct rdma_user_mmap_entry * +zxdh_user_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + + entry->bar_offset = bar_offset; + entry->mmap_flag = mmap_flag; + + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, PAGE_SIZE); + if (ret) { + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +struct rdma_user_mmap_entry * 
+zxdh_mp_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 phy_addr, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + + entry->bar_offset = phy_addr; + entry->mmap_flag = mmap_flag; + printk(KERN_INFO "zxdh_mp_mmap_entry_insert entry->address:%lld\n", + entry->bar_offset); + + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, length); + if (ret) { + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +struct rdma_user_mmap_entry * +zxdh_cap_mmap_entry_insert(struct zxdh_ucontext *ucontext, void *address, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + + entry->bar_offset = virt_to_phys(address); + entry->mmap_flag = mmap_flag; + printk(KERN_INFO "zxdh mmap entry insert entry->address:%lld\n", + entry->bar_offset); + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, length); + if (ret) { + printk(KERN_ERR "zxdh rdma user mmap entry insert failed\n"); + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +#else /* RDMA_MMAP_DB_SUPPORT */ +static inline bool find_key_in_mmap_tbl(struct zxdh_ucontext *ucontext, u64 key) +{ + struct zxdh_user_mmap_entry *entry; + + hash_for_each_possible(ucontext->mmap_hash_tbl, entry, hlist, key) { + if (entry->pgoff_key == key) + return true; + } + + return false; +} + +struct zxdh_user_mmap_entry * +zxdh_user_mmap_entry_add_hash(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + unsigned long flags; + int retry_cnt = 0; + + if (!entry) + return NULL; + + entry->bar_offset = bar_offset; + entry->mmap_flag = mmap_flag; + entry->ucontext = ucontext; + do { + get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key)); + + /* The key is a page offset */ + entry->pgoff_key >>= PAGE_SHIFT; + + /*In the event of a collision in the hash table, retry a new key */ + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) { + hash_add(ucontext->mmap_hash_tbl, &entry->hlist, + entry->pgoff_key); + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + goto hash_add_done; + } + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + } while (retry_cnt++ < 10); + + kfree(entry); + return NULL; + +hash_add_done: + /*libc mmap uses a byte offset */ + *mmap_offset = entry->pgoff_key << PAGE_SHIFT; + + return entry; +} + +static struct zxdh_user_mmap_entry * +zxdh_find_user_mmap_entry(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma) +{ + struct zxdh_user_mmap_entry *entry; + unsigned long flags; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return NULL; + + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + hash_for_each_possible(ucontext->mmap_hash_tbl, entry, hlist, + vma->vm_pgoff) { + if (entry->pgoff_key == vma->vm_pgoff) { + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + return entry; + } + } + + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + + return NULL; +} + +void zxdh_user_mmap_entry_del_hash(struct zxdh_user_mmap_entry *entry) 
+{ + struct zxdh_ucontext *ucontext = entry->ucontext; + unsigned long flags; + + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + hash_del(&entry->hlist); + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + + kfree(entry); +} + +#endif /* RDMA_MMAP_DB_SUPPORT */ +/** + * zxdh_mmap - user memory map + * @context: context created during alloc + * @vma: kernel info for user memory map + */ +static int zxdh_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *rdma_entry; +#endif + struct zxdh_user_mmap_entry *entry; + struct zxdh_ucontext *ucontext; + u64 pfn; + int ret; + + ucontext = to_ucontext(context); + + /* Legacy support for libi40iw with hard-coded mmap key */ + if (ucontext->legacy_mode) + return zxdh_mmap_legacy(ucontext, vma); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); + if (!rdma_entry) { + pr_err("VERBS: pgoff[0x%lx] does not have valid entry\n", + vma->vm_pgoff); + return -EINVAL; + } + + entry = to_zxdh_mmap_entry(rdma_entry); +#else + entry = zxdh_find_user_mmap_entry(ucontext, vma); + if (!entry) { + pr_err("VERBS: pgoff[0x%lx] does not have valid entry\n", + vma->vm_pgoff); + return -EINVAL; + } +#endif + + pfn = (entry->bar_offset + + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> + PAGE_SHIFT; + + switch (entry->mmap_flag) { + case ZXDH_MMAP_PFN: + ret = zxdh_mmap_for_cap(ucontext, vma, entry); + break; + case ZXDH_MMAP_IO_NC: +#ifdef RDMA_MMAP_DB_SUPPORT + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot), + rdma_entry); +#else + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); +#endif + break; + case ZXDH_MMAP_IO_WC: +#ifdef RDMA_MMAP_DB_SUPPORT + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_writecombine(vma->vm_page_prot), + rdma_entry); +#else + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_writecombine(vma->vm_page_prot)); +#endif + break; + default: + ret = -EINVAL; + } + + if (ret) + pr_err("VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", + entry->bar_offset, entry->mmap_flag, ret); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_put(rdma_entry); +#endif + + return ret; +} + +/** + * zxdh_get_pbl - Retrieve pbl from a list given a virtual + * address + * @va: user virtual address + * @pbl_list: pbl list to search in (QP's or CQ's) + */ +struct zxdh_pbl *zxdh_get_pbl(unsigned long va, struct list_head *pbl_list) +{ + struct zxdh_pbl *iwpbl; + + list_for_each_entry(iwpbl, pbl_list, list) { + if (iwpbl->user_base == va) { + list_del(&iwpbl->list); + iwpbl->on_list = false; + return iwpbl; + } + } + + return NULL; +} + +/** + * zxdh_clean_cqes - clean cq entries for qp + * @iwqp: qp ptr (user or kernel) + * @iwcq: cq ptr + */ +void zxdh_clean_cqes(struct zxdh_qp *iwqp, struct zxdh_cq *iwcq) +{ + struct zxdh_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; + unsigned long flags; + + spin_lock_irqsave(&iwcq->lock, flags); + zxdh_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); + spin_unlock_irqrestore(&iwcq->lock, flags); +} + +/** + * zxdh_setup_virt_qp - setup for allocation of virtual qp + * @iwdev: zrdma device + * @iwqp: qp ptr + * @init_info: initialize info to return + */ +void zxdh_setup_virt_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *init_info) +{ + struct zxdh_pbl *iwpbl = iwqp->iwpbl; + struct zxdh_qp_mr *qpmr = &iwpbl->qp_mr; + + iwqp->page = qpmr->sq_page; + 
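/* With a PBLE allocation the SQ/RQ are referenced by PBL index and
+ * virtual_map is set so hardware walks the PBL; otherwise the
+ * physically contiguous addresses recorded at registration are used.
+ * RQ fields are skipped when the QP is backed by an SRQ.
+ */
+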
init_info->shadow_area_pa = qpmr->shadow; + if (iwpbl->pbl_allocated) { + init_info->virtual_map = true; + init_info->sq_pa = qpmr->sq_pbl.idx; + if (iwqp->is_srq == false) + init_info->rq_pa = qpmr->rq_pbl.idx; + } else { + init_info->sq_pa = qpmr->sq_pbl.addr; + if (iwqp->is_srq == false) + init_info->rq_pa = qpmr->rq_pbl.addr; + } +} + +/** + * zxdh_setup_kmode_qp - setup initialization for kernel mode qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: initialize info to return + * @init_attr: Initial QP create attributes + */ +int zxdh_setup_kmode_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *info, + struct ib_qp_init_attr *init_attr) +{ + struct zxdh_dma_mem *mem = &iwqp->kqp.dma_mem; + u32 sqdepth, rqdepth; + u8 sqshift, rqshift; + u32 size; + int status; + struct zxdh_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; + struct zxdh_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + + zxdh_get_sq_wqe_shift(uk_attrs, ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, &sqshift); + status = zxdh_get_sqdepth(uk_attrs->max_hw_wq_quanta, ukinfo->sq_size, + sqshift, &sqdepth); + if (status) + return status; + if (iwqp->is_srq == false) { + zxdh_get_rq_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, + &rqshift); + + status = zxdh_get_rqdepth(uk_attrs->max_hw_rq_quanta, + ukinfo->rq_size, rqshift, &rqdepth); + } + if (status) + return status; + + ukinfo->sq_size = sqdepth >> sqshift; + iwqp->kqp.sq_wrid_mem = + kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); + if (!iwqp->kqp.sq_wrid_mem) + return -ENOMEM; + if (iwqp->is_srq == false) { + ukinfo->rq_size = rqdepth >> rqshift; + iwqp->kqp.rq_wrid_mem = kcalloc(ukinfo->rq_size, + sizeof(*iwqp->kqp.rq_wrid_mem), + GFP_KERNEL); + if (!iwqp->kqp.rq_wrid_mem) { + kfree(iwqp->kqp.sq_wrid_mem); + iwqp->kqp.sq_wrid_mem = NULL; + return -ENOMEM; + } + ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; + } + + ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; + if (iwqp->is_srq == false) + size = sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE + + rqdepth * ZXDH_QP_RQ_WQE_MIN_SIZE; + else + size = sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE; + size += (ZXDH_SHADOW_AREA_SIZE << 3); + + mem->size = ALIGN(size, 4096); + mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, &mem->pa, + GFP_KERNEL); + if (!mem->va) { + kfree(iwqp->kqp.sq_wrid_mem); + iwqp->kqp.sq_wrid_mem = NULL; + kfree(iwqp->kqp.rq_wrid_mem); + iwqp->kqp.rq_wrid_mem = NULL; + return -ENOMEM; + } + + ukinfo->sq = mem->va; + info->sq_pa = mem->pa; + if (iwqp->is_srq == false) { + ukinfo->rq = (struct zxdh_qp_rq_quanta *)&ukinfo->sq[sqdepth]; + info->rq_pa = info->sq_pa + (sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE); + ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; + info->shadow_area_pa = + info->rq_pa + (rqdepth * ZXDH_QP_RQ_WQE_MIN_SIZE); + } else { + ukinfo->shadow_area = (__le64 *)&ukinfo->sq[sqdepth]; + info->shadow_area_pa = + info->sq_pa + (sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE); + } + set_64bit_val(ukinfo->shadow_area, 0, 0x8000); + ukinfo->qp_id = iwqp->ibqp.qp_num; + + init_attr->cap.max_send_wr = (sqdepth - ZXDH_SQ_RSVD) >> sqshift; + if (iwqp->is_srq == false) + init_attr->cap.max_recv_wr = (rqdepth - ZXDH_RQ_RSVD) >> + rqshift; + + return 0; +} + +int zxdh_cqp_create_qp_cmd(struct zxdh_qp *iwqp) +{ + struct zxdh_pci_f *rf = iwqp->iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + 
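/* Usual CQP command sequence in this driver: take a waiting request,
+ * fill in cqp_info, post it through zxdh_handle_cqp_op() and drop the
+ * request reference once the operation completes.
+ */
+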
cqp_info = &cqp_request->info; + + cqp_info->cqp_cmd = ZXDH_OP_QP_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_roce_fill_and_set_qpctx_info(struct zxdh_qp *iwqp, + struct zxdh_qp_host_ctx_info *ctx_info) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp_info; + + udp_info = &iwqp->udp_info; + // udp_info->pmtu = ib_mtu_int_to_enum(iwdev->vsi.mtu); + udp_info->pmtu = ib_mtu_int_to_enum( + iwdev->netdev->mtu); //TODO:use netdev active pmtu to enum + udp_info->cwnd = iwdev->roce_cwnd; + udp_info->rexmit_thresh = 2; + udp_info->rnr_nak_thresh = 2; + udp_info->src_port = 0xc000; + udp_info->dst_port = ROCE_V2_UDP_DPORT; + if (iwqp->sc_qp.qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) + udp_info->timeout = 0x1f; + else + udp_info->timeout = 0x0; + roce_info = &iwqp->roce_info; + ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); + + roce_info->rd_en = false; + roce_info->wr_rdresp_en = false; + roce_info->bind_en = true; + roce_info->dcqcn_en = true; //dcqcn/ecn is set to default on + roce_info->ecn_en = false; + roce_info->rtomin = 5; + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + roce_info->dcqcn_en = iwdev->roce_dcqcn_en; + roce_info->timely_en = iwdev->roce_timely_en; + roce_info->dctcp_en = iwdev->roce_dctcp_en; + roce_info->rtomin = iwdev->roce_rtomin; + roce_info->rcv_no_icrc = iwdev->roce_no_icrc_en; +#endif + roce_info->ack_credits = iwdev->roce_ackcreds; + roce_info->ird_size = dev->hw_attrs.max_hw_ird; + roce_info->ord_size = dev->hw_attrs.max_hw_ord; + + if (!iwqp->user_mode) { + roce_info->priv_mode_en = true; + roce_info->fast_reg_en = true; + roce_info->udprivcq_en = true; + } + roce_info->roce_tver = 0; + + ctx_info->roce_info = &iwqp->roce_info; + ctx_info->udp_info = &iwqp->udp_info; + zxdh_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); +} + +int zxdh_validate_qp_attrs(struct ib_qp_init_attr *init_attr, + struct zxdh_device *iwdev) +{ + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + + if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || + init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || + init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) + return -EINVAL; + + if (rdma_protocol_roce(&iwdev->ibdev, 1)) { + if (init_attr->qp_type != IB_QPT_RC && + init_attr->qp_type != IB_QPT_UD && + init_attr->qp_type != IB_QPT_GSI) + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +void zxdh_flush_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct zxdh_qp *iwqp = container_of(dwork, struct zxdh_qp, dwork_flush); + unsigned long flags; + + spin_lock_irqsave( + &iwqp->lock, + flags); /* Don't allow more posting while generating completions */ + zxdh_generate_flush_completions(iwqp); + spin_unlock_irqrestore(&iwqp->lock, flags); +} + +static int zxdh_get_ib_acc_flags(struct zxdh_qp *iwqp) +{ + int acc_flags = 0; + + if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { + if (iwqp->roce_info.wr_rdresp_en) { + acc_flags |= IB_ACCESS_LOCAL_WRITE; + acc_flags |= IB_ACCESS_REMOTE_WRITE; + } + if (iwqp->roce_info.rd_en) + acc_flags |= IB_ACCESS_REMOTE_READ; + if (iwqp->roce_info.bind_en) + 
acc_flags |= IB_ACCESS_MW_BIND; + } else { + if (iwqp->iwarp_info.wr_rdresp_en) { + acc_flags |= IB_ACCESS_LOCAL_WRITE; + acc_flags |= IB_ACCESS_REMOTE_WRITE; + } + if (iwqp->iwarp_info.rd_en) + acc_flags |= IB_ACCESS_REMOTE_READ; + if (iwqp->iwarp_info.bind_en) + acc_flags |= IB_ACCESS_MW_BIND; + } + return acc_flags; +} + +/** + * zxdh_query_qp - query qp attributes + * @ibqp: qp pointer + * @attr: attributes pointer + * @attr_mask: Not used + * @init_attr: qp attributes to return + */ +static int zxdh_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_qp_init_attr *init_attr) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + + memset(attr, 0, sizeof(*attr)); + memset(init_attr, 0, sizeof(*init_attr)); + qpc_buf.va = NULL; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(&iwqp->sc_qp, &qpc_buf); + if (err_code) { + pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_rsrc; + } + attr->path_mig_state = IB_MIG_MIGRATED; + attr->qp_state = iwqp->ibqp_state; + attr->cur_qp_state = iwqp->ibqp_state; + attr->cap.max_send_wr = iwqp->max_send_wr; + attr->cap.max_recv_wr = iwqp->max_recv_wr; + attr->cap.max_inline_data = qp->qp_uk.max_inline_data; + attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; + attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; + attr->qp_access_flags = zxdh_get_ib_acc_flags(iwqp); + attr->port_num = 1; + if (rdma_protocol_roce(ibqp->device, 1)) { + attr->path_mtu = iwqp->udp_info.pmtu; + attr->qkey = iwqp->roce_info.qkey; + attr->rq_psn = ZXDH_GET_QPC_ITEM(u32, qpc_buf.va, + ZXDH_QPC_SEND_EPSN_BYTE_OFFSET, + RDMAQPC_RX_EPSN); + attr->sq_psn = ZXDH_GET_QPC_ITEM(u32, qpc_buf.va, + ZXDH_QPC_SEND_PSN_BYTE_OFFSET, + RDMAQPC_TX_PSN_NEXT); + attr->dest_qp_num = iwqp->roce_info.dest_qp; + attr->pkey_index = iwqp->roce_info.p_key; + attr->retry_cnt = iwqp->udp_info.rexmit_thresh; + attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; + attr->max_rd_atomic = iwqp->roce_info.ord_size; + attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; + } + + init_attr->event_handler = iwqp->ibqp.event_handler; + init_attr->qp_context = iwqp->ibqp.qp_context; + init_attr->send_cq = iwqp->ibqp.send_cq; + init_attr->recv_cq = iwqp->ibqp.recv_cq; + init_attr->cap = attr->cap; +free_rsrc: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +static u16 zxdh_get_udp_sport(const struct rdma_ah_attr *ah, u32 src_qp_num, + u32 dest_qp_num) +{ + u32 flow_label = ah->grh.flow_label; + + if (!flow_label) + flow_label = rdma_calc_flow_label(src_qp_num, dest_qp_num); + return rdma_flow_label_to_udp_sport(flow_label); +} + +/** + * zxdh_modify_qp_roce - modify qp request + * @ibqp: qp's pointer for modify + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int zxdh_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibqp->pd); + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_qp_host_ctx_info *ctx_info; + struct 
zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp_info; + struct zxdh_modify_qp_info info = {}; + struct zxdh_modify_qp_resp uresp = {}; + struct zxdh_modify_qp_req ureq = {}; + char s_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + char d_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + int buf_size = 0; + char *log_buf = NULL; + enum ib_qp_state tmp_state; + unsigned long flags; + u8 issue_modify_qp = 0; + int ret = 0; + u64 qpc_tx_mask_low = 0; + u64 qpc_tx_mask_high = 0; + u64 qpc_rx_mask_low = 0; + u64 qpc_rx_mask_high = 0; + u16 netdev_pmtu; + + ctx_info = &iwqp->ctx_info; + roce_info = &iwqp->roce_info; + udp_info = &iwqp->udp_info; + tmp_state = iwqp->ibqp_state; + if (attr_mask & IB_QP_RATE_LIMIT) { + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_FLR) { + writel(1, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_QUEUE_VHCA_FLAG)); + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_IOQ) { + iwqp->sc_qp.is_nvmeof_ioq = + (attr->rate_limit & ZXDH_QP_NVMEOF_IOQ_MASK) ? + 1 : + 0; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_IOQ; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_IOQ; + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_TGT) { + iwqp->sc_qp.is_nvmeof_tgt = + (attr->rate_limit & ZXDH_QP_NVMEOF_TGT_MASK) ? + 1 : + 0; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_TGT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_TGT; + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_QID) { + iwqp->sc_qp.nvmeof_qid = attr->rate_limit & + ZXDH_QP_NVMEOF_QID_MASK; + writel(dev->vhca_id, + (u32 __iomem *)(dev->hw->hw_addr + + NOF_IOQ_VHCA_ID( + iwqp->sc_qp.nvmeof_qid))); + writel(iwpd->sc_pd.pd_id, + (u32 __iomem *)(dev->hw->hw_addr + + NOF_IOQ_PD_ID( + iwqp->sc_qp.nvmeof_qid))); + iwqp->sc_qp.virtual_map = 0; + iwqp->sc_qp.sq_pa = dev->nof_ioq_ddr_addr + + NOF_IOQ_SQ_WQE_SIZE * + NOF_IOQ_SQ_SIZE * + iwqp->sc_qp.nvmeof_qid; + iwqp->sc_qp.hw_sq_size = NOF_IOQ_SQ_LOG_SIZE; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_ACK_CREDITS; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_QID; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_VMAP; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_LPBL_SIZE; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_PA; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOG_SQ_SIZE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_ACK_CREDITS; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_QID; + + ret = zxdh_clear_nof_ioq( + dev, NOF_IOQ_SQ_WQE_SIZE * NOF_IOQ_SQ_SIZE, + iwqp->sc_qp.sq_pa); + if (dev->nof_clear_dpu_mem.va) { + dma_free_coherent(dev->hw->device, + dev->nof_clear_dpu_mem.size, + dev->nof_clear_dpu_mem.va, + dev->nof_clear_dpu_mem.pa); + dev->nof_clear_dpu_mem.va = NULL; + } + if (ret) + return ret; + } + } + + if (refcount_read(&iwdev->trace_switch.t_switch)) { + log_buf = vzalloc(ZXDH_LOG_BUF_SIZE); + if (log_buf == NULL) + ibdev_notice(&iwdev->ibdev, "alloc log buf failed\n"); + } + + if (attr_mask & IB_QP_DEST_QPN) { + roce_info->dest_qp = attr->dest_qp_num; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_QPN; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_DEST_QPN; + if (log_buf) { + char qpn_buf[64] = { 0 }; + + sprintf(qpn_buf, ", dest_qpn:%d", roce_info->dest_qp); + strncat(log_buf, qpn_buf, sizeof(log_buf) - 1); + buf_size += strlen(qpn_buf); + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + ret = zxdh_query_pkey(ibqp->device, 0, attr->pkey_index, + &roce_info->p_key); + if (ret) + return ret; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PKEY; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PKEY; + } + + if (attr_mask & IB_QP_QKEY) { + roce_info->qkey = attr->qkey; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_QKEY; + qpc_rx_mask_low |= 
RDMAQPC_RX_MASKL_QKEY; + } + + if (attr_mask & IB_QP_PATH_MTU) { + udp_info->pmtu = attr->path_mtu; + iwqp->sc_qp.qp_uk.pmtu = attr->path_mtu; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PMTU; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PMTU; + netdev_pmtu = ib_mtu_int_to_enum(iwdev->netdev->mtu); + + if (attr->path_mtu > netdev_pmtu) { + pr_info("WARNING: attr->path_mtu(%d) larger than netdev_pmtu(%d)\n", + attr->path_mtu, netdev_pmtu); + } + } + + if (attr_mask & IB_QP_SQ_PSN) { + udp_info->psn_nxt = attr->sq_psn; + udp_info->psn_una = attr->sq_psn; + udp_info->psn_max = attr->sq_psn - 1; + iwqp->sc_qp.aeq_entry_err_last_psn = attr->sq_psn - 1; + iwqp->sc_qp.aeq_retry_err_last_psn = attr->sq_psn - 1; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_PSN; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LSN; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PSN_MAX; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PSN_NXT; + } + + if (attr_mask & IB_QP_RQ_PSN) { + udp_info->epsn = attr->rq_psn; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_EPSN; + } + + if (attr_mask & IB_QP_RNR_RETRY) { + udp_info->rnr_nak_thresh = attr->rnr_retry; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RNR_RETRY_CNT; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RNR_CUR_RETRY_CNT; + } + + if (attr_mask & IB_QP_RETRY_CNT) { + if (attr->retry_cnt == 7) + attr->retry_cnt = 6; + + udp_info->rexmit_thresh = attr->retry_cnt; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RETRY_CNT; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_CUR_RETRY_CNT; + } + + if (attr_mask & IB_QP_TIMEOUT) { + udp_info->timeout = attr->timeout; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_ACK_TIMEOUT; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_ACK_TIMEOUT; + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + udp_info->min_rnr_timer = attr->min_rnr_timer; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_RNR_TIMER; + } + + ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PD_ID; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PD_ID; + + if (attr_mask & IB_QP_AV) { + struct zxdh_av *av = &iwqp->roce_ah.av; + u16 vlan_id = VLAN_N_VID; + u32 local_ip[4] = {}; + + memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah)); + if (attr->ah_attr.ah_flags & IB_AH_GRH) { + udp_info->ttl = attr->ah_attr.grh.hop_limit; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_TTL; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_TTL; + udp_info->flow_label = attr->ah_attr.grh.flow_label; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_FLOWLABLE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_FLOWLABLE; + udp_info->src_port = zxdh_get_udp_sport( + &attr->ah_attr, iwqp->sc_qp.qp_uk.qp_id, + attr->dest_qp_num); + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SRC_PORT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_SRC_PORT; + udp_info->tos = attr->ah_attr.grh.traffic_class; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_TOS; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_TOS; + zxdh_qp_rem_qos(&iwqp->sc_qp); + // ctx_info->user_pri = rt_tos2priority(udp_info->tos); //TODO: figure out why they do this + ctx_info->user_pri = (udp_info->tos >> 2) / 8; + iwqp->sc_qp.user_pri = ctx_info->user_pri; + iwqp->sc_qp.qp_uk.user_pri = ctx_info->user_pri; + zxdh_qp_add_qos(&iwqp->sc_qp); + + if (log_buf && udp_info->src_port) { + char port_buf[32] = { 0 }; + + sprintf(port_buf, ", src_port:%d", + udp_info->src_port); + strncat(log_buf, port_buf, + sizeof(log_buf) - buf_size - 1); + buf_size += strlen(port_buf); + } + } + ret = kc_zxdh_set_roce_cm_info(iwqp, attr, &vlan_id); + if (ret) + return ret; + + if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) + vlan_id = 0; + if (vlan_id < 
VLAN_N_VID) { + udp_info->insert_vlan_tag = true; + udp_info->vlan_tag = + vlan_id | ctx_info->user_pri << VLAN_PRIO_SHIFT; + } else { + udp_info->insert_vlan_tag = false; + } + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_INSERT_VLANTAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_VLANTAG; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_INSERT_VLANTAG; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_VLANTAG; + + av->attrs = attr->ah_attr; + rdma_gid2ip((struct sockaddr *)&av->dgid_addr, + &attr->ah_attr.grh.dgid); + if (av->sgid_addr.saddr.sa_family == AF_INET6) { + __be32 *daddr = av->dgid_addr.saddr_in6.sin6_addr.in6_u + .u6_addr32; + __be32 *saddr = av->sgid_addr.saddr_in6.sin6_addr.in6_u + .u6_addr32; + + zxdh_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr); + zxdh_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr); + + udp_info->ipv4 = false; + zxdh_copy_ip_ntohl(local_ip, daddr); + sprintf(s_straddr, ", src_ip: %pI6", + &av->sgid_addr.saddr_in6.sin6_addr); + sprintf(d_straddr, ", dest_ip: %pI6", + &av->dgid_addr.saddr_in6.sin6_addr); + } else { + __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; + __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; + + local_ip[0] = ntohl(daddr); + + udp_info->ipv4 = true; + udp_info->dest_ip_addr[0] = 0; + udp_info->dest_ip_addr[1] = 0; + udp_info->dest_ip_addr[2] = 0; + udp_info->dest_ip_addr[3] = local_ip[0]; + + udp_info->local_ipaddr[0] = 0; + udp_info->local_ipaddr[1] = 0; + udp_info->local_ipaddr[2] = 0; + udp_info->local_ipaddr[3] = ntohl(saddr); + + sprintf(s_straddr, ", src_ip: %pI4", + &av->sgid_addr.saddr_in.sin_addr); + sprintf(d_straddr, ", dest_ip: %pI4", + &av->dgid_addr.saddr_in.sin_addr); + } + ether_addr_copy(udp_info->dest_mac, + ah_attr_to_dmac(attr->ah_attr)); + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_IPV4; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_IP_LOW; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_IP_HIGH; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_IP_LOW; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_IP_HIGH; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_MAC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_IPV4; + qpc_rx_mask_high |= RDMAQPC_RX_MASKH_DEST_IP; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_LOCAL_IP; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_DEST_MAC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_HDR_LEN; + + if (log_buf) { + strncat(log_buf, s_straddr, + sizeof(log_buf) - buf_size - 1); + buf_size += strlen(s_straddr); + strncat(log_buf, d_straddr, + sizeof(log_buf) - buf_size - 1); + buf_size += strlen(d_straddr); + } + } + + iwqp->sc_qp.qp_uk.ws_index = + zxdh_get_ws_index(&iwqp->sc_qp, udp_info->dest_ip_addr[3]); + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_GQP_ID; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_QUEUE_TC; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_WS_IDX; + qpc_rx_mask_high |= RDMAQPC_RX_MASKH_QUEUE_TC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_GQP_ID; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WS_IDX; + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) { + ibdev_err(&iwdev->ibdev, + "rd_atomic = %d, above max_hw_ord=%d\n", + attr->max_rd_atomic, + dev->hw_attrs.max_hw_ord); + return -EINVAL; + } + if (attr->max_rd_atomic) { + roce_info->ord_size = attr->max_rd_atomic; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_ORD_SIZE; + } + info.ord_valid = true; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { + ibdev_err(&iwdev->ibdev, + "rd_atomic = %d, above max_hw_ird=%d\n", + attr->max_rd_atomic, + dev->hw_attrs.max_hw_ird); + return -EINVAL; + } + if (attr->max_dest_rd_atomic) { + 
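/* responder resources (IRD): the context is programmed with the
+ * device maximum and the RX mask is updated so the QPC write
+ * carries the new value
+ */
+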
roce_info->ird_size = dev->hw_attrs.max_hw_ird; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_IRD_SIZE; + } + } + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) { + roce_info->wr_rdresp_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WRITE_EN; + } + + if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) { + roce_info->wr_rdresp_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WRITE_EN; + } + if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) { + roce_info->rd_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_READ_EN; + } + } + + wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); + + spin_lock_irqsave(&iwqp->lock, flags); + if (attr_mask & IB_QP_STATE) { + if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, + iwqp->ibqp.qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + ibdev_warn( + &iwdev->ibdev, + "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n", + iwqp->ibqp.qp_num, iwqp->ibqp_state, + attr->qp_state); + ret = -EINVAL; + goto exit; + } + info.curr_iwarp_state = iwqp->iwarp_state; + + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_QP_STATE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_QP_STATE; + + switch (attr->qp_state) { + case IB_QPS_INIT: + if (iwqp->iwarp_state > ZXDH_QPS_INIT) { + ret = -EINVAL; + goto exit; + } + + if (iwqp->iwarp_state == ZXDH_QPS_INIT) { + ctx_info->next_qp_state = ZXDH_QPS_INIT; + issue_modify_qp = 1; + } + + if (iwqp->iwarp_state == ZXDH_QPS_RESET) { + ctx_info->next_qp_state = ZXDH_QPS_INIT; + issue_modify_qp = 1; + } + break; + case IB_QPS_RTR: + if (iwqp->iwarp_state > ZXDH_QPS_INIT) { + ret = -EINVAL; + goto exit; + } + ctx_info->next_qp_state = ZXDH_QPS_RTR; + issue_modify_qp = 1; + break; + case IB_QPS_RTS: + if (iwqp->ibqp_state < IB_QPS_RTR || + iwqp->ibqp_state == IB_QPS_ERR) { + ret = -EINVAL; + goto exit; + } + + ctx_info->next_qp_state = ZXDH_QPS_RTS; + issue_modify_qp = 1; + break; + case IB_QPS_SQD: + if (iwqp->iwarp_state == ZXDH_QPS_SQD) + goto exit; + + if (iwqp->iwarp_state != ZXDH_QPS_RTS) { + ret = -EINVAL; + goto exit; + } + + ctx_info->next_qp_state = ZXDH_QPS_SQD; + issue_modify_qp = 1; + break; + case IB_QPS_SQE: + case IB_QPS_ERR: + case IB_QPS_RESET: + if (iwqp->iwarp_state == ZXDH_QPS_RTS) { + // spin_unlock_irqrestore(&iwqp->lock, flags); + // ctx_info->next_qp_state = ZXDH_QPS_SQD; + // zxdh_hw_modify_qp(iwdev, iwqp, &info, true); + // spin_lock_irqsave(&iwqp->lock, flags); + } + + if (iwqp->iwarp_state == ZXDH_QPS_ERR) { + spin_unlock_irqrestore(&iwqp->lock, flags); + if (udata) { + if (ib_copy_from_udata( + &ureq, udata, + min(sizeof(ureq), + udata->inlen))) + return -EINVAL; + + zxdh_flush_wqes( + iwqp, + (ureq.sq_flush ? ZXDH_FLUSH_SQ : + 0) | + (ureq.rq_flush ? 
+ ZXDH_FLUSH_RQ : + 0) | + ZXDH_REFLUSH); + } + return 0; + } + + ctx_info->next_qp_state = ZXDH_QPS_ERR; + issue_modify_qp = 1; + break; + default: + ret = -EINVAL; + goto exit; + } + + iwqp->ibqp_state = attr->qp_state; + } + + zxdh_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (ctx_info->next_qp_state == ZXDH_QPS_ERR) { + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_tx_mask_high = qpc_tx_mask_high; + info.qpc_rx_mask_low = qpc_rx_mask_low; + info.qpc_rx_mask_high = qpc_rx_mask_high; + } else { + info.qpc_tx_mask_low = 0x1FFFFFF | qpc_tx_mask_low; + info.qpc_tx_mask_high = (0x1UL << 18) | qpc_tx_mask_high; + info.qpc_rx_mask_low = 0xDA3CE8081E7FFCF0 | qpc_rx_mask_low; + info.qpc_rx_mask_high = 0x1E9 | qpc_rx_mask_high; + } + + if (attr_mask & IB_QP_RATE_LIMIT) { + info.qpc_tx_mask_low = 0x1FFFFFF | qpc_tx_mask_low; + info.qpc_tx_mask_high = (0x1UL << 18) | qpc_tx_mask_high; + info.qpc_rx_mask_low = 0xDA3CE8081E7FFCF0 | qpc_rx_mask_low; + info.qpc_rx_mask_high = 0x1E9 | qpc_rx_mask_high; + zxdh_hw_modify_qp(iwdev, iwqp, &info, true); + } + + if (attr_mask & IB_QP_STATE) { + if (issue_modify_qp) { + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->iwarp_state == info.curr_iwarp_state) { + iwqp->iwarp_state = ctx_info->next_qp_state; + iwqp->ibqp_state = attr->qp_state; + } + if (iwqp->ibqp_state > IB_QPS_RTS && + !iwqp->flush_issued) { + iwqp->flush_issued = 1; + if (!iwqp->user_mode) + queue_delayed_work( + iwqp->iwdev->cleanup_wq, + &iwqp->dwork_flush, + msecs_to_jiffies( + ZXDH_FLUSH_DELAY_MS)); + spin_unlock_irqrestore(&iwqp->lock, flags); + zxdh_flush_wqes(iwqp, ZXDH_FLUSH_SQ | + ZXDH_FLUSH_RQ | + ZXDH_FLUSH_WAIT); + } else { + spin_unlock_irqrestore(&iwqp->lock, flags); + } + } else { + iwqp->ibqp_state = attr->qp_state; + } + if (udata) { + uresp.rd_fence_rate = iwdev->rd_fence_rate; + ret = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), + udata->outlen)); + if (ret) { + pr_err("VERBS: copy_to_udata failed\n"); + return ret; + } + } + if (log_buf) { + ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP, type %d, ib qpn 0x%X, state: %s => %s%s\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(tmp_state), + zxdh_qp_state_to_string(attr->qp_state), + log_buf); + } + } + + if (log_buf) + vfree(log_buf); + +#ifdef Z_DH_DEBUG + //zxdh_query_qpc(&iwqp->sc_qp); +#endif + + return 0; +exit: + if (log_buf) + vfree(log_buf); + + spin_unlock_irqrestore(&iwqp->lock, flags); + + return ret; +} + +/** + * zxdh_cq_free_rsrc - free up resources for cq + * @rf: RDMA PCI function + * @iwcq: cq ptr + */ +void zxdh_cq_free_rsrc(struct zxdh_pci_f *rf, struct zxdh_cq *iwcq) +{ + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + + if (!iwcq->user_mode) { + dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size, + iwcq->kmem.va, iwcq->kmem.pa); + iwcq->kmem.va = NULL; + dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem_shadow.size, + iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); + iwcq->kmem_shadow.va = NULL; + } + if (cq->dev) { + zxdh_free_rsrc(rf, rf->allocated_cqs, + iwcq->cq_num - cq->dev->base_cqn); + } +} + +/** + * zxdh_free_cqbuf - worker to free a cq buffer + * @work: provides access to the cq buffer to free + */ +static void zxdh_free_cqbuf(struct work_struct *work) +{ + struct zxdh_cq_buf *cq_buf = + container_of(work, struct zxdh_cq_buf, work); + + dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, + 
cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); + cq_buf->kmem_buf.va = NULL; + kfree(cq_buf); +} + +/** + * zxdh_process_resize_list - remove resized cq buffers from the resize_list + * @iwcq: cq which owns the resize_list + * @iwdev: zrdma device + * @lcqe_buf: the buffer where the last cqe is received + */ +int zxdh_process_resize_list(struct zxdh_cq *iwcq, struct zxdh_device *iwdev, + struct zxdh_cq_buf *lcqe_buf) +{ + struct list_head *tmp_node, *list_node; + struct zxdh_cq_buf *cq_buf; + int cnt = 0; + + list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { + cq_buf = list_entry(list_node, struct zxdh_cq_buf, list); + if (cq_buf == lcqe_buf) + return cnt; + + list_del(&cq_buf->list); + queue_work(iwdev->cleanup_wq, &cq_buf->work); + cnt++; + } + + return cnt; +} + +/** + * zxdh_resize_cq - resize cq + * @ibcq: cq to be resized + * @entries: desired cq size + * @udata: user data + */ +static int zxdh_resize_cq(struct ib_cq *ibcq, int entries, + struct ib_udata *udata) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_sc_dev *dev = iwcq->sc_cq.dev; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_modify_cq_info *m_info; + struct zxdh_modify_cq_info info = {}; + struct zxdh_dma_mem kmem_buf; + struct zxdh_cq_mr *cqmr_buf; + struct zxdh_pbl *iwpbl_buf; + struct zxdh_device *iwdev; + struct zxdh_pci_f *rf; + struct zxdh_cq_buf *cq_buf = NULL; + unsigned long flags; + int ret; + + iwdev = to_iwdev(ibcq->device); + rf = iwdev->rf; + + if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE)) + return -EOPNOTSUPP; + + if (entries > rf->max_cqe) + return -EINVAL; + + if (!iwcq->user_mode) { + entries++; + if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) + entries *= 2; + } + + info.cq_size = zxdh_cq_round_up(max(entries, 4)); + + if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) + return 0; + + if (udata) { + struct zxdh_resize_cq_req req = {}; + struct zxdh_ucontext *ucontext = + kc_rdma_udata_to_drv_context(ibcq, udata); + + /* CQ resize not supported with legacy GEN_1 libi40iw */ + if (ucontext->legacy_mode) + return -EOPNOTSUPP; + + if (ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen))) + return -EINVAL; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl_buf = zxdh_get_pbl((unsigned long)req.user_cq_buffer, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + + if (!iwpbl_buf) + return -ENOMEM; + + cqmr_buf = &iwpbl_buf->cq_mr; + if (iwpbl_buf->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; + } else { + info.cq_pa = cqmr_buf->cq_pbl.addr; + } + } else { + /* Kmode CQ resize */ + int rsize; + + rsize = info.cq_size * sizeof(struct zxdh_cqe); + kmem_buf.size = ALIGN(round_up(rsize, 256), 256); + kmem_buf.va = dma_alloc_coherent(dev->hw->device, kmem_buf.size, + &kmem_buf.pa, GFP_KERNEL); + if (!kmem_buf.va) + return -ENOMEM; + + info.cq_base = kmem_buf.va; + info.cq_pa = kmem_buf.pa; + cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); + if (!cq_buf) { + ret = -ENOMEM; + goto error; + } + } + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + ret = -ENOMEM; + goto error; + } + + info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; + info.cq_resize = true; + + cqp_info = &cqp_request->info; + m_info = &cqp_info->in.u.cq_modify.info; + memcpy(m_info, &info, sizeof(*m_info)); + + cqp_info->cqp_cmd = ZXDH_OP_CQ_MODIFY; + 
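/* Post the CQ modify to the CQP; for kernel CQs the old ring buffer
+ * is parked on resize_list and freed from the cleanup workqueue once
+ * its last CQE has been polled (see zxdh_process_resize_list()).
+ */
+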
cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; + cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; + cqp_info->post_sq = 1; + ret = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (ret) + goto error; + + spin_lock_irqsave(&iwcq->lock, flags); + if (cq_buf) { + cq_buf->kmem_buf = iwcq->kmem; + cq_buf->hw = dev->hw; + memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, + sizeof(cq_buf->cq_uk)); + INIT_WORK(&cq_buf->work, zxdh_free_cqbuf); + list_add_tail(&cq_buf->list, &iwcq->resize_list); + iwcq->kmem = kmem_buf; + } + + zxdh_sc_cq_resize(&iwcq->sc_cq, &info); + ibcq->cqe = info.cq_size - 1; + spin_unlock_irqrestore(&iwcq->lock, flags); + + return 0; +error: + if (!udata) { + dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va, + kmem_buf.pa); + kmem_buf.va = NULL; + } + kfree(cq_buf); + + return ret; +} + +static int zxdh_modify_cq(struct ib_cq *ibcq, u16 cq_count, u16 cq_period) +{ + struct zxdh_device *iwdev = to_iwdev(ibcq->device); + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf; + int ret; + u32 val = 0; + u16 unit_period = 0; + + rf = iwdev->rf; + val = readl(rf->sc_dev.hw->hw_addr + RDMARX_CQ_PERIOD_CFG); + unit_period = (u16)(val & 0xffff); + + if (cq_count > ZXDH_MAX_CQ_COUNT || + (US_TO_NS(cq_period) / unit_period) > ZXDH_MAX_CQ_PERIOD) { + pr_info("cq_count and cq_period validate fail\n"); + return -EINVAL; + } + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + ret = -ENOMEM; + + cqp_info = &cqp_request->info; + + cqp_info->cqp_cmd = ZXDH_OP_CQ_MODIFY_MODERATION; + cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; + cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; + cqp_info->post_sq = 1; + + cqp_info->in.u.cq_modify.cq->cq_max = cq_count; + cqp_info->in.u.cq_modify.cq->cq_period = + (uint16_t)(US_TO_NS(cq_period) / unit_period); + cqp_info->in.u.cq_modify.cq->scqe_break_moderation_en = + iwcq->sc_cq.scqe_break_moderation_en; + ret = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (ret) + zxdh_dbg(iwdev_to_idev(iwdev), "MODIFY CQ: modify_cq failed\n"); + + return ret; +} + +/** + * zxdh_get_mr_access - get hw MR access permissions from IB access flags + * @access: IB access flags + */ +static inline u16 zxdh_get_mr_access(int access) +{ + u16 hw_access = 0; + + hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? + ZXDH_ACCESS_FLAGS_LOCALWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? + ZXDH_ACCESS_FLAGS_REMOTEWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_READ) ? + ZXDH_ACCESS_FLAGS_REMOTEREAD : + 0; + hw_access |= (access & IB_ACCESS_MW_BIND) ? + ZXDH_ACCESS_FLAGS_BIND_WINDOW : + 0; + hw_access |= (access & IB_ZERO_BASED) ? 
ZXDH_ACCESS_FLAGS_ZERO_BASED : + 0; + hw_access |= ZXDH_ACCESS_FLAGS_LOCALREAD; + + return hw_access; +} + +/** + * zxdh_free_stag - free stag resource + * @iwdev: zrdma device + * @stag: stag to free + */ +void zxdh_free_stag(struct zxdh_device *iwdev, u32 stag) +{ + u32 stag_idx; + + stag_idx = (stag) >> ZXDH_CQPSQ_STAG_IDX_S; + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx); +} + +/** + * zxdh_create_stag - create random stag + * @iwdev: zrdma device + */ +u32 zxdh_create_stag(struct zxdh_device *iwdev) +{ + u32 stag = 0; + u32 stag_index = 0; + u32 random; + u8 consumer_key; + int ret; + + get_random_bytes(&random, sizeof(random)); + consumer_key = (u8)random; + + ret = zxdh_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, + iwdev->rf->max_mr, &stag_index, + &iwdev->rf->next_mr); + + if (ret) + return stag; + stag = stag_index << ZXDH_CQPSQ_STAG_IDX_S; + stag |= consumer_key; + + return stag; +} + +/** + * zxdh_check_mem_contiguous - check if pbls stored in arr are contiguous + * @arr: lvl1 pbl array + * @npages: page count + * @pg_size: page size + * + */ +static bool zxdh_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) +{ + u32 pg_idx; + + for (pg_idx = 0; pg_idx < npages; pg_idx++) { + if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) + return false; + } + + return true; +} + +/** + * zxdh_check_mr_contiguous - check if MR is physically contiguous + * @palloc: pbl allocation struct + * @pg_size: page size + */ +static bool zxdh_check_mr_contiguous(struct zxdh_pble_alloc *palloc, + u32 pg_size) +{ + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *leaf = lvl2->leaf; + u64 *arr = NULL; + u64 *start_addr = NULL; + int i; + bool ret; + + if (palloc->level == PBLE_LEVEL_1) { + arr = palloc->level1.addr; + ret = zxdh_check_mem_contiguous(arr, palloc->total_cnt, + pg_size); + return ret; + } + + start_addr = leaf->addr; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + arr = leaf->addr; + if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr) + return false; + ret = zxdh_check_mem_contiguous(arr, leaf->cnt, pg_size); + if (!ret) + return false; + } + + return true; +} + +/** + * zxdh_setup_pbles - copy user pg address to pble's + * @rf: RDMA PCI function + * @iwmr: mr pointer for this memory registration + * @use_pbles: flag if to use pble's + * @pble_type: flag if to pble type(mr or queue) + */ +static int zxdh_setup_pbles(struct zxdh_pci_f *rf, struct zxdh_mr *iwmr, + bool use_pbles, bool pble_type) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_hmc_pble_rsrc *pble_rsrc_com; + u64 *pbl; + int status; + enum zxdh_pble_level level = PBLE_LEVEL_1; + bool b_level1_only = true; + + if (use_pbles) { + if (pble_type == PBLE_QUEUE) { + pble_rsrc_com = rf->pble_rsrc; + b_level1_only = true; + } else { + pble_rsrc_com = rf->pble_mr_rsrc; + b_level1_only = false; + } + + status = zxdh_get_pble(pble_rsrc_com, palloc, iwmr->page_cnt, + b_level1_only); + if (status) + return status; + + iwpbl->pbl_allocated = true; + level = palloc->level; + pinfo = (level == PBLE_LEVEL_1) ? 
&palloc->level1 : + palloc->level2.leaf; + pbl = pinfo->addr; + pinfo->pble_copy = pble_rsrc_com->pble_copy; + } else { + pbl = iwmr->pgaddrmem; + } + + zxdh_copy_user_pgaddrs(iwmr, pbl, &pinfo, level, use_pbles, pble_type); + + if (use_pbles) + iwmr->pgaddrmem[0] = *pbl; + + return 0; +} + +/** + * zxdh_handle_q_mem - handle memory for qp and cq + * @iwdev: zrdma device + * @req: information for q memory management + * @iwpbl: pble struct + * @use_pbles: flag to use pble + */ +static int zxdh_handle_q_mem(struct zxdh_device *iwdev, + struct zxdh_mem_reg_req *req, + struct zxdh_pbl *iwpbl, bool use_pbles) +{ + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct zxdh_mr *iwmr = iwpbl->iwmr; + struct zxdh_qp_mr *qpmr = &iwpbl->qp_mr; + struct zxdh_cq_mr *cqmr = &iwpbl->cq_mr; + struct zxdh_srq_mr *srqmr = &iwpbl->srq_mr; + struct zxdh_hmc_pble *hmc_p; + u64 *arr = iwmr->pgaddrmem; + u32 pg_size, total; + int err = 0; + bool ret = true; + + pg_size = iwmr->page_size; + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, + PBLE_QUEUE); // queue mr + if (err) + return err; + + if (use_pbles && palloc->level != PBLE_LEVEL_1) { + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + return -ENOMEM; + } + + if (use_pbles) + arr = palloc->level1.addr; + + switch (iwmr->type) { + case ZXDH_MEMREG_TYPE_QP: + total = req->sq_pages + req->rq_pages; + hmc_p = &qpmr->sq_pbl; + qpmr->shadow = (dma_addr_t)arr[total]; + if (use_pbles) { + ret = zxdh_check_mem_contiguous(arr, req->sq_pages, + pg_size); + if (ret) + ret = zxdh_check_mem_contiguous( + &arr[req->sq_pages], req->rq_pages, + pg_size); + } + + if (!ret) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &qpmr->rq_pbl; + hmc_p->idx = palloc->level1.idx + req->sq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &qpmr->rq_pbl; + hmc_p->addr = arr[req->sq_pages]; + } + break; + case ZXDH_MEMREG_TYPE_CQ: + hmc_p = &cqmr->cq_pbl; + + if (!cqmr->split) + cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; + + if (use_pbles) + ret = zxdh_check_mem_contiguous(arr, req->cq_pages, + pg_size); + + if (!ret) + hmc_p->idx = palloc->level1.idx; + else + hmc_p->addr = arr[0]; + break; + case ZXDH_MEMREG_TYPE_SRQ: + total = req->srq_pages + req->srq_list_pages; + hmc_p = &srqmr->srq_pbl; + srqmr->db_addr = (dma_addr_t)arr[total]; + + if (use_pbles) { + ret = zxdh_check_mem_contiguous(arr, req->srq_pages, + pg_size); + if (ret) + ret = zxdh_check_mem_contiguous( + &arr[req->srq_pages], + req->srq_list_pages, pg_size); + } + + if (!ret) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &srqmr->srq_list_pbl; + hmc_p->idx = palloc->level1.idx + req->srq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &srqmr->srq_list_pbl; + hmc_p->addr = arr[req->srq_pages]; + } + break; + default: + pr_err("VERBS: MR type error\n"); + err = -EINVAL; + } + + if (use_pbles && ret) { + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + + return err; +} + +/** + * zxdh_hw_alloc_mw - create the hw memory window + * @iwdev: zrdma device + * @iwmr: pointer to memory window info + */ +int zxdh_hw_alloc_mw(struct zxdh_device *iwdev, struct zxdh_mr *iwmr) +{ + struct zxdh_mw_alloc_info *info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.mw_alloc.info; + memset(info, 0, 
sizeof(*info)); + if (iwmr->ibmw.type == IB_MW_TYPE_1) + info->mw_wide = true; + + info->page_size = PAGE_SIZE; + info->mw_stag_index = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + info->pd_id = iwpd->sc_pd.pd_id; + info->remote_access = true; + cqp_info->cqp_cmd = ZXDH_OP_MW_ALLOC; + cqp_info->post_sq = 1; + cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_dealloc_mw - Dealloc memory window + * @ibmw: memory window structure. + */ +static int zxdh_dealloc_mw(struct ib_mw *ibmw) +{ + struct ib_pd *ibpd = ibmw->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); + struct zxdh_device *iwdev = to_iwdev(ibmw->device); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_dealloc_stag_info *info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + info->pd_id = iwpd->sc_pd.pd_id; + info->stag_idx = RS_64_1(ibmw->rkey, ZXDH_CQPSQ_STAG_IDX_S); + info->mr = false; + cqp_info->cqp_cmd = ZXDH_OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + zxdh_free_stag(iwdev, iwmr->stag); +#ifdef ZXDH_ALLOC_MW_VER_1 + kfree(iwmr); +#endif /* ZXDH_ALLOC_MW_VER_1 */ + + return 0; +} + +/** + * zxdh_hw_alloc_stag - cqp command to allocate stag + * @iwdev: zrdma device + * @iwmr: zrdma mr pointer + */ +int zxdh_hw_alloc_stag(struct zxdh_device *iwdev, struct zxdh_mr *iwmr) +{ + struct zxdh_allocate_stag_info *info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + int status; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.alloc_stag.info; + memset(info, 0, sizeof(*info)); + info->page_size = PAGE_SIZE; + info->stag_idx = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + info->pd_id = iwpd->sc_pd.pd_id; + info->total_len = iwmr->len; + info->remote_access = true; + cqp_info->cqp_cmd = ZXDH_OP_ALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + if (!status) + iwmr->is_hwreg = 1; + + return status; +} + +/** + * zxdh_set_page - populate pbl list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @addr: page dma address fro pbl list + */ +static int zxdh_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + u64 *pbl; + + if (unlikely(iwmr->npages == iwmr->page_cnt)) + return -ENOMEM; + + pbl = palloc->level1.addr; + pbl[iwmr->npages++] = addr; + + return 0; +} + +/** + * zxdh_map_mr_sg - map of sg list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @sg: scatter gather list + * @sg_nents: number of sg pages + * @sg_offset: scatter 
gather list for fmr + */ +static int zxdh_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; + int ret = 0; + + iwmr->npages = 0; + + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, zxdh_set_page); + + if (iwmr->npages > 1) { + zxdh_cqp_config_pble_table_cmd(iwmr->sc_dev, &(palloc->level1), + iwmr->npages << 3, PBLE_MR); + } + + return ret; +} + +/** + * zxdh_hwreg_mr - send cqp command for memory registration + * @iwdev: zrdma device + * @iwmr: zrdma mr pointer + * @access: access for MR + */ +int zxdh_hwreg_mr(struct zxdh_device *iwdev, struct zxdh_mr *iwmr, u16 access) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_reg_ns_stag_info *stag_info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int ret; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + stag_info = &cqp_info->in.u.mr_reg_non_shared.info; + memset(stag_info, 0, sizeof(*stag_info)); + stag_info->va = iwpbl->user_base; + stag_info->stag_idx = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + stag_info->stag_key = (u8)iwmr->stag; + stag_info->total_len = iwmr->len; + stag_info->access_rights = zxdh_get_mr_access(access); + stag_info->pd_id = iwpd->sc_pd.pd_id; + if (stag_info->access_rights & ZXDH_ACCESS_FLAGS_ZERO_BASED) + stag_info->addr_type = ZXDH_ADDR_TYPE_ZERO_BASED; + else + stag_info->addr_type = ZXDH_ADDR_TYPE_VA_BASED; + stag_info->page_size = iwmr->page_size; + + if (iwpbl->pbl_allocated) { + if (palloc->level == PBLE_LEVEL_1) { + stag_info->first_pm_pbl_index = palloc->level1.idx; + stag_info->chunk_size = 1; + } else { + stag_info->first_pm_pbl_index = palloc->level2.root.idx; + stag_info->chunk_size = 3; + } + } else { + stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; + } + + cqp_info->cqp_cmd = ZXDH_OP_MR_REG_NON_SHARED; + cqp_info->post_sq = 1; + cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; + ret = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + if (!ret) + iwmr->is_hwreg = 1; + + return ret; +} + +#ifdef SET_BEST_PAGE_SZ_V1 +/** + * zxdh_set_hugetlb_val - set MR pg size and mask to huge pg values. 
+ * @addr: virtual address + * @iwmr: mr pointer for this memory registration + */ +static void zxdh_set_hugetlb_val(u64 addr, struct zxdh_mr *iwmr) +{ + struct vm_area_struct *vma; + struct hstate *h; + + vma = find_vma(current->mm, addr); + if (vma && is_vm_hugetlb_page(vma)) { + h = hstate_vma(vma); + if (huge_page_size(h) == 0x200000 || + huge_page_size(h) == 0x40000000) { + iwmr->page_size = huge_page_size(h); + iwmr->page_msk = huge_page_mask(h); + } + } +} + +#endif +/** + * zxdh_reg_user_mr - Register a user memory region + * @pd: ptr of pd + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @access: access of mr + * @udata: user data + */ +static struct ib_mr *zxdh_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, + u64 virt, int access, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_ucontext *ucontext; + struct zxdh_pble_alloc *palloc; + struct zxdh_pbl *iwpbl; + struct zxdh_mr *iwmr; + struct ib_umem *region; + struct zxdh_mem_reg_req req = {}; + struct zxdh_reg_mr_resp resp = {}; + u32 total, stag = 0; + u8 shadow_pgcnt = 1; + bool use_pbles = false; + unsigned long flags; + int err = -EINVAL; + int ret; + + if (!len || len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) + return ERR_PTR(-EINVAL); + +#ifdef IB_UMEM_GET_V3 + region = ib_umem_get(pd->device, start, len, access); +#endif +#ifdef IB_UMEM_GET_V2 + region = ib_umem_get(udata, start, len, access); +#endif +#ifdef IB_UMEM_GET_V1 + region = ib_umem_get(udata, start, len, access, 0); +#endif +#ifdef IB_UMEM_GET_V0 + region = ib_umem_get(pd->uobject->context, start, len, access, 0); +#endif + + if (IS_ERR(region)) { + pr_err("VERBS: Failed to create ib_umem region\n"); + return (struct ib_mr *)region; + } + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { + ib_umem_release(region); + return ERR_PTR(-EFAULT); + } + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) { + ib_umem_release(region); + return ERR_PTR(-ENOMEM); + } + + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->region = region; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwmr->ibmr.iova = virt; + iwmr->ibmr.length = len; + iwmr->page_size = PAGE_SIZE; + +#ifdef SET_BEST_PAGE_SZ_V1 + iwmr->page_msk = PAGE_MASK; + if (region->hugetlb && req.reg_type == ZXDH_MEMREG_TYPE_MEM) + zxdh_set_hugetlb_val(start, iwmr); +#endif +#ifdef SET_BEST_PAGE_SZ_V2 + if (req.reg_type == ZXDH_MEMREG_TYPE_MEM) { + iwmr->page_size = ib_umem_find_best_pgsz( + region, SZ_4K | SZ_2M | SZ_1G, virt); + if (unlikely(!iwmr->page_size)) { + kfree(iwmr); + ib_umem_release(region); + return ERR_PTR(-EOPNOTSUPP); + } + } +#endif + iwmr->len = region->length; + iwpbl->user_base = virt; + palloc = &iwpbl->pble_alloc; + iwmr->type = req.reg_type; +#ifdef rdma_umem_for_each_dma_block +#ifdef ib_umem_num_dma_blocks + iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif + + switch (req.reg_type) { + case ZXDH_MEMREG_TYPE_QP: + total = req.sq_pages + req.rq_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + goto error; + } + total = req.sq_pages + req.rq_pages; + use_pbles = (total > 2); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + 
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_CQ: + if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE) + shadow_pgcnt = 0; + total = req.cq_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + goto error; + } + + use_pbles = (req.cq_pages > 1); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_SRQ: + total = req.srq_pages + req.srq_list_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + goto error; + } + + total = req.srq_pages + req.srq_list_pages; + use_pbles = (total > 2); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_MEM: + use_pbles = (iwmr->page_cnt != 1); + + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, + PBLE_MR); // mr + if (err) + goto error; + + if (use_pbles) { + ret = zxdh_check_mr_contiguous(palloc, iwmr->page_size); + if (ret) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + } + + stag = zxdh_create_stag(iwdev); + if (!stag) { + err = -ENOMEM; + goto error; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->access = access; + err = zxdh_hwreg_mr(iwdev, iwmr, access); + if (err) { + zxdh_free_stag(iwdev, stag); + goto error; + } + + if (iwpbl->pbl_allocated == true) { + if (iwpbl->pble_alloc.level == PBLE_LEVEL_1) { + resp.mr_pa_low = iwpbl->pble_alloc.level1.idx; + resp.mr_pa_hig = 0; + resp.leaf_pbl_size = 1; + } else { + resp.mr_pa_low = + iwpbl->pble_alloc.level2.root.idx; + resp.mr_pa_hig = 0; + resp.leaf_pbl_size = 3; + } + + } else { + resp.mr_pa_low = (u32)(iwmr->pgaddrmem[0] & 0xffffffff); + resp.mr_pa_hig = (u32)((iwmr->pgaddrmem[0] & + 0xffffffff00000000) >> + 32); + resp.leaf_pbl_size = 0; + } + + if (iwmr->page_size == 0x40000000) + resp.host_page_size = ZXDH_PAGE_SIZE_1G; + else if (iwmr->page_size == 0x200000) + resp.host_page_size = ZXDH_PAGE_SIZE_2M; + else if (iwmr->page_size == 0x1000) + resp.host_page_size = ZXDH_PAGE_SIZE_4K; + + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + goto error; + } + + break; + default: + goto error; + } + + iwmr->type = req.reg_type; + + return &iwmr->ibmr; + +error: + if (req.reg_type == ZXDH_MEMREG_TYPE_MEM) { + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + } else { + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + } + ib_umem_release(region); + kfree(iwmr); + + return ERR_PTR(err); +} + +int zxdh_hwdereg_mr(struct ib_mr *ib_mr) +{ + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_pd *iwpd = 
to_iwpd(ib_mr->pd); + struct zxdh_dealloc_stag_info *info; + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + /* Skip HW MR de-register when it is already de-registered + * during an MR re-reregister and the re-registration fails + */ + if (!iwmr->is_hwreg) + return 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + info->pd_id = iwpd->sc_pd.pd_id; + info->stag_idx = RS_64_1(ib_mr->rkey, ZXDH_CQPSQ_STAG_IDX_S); + info->mr = true; + if (iwpbl->pbl_allocated) + info->dealloc_pbl = true; + + cqp_info->cqp_cmd = ZXDH_OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + if (!status) + iwmr->is_hwreg = 0; + + return status; +} + +/* + * zxdh_rereg_mr_trans - Re-register a user MR for a change translation. + * @iwmr: ptr of iwmr + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * + * Re-register a user memory region when a change translation is requested. + * Re-register a new region while reusing the stag from the original registration. + */ +struct ib_mr *zxdh_rereg_mr_trans(struct zxdh_mr *iwmr, u64 start, u64 len, + u64 virt, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(iwmr->ibmr.device); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct ib_pd *pd = iwmr->ibmr.pd; + struct ib_umem *region; + bool use_pbles; + int err; + +#ifdef IB_UMEM_GET_V3 + region = ib_umem_get(pd->device, start, len, iwmr->access); +#endif +#ifdef IB_UMEM_GET_V2 + region = ib_umem_get(udata, start, len, iwmr->access); +#endif +#ifdef IB_UMEM_GET_V1 + region = ib_umem_get(udata, start, len, iwmr->access, 0); +#endif +#ifdef IB_UMEM_GET_V0 + region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0); +#endif + + if (IS_ERR(region)) { + pr_err("VERBS: Failed to create ib_umem region\n"); + return (struct ib_mr *)region; + } + + iwmr->region = region; + iwmr->ibmr.iova = virt; + iwmr->ibmr.pd = pd; + iwmr->page_size = PAGE_SIZE; + +#ifdef SET_BEST_PAGE_SZ_V1 + iwmr->page_msk = PAGE_MASK; + if (region->hugetlb) + zxdh_set_hugetlb_val(start, iwmr); +#endif +#ifdef SET_BEST_PAGE_SZ_V2 + iwmr->page_size = + ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M | SZ_1G, virt); + if (unlikely(!iwmr->page_size)) { + ib_umem_release(region); + return ERR_PTR(-EOPNOTSUPP); + } +#endif + iwmr->len = region->length; + iwpbl->user_base = virt; +#ifdef rdma_umem_for_each_dma_block +#ifdef ib_umem_num_dma_blocks + iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif + + use_pbles = (iwmr->page_cnt != 1); + + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, PBLE_MR); // mr + if (err) + goto error; + + if (use_pbles) { + err = zxdh_check_mr_contiguous(palloc, iwmr->page_size); + if (err) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + } + + err = zxdh_hwreg_mr(iwdev, iwmr, iwmr->access); + if (err) + goto 
error; + + return &iwmr->ibmr; + +error: + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + ib_umem_release(region); + iwmr->region = NULL; + + return ERR_PTR(err); +} + +/** + * zxdh_reg_phys_mr - register kernel physical memory + * @pd: ibpd pointer + * @addr: physical address of memory to register + * @size: size of memory to register + * @access: Access rights + * @iova_start: start of virtual address for physical buffers + */ +struct ib_mr *zxdh_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, + u64 *iova_start) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_pbl *iwpbl; + struct zxdh_mr *iwmr; + u32 stag; + int ret; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = ZXDH_MEMREG_TYPE_MEM; + iwpbl->user_base = *iova_start; + stag = zxdh_create_stag(iwdev); + if (!stag) { + ret = -ENOMEM; + goto err; + } + + iwmr->stag = stag; + iwmr->ibmr.iova = *iova_start; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->page_cnt = 1; + iwmr->pgaddrmem[0] = addr; + iwmr->len = size; + iwmr->page_size = SZ_4K; + ret = zxdh_hwreg_mr(iwdev, iwmr, access); + if (ret) { + zxdh_free_stag(iwdev, stag); + goto err; + } + + return &iwmr->ibmr; + +err: + kfree(iwmr); + + return ERR_PTR(ret); +} + +/** + * zxdh_get_dma_mr - register physical mem + * @pd: ptr of pd + * @acc: access for memory + */ +static struct ib_mr *zxdh_get_dma_mr(struct ib_pd *pd, int acc) +{ + u64 kva = 0; + + return zxdh_reg_phys_mr(pd, 0, 0, acc, &kva); +} + +/** + * zxdh_del_memlist - Deleting pbl list entries for CQ/QP + * @iwmr: iwmr for IB's user page addresses + * @ucontext: ptr to user context + */ +void zxdh_del_memlist(struct zxdh_mr *iwmr, struct zxdh_ucontext *ucontext) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + unsigned long flags; + + switch (iwmr->type) { + case ZXDH_MEMREG_TYPE_CQ: + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_QP: + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_SRQ: + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + break; + default: + break; + } +} + +/** + * zxdh_copy_sg_list - copy sg list for qp + * @sg_list: copied into sg_list + * @sgl: copy from sgl + * @num_sges: count of sg entries + */ +static void zxdh_copy_sg_list(struct zxdh_sge *sg_list, struct ib_sge *sgl, + int num_sges) +{ + unsigned int i; + + for (i = 0; i < num_sges; i++) { + sg_list[i].tag_off = sgl[i].addr; + sg_list[i].len = sgl[i].length; + sg_list[i].stag = sgl[i].lkey; + } +} + +/** + * zxdh_get_inline_data - get inline_multi_sge data + * @inline_data: uint8_t* + * @ib_wr: work request ptr + * @len: sge total length + */ +static int zxdh_get_inline_data(uint8_t *inline_data, + const struct ib_send_wr *ib_wr, __u32 *len) +{ + int num = 0; + int offset = 0; + while (num < ib_wr->num_sge) { + *len += 
ib_wr->sg_list[num].length; + if (*len > ZXDH_MAX_INLINE_DATA_SIZE) { + pr_err("err:inline bytes over max inline length\n"); + return -EINVAL; + } + memcpy(inline_data + offset, + (void *)(uintptr_t)ib_wr->sg_list[num].addr, + ib_wr->sg_list[num].length); + offset += ib_wr->sg_list[num].length; + num++; + } + return 0; +} + +/** + * zxdh_post_send - kernel application wr + * @ibqp: qp ptr for wr + * @ib_wr: work request ptr + * @bad_wr: return of bad wr if err + */ +static int zxdh_post_send(struct ib_qp *ibqp, + kc_typeq_ib_wr struct ib_send_wr *ib_wr, + kc_typeq_ib_wr struct ib_send_wr **bad_wr) +{ + struct zxdh_qp *iwqp; + struct zxdh_qp_uk *ukqp; + struct zxdh_sc_dev *dev; + struct zxdh_post_sq_info info; + int err = 0; + unsigned long flags; + struct zxdh_ah *ah; + + iwqp = to_iwqp(ibqp); + ukqp = &iwqp->sc_qp.qp_uk; + dev = &iwqp->iwdev->rf->sc_dev; + + if (iwqp->iwarp_state != ZXDH_QPS_RTS) { + *bad_wr = ib_wr; + pr_info("err:post send at state:%d\n", iwqp->iwarp_state); + return -EINVAL; + } + + spin_lock_irqsave(&iwqp->lock, flags); + while (ib_wr) { + memset(&info, 0, sizeof(info)); + info.wr_id = (ib_wr->wr_id); + if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) + info.signaled = true; + if (ib_wr->send_flags & IB_SEND_FENCE) + info.read_fence = true; + switch (ib_wr->opcode) { + case IB_WR_SEND_WITH_IMM: + if (ukqp->qp_caps & ZXDH_SEND_WITH_IMM) { + info.imm_data_valid = true; + info.imm_data = ntohl(ib_wr->ex.imm_data); + } else { + err = -EINVAL; + break; + } + fallthrough; + case IB_WR_SEND: + case IB_WR_SEND_WITH_INV: + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.solicited = 1; + + if (ib_wr->opcode == IB_WR_SEND) { + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) + info.op_type = ZXDH_OP_TYPE_UD_SEND; + else + info.op_type = ZXDH_OP_TYPE_SEND; + } else if (ib_wr->opcode == IB_WR_SEND_WITH_IMM) { + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) + info.op_type = + ZXDH_OP_TYPE_UD_SEND_WITH_IMM; + else + info.op_type = + ZXDH_OP_TYPE_SEND_WITH_IMM; + } else { + info.op_type = ZXDH_OP_TYPE_SEND_INV; + info.stag_to_inv = ib_wr->ex.invalidate_rkey; + } + + if ((ib_wr->send_flags & IB_SEND_INLINE) && + (ib_wr->num_sge != 0)) { + err = zxdh_get_inline_data( + iwqp->inline_data, ib_wr, + &info.op.inline_send.len); + if (err) { + pr_err("err: get_inline_data failed\n"); + spin_unlock_irqrestore(&iwqp->lock, + flags); + return -EINVAL; + } + info.op.inline_send.data = iwqp->inline_data; + + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) { + ah = to_iwah(ud_wr(ib_wr)->ah); + info.op.inline_send.ah_id = + ah->sc_ah.ah_info.ah_idx; + info.op.inline_send.qkey = + ud_wr(ib_wr)->remote_qkey; + info.op.inline_send.dest_qp = + ud_wr(ib_wr)->remote_qpn; + err = zxdh_uk_ud_inline_send( + ukqp, &info, false); + } else { + err = zxdh_uk_rc_inline_send( + ukqp, &info, false); + } + } else { + info.op.send.num_sges = ib_wr->num_sge; + info.op.send.sg_list = + (struct zxdh_sge *)ib_wr->sg_list; + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) { + ah = to_iwah(ud_wr(ib_wr)->ah); + info.op.send.ah_id = + ah->sc_ah.ah_info.ah_idx; + info.op.send.qkey = + ud_wr(ib_wr)->remote_qkey; + info.op.send.dest_qp = + ud_wr(ib_wr)->remote_qpn; + err = zxdh_uk_ud_send(ukqp, &info, + false); + } else { + err = zxdh_uk_rc_send(ukqp, &info, + false); + } + } + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + if (ukqp->qp_caps & ZXDH_WRITE_WITH_IMM) { + info.imm_data_valid = true; + 
info.imm_data = ntohl(ib_wr->ex.imm_data); + } else { + err = -EINVAL; + break; + } + fallthrough; + case IB_WR_RDMA_WRITE: + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.solicited = 1; + + if (ib_wr->opcode == IB_WR_RDMA_WRITE) + info.op_type = ZXDH_OP_TYPE_WRITE; + else + info.op_type = ZXDH_OP_TYPE_WRITE_WITH_IMM; + + if ((ib_wr->send_flags & IB_SEND_INLINE) && + (ib_wr->num_sge != 0)) { + err = zxdh_get_inline_data( + iwqp->inline_data, ib_wr, + &info.op.inline_rdma_write.len); + if (err) { + pr_err("err: get_inline_data failed\n"); + spin_unlock_irqrestore(&iwqp->lock, + flags); + return -EINVAL; + } + info.op.inline_rdma_write.data = + iwqp->inline_data; + + info.op.inline_rdma_write.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.inline_rdma_write.rem_addr.stag = + rdma_wr(ib_wr)->rkey; + err = zxdh_uk_inline_rdma_write(ukqp, &info, + false); + } else { + info.op.rdma_write.lo_sg_list = + (void *)ib_wr->sg_list; + info.op.rdma_write.num_lo_sges = ib_wr->num_sge; + info.op.rdma_write.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.rdma_write.rem_addr.stag = + rdma_wr(ib_wr)->rkey; + err = zxdh_uk_rdma_write(ukqp, &info, false); + } + break; + case IB_WR_RDMA_READ: + if (ib_wr->num_sge > + dev->hw_attrs.uk_attrs.max_hw_read_sges) { + err = -EINVAL; + break; + } + info.op_type = ZXDH_OP_TYPE_READ; + info.op.rdma_read.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; + info.op.rdma_read.num_lo_sges = ib_wr->num_sge; + err = zxdh_uk_rdma_read(ukqp, &info, false); + break; + case IB_WR_LOCAL_INV: + info.op_type = ZXDH_OP_TYPE_LOCAL_INV; + info.op.inv_local_stag.target_stag = + ib_wr->ex.invalidate_rkey; + err = zxdh_uk_stag_local_invalidate(ukqp, &info, true); + break; + case IB_WR_REG_MR: { + struct zxdh_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); + struct zxdh_pble_alloc *palloc = + &iwmr->iwpbl.pble_alloc; + struct zxdh_fast_reg_stag_info stag_info = {}; + + stag_info.signaled = info.signaled; + stag_info.read_fence = info.read_fence; + stag_info.access_rights = + zxdh_get_mr_access(reg_wr(ib_wr)->access); + stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; + stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; + stag_info.page_size = reg_wr(ib_wr)->mr->page_size; + stag_info.wr_id = ib_wr->wr_id; + stag_info.addr_type = ZXDH_ADDR_TYPE_VA_BASED; + stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; + stag_info.total_len = iwmr->ibmr.length; + stag_info.reg_addr_pa = *palloc->level1.addr; + stag_info.first_pm_pbl_index = palloc->level1.idx; + stag_info.local_fence = ib_wr->send_flags & + IB_SEND_FENCE; + if (iwmr->npages > ZXDH_MIN_PAGES_PER_FMR) + stag_info.chunk_size = 1; + err = zxdh_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, + true); + break; + } + default: + err = -EINVAL; + pr_err("VERBS: upost_send bad opcode = 0x%x\n", + ib_wr->opcode); + break; + } + + if (err) + break; + ib_wr = ib_wr->next; + } + + if (!iwqp->flush_issued && iwqp->iwarp_state == ZXDH_QPS_RTS) + zxdh_uk_qp_post_wr(ukqp); + else if (iwqp->flush_issued) + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (err) + *bad_wr = ib_wr; + + return err; +} + +/** + * zxdh_post_recv - post receive wr for kernel application + * @ibqp: ib qp pointer + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +static int zxdh_post_recv(struct ib_qp *ibqp, + kc_typeq_ib_wr struct ib_recv_wr 
*ib_wr, + kc_typeq_ib_wr struct ib_recv_wr **bad_wr) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_qp_uk *ukqp = &iwqp->sc_qp.qp_uk; + struct zxdh_post_rq_info post_recv = {}; + struct zxdh_sge *sg_list = iwqp->sg_list; + unsigned long flags; + int err = 0; + + if (iwqp->iwarp_state == ZXDH_QPS_RESET || iwqp->is_srq) { + *bad_wr = ib_wr; + return -EINVAL; + } + + spin_lock_irqsave(&iwqp->lock, flags); + + while (ib_wr) { + if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) { + err = -EINVAL; + goto out; + } + post_recv.num_sges = ib_wr->num_sge; + post_recv.wr_id = ib_wr->wr_id; + zxdh_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); + post_recv.sg_list = sg_list; + err = zxdh_uk_post_receive(ukqp, &post_recv); + if (err) { + pr_err("VERBS: post_recv err %d\n", err); + goto out; + } + + ib_wr = ib_wr->next; + } + +out: + if (iwqp->flush_issued) + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS); + else + zxdh_uk_qp_set_shadow_area(ukqp); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (err) + *bad_wr = ib_wr; + + return err; +} + +/** + * zxdh_flush_err_to_ib_wc_status - return change flush error code to IB status + * @opcode: iwarp flush code + */ +static enum ib_wc_status +zxdh_flush_err_to_ib_wc_status(enum zxdh_flush_opcode opcode) +{ + switch (opcode) { + case FLUSH_PROT_ERR: + return IB_WC_LOC_PROT_ERR; + case FLUSH_REM_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case FLUSH_LOC_QP_OP_ERR: + return IB_WC_LOC_QP_OP_ERR; + case FLUSH_REM_OP_ERR: + return IB_WC_REM_OP_ERR; + case FLUSH_LOC_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case FLUSH_GENERAL_ERR: + return IB_WC_WR_FLUSH_ERR; + case FLUSH_MW_BIND_ERR: + return IB_WC_MW_BIND_ERR; + case FLUSH_REM_INV_REQ_ERR: + return IB_WC_REM_INV_REQ_ERR; + case FLUSH_RETRY_EXC_ERR: + return IB_WC_RETRY_EXC_ERR; + case FLUSH_FATAL_ERR: + default: + return IB_WC_FATAL_ERR; + } +} + +/** + * zxdh_process_cqe - process cqe info + * @entry: processed cqe + * @cq_poll_info: cqe info + */ +static void zxdh_process_cqe(struct ib_wc *entry, + struct zxdh_cq_poll_info *cq_poll_info) +{ + struct zxdh_qp *iwqp; + struct zxdh_sc_qp *qp; + + entry->wc_flags = 0; + entry->pkey_index = 0; + entry->wr_id = cq_poll_info->wr_id; + + qp = cq_poll_info->qp_handle; + iwqp = qp->qp_uk.back_qp; + entry->qp = qp->qp_uk.back_qp; + + if (cq_poll_info->error) { + entry->status = (cq_poll_info->comp_status == + ZXDH_COMPL_STATUS_FLUSHED) ? 
+ zxdh_flush_err_to_ib_wc_status( + cq_poll_info->minor_err) : + IB_WC_GENERAL_ERR; + + entry->vendor_err = cq_poll_info->major_err << 16 | + cq_poll_info->minor_err; + } else { + entry->status = IB_WC_SUCCESS; + if (cq_poll_info->imm_valid) { + entry->ex.imm_data = htonl(cq_poll_info->imm_data); + entry->wc_flags |= IB_WC_WITH_IMM; + } + if (cq_poll_info->ud_smac_valid) { + ether_addr_copy(entry->smac, cq_poll_info->ud_smac); + entry->wc_flags |= IB_WC_WITH_SMAC; + } + + if (cq_poll_info->ud_vlan_valid && + iwqp->iwdev->rf->vlan_parse_en) { + u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; + + entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; + if (vlan) { + entry->vlan_id = vlan; + entry->wc_flags |= IB_WC_WITH_VLAN; + } + } else { + entry->sl = 0; + } + } + + switch (cq_poll_info->op_type) { + case ZXDH_OP_TYPE_SEND: + case ZXDH_OP_TYPE_SEND_WITH_IMM: + case ZXDH_OP_TYPE_SEND_INV: + case ZXDH_OP_TYPE_UD_SEND: + case ZXDH_OP_TYPE_UD_SEND_WITH_IMM: + entry->opcode = IB_WC_SEND; + break; + case ZXDH_OP_TYPE_WRITE: + case ZXDH_OP_TYPE_WRITE_WITH_IMM: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case ZXDH_OP_TYPE_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case ZXDH_OP_TYPE_FAST_REG_MR: + entry->opcode = IB_WC_REG_MR; + break; + case ZXDH_OP_TYPE_LOCAL_INV: + entry->opcode = IB_WC_LOCAL_INV; + break; + case ZXDH_OP_TYPE_REC_IMM: + case ZXDH_OP_TYPE_REC: + entry->opcode = cq_poll_info->op_type == ZXDH_OP_TYPE_REC_IMM ? + IB_WC_RECV_RDMA_WITH_IMM : + IB_WC_RECV; + if (qp->qp_uk.qp_type != ZXDH_QP_TYPE_ROCE_UD && + cq_poll_info->stag_invalid_set) { + entry->ex.invalidate_rkey = cq_poll_info->inv_stag; + entry->wc_flags |= IB_WC_WITH_INVALIDATE; + } + break; + default: + ibdev_err(&iwqp->iwdev->ibdev, "Invalid opcode = %d in CQE\n", + cq_poll_info->op_type); + entry->status = IB_WC_GENERAL_ERR; + return; + } + + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_UD) { + entry->src_qp = cq_poll_info->ud_src_qpn; + entry->slid = 0; + entry->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); + entry->network_hdr_type = cq_poll_info->ipv4 ? 
+ RDMA_NETWORK_IPV4 : + RDMA_NETWORK_IPV6; + } else { + entry->src_qp = cq_poll_info->qp_id; + } + + entry->byte_len = cq_poll_info->bytes_xfered; +} + +/** + * zxdh_poll_one - poll one entry of the CQ + * @ukcq: ukcq to poll + * @cur_cqe: current CQE info to be filled in + * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ + * + * Returns the internal zrdma device error code or 0 on success + */ +static inline int zxdh_poll_one(struct zxdh_cq_uk *ukcq, + struct zxdh_cq_poll_info *cur_cqe, + struct ib_wc *entry) +{ + int ret = zxdh_uk_cq_poll_cmpl(ukcq, cur_cqe); + + if (ret) + return ret; + + zxdh_process_cqe(entry, cur_cqe); + + return 0; +} + +/** + * __zxdh_poll_cq - poll cq for completion (kernel apps) + * @iwcq: cq to poll + * @num_entries: number of entries to poll + * @entry: wr of a completed entry + */ +static int __zxdh_poll_cq(struct zxdh_cq *iwcq, int num_entries, + struct ib_wc *entry) +{ + struct list_head *tmp_node, *list_node; + struct zxdh_cq_buf *last_buf = NULL; + struct zxdh_cq_poll_info *cur_cqe = &iwcq->cur_cqe; + struct zxdh_cq_buf *cq_buf; + int ret; + struct zxdh_device *iwdev; + struct zxdh_cq_uk *ukcq; + bool cq_new_cqe = false; + int resized_bufs = 0; + int npolled = 0; + + iwdev = to_iwdev(iwcq->ibcq.device); + ukcq = &iwcq->sc_cq.cq_uk; + + /* go through the list of previously resized CQ buffers */ + list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { + cq_buf = container_of(list_node, struct zxdh_cq_buf, list); + while (npolled < num_entries) { + ret = zxdh_poll_one(&cq_buf->cq_uk, cur_cqe, + entry + npolled); + if (!ret) { + ++npolled; + cq_new_cqe = true; + continue; + } + if (ret == -ENOENT) + break; + /* QP using the CQ is destroyed. Skip reporting this CQE */ + if (ret == -EFAULT) { + cq_new_cqe = true; + continue; + } + goto error; + } + + /* save the resized CQ buffer which received the last cqe */ + if (cq_new_cqe) + last_buf = cq_buf; + cq_new_cqe = false; + } + + /* check the current CQ for new cqes */ + while (npolled < num_entries) { + ret = zxdh_poll_one(ukcq, cur_cqe, entry + npolled); + if (ret == -ENOENT) { + ret = zxdh_generated_cmpls(iwcq, cur_cqe); + if (!ret) + zxdh_process_cqe(entry + npolled, cur_cqe); + } + if (!ret) { + ++npolled; + cq_new_cqe = true; + continue; + } + + if (ret == -ENOENT) + break; + /* QP using the CQ is destroyed. 
Skip reporting this CQE */ + if (ret == -EFAULT) { + cq_new_cqe = true; + continue; + } + goto error; + } + + if (cq_new_cqe) + /* all previous CQ resizes are complete */ + resized_bufs = zxdh_process_resize_list(iwcq, iwdev, NULL); + else if (last_buf) + /* only CQ resizes up to the last_buf are complete */ + resized_bufs = zxdh_process_resize_list(iwcq, iwdev, last_buf); + if (resized_bufs) + /* report to the HW the number of complete CQ resizes */ + zxdh_uk_cq_set_resized_cnt(ukcq, resized_bufs); + + return npolled; +error: + pr_err("VERBS: %s: Error polling CQ, zxdh_err: %d\n", __func__, ret); + + return ret; +} + +/** + * zxdh_poll_cq - poll cq for completion (kernel apps) + * @ibcq: cq to poll + * @num_entries: number of entries to poll + * @entry: wr of a completed entry + */ +static int zxdh_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *entry) +{ + struct zxdh_cq *iwcq; + unsigned long flags; + int ret; + + iwcq = to_iwcq(ibcq); + + spin_lock_irqsave(&iwcq->lock, flags); + ret = __zxdh_poll_cq(iwcq, num_entries, entry); + spin_unlock_irqrestore(&iwcq->lock, flags); + + return ret; +} + +/** + * zxdh_req_notify_cq - arm cq kernel application + * @ibcq: cq to arm + * @notify_flags: notofication flags + */ +static int zxdh_req_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct zxdh_cq *iwcq; + struct zxdh_cq_uk *ukcq; + unsigned long flags; + enum zxdh_cmpl_notify cq_notify = ZXDH_CQ_COMPL_EVENT; + bool promo_event = false; + int ret = 0; + + iwcq = to_iwcq(ibcq); + ukcq = &iwcq->sc_cq.cq_uk; + + spin_lock_irqsave(&iwcq->lock, flags); + if (notify_flags == IB_CQ_SOLICITED) { + cq_notify = ZXDH_CQ_COMPL_SOLICITED; + } else { + if (iwcq->last_notify == ZXDH_CQ_COMPL_SOLICITED) + promo_event = true; + } + + if (!iwcq->armed || promo_event) { + iwcq->armed = true; + iwcq->last_notify = cq_notify; + zxdh_uk_cq_request_notification(ukcq, cq_notify); + } + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !zxdh_cq_empty(iwcq)) + ret = 1; + spin_unlock_irqrestore(&iwcq->lock, flags); + + return ret; +} + +#ifdef ALLOC_HW_STATS_STRUCT_V2 +const struct rdma_stat_desc zxdh_hw_stat_descs[] = { + /*32-bit */ + [HW_STAT_DUPLICATE_REQUEST].name = "duplicate_request", + [HW_STAT_IMPLIED_NAK_SEQ_ERR].name = "implied_nak_seq_err", + [HW_STAT_LOCAL_ACK_TIMEOUT_ERR].name = "local_ack_timeout_err", + [HW_STAT_NP_CNP_SENT].name = "np_cnp_sent", + [HW_STAT_NP_ECN_MARKED_ROCE_PACKETS].name = + "np_ecn_marked_roce_packets", + [HW_STAT_OUT_OF_SEQUENCE].name = "out_of_sequence", + [HW_STAT_PACKET_SEQ_ERR].name = "packet_seq_err", + [HW_STAT_REQ_CQE_ERROR].name = "req_cqe_error", + [HW_STAT_REQ_REMOTE_ACCESS_ERRORS].name = "req_remote_access_errors", + [HW_STAT_REQ_REMOTE_INVALID_REQUEST].name = + "req_remote_invalid_request", + [HW_STAT_REQ_LOCAL_LENGTH_ERROR].name = "req_local_length_error", + [HW_STAT_RESP_CQE_ERROR].name = "resp_cqe_error", + [HW_STAT_RESP_REMOTE_ACCESS_ERRORS].name = "resp_remote_access_errors", + [HW_STAT_RESP_REMOTE_INVALID_REQUEST].name = + "resp_remote_invalid_request", + [HW_STAT_RESP_REMOTE_OPERATION_ERRORS].name = + "resp_remote_operation_errors", + [HW_STAT_RESP_RNR_NAK].name = "resp_rnr_nak", + [HW_STAT_RNR_NAK_RETRY_ERR].name = "rnr_nak_retry_err", + [HW_STAT_RP_CNP_HANDLED].name = "rp_cnp_handled", + [HW_STAT_RX_READ_REQUESTS].name = "rx_read_requests", + [HW_STAT_RX_WRITE_REQUESTS].name = "rx_write_requests", + [HW_STAT_RX_ICRC_ENCAPSULATED].name = "rx_icrc_encapsulated", + [HW_STAT_ROCE_SLOW_RESTART_CNPS].name = 
"roce_slow_restart_cnps", + [HW_STAT_RDMA_TX_PKTS].name = "rdma_tx_pkts", + [HW_STAT_RDMA_TX_BYTES].name = "rdma_tx_bytes", + [HW_STAT_RDMA_RX_PKTS].name = "rdma_rx_pkts", + [HW_STAT_RDMA_RX_BYTES].name = "rdma_rx_bytes", +}; + +#endif /* ALLOC_HW_STATS_STRUCT_V2 */ + +/** + * zxdh_query_ah - Query address handle + * @ibah: pointer to address handle + * @ah_attr: address handle attributes + */ +static int zxdh_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + struct zxdh_ah *ah = to_iwah(ibah); + + memset(ah_attr, 0, sizeof(*ah_attr)); + if (ah->av.attrs.ah_flags & IB_AH_GRH) { + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; + ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; + ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; + ah_attr->grh.sgid_index = ah->sgid_index; + ah_attr->grh.sgid_index = ah->sgid_index; + memcpy(&ah_attr->grh.dgid, &ah->dgid, + sizeof(ah_attr->grh.dgid)); + } + + return 0; +} + +static __be64 zxdh_mac_to_guid(struct net_device *ndev) +{ + const unsigned char *mac = ndev->dev_addr; + __be64 guid; + unsigned char *dst = (unsigned char *)&guid; + + dst[0] = mac[0] ^ 2; + dst[1] = mac[1]; + dst[2] = mac[2]; + dst[3] = 0xff; + dst[4] = 0xfe; + dst[5] = mac[3]; + dst[6] = mac[4]; + dst[7] = mac[5]; + + return guid; +} + +static ssize_t hca_type_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct zxdh_device *iwdev = + rdma_device_to_drv_device(device, struct zxdh_device, ibdev); + + return sysfs_emit(buf, "%d\n", iwdev->rf->pcidev->device); +} +static DEVICE_ATTR_RO(hca_type); + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct zxdh_device *iwdev = + rdma_device_to_drv_device(device, struct zxdh_device, ibdev); + return sysfs_emit(buf, "%x\n", iwdev->rf->pcidev->revision); +} +static DEVICE_ATTR_RO(hw_rev); + +static struct attribute *zxdh_class_attributes[] = { + &dev_attr_hw_rev.attr, + &dev_attr_hca_type.attr, + NULL, +}; + +static const struct attribute_group zxdh_attr_group = { + .attrs = zxdh_class_attributes, +}; + +static inline void +zxdh_set_device_sysfs_group(struct ib_device *dev, + const struct attribute_group *group) +{ + dev->groups[1] = group; +} + +#ifdef IB_GET_NETDEV_OP_NOT_DEPRECATED +static struct net_device *zxdh_get_netdev(struct ib_device *ibdev, u8 port_num) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + if (iwdev->netdev) { + dev_hold(iwdev->netdev); + return iwdev->netdev; + } + + return NULL; +} + +#endif +#ifdef HAS_IB_SET_DEVICE_OP +static struct ib_device_ops zxdh_roce_dev_ops; +static const struct ib_device_ops zxdh_dev_ops = { +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || defined(RHEL_8_4) || defined(RHEL_8_5) + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_ZXDH, + .uverbs_abi_ver = ZXDH_ABI_VER, +#endif +#if defined(ALLOC_HW_STATS_V3) + .alloc_hw_port_stats = zxdh_alloc_hw_port_stats, +#else + .alloc_hw_stats = zxdh_alloc_hw_stats, +#endif + .alloc_mr = zxdh_alloc_mr, + .alloc_mw = zxdh_alloc_mw, + .alloc_pd = zxdh_alloc_pd, + .alloc_ucontext = zxdh_alloc_ucontext, + .create_cq = zxdh_create_cq, + .create_qp = zxdh_create_qp, + .create_srq = zxdh_create_srq, +#ifdef IB_DEALLOC_DRIVER_SUPPORT + .dealloc_driver = zxdh_ib_dealloc_device, +#endif + .dealloc_mw = zxdh_dealloc_mw, + .dealloc_pd = zxdh_dealloc_pd, + .dealloc_ucontext = zxdh_dealloc_ucontext, + .dereg_mr = zxdh_dereg_mr, + .destroy_cq = zxdh_destroy_cq, + 
.destroy_qp = zxdh_destroy_qp, + .destroy_srq = zxdh_destroy_srq, + .disassociate_ucontext = zxdh_disassociate_ucontext, + .get_dev_fw_str = zxdh_get_dev_fw_str, + .get_dma_mr = zxdh_get_dma_mr, + .get_hw_stats = zxdh_get_hw_stats, +#ifdef IB_GET_NETDEV_OP_NOT_DEPRECATED + .get_netdev = zxdh_get_netdev, +#endif + .map_mr_sg = zxdh_map_mr_sg, + .mmap = zxdh_mmap, +#ifdef RDMA_MMAP_DB_SUPPORT + .mmap_free = zxdh_mmap_free, +#endif + .poll_cq = zxdh_poll_cq, + .post_recv = zxdh_post_recv, + .post_send = zxdh_post_send, + .post_srq_recv = zxdh_post_srq_recv, + .process_mad = zxdh_process_mad, + .query_device = zxdh_query_device, + .query_port = zxdh_query_port, + .modify_port = zxdh_modify_port, + .query_qp = zxdh_query_qp, + .query_srq = zxdh_query_srq, + .reg_user_mr = zxdh_reg_user_mr, + .rereg_user_mr = zxdh_rereg_user_mr, + .req_notify_cq = zxdh_req_notify_cq, + .resize_cq = zxdh_resize_cq, + .modify_srq = zxdh_modify_srq, + .modify_cq = zxdh_modify_cq, +#ifdef INIT_RDMA_OBJ_SIZE + INIT_RDMA_OBJ_SIZE(ib_pd, zxdh_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_ucontext, zxdh_ucontext, ibucontext), + INIT_RDMA_OBJ_SIZE(ib_srq, zxdh_srq, ibsrq), +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || defined(RHEL_8_4) || defined(RHEL_8_5) + INIT_RDMA_OBJ_SIZE(ib_ah, zxdh_ah, ibah), +#endif /* 5.2.0 */ +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || defined(RHEL_8_4) || defined(RHEL_8_5) + INIT_RDMA_OBJ_SIZE(ib_cq, zxdh_cq, ibcq), +#endif /* 5.3.0 */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE + INIT_RDMA_OBJ_SIZE(ib_mw, zxdh_mr, ibmw), +#endif /* 5.10.0 */ +#ifdef GLOBAL_QP_MEM + INIT_RDMA_OBJ_SIZE(ib_qp, zxdh_qp, ibqp), +#endif /* GLOBAL_QP_MEM */ +#endif /* INIT_RDMA_OBJ_SIZE */ +}; + +#endif /* HAS_IB_SET_DEVICE_OP */ +static void zxdh_set_device_ops(struct ib_device *ibdev) +{ +#ifndef HAS_IB_SET_DEVICE_OP + struct ib_device *dev_ops = ibdev; + +#if defined(RHEL_7_7) || defined(RHEL_7_8) || defined(RHEL_7_9) || \ + defined(RHEL_8_2) || defined(RHEL_8_3) || defined(RHEL_8_4) || \ + defined(RHEL_8_5) + dev_ops->uverbs_abi_ver = ZXDH_ABI_VER; + dev_ops->driver_id = RDMA_DRIVER_ZXDH; + dev_ops->owner = THIS_MODULE; +#endif + dev_ops->alloc_hw_stats = zxdh_alloc_hw_stats; + dev_ops->alloc_mr = zxdh_alloc_mr; + dev_ops->alloc_mw = zxdh_alloc_mw; + dev_ops->alloc_pd = zxdh_alloc_pd; + dev_ops->alloc_ucontext = zxdh_alloc_ucontext; + dev_ops->create_cq = zxdh_create_cq; + dev_ops->create_qp = zxdh_create_qp; + dev_ops->create_srq = zxdh_create_srq; +#ifdef IB_DEALLOC_DRIVER_SUPPORT + dev_ops->dealloc_driver = zxdh_ib_dealloc_device, +#endif + dev_ops->dealloc_mw = zxdh_dealloc_mw; + dev_ops->dealloc_pd = zxdh_dealloc_pd; + dev_ops->dealloc_ucontext = zxdh_dealloc_ucontext; + dev_ops->dereg_mr = zxdh_dereg_mr; + dev_ops->destroy_cq = zxdh_destroy_cq; + dev_ops->destroy_qp = zxdh_destroy_qp; + dev_ops->destroy_srq = zxdh_destroy_srq; + dev_ops->disassociate_ucontext = zxdh_disassociate_ucontext; + dev_ops->get_dev_fw_str = zxdh_get_dev_fw_str; + dev_ops->get_dma_mr = zxdh_get_dma_mr; + dev_ops->get_hw_stats = zxdh_get_hw_stats; +#ifndef HAS_IB_SET_DEVICE_OP + dev_ops->get_netdev = zxdh_get_netdev; +#endif + dev_ops->map_mr_sg = zxdh_map_mr_sg; + dev_ops->mmap = zxdh_mmap; +#ifdef RDMA_MMAP_DB_SUPPORT + dev_ops->mmap_free = zxdh_mmap_free; +#endif + dev_ops->poll_cq = zxdh_poll_cq; + dev_ops->post_recv = zxdh_post_recv; + dev_ops->post_send = zxdh_post_send; + dev_ops->post_srq_recv = zxdh_post_srq_recv; + 
dev_ops->process_mad = zxdh_process_mad; + dev_ops->query_device = zxdh_query_device; + dev_ops->query_port = zxdh_query_port; + dev_ops->modify_port = zxdh_modify_port; + dev_ops->query_qp = zxdh_query_qp; + dev_ops->query_srq = zxdh_query_srq; + dev_ops->reg_user_mr = zxdh_reg_user_mr; + dev_ops->rereg_user_mr = zxdh_rereg_user_mr; + dev_ops->req_notify_cq = zxdh_req_notify_cq; + dev_ops->resize_cq = zxdh_resize_cq; + dev_ops->modify_cq = zxdh_modify_cq; +#else + ib_set_device_ops(ibdev, &zxdh_dev_ops); +#endif + zxdh_set_device_sysfs_group(ibdev, &zxdh_attr_group); +} + +static void zxdh_set_device_roce_ops(struct ib_device *ibdev) +{ +#ifdef HAS_IB_SET_DEVICE_OP + struct ib_device_ops *dev_ops = &zxdh_roce_dev_ops; +#else + struct ib_device *dev_ops = ibdev; +#endif + dev_ops->create_ah = zxdh_create_ah; +#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_5) + dev_ops->create_user_ah = zxdh_create_ah; +#endif + dev_ops->destroy_ah = zxdh_destroy_ah; + dev_ops->get_link_layer = zxdh_get_link_layer; + dev_ops->get_port_immutable = zxdh_roce_port_immutable; + dev_ops->modify_qp = zxdh_modify_qp_roce; + dev_ops->modify_srq = zxdh_modify_srq; + dev_ops->query_ah = zxdh_query_ah; + dev_ops->query_gid = zxdh_query_gid_roce; + dev_ops->query_pkey = zxdh_query_pkey; + kc_set_ibdev_add_del_gid(ibdev); +#ifdef HAS_IB_SET_DEVICE_OP + ib_set_device_ops(ibdev, &zxdh_roce_dev_ops); +#endif +} + +/** + * zxdh_init_roce_device - initialization of roce rdma device + * @iwdev: zrdma device + */ +static void zxdh_init_roce_device(struct zxdh_device *iwdev) +{ +#ifdef UVERBS_CMD_MASK + kc_set_roce_uverbs_cmd_mask(iwdev); +#endif + iwdev->ibdev.node_type = RDMA_NODE_IB_CA; + iwdev->ibdev.node_guid = zxdh_mac_to_guid(iwdev->netdev); + zxdh_set_device_roce_ops(&iwdev->ibdev); +} + +static const struct uapi_definition zxdh_ib_defs[] = { + UAPI_DEF_CHAIN(zxdh_ib_dev_defs), + {} +}; + +/** + * zxdh_init_rdma_device - initialization of rdma device + * @iwdev: zrdma device + */ +static int zxdh_init_rdma_device(struct zxdh_device *iwdev) +{ + struct pci_dev *pcidev = iwdev->rf->pcidev; + +#if KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) + iwdev->ibdev.owner = THIS_MODULE; + iwdev->ibdev.uverbs_abi_ver = ZXDH_ABI_VER; +#endif +#ifdef UVERBS_CMD_MASK + kc_set_rdma_uverbs_cmd_mask(iwdev); +#endif + + if (iwdev->roce_mode) + zxdh_init_roce_device(iwdev); + else + return -EPFNOSUPPORT; + + iwdev->ibdev.phys_port_cnt = 1; + iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; + iwdev->ibdev.dev.parent = &pcidev->dev; + set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev); + zxdh_set_device_ops(&iwdev->ibdev); + +#ifdef IB_DEV_OPS_FILL_ENTRY + zxdh_set_restrack_ops(&iwdev->ibdev); +#endif /* >= 5.9.0 */ + iwdev->ibdev.driver_def = zxdh_ib_defs; + return 0; +} + +/** + * zxdh_port_ibevent - indicate port event + * @iwdev: zrdma device + */ +void zxdh_port_ibevent(struct zxdh_device *iwdev) +{ + struct ib_event event; + + event.device = &iwdev->ibdev; + event.element.port_num = 1; + event.event = iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : + IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + +/** + * zxdh_ib_unregister_device - unregister rdma device from IB + * core + * @iwdev: zrdma device + */ +void zxdh_ib_unregister_device(struct zxdh_device *iwdev) +{ + iwdev->iw_status = 0; + zxdh_port_ibevent(iwdev); + ib_unregister_device(&iwdev->ibdev); +#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) + kfree(iwdev->ibdev.iwcm); + iwdev->ibdev.iwcm = NULL; +#endif +} + +/** + * zxdh_ib_register_device - register zrdma device to IB core + * @iwdev: zrdma device + */ +int zxdh_ib_register_device(struct zxdh_device *iwdev) +{ + int ret; + + ret = zxdh_init_rdma_device(iwdev); + if (ret) + return ret; + + kc_set_driver_id(iwdev->ibdev); +#ifdef NETDEV_TO_IBDEV_SUPPORT + ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1); + if (ret) + goto error; +#endif +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE +#ifdef CONFIG_SUSE_KERNEL +#if SLE_VERSION(15, 0, 0) >= SLE_VERSION_CODE + strlcpy(iwdev->ibdev.name, "zrdma%d", IB_DEVICE_NAME_MAX); +#endif /* SLE_VERSION_CODE */ +#else + strlcpy(iwdev->ibdev.name, "zrdma%d", IB_DEVICE_NAME_MAX); +#endif /* CONFIG_SUSE_KERNEL */ +#endif /* LINUX_VERSION_CODE */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE + dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX); +#endif + ret = kc_ib_register_device(&iwdev->ibdev, "zrdma%d", + iwdev->rf->hw.device); + if (ret) + goto error; + + iwdev->iw_status = 1; + zxdh_port_ibevent(iwdev); + + return 0; + +error: +#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) + kfree(iwdev->ibdev.iwcm); + iwdev->ibdev.iwcm = NULL; +#endif + if (ret) + pr_err("VERBS: Register RDMA device fail\n"); + + return ret; +} + +#ifdef IB_DEALLOC_DRIVER_SUPPORT +/** + * zxdh_ib_dealloc_device + * @ibdev: ib device + * + * callback from ibdev dealloc_driver to deallocate resources + * unber zrdma device + */ +void zxdh_ib_dealloc_device(struct ib_device *ibdev) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + zxdh_rt_deinit_hw(iwdev); + zxdh_ctrl_deinit_hw(iwdev->rf); + zxdh_del_handler(iwdev->hdl); + kfree(iwdev->hdl); + kfree(iwdev->rf); +} +#endif diff --git a/src/rdma/src/verbs.h b/src/rdma/src/verbs.h new file mode 100644 index 0000000000000000000000000000000000000000..54637a1fe39bcd3d4b97e9addbbbb0661ec7e248 --- /dev/null +++ b/src/rdma/src/verbs.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VERBS_H +#define ZXDH_VERBS_H + +#define ZXDH_MAX_SAVED_PHY_PGADDR 4 +#define ZXDH_FLUSH_DELAY_MS 200 + +#define ZXDH_MAX_CQ_COUNT 0xFFFF +#define ZXDH_MAX_CQ_PERIOD 0x7FF + +#define US_TO_NS(us) ((us)*1000) +#define NS_TO_US(ns) ((ns) / 1000) + +#define ZXDH_PKEY_TBL_SZ 1 +#define ZXDH_DEFAULT_PKEY 0xFFFF +#define ZXDH_MAX_AH 0x7FFFFFFF +#define ZXDH_MAX_AH_LIST 0x20000 + +#define iwdev_to_idev(iwdev) (&(iwdev)->rf->sc_dev) + +struct zxdh_ucontext { + struct ib_ucontext ibucontext; + struct zxdh_device *iwdev; +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *sq_db_mmap_entry; + struct rdma_user_mmap_entry *cq_db_mmap_entry; +#else + struct zxdh_user_mmap_entry *sq_db_mmap_entry; + struct zxdh_user_mmap_entry *cq_db_mmap_entry; + DECLARE_HASHTABLE(mmap_hash_tbl, 6); + spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */ +#endif + struct list_head 
cq_reg_mem_list; + spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */ + struct list_head qp_reg_mem_list; + spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */ + struct list_head srq_reg_mem_list; + spinlock_t srq_reg_mem_list_lock; /* protect QP memory list */ + /* FIXME: Move to kcompat ideally. Used < 4.20.0 for old diassasscoaite flow */ + struct list_head vma_list; + struct mutex vma_list_mutex; /* protect the vma_list */ + int abi_ver; + bool legacy_mode; +}; + +struct zxdh_pd { + struct ib_pd ibpd; + struct zxdh_sc_pd sc_pd; +}; + +struct zxdh_av { + u8 macaddr[16]; + struct rdma_ah_attr attrs; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + u8 net_type; +}; + +struct zxdh_ah { + struct ib_ah ibah; + struct zxdh_sc_ah sc_ah; + struct zxdh_pd *pd; + struct zxdh_av av; + u8 sgid_index; + union ib_gid dgid; + struct list_head list; + refcount_t refcnt; + struct zxdh_ah *parent_ah; /* AH from cached list */ +}; + +struct zxdh_hmc_pble { + union { + u32 idx; + dma_addr_t addr; + }; +}; + +struct zxdh_cq_mr { + struct zxdh_hmc_pble cq_pbl; + dma_addr_t shadow; + bool split; +}; + +struct zxdh_qp_mr { + struct zxdh_hmc_pble sq_pbl; + struct zxdh_hmc_pble rq_pbl; + dma_addr_t shadow; + struct page *sq_page; +}; + +struct zxdh_srq_mr { + struct zxdh_hmc_pble srq_pbl; + struct zxdh_hmc_pble srq_list_pbl; + struct page *srq_page; + dma_addr_t db_addr; +}; + +struct zxdh_cq_buf { + struct zxdh_dma_mem kmem_buf; + struct zxdh_cq_uk cq_uk; + struct zxdh_hw *hw; + struct list_head list; + struct work_struct work; +}; + +struct zxdh_pbl { + struct list_head list; + union { + struct zxdh_qp_mr qp_mr; + struct zxdh_cq_mr cq_mr; + struct zxdh_srq_mr srq_mr; + }; + + u8 pbl_allocated : 1; + u8 on_list : 1; + u64 user_base; + struct zxdh_pble_alloc pble_alloc; + struct zxdh_mr *iwmr; +}; + +struct zxdh_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + }; + struct ib_umem *region; + struct zxdh_sc_dev *sc_dev; + int access; + u8 is_hwreg; + u16 type; + u32 page_cnt; + u64 page_size; + u64 page_msk; + u32 npages; + u32 stag; + u64 len; + u64 pgaddrmem[ZXDH_MAX_SAVED_PHY_PGADDR]; + struct zxdh_pbl iwpbl; +}; + +struct zxdh_cq { + struct ib_cq ibcq; + struct zxdh_sc_cq sc_cq; + u16 cq_head; + u16 cq_size; + u32 cq_num; + bool user_mode; + bool armed; + enum zxdh_cmpl_notify last_notify; + u32 polled_cmpls; + u32 cq_mem_size; + struct zxdh_dma_mem kmem; + struct zxdh_dma_mem kmem_shadow; + struct completion free_cq; + refcount_t refcnt; + spinlock_t lock; /* for poll cq */ + struct zxdh_pbl *iwpbl; + struct zxdh_pbl *iwpbl_shadow; + struct list_head resize_list; + struct zxdh_cq_poll_info cur_cqe; + struct list_head cmpl_generated; +}; + +struct zxdh_cmpl_gen { + struct list_head list; + struct zxdh_cq_poll_info cpi; +}; + +struct aeq_qp_work { + struct work_struct work; + struct zxdh_qp *iwqp; +}; + +struct iw_cm_id; + +struct zxdh_qp_kmode { + struct zxdh_dma_mem dma_mem; + struct zxdh_sq_uk_wr_trk_info *sq_wrid_mem; + u64 *rq_wrid_mem; +}; + +struct zxdh_srq_kmode { + struct zxdh_dma_mem dma_mem; + u64 *srq_wrid_mem; +}; + +struct zxdh_qp { + struct ib_qp ibqp; + struct zxdh_sc_qp sc_qp; + struct zxdh_device *iwdev; + struct zxdh_cq *iwscq; + struct zxdh_cq *iwrcq; + struct zxdh_pd *iwpd; + struct zxdh_srq *iwsrq; +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *push_wqe_mmap_entry; + struct rdma_user_mmap_entry *push_db_mmap_entry; +#else + struct zxdh_user_mmap_entry 
*push_wqe_mmap_entry; + struct zxdh_user_mmap_entry *push_db_mmap_entry; +#endif + struct zxdh_qp_host_ctx_info ctx_info; + union { + struct zxdh_iwarp_offload_info iwarp_info; + struct zxdh_roce_offload_info roce_info; + }; + + union { + struct zxdh_tcp_offload_info tcp_info; + struct zxdh_udp_offload_info udp_info; + }; + + struct zxdh_ah roce_ah; + struct list_head teardown_entry; + refcount_t refcnt; + struct iw_cm_id *cm_id; + struct zxdh_cm_node *cm_node; + struct delayed_work dwork_flush; + struct ib_mr *lsmm_mr; + atomic_t hw_mod_qp_pend; + enum ib_qp_state ibqp_state; + u32 qp_mem_size; + u32 last_aeq; + int max_send_wr; + int max_recv_wr; + atomic_t close_timer_started; + spinlock_t lock; /* serialize posting WRs to SQ/RQ */ + struct zxdh_qp_context *iwqp_context; + void *pbl_vbase; + dma_addr_t pbl_pbase; + struct page *page; + u8 active_conn : 1; + u8 user_mode : 1; + u8 hte_added : 1; + u8 flush_issued : 1; + u8 sig_all : 1; + u8 pau_mode : 1; + u8 rsvd : 1; + u8 iwarp_state; + u16 term_sq_flush_code; + u16 term_rq_flush_code; + u8 hw_iwarp_state; + u8 hw_tcp_state; + u8 is_srq; + struct zxdh_qp_kmode kqp; + struct zxdh_dma_mem host_ctx; + struct timer_list terminate_timer; + struct zxdh_pbl *iwpbl; + struct zxdh_sge *sg_list; + struct zxdh_dma_mem ietf_mem; + struct completion free_qp; + wait_queue_head_t waitq; + wait_queue_head_t mod_qp_waitq; + u8 rts_ae_rcvd; + uint8_t inline_data[ZXDH_MAX_INLINE_DATA_SIZE]; +}; + +enum zxdh_mmap_flag { + ZXDH_MMAP_IO_NC, + ZXDH_MMAP_IO_WC, + ZXDH_MMAP_PFN, +}; + +struct zxdh_user_mmap_entry { +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry rdma_entry; +#else + struct zxdh_ucontext *ucontext; + struct hlist_node hlist; + u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */ +#endif + u64 bar_offset; + u8 mmap_flag; +}; + +static inline u16 zxdh_fw_major_ver(struct zxdh_sc_dev *dev) +{ + return (u16)FIELD_GET(ZXDH_FW_VER_MAJOR, + dev->feature_info[ZXDH_FEATURE_FW_INFO]); +} + +static inline u16 zxdh_fw_minor_ver(struct zxdh_sc_dev *dev) +{ + return (u16)FIELD_GET(ZXDH_FW_VER_MINOR, + dev->feature_info[ZXDH_FEATURE_FW_INFO]); +} + +/** + * zxdh_mcast_mac_v4 - Get the multicast MAC for an IP address + * @ip_addr: IPv4 address + * @mac: pointer to result MAC address + * + */ +static inline void zxdh_mcast_mac_v4(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac4[ETH_ALEN] = { 0x01, 0x00, 0x5E, + ip[2] & 0x7F, ip[1], ip[0] }; + + ether_addr_copy(mac, mac4); +} + +/** + * zxdh_mcast_mac_v6 - Get the multicast MAC for an IP address + * @ip_addr: IPv6 address + * @mac: pointer to result MAC address + * + */ +static inline void zxdh_mcast_mac_v6(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac6[ETH_ALEN] = { + 0x33, 0x33, ip[3], ip[2], ip[1], ip[0] + }; + + ether_addr_copy(mac, mac6); +} + +void *zxdh_zalloc_mapped(struct zxdh_device *dev, dma_addr_t *dma_addr, + size_t size, enum dma_data_direction dir); +void zxdh_free_mapped(struct zxdh_device *dev, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + enum dma_data_direction dir); +struct rdma_user_mmap_entry * +zxdh_cap_mmap_entry_insert(struct zxdh_ucontext *ucontext, void *address, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset); + +struct rdma_user_mmap_entry * +zxdh_mp_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 phy_addr, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset); +#ifdef RDMA_MMAP_DB_SUPPORT +struct rdma_user_mmap_entry * 
+zxdh_user_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset); +#else +struct zxdh_user_mmap_entry * +zxdh_user_mmap_entry_add_hash(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset); +void zxdh_user_mmap_entry_del_hash(struct zxdh_user_mmap_entry *entry); +#endif /* RDMA_MMAP_DB_SUPPORT */ +int zxdh_ib_register_device(struct zxdh_device *iwdev); +void zxdh_ib_unregister_device(struct zxdh_device *iwdev); +void zxdh_ib_dealloc_device(struct ib_device *ibdev); +void zxdh_ib_qp_event(struct zxdh_qp *iwqp, enum zxdh_qp_event_type event); +void zxdh_generate_flush_completions(struct zxdh_qp *iwqp); +void zxdh_remove_cmpls_list(struct zxdh_cq *iwcq); +int zxdh_generated_cmpls(struct zxdh_cq *iwcq, + struct zxdh_cq_poll_info *cq_poll_info); +void zxdh_flush_worker(struct work_struct *work); +#endif /* ZXDH_VERBS_H */ diff --git a/src/rdma/src/vf.c b/src/rdma/src/vf.c new file mode 100644 index 0000000000000000000000000000000000000000..4e75f5326bed39042e84fa3989452ef43f58be79 --- /dev/null +++ b/src/rdma/src/vf.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" + +/** + * zxdh_manage_vf_pble_bp - manage vf pble + * @cqp: CQP whose SQ provides the WQE + * @info: pble info + * @scratch: opaque value carried through to the CQP completion + * @post_sq: flag to post and ring the CQP SQ + */ +int zxdh_manage_vf_pble_bp(struct zxdh_sc_cqp *cqp, + struct zxdh_manage_vf_pble_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 temp, hdr, pd_pl_pba; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = FIELD_PREP(ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT, info->pd_entry_cnt) | + FIELD_PREP(ZXDH_CQPSQ_MVPBP_FIRST_PD_INX, info->first_pd_index) | + FIELD_PREP(ZXDH_CQPSQ_MVPBP_SD_INX, info->sd_index); + set_64bit_val(wqe, 16, temp); + + pd_pl_pba = FIELD_PREP(ZXDH_CQPSQ_MVPBP_PD_PLPBA, info->pd_pl_pba >> 3); + set_64bit_val(wqe, 32, pd_pl_pba); + + hdr = FIELD_PREP(ZXDH_CQPSQ_MVPBP_INV_PD_ENT, + info->inv_pd_ent ? 
1 : 0) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_VF_PBLE_BP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE VF_PBLE_BP WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} diff --git a/src/rdma/src/vf.h b/src/rdma/src/vf.h new file mode 100644 index 0000000000000000000000000000000000000000..571872b938afd4a9417650d1fa0860ae9422b8a7 --- /dev/null +++ b/src/rdma/src/vf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VF_H +#define ZXDH_VF_H + +struct zxdh_sc_cqp; + +struct zxdh_manage_vf_pble_info { + u32 sd_index; + u16 first_pd_index; + u16 pd_entry_cnt; + u8 inv_pd_ent; + u64 pd_pl_pba; +}; + +int zxdh_manage_vf_pble_bp(struct zxdh_sc_cqp *cqp, + struct zxdh_manage_vf_pble_info *info, u64 scratch, + bool post_sq); +#endif diff --git a/src/rdma/src/virtchnl.c b/src/rdma/src/virtchnl.c new file mode 100644 index 0000000000000000000000000000000000000000..7227632c2d526a1a04d618796ba62726ec9b9cd8 --- /dev/null +++ b/src/rdma/src/virtchnl.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "virtchnl.h" +#include "ws.h" + +/** + * zxdh_find_vf_dev - get vf struct pointer + * @dev: shared device pointer + * @vf_id: virtual function id + */ +struct zxdh_vfdev *zxdh_find_vf_dev(struct zxdh_sc_dev *dev, u16 vf_id) +{ + struct zxdh_vfdev *vf_dev = NULL; + unsigned long flags; + u16 iw_vf_idx; + + spin_lock_irqsave(&dev->vf_dev_lock, flags); + for (iw_vf_idx = 0; iw_vf_idx < dev->num_vfs; iw_vf_idx++) { + if (dev->vf_dev[iw_vf_idx] && + dev->vf_dev[iw_vf_idx]->vf_id == vf_id) { + vf_dev = dev->vf_dev[iw_vf_idx]; + refcount_inc(&vf_dev->refcnt); + break; + } + } + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); + + return vf_dev; +} + +/** + * zxdh_remove_vf_dev - remove vf_dev + * @dev: shared device pointer + * @vf_dev: vf dev to be removed + */ +void zxdh_remove_vf_dev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->vf_dev_lock, flags); + dev->vf_dev[vf_dev->iw_vf_idx] = NULL; + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); +} + +/** + * zxdh_put_vfdev - put vfdev and free memory + * @dev: pointer to RDMA dev structure + * @vf_dev: pointer to RDMA vf dev structure + */ +void zxdh_put_vfdev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev) +{ + if (refcount_dec_and_test(&vf_dev->refcnt)) { + struct zxdh_virt_mem virt_mem; + + if (vf_dev->hmc_info.sd_table.sd_entry) { + virt_mem.va = vf_dev->hmc_info.sd_table.sd_entry; + virt_mem.size = sizeof(struct zxdh_hmc_sd_entry) * + (vf_dev->hmc_info.hmc_entry_total); + kfree(virt_mem.va); + } + + virt_mem.va = vf_dev; + virt_mem.size = sizeof(*vf_dev); + kfree(virt_mem.va); + } +} diff --git a/src/rdma/src/virtchnl.h b/src/rdma/src/virtchnl.h new file mode 100644 index 0000000000000000000000000000000000000000..6892f13aaad31744ea0bcd065d845ca0f1acc1b7 --- /dev/null +++ b/src/rdma/src/virtchnl.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VIRTCHNL_H +#define ZXDH_VIRTCHNL_H + +#include 
"hmc.h" + +#pragma pack(push, 1) + +struct zxdh_virtchnl_op_buf { + u16 op_code; + u16 op_ver; + u16 buf_len; + u16 rsvd; + u64 op_ctx; + /* Member alignment MUST be maintained above this location */ + u8 buf[]; +}; + +struct zxdh_virtchnl_resp_buf { + u64 op_ctx; + u16 buf_len; + s16 op_ret_code; + /* Member alignment MUST be maintained above this location */ + u16 rsvd[2]; + u8 buf[]; +}; + +enum zxdh_virtchnl_ops { + ZXDH_VCHNL_OP_GET_VER = 0, + ZXDH_VCHNL_OP_GET_HMC_FCN = 1, + ZXDH_VCHNL_OP_PUT_HMC_FCN = 2, + ZXDH_VCHNL_OP_ADD_HMC_OBJ_RANGE = 3, + ZXDH_VCHNL_OP_DEL_HMC_OBJ_RANGE = 4, + ZXDH_VCHNL_OP_GET_STATS = 5, + ZXDH_VCHNL_OP_MANAGE_STATS_INST = 6, + ZXDH_VCHNL_OP_MCG = 7, + ZXDH_VCHNL_OP_UP_MAP = 8, + ZXDH_VCHNL_OP_MANAGE_WS_NODE = 9, + ZXDH_VCHNL_OP_VLAN_PARSING = 12, +}; + +#define ZXDH_VCHNL_CHNL_VER_V0 0 +#define ZXDH_VCHNL_CHNL_VER_V1 1 + +#define ZXDH_VCHNL_OP_GET_VER_V0 0 +#define ZXDH_VCHNL_OP_GET_VER_V1 1 + +#define ZXDH_VCHNL_OP_GET_HMC_FCN_V0 0 +#define ZXDH_VCHNL_OP_PUT_HMC_FCN_V0 0 +#define ZXDH_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0 +#define ZXDH_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0 +#define ZXDH_VCHNL_OP_GET_STATS_V0 0 +#define ZXDH_VCHNL_OP_MANAGE_WS_NODE_V0 0 +#define ZXDH_VCHNL_OP_VLAN_PARSING_V0 0 +#define ZXDH_VCHNL_INVALID_VF_IDX 0xFFFF + +struct zxdh_virtchnl_hmc_obj_range { + u16 obj_type; + u16 rsvd; + u32 start_index; + u32 obj_count; +}; + +struct zxdh_virtchnl_manage_ws_node { + u8 add; + u8 user_pri; +}; + +struct zxdh_vfdev *zxdh_find_vf_dev(struct zxdh_sc_dev *dev, u16 vf_id); +void zxdh_put_vfdev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev); +void zxdh_remove_vf_dev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev); +struct zxdh_virtchnl_req { + struct zxdh_virtchnl_op_buf *vchnl_msg; + void *parm; + u32 vf_id; + u16 parm_len; + u16 resp_len; +}; + +#pragma pack(pop) + +#endif diff --git a/src/rdma/src/ws.h b/src/rdma/src/ws.h new file mode 100644 index 0000000000000000000000000000000000000000..ff9d786adfaa1e59f1d462ce3bece78144f3dfa1 --- /dev/null +++ b/src/rdma/src/ws.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_WS_H +#define ZXDH_WS_H + +#include "osdep.h" + +struct zxdh_ws_node { + struct list_head siblings; + struct list_head child_list_head; + struct zxdh_ws_node *parent; + u64 lan_qs_handle; /* opaque handle used by LAN */ + u32 l2_sched_node_id; + u16 index; + u16 qs_handle; + u16 vsi_index; + u8 traffic_class; + u8 user_pri; + u8 rel_bw; + u8 abstraction_layer; /* used for splitting a TC */ + u8 prio_type; + u8 type_leaf : 1; + u8 enable : 1; +}; + +struct zxdh_sc_vsi; + +#endif /* ZXDH_WS_H */ diff --git a/src/rdma/src/zrdma-abi.h b/src/rdma/src/zrdma-abi.h new file mode 100644 index 0000000000000000000000000000000000000000..8ab7464fa764b347dca6969d4af283204ac405c9 --- /dev/null +++ b/src/rdma/src/zrdma-abi.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ +/* + * Copyright (c) 2023 - 2024 ZTE Corporation. All rights reserved. + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 
+ */ + +#ifndef ZRDMA_ABI_H +#define ZRDMA_ABI_H + +#include + +/* zrdma must support legacy GEN_1 i40iw kernel + * and user-space whose last ABI ver is 5 + */ +#define ZXDH_ABI_VER 5 + +enum zxdh_memreg_type { + ZXDH_MEMREG_TYPE_MEM = 0, + ZXDH_MEMREG_TYPE_QP = 1, + ZXDH_MEMREG_TYPE_CQ = 2, + ZXDH_MEMREG_TYPE_SRQ = 3, +}; + +enum zxdh_db_addr_type { + ZXDH_DB_ADDR_PHY = 0, + ZXDH_DB_ADDR_BAR = 1, +}; + +struct zxdh_alloc_ucontext_req { + __u32 rsvd32; + __u8 userspace_ver; + __u8 rsvd8[3]; +}; + +struct zxdh_alloc_ucontext_resp { + __u32 max_pds; + __u32 max_qps; + __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */ + __u8 kernel_ver; + __u8 db_addr_type; + __u8 rsvd[2]; + __aligned_u64 feature_flags; + __aligned_u64 sq_db_mmap_key; + __aligned_u64 cq_db_mmap_key; + __aligned_u64 sq_db_pa; + __aligned_u64 cq_db_pa; + __u32 max_hw_wq_frags; + __u32 max_hw_read_sges; + __u32 max_hw_inline; + __u32 max_hw_rq_quanta; + __u32 max_hw_srq_quanta; + __u32 max_hw_wq_quanta; + __u32 max_hw_srq_wr; + __u32 min_hw_cq_size; + __u32 max_hw_cq_size; + __u16 max_hw_sq_chunk; + __u8 hw_rev; + __u8 rsvd2; +}; + +struct zxdh_alloc_pd_resp { + __u32 pd_id; + __u8 rsvd[4]; +}; + +struct zxdh_resize_cq_req { + __aligned_u64 user_cq_buffer; +}; + +struct zxdh_create_cq_req { + __aligned_u64 user_cq_buf; + __aligned_u64 user_shadow_area; +}; + +struct zxdh_create_qp_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; +}; + +struct zxdh_mem_reg_req { + __u16 reg_type; /* enum zxdh_memreg_type */ + __u16 cq_pages; + __u16 rq_pages; + __u16 sq_pages; + __u16 srq_pages; + __u16 srq_list_pages; + __u8 rsvd[4]; +}; + +struct zxdh_reg_mr_resp { + __u32 mr_pa_low; + __u32 mr_pa_hig; + __u16 host_page_size; + __u16 leaf_pbl_size; + __u8 rsvd[4]; +}; + +struct zxdh_modify_qp_req { + __u8 sq_flush; + __u8 rq_flush; + __u8 rsvd[6]; +}; + +struct zxdh_create_cq_resp { + __u32 cq_id; + __u32 cq_size; +}; + +struct zxdh_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 zxdh_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd; + __u32 qp_caps; +}; + +struct zxdh_modify_qp_resp { + __aligned_u64 push_wqe_mmap_key; + __aligned_u64 push_db_mmap_key; + __u16 push_offset; + __u8 push_valid; + __u8 rd_fence_rate; + __u8 rsvd[4]; +}; + +struct zxdh_create_ah_resp { + __u32 ah_id; + __u8 rsvd[4]; +}; +#endif /* ZXDH_ABI_H */ diff --git a/src/rdma/src/zrdma.h b/src/rdma/src/zrdma.h new file mode 100644 index 0000000000000000000000000000000000000000..8d82158f1634ebf3592cea1688f65f0480f0492c --- /dev/null +++ b/src/rdma/src/zrdma.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZRDMA_H +#define ZRDMA_H + +#define RDMA_BIT2(type, a) ((u##type)1UL << a) +#define RDMA_MASK3(type, mask, shift) ((u##type)mask << shift) +#define MAKEMASK(m, s) ((m) << (s)) + +#define ZXDH_WQEALLOC_WQE_DESC_INDEX_S 20 +#define ZXDH_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20) + +#define ZXDH_CQPTAIL_WQTAIL_S 0 +#define ZXDH_CQPTAIL_WQTAIL GENMASK(10, 0) +#define ZXDH_CQPTAIL_CQP_OP_ERR_S 31 +#define ZXDH_CQPTAIL_CQP_OP_ERR BIT(31) + +#define ZXDH_CQPERRCODES_CQP_MINOR_CODE_S 0 +#define ZXDH_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0) +#define ZXDH_CQPERRCODES_CQP_MAJOR_CODE_S 16 +#define ZXDH_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16) +// CQP Address Masks +#define ZXDH_CQPADDR_HIGH_S 32 +#define ZXDH_CQPADDR_HIGH GENMASK_ULL(63, 32) +#define ZXDH_CQPADDR_LOW_S 0 +#define ZXDH_CQPADDR_LOW GENMASK_ULL(31, 0) + +#define 
ZXDH_GLPCI_LBARCTRL_PE_DB_SIZE_S 4 +#define ZXDH_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4) +#define ZXDH_GLINT_RATE_INTERVAL_S 0 +#define ZXDH_GLINT_RATE_INTERVAL GENMASK(4, 0) +#define ZXDH_GLINT_RATE_INTRL_ENA_S 6 +#define ZXDH_GLINT_RATE_INTRL_ENA_M BIT(6) +#define ZXDH_GLINT_RATE_INTRL_ENA BIT(6) + +#define ZXDH_GLINT_DYN_CTL_INTENA_S 0 +#define ZXDH_GLINT_DYN_CTL_INTENA BIT(0) +#define ZXDH_GLINT_DYN_CTL_CLEARPBA_S 1 +#define ZXDH_GLINT_DYN_CTL_CLEARPBA BIT(1) +#define ZXDH_GLINT_DYN_CTL_ITR_INDX_S 3 +#define ZXDH_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3) +#define ZXDH_GLINT_DYN_CTL_INTERVAL_S 5 +#define ZXDH_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5) +#define ZXDH_GLINT_CEQCTL_ITR_INDX_S 11 +#define ZXDH_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11) +#define ZXDH_GLINT_CEQCTL_CAUSE_ENA_S 30 +#define ZXDH_GLINT_CEQCTL_CAUSE_ENA BIT(30) +#define ZXDH_GLINT_CEQCTL_MSIX_INDX_S 0 +#define ZXDH_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0) +#define ZXDH_PFINT_AEQCTL_MSIX_INDX_S 0 +#define ZXDH_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0) +#define ZXDH_PFINT_AEQCTL_ITR_INDX_S 11 +#define ZXDH_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11) +#define ZXDH_PFINT_AEQCTL_CAUSE_ENA_S 30 +#define ZXDH_PFINT_AEQCTL_CAUSE_ENA BIT(30) +#define ZXDH_PFHMC_PDINV_PMSDIDX_S 0 +#define ZXDH_PFHMC_PDINV_PMSDIDX GENMASK(11, 0) +#define ZXDH_PFHMC_PDINV_PMSDPARTSEL_S 15 +#define ZXDH_PFHMC_PDINV_PMSDPARTSEL BIT(15) +#define ZXDH_PFHMC_PDINV_PMPDIDX_S 16 +#define ZXDH_PFHMC_PDINV_PMPDIDX GENMASK(24, 16) +#define ZXDH_PFHMC_SDDATALOW_PMSDVALID_S 0 +#define ZXDH_PFHMC_SDDATALOW_PMSDVALID BIT(0) +#define ZXDH_PFHMC_SDDATALOW_PMSDTYPE_S 1 +#define ZXDH_PFHMC_SDDATALOW_PMSDTYPE BIT(1) +#define ZXDH_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2 +#define ZXDH_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2) +#define ZXDH_PFHMC_SDDATALOW_PMSDDATALOW_S 12 +#define ZXDH_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12) +#define ZXDH_PFHMC_SDCMD_PMSDWR_S 31 +#define ZXDH_PFHMC_SDCMD_PMSDWR BIT(31) +#define ZXDH_PFHMC_SDCMD_PMSDPARTSEL_S 15 +#define ZXDH_PFHMC_SDCMD_PMSDPARTSEL BIT(15) + +#define ZXDH_INVALID_CQ_IDX 0xffffffff + +enum zxdh_dyn_idx_t { + ZXDH_IDX_ITR0 = 0, + ZXDH_IDX_ITR1 = 1, + ZXDH_IDX_ITR2 = 2, + ZXDH_IDX_NOITR = 3, +}; + +enum zxdh_registers { + ZXDH_CQPTAIL, + ZXDH_CQPDB, + ZXDH_CCQPSTATUS, + ZXDH_CCQPHIGH, + ZXDH_CCQPLOW, + ZXDH_CQARM, + ZXDH_CQACK, + ZXDH_AEQALLOC, + ZXDH_CQPERRCODES, + ZXDH_WQEALLOC, + ZXDH_GLINT_DYN_CTL, + ZXDH_DB_ADDR_OFFSET, + ZXDH_GLPCI_LBARCTRL, + ZXDH_GLPE_CPUSTATUS0, + ZXDH_GLPE_CPUSTATUS1, + ZXDH_GLPE_CPUSTATUS2, + ZXDH_PFINT_AEQCTL, + ZXDH_GLINT_CEQCTL, + ZXDH_VSIQF_PE_CTL1, + ZXDH_PFHMC_PDINV, + ZXDH_GLHMC_VFPDINV, + ZXDH_GLPE_CRITERR, + ZXDH_GLINT_RATE, + ZXDH_MAX_REGS, /* Must be last entry */ +}; + +enum zxdh_shifts { + ZXDH_CCQPSTATUS_CCQP_DONE_S, + ZXDH_CCQPSTATUS_CCQP_ERR_S, + ZXDH_CQPSQ_STAG_PDID_S, + ZXDH_CQPSQ_CQ_CEQID_S, + ZXDH_CQPSQ_CQ_CQID_S, + ZXDH_COMMIT_FPM_CQCNT_S, + ZXDH_MAX_SHIFTS, +}; + +enum zxdh_masks { + ZXDH_CCQPSTATUS_CCQP_DONE_M, + ZXDH_CCQPSTATUS_CCQP_ERR_M, + ZXDH_CQPSQ_STAG_PDID_M, + ZXDH_CQPSQ_CQ_CEQID_M, + ZXDH_CQPSQ_CQ_CQID_M, + ZXDH_COMMIT_FPM_CQCNT_M, + ZXDH_MAX_MASKS, /* Must be last entry */ +}; + +#define ZXDH_MAX_MGS_PER_CTX 1022 + +struct zxdh_mcast_grp_ctx_entry_info { + u32 qp_id; + bool valid_entry; + u16 dest_port; + u32 use_cnt; +}; + +struct zxdh_mcast_grp_info { + u8 dest_mac_addr[ETH_ALEN]; + u16 vlan_id; + u8 hmc_fcn_id; + u8 ipv4_valid : 1; + u8 vlan_valid : 1; + u16 mg_id; + u32 no_of_mgs; + u32 dest_ip_addr[4]; + u16 qs_handle; + struct zxdh_dma_mem dma_mem_mc; + struct 
zxdh_mcast_grp_ctx_entry_info mg_ctx_info[ZXDH_MAX_MGS_PER_CTX]; +}; + +enum zxdh_rdma_vers { + ZXDH_GEN_RSVD, + ZXDH_GEN_1, + ZXDH_GEN_2, +}; + +struct zxdh_uk_attrs { + u64 feature_flags; + u32 max_hw_wq_frags; + u32 max_hw_read_sges; + u32 max_hw_inline; + u32 max_hw_srq_quanta; + u32 max_hw_rq_quanta; + u32 max_hw_wq_quanta; + u32 min_hw_cq_size; + u32 max_hw_cq_size; + u16 max_hw_sq_chunk; + u32 max_hw_srq_wr; + u8 hw_rev; +}; + +struct zxdh_hw_attrs { + struct zxdh_uk_attrs uk_attrs; + u64 max_hw_outbound_msg_size; + u64 max_hw_inbound_msg_size; + u64 max_mr_size; + u32 min_hw_qp_id; + u32 min_hw_aeq_size; + u32 max_hw_aeq_size; + u32 min_hw_ceq_size; + u32 max_hw_ceq_size; + u32 max_hw_device_pages; + u32 max_hw_vf_fpm_id; + u32 first_hw_vf_fpm_id; + u32 max_hw_ird; + u32 max_hw_ord; + u32 max_hw_wqes; + u32 max_hw_pds; + u32 max_hw_ena_vf_count; + u32 max_qp_wr; + u32 max_srq_wr; + u32 max_pe_ready_count; + u32 max_done_count; + u32 max_sleep_count; + u32 max_cqp_compl_wait_time_ms; + u16 max_stat_inst; + u16 max_stat_idx; +}; + +void i40iw_init_hw(struct zxdh_sc_dev *dev); +void zxdh_init_hw(struct zxdh_sc_dev *dev); +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); +#endif /* ZXDH_H*/ diff --git a/src/rdma/src/zrdma_kcompat.c b/src/rdma/src/zrdma_kcompat.c new file mode 100644 index 0000000000000000000000000000000000000000..53472bcf5bff7a8b5317a5dd0b4742e2e6798aea --- /dev/null +++ b/src/rdma/src/zrdma_kcompat.c @@ -0,0 +1,4643 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "main.h" +#include "icrdma_hw.h" + +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width) +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width) +#endif +{ + int rc; + u32 netdev_speed; + struct ethtool_link_ksettings lksettings; + + if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) + return -EINVAL; + + rtnl_lock(); + rc = __ethtool_get_link_ksettings(netdev, &lksettings); + rtnl_unlock(); + + // dev_put(netdev); + + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { + netdev_speed = lksettings.base.speed; + } else { + netdev_speed = SPEED_1000; + pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name, + netdev_speed); + } + + if (netdev_speed <= SPEED_1000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_SDR; + } else if (netdev_speed <= SPEED_10000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_FDR10; + } else if (netdev_speed <= SPEED_20000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_DDR; + } else if (netdev_speed <= SPEED_25000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_EDR; + } else if (netdev_speed <= SPEED_40000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_FDR10; + } else if (netdev_speed <= SPEED_100000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_EDR; + } else { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_HDR; + } + + return 0; +} + +#ifdef IB_FW_VERSION_NAME_MAX +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str) +{ + struct zxdh_device *iwdev = to_iwdev(dev); + struct ethtool_drvinfo info; + + memset(&info, 0, sizeof(info)); + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version); +} +#else +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len) +{ + struct zxdh_device *iwdev = 
to_iwdev(dev); + struct ethtool_drvinfo info; + + memset(&info, 0, sizeof(info)); + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + snprintf(str, str_len, "%s", info.fw_version); +} +#endif /* IB_FW_VERSION_NAME_MAX */ + +#ifdef ZXDH_ADD_DEL_GID +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context) +{ + return 0; +} + +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context) +{ + return 0; +} +#endif /* < 4.17.0 */ + +#ifdef ZXDH_ALLOC_MR_VER_1 +/** + * zxdh_alloc_mr - register stag for fast memory registration + * @pd: ibpd pointer + * @mr_type: memory type for stag registration + * @max_num_sg: max number of pages + * @udata: user data + */ +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata) +{ +#elif defined(ZXDH_ALLOC_MR_VER_0) +/** + * zxdh_alloc_mr - register stag for fast memory registration + * @pd: ibpd pointer + * @mr_type: memory type for stag registration + * @max_num_sg: max number of pages + */ +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ +#endif + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_pble_alloc *palloc; + struct zxdh_pbl *iwpbl; + struct zxdh_mr *iwmr; + int status; + u32 stag; + int err_code = -ENOMEM; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + stag = zxdh_create_stag(iwdev); + if (!stag) { + err_code = -ENOMEM; + goto err; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = ZXDH_MEMREG_TYPE_MEM; + palloc = &iwpbl->pble_alloc; + iwmr->page_cnt = max_num_sg; + iwmr->sc_dev = &iwdev->rf->sc_dev; + status = zxdh_get_pble(iwdev->rf->pble_mr_rsrc, palloc, iwmr->page_cnt, + true); + if (status) + goto err_get_pble; + + err_code = zxdh_hw_alloc_stag(iwdev, iwmr); + if (err_code) + goto err_alloc_stag; + + iwpbl->pbl_allocated = true; + + return &iwmr->ibmr; +err_alloc_stag: + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); +err_get_pble: + zxdh_free_stag(iwdev, stag); +err: + kfree(iwmr); + + return ERR_PTR(err_code); +} + +#ifdef ALLOC_UCONTEXT_VER_2 +/** + * zxdh_alloc_ucontext - Allocate the user context data structure + * @uctx: context + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. 
+ */ +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) +{ + struct ib_device *ibdev = uctx->device; + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_alloc_ucontext_req req; + struct zxdh_alloc_ucontext_resp uresp = {}; + struct zxdh_ucontext *ucontext = to_ucontext(uctx); + struct zxdh_uk_attrs *uk_attrs; + u64 sq_db_bar_off, cq_db_bar_off; + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) + return -EINVAL; + + if (req.userspace_ver < 4 || req.userspace_ver > ZXDH_ABI_VER) + goto ver_error; + + ucontext->iwdev = iwdev; + ucontext->abi_ver = req.userspace_ver; + + uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + + sq_db_bar_off = C_RDMA_TX_VHCA_PF_PAGE; + cq_db_bar_off = C_RDMA_RX_VHCA_PF_PAGE; + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#else + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->sq_db_mmap_entry) + return -ENOMEM; + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#else + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->cq_db_mmap_entry) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); +#endif + return -ENOMEM; + } + + uresp.kernel_ver = ZXDH_ABI_VER; + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_srq_wr = uk_attrs->max_hw_srq_wr; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.hw_rev = uk_attrs->hw_rev; + uresp.db_addr_type = ZXDH_DB_ADDR_BAR; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); +#endif + return -EFAULT; + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->srq_reg_mem_list); + spin_lock_init(&ucontext->srq_reg_mem_list_lock); +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE + INIT_LIST_HEAD(&ucontext->vma_list); + mutex_init(&ucontext->vma_list_mutex); +#endif + + return 0; + +ver_error: + dev_err(idev_to_dev(&iwdev->rf->sc_dev), + "Invalid userspace driver version detected. 
Detected version %d, should be %d\n", + req.userspace_ver, ZXDH_ABI_VER); + return -EINVAL; +} +#endif + +#ifdef ALLOC_UCONTEXT_VER_1 +/** + * zxdh_alloc_ucontext - Allocate the user context data structure + * @ibdev: ib device pointer + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. + */ +struct ib_ucontext *zxdh_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_alloc_ucontext_req req; + struct zxdh_alloc_ucontext_resp uresp = {}; + struct zxdh_ucontext *ucontext; + struct zxdh_uk_attrs *uk_attrs; + u64 sq_db_bar_off, cq_db_bar_off; + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) + return ERR_PTR(-EINVAL); + + if (req.userspace_ver < 4 || req.userspace_ver > ZXDH_ABI_VER) + goto ver_error; + + ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); + if (!ucontext) + return ERR_PTR(-ENOMEM); + + ucontext->iwdev = iwdev; + ucontext->abi_ver = req.userspace_ver; + + uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + + sq_db_bar_off = C_RDMA_TX_VHCA_PF_PAGE + iwdev->rf->base_bar_offset; + cq_db_bar_off = C_RDMA_RX_VHCA_PF_PAGE + iwdev->rf->base_bar_offset; + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#else + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->sq_db_mmap_entry) { + kfree(ucontext); + return ERR_PTR(-ENOMEM); + } + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#else + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->cq_db_mmap_entry) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); +#endif + kfree(ucontext); + return ERR_PTR(-ENOMEM); + } + + uresp.kernel_ver = ZXDH_ABI_VER; + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_srq_wr = uk_attrs->max_hw_srq_wr; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.hw_rev = uk_attrs->hw_rev; + + uresp.db_addr_type = ZXDH_DB_ADDR_BAR; + + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); +#endif + kfree(ucontext); + return ERR_PTR(-EFAULT); + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + 
spin_lock_init(&ucontext->qp_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->srq_reg_mem_list); + spin_lock_init(&ucontext->srq_reg_mem_list_lock); +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE + INIT_LIST_HEAD(&ucontext->vma_list); + mutex_init(&ucontext->vma_list_mutex); +#endif + + return &ucontext->ibucontext; + +ver_error: + ibdev_err( + &iwdev->ibdev, + "Invalid userspace driver version detected. Detected version %d, should be %d\n", + req.userspace_ver, ZXDH_ABI_VER); + return ERR_PTR(-EINVAL); +} +#endif + +#ifdef DEALLOC_UCONTEXT_VER_2 +/** + * zxdh_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +void zxdh_dealloc_ucontext(struct ib_ucontext *context) +{ + struct zxdh_ucontext *ucontext = to_ucontext(context); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); +#endif +} +#endif + +#ifdef DEALLOC_UCONTEXT_VER_1 +/** + * zxdh_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +int zxdh_dealloc_ucontext(struct ib_ucontext *context) +{ + struct zxdh_ucontext *ucontext = to_ucontext(context); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); +#endif + kfree(ucontext); + + return 0; +} +#endif + +#ifdef ALLOC_PD_VER_3 +/** + * zxdh_alloc_pd - allocate protection domain + * @pd: protection domain + * @udata: user data + */ +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(pd); + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_alloc_pd_resp uresp = {}; + struct zxdh_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, + &rf->next_pd); + if (err) + return err; + + sc_pd = &iwpd->sc_pd; + if (udata) { + struct zxdh_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct zxdh_ucontext, ibucontext); + + zxdh_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + err = -EFAULT; + goto error; + } + } else { + zxdh_sc_pd_init(dev, sc_pd, pd_id, ZXDH_ABI_VER); + } + + return 0; + +error: + + zxdh_free_rsrc(rf, rf->allocated_pds, pd_id); + + return err; +} +#endif + +#ifdef ALLOC_PD_VER_2 +/** + * zxdh_alloc_pd - allocate protection domain + * @pd: protection domain + * @context: user context + * @udata: user data + */ +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(pd); + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_alloc_pd_resp uresp = {}; + struct zxdh_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, + &rf->next_pd); + if (err) + return err; + + sc_pd = &iwpd->sc_pd; + if (udata) { + struct zxdh_ucontext *ucontext = to_ucontext(context); + + zxdh_sc_pd_init(dev, 
sc_pd, pd_id, ucontext->abi_ver); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + err = -EFAULT; + goto error; + } + } else { + zxdh_sc_pd_init(dev, sc_pd, pd_id, ZXDH_ABI_VER); + } + + return 0; + +error: + + zxdh_free_rsrc(rf, rf->allocated_pds, pd_id); + + return err; +} +#endif + +#ifdef ALLOC_PD_VER_1 +/** + * zxdh_alloc_pd - allocate protection domain + * @ibdev: IB device + * @context: user context + * @udata: user data + */ +struct ib_pd *zxdh_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd; + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_alloc_pd_resp uresp = {}; + struct zxdh_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, + &rf->next_pd); + if (err) + return ERR_PTR(err); + + iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); + if (!iwpd) { + err = -ENOMEM; + goto free_res; + } + + sc_pd = &iwpd->sc_pd; + if (udata) { + struct zxdh_ucontext *ucontext = to_ucontext(context); + + zxdh_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + err = -EFAULT; + goto error; + } + } else { + zxdh_sc_pd_init(dev, sc_pd, pd_id, ZXDH_ABI_VER); + } + + return &iwpd->ibpd; + +error: + kfree(iwpd); +free_res: + + zxdh_free_rsrc(rf, rf->allocated_pds, pd_id); + + return ERR_PTR(err); +} + +#endif + +#ifdef DEALLOC_PD_VER_4 +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); + return 0; +} + +#endif + +#ifdef DEALLOC_PD_VER_3 +void zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); +} + +#endif + +#ifdef DEALLOC_PD_VER_2 +void zxdh_dealloc_pd(struct ib_pd *ibpd) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); +} +#endif + +#ifdef DEALLOC_PD_VER_1 +int zxdh_dealloc_pd(struct ib_pd *ibpd) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); + kfree(iwpd); + return 0; +} +#endif + +static void zxdh_fill_ah_info(struct zxdh_ah_info *ah_info, + const struct ib_gid_attr *sgid_attr, + struct sockaddr *sgid_addr, + struct sockaddr *dgid_addr, u8 net_type) +{ + if (net_type == RDMA_NETWORK_IPV4) { + ah_info->ipv4_valid = true; + ah_info->dest_ip_addr[0] = ntohl( + ((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr); + ah_info->src_ip_addr[0] = ntohl( + ((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr); + ah_info->do_lpbk = zxdh_ipv4_is_lpb(ah_info->src_ip_addr[0], + ah_info->dest_ip_addr[0]); + if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr) + ->sin_addr.s_addr)) { + zxdh_mcast_mac_v4(ah_info->dest_ip_addr, ah_info->dmac); + } + } else { + zxdh_copy_ip_ntohl(ah_info->dest_ip_addr, + ((struct sockaddr_in6 *)dgid_addr) + ->sin6_addr.in6_u.u6_addr32); + zxdh_copy_ip_ntohl(ah_info->src_ip_addr, + ((struct sockaddr_in6 *)sgid_addr) + 
->sin6_addr.in6_u.u6_addr32); + ah_info->do_lpbk = zxdh_ipv6_is_lpb(ah_info->src_ip_addr, + ah_info->dest_ip_addr); + if (rdma_is_multicast_addr( + &((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) { + zxdh_mcast_mac_v6(ah_info->dest_ip_addr, ah_info->dmac); + } + } +} + +static int zxdh_create_ah_vlan_tag(struct zxdh_device *iwdev, + struct zxdh_ah_info *ah_info, + const struct ib_gid_attr *sgid_attr) +{ +#if !defined(CREATE_AH_VER_2) && !defined(CREATE_AH_VER_5) + if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev)) + ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev); + else + ah_info->vlan_tag = VLAN_N_VID; + +#endif + + if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) + ah_info->vlan_tag = 0; + + if (ah_info->vlan_tag < VLAN_N_VID) { + ah_info->insert_vlan_tag = true; + ah_info->vlan_tag |= rt_tos2priority(ah_info->tc_tos) + << VLAN_PRIO_SHIFT; + } + return 0; +} + +static int zxdh_create_ah_wait(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, + bool sleep) +{ + if (!sleep) { + int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * + CQP_TIMEOUT_THRESHOLD; + + do { + zxdh_cqp_ce_handler(rf, &rf->ccq.sc_cq); + mdelay(1); + } while (!sc_ah->ah_info.ah_valid && --cnt); + + if (!cnt) + return -ETIMEDOUT; + } + return 0; +} + +#ifndef CREATE_AH_VER_0 +static bool zxdh_ah_exists(struct zxdh_device *iwdev, struct zxdh_ah *new_ah) +{ + struct zxdh_ah *ah; + u32 save_ah_id = new_ah->sc_ah.ah_info.ah_idx; + + list_for_each_entry(ah, &iwdev->ah_list, list) { + /* Set ah_id the same so memcp can work */ + new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx; + if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info, + sizeof(ah->sc_ah.ah_info))) { + refcount_inc(&ah->refcnt); + new_ah->parent_ah = ah; + return true; + } + } + new_ah->sc_ah.ah_info.ah_idx = save_ah_id; + /* Add new AH to list */ + if (iwdev->ah_list_cnt >= ZXDH_MAX_AH_LIST) + return false; + ah = kmemdup(new_ah, sizeof(*new_ah), GFP_KERNEL); + if (!ah) + return false; + new_ah->parent_ah = ah; + list_add(&ah->list, &iwdev->ah_list); + iwdev->ah_list_cnt++; + if (iwdev->ah_list_cnt > iwdev->ah_list_hwm) + iwdev->ah_list_hwm = iwdev->ah_list_cnt; + refcount_set(&ah->refcnt, 1); + + return false; +} +#endif + +#if defined(CREATE_AH_VER_3) || defined(CREATE_AH_VER_4) +/** + * zxdh_create_ah_sleepable - create address handle + * @ibpd: Protection Domain for AH + * @attr: address handle attributes + * @sleep: wait for creation + * @udata: user data + * + * returns a pointer to an address handle + */ +static struct ib_ah *zxdh_create_ah_sleepable(struct ib_pd *ibpd, + struct rdma_ah_attr *attr, + bool sleep, + struct ib_udata *udata) +{ + struct zxdh_pd *pd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_ah *ah; + const struct ib_gid_attr *sgid_attr; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + if (err) + return ERR_PTR(err); + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) { + zxdh_free_rsrc(rf, rf->allocated_ahs, ah_id); + return ERR_PTR(-ENOMEM); + } + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + 
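+ /* resolve the source and destination GIDs into IP addresses that are used below to program the AH */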
memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + sgid_attr = attr->grh.sgid_attr; + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + ether_addr_copy(ah_info->mac_addr, iwdev->netdev->dev_addr); + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + ether_addr_copy(ah_info->dmac, attr->roce.dmac); + + zxdh_fill_ah_info(ah_info, sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, sgid_attr); + if (err) + goto err_gid_l2; + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + + return &ah->ibah; + +err_ah_create: + if (ah->parent_ah) { + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + kfree(ah); + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return ERR_PTR(err); +} +#endif + +#ifdef CREATE_AH_VER_3 +/** + * zxdh_create_ah - create address handle + * @ibpd: Protection Domain for AH + * @attr: address handle attributes + * @flags: AH flags to wait + * @udata: user data + * + * returns a pointer to an address handle + */ +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + u32 flags, struct ib_udata *udata) +{ + bool sleep = flags & RDMA_CREATE_AH_SLEEPABLE; + + return zxdh_create_ah_sleepable(ibpd, attr, sleep, udata); +} +#endif +#ifdef CREATE_AH_VER_4 +/** + * zxdh_create_ah - create address handle + * @ibpd: ptr to pd + * @attr: address handle attributes + * @udata: user data + * + * returns a pointer to an address handle + */ +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + struct ib_udata *udata) +{ + bool sleep = udata ? 
true : false; + + return zxdh_create_ah_sleepable(ibpd, attr, sleep, udata); +} +#endif + +#ifdef CREATE_AH_VER_2 +/** + * zxdh_create_ah - create address handle + * @ib_ah: ptr to AH + * @attr: address handle attributes + * @flags: AH flags to wait + * @udata: user data + * + * returns 0 on success, error otherwise + */ +int zxdh_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata) +#elif defined(CREATE_AH_VER_5) +int zxdh_create_ah_v2(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata) +#endif +#if defined(CREATE_AH_VER_2) || defined(CREATE_AH_VER_5) +{ + struct zxdh_pd *pd = to_iwpd(ib_ah->pd); + struct zxdh_ah *ah = container_of(ib_ah, struct zxdh_ah, ibah); + struct zxdh_device *iwdev = to_iwdev(ib_ah->pd->device); + const struct ib_gid_attr *sgid_attr; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp = {}; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + bool sleep = flags & RDMA_CREATE_AH_SLEEPABLE; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return err; + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + sgid_attr = attr->grh.sgid_attr; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, + ah_info->mac_addr); + + if (err) + goto err_gid_l2; + + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + ether_addr_copy(ah_info->dmac, attr->roce.dmac); + + zxdh_fill_ah_info(ah_info, sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + zxdh_create_ah_vlan_tag(iwdev, ah_info, sgid_attr); + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + return 0; +err_ah_create: + if (ah->parent_ah) { 
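+ /* drop the cached AH entry from iwdev->ah_list before freeing it */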
+ list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return err; +} +#endif + +#ifdef CREATE_AH_VER_6 +/** + * zxdh_create_ah - create address handle + * @ib_ah: ptr to AH + * @attr: address handle attributes + * @flags: AH flags to wait + * @udata: user data + * + * returns 0 on success, error otherwise + */ +int zxdh_create_ah(struct ib_ah *ib_ah, struct ib_ah_attr *attr, u32 flags, + struct ib_udata *udata) +{ + struct zxdh_pd *pd = to_iwpd(ib_ah->pd); + struct zxdh_ah *ah = container_of(ib_ah, struct zxdh_ah, ibah); + struct zxdh_device *iwdev = to_iwdev(ib_ah->pd->device); + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + bool sleep = flags & RDMA_CREATE_AH_SLEEPABLE; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return err; + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + rcu_read_lock(); + err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, + attr->grh.sgid_index, &sgid, &sgid_attr); + rcu_read_unlock(); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: GID lookup at idx=%d with port=%d failed\n", + attr->grh.sgid_index, attr->port_num); + err = -EINVAL; + goto err_gid_l2; + } + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + + if (kc_deref_sgid_attr(sgid_attr)) + dev_put(kc_deref_sgid_attr(sgid_attr)); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + ether_addr_copy(ah_info->mac_addr, iwdev->netdev->dev_addr); + + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + ether_addr_copy(ah_info->dmac, attr->dmac); + + zxdh_fill_ah_info(ah_info, &sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr); + if (err) + goto err_gid_l2; + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + 
refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + + return 0; +err_ah_create: + if (ah->parent_ah) { + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return err; +} +#endif /* CREATE_AH_VER_6 */ + +#ifdef CREATE_AH_VER_5 +/** + * zxdh_create_ah - create address handle + * @ibah: ptr to AH + * @init_attr: address handle attributes + * @udata: user data + * + * returns a pointer to an address handle + */ +int zxdh_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata) +{ + return zxdh_create_ah_v2(ibah, init_attr->ah_attr, init_attr->flags, + udata); +} +#endif + +#if defined(ETHER_COPY_VER_1) +void zxdh_ether_copy(u8 *dmac, struct ib_ah_attr *attr) +{ + ether_addr_copy(dmac, attr->dmac); +} +#endif + +#if defined(ETHER_COPY_VER_2) +void zxdh_ether_copy(u8 *dmac, struct rdma_ah_attr *attr) +{ + ether_addr_copy(dmac, attr->roce.dmac); +} +#endif + +#ifdef CREATE_AH_VER_1_1 +/** + * zxdh_create_ah - create address handle + * @ibpd: ptr to pd + * @attr: address handle attributes + * @udata: user data + * + * returns a pointer to an address handle + */ +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata) +#elif defined(CREATE_AH_VER_1_2) + struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, + struct rdma_ah_attr *attr, + struct ib_udata *udata) +#endif +#if defined(CREATE_AH_VER_1_1) || defined(CREATE_AH_VER_1_2) +{ + struct zxdh_pd *pd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_ah *ah; +#ifdef IB_GET_CACHED_GID + union ib_gid sgid; + struct ib_gid_attr sgid_attr; +#else + const struct ib_gid_attr *sgid_attr; +#endif + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + bool sleep = udata ? 
true : false; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return ERR_PTR(err); + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) { + zxdh_free_rsrc(rf, rf->allocated_ahs, ah_id); + return ERR_PTR(-ENOMEM); + } + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); +#ifdef IB_GET_CACHED_GID + rcu_read_lock(); + err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, + attr->grh.sgid_index, &sgid, &sgid_attr); + rcu_read_unlock(); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: GID lookup at idx=%d with port=%d failed\n", + attr->grh.sgid_index, attr->port_num); + err = -EINVAL; + goto err_gid_l2; + } + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); +#else + sgid_attr = attr->grh.sgid_attr; + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); +#endif + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + +#ifdef IB_GET_CACHED_GID + if (kc_deref_sgid_attr(sgid_attr)) + dev_put(kc_deref_sgid_attr(sgid_attr)); +#endif + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + + ether_addr_copy(ah_info->mac_addr, iwdev->netdev->dev_addr); + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + zxdh_ether_copy(ah_info->dmac, attr); + +#ifdef IB_GET_CACHED_GID + zxdh_fill_ah_info(ah_info, &sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr); +#else + zxdh_fill_ah_info(ah_info, sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, sgid_attr); +#endif + if (err) + goto err_gid_l2; + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + + return &ah->ibah; +err_ah_create: + if (ah->parent_ah) { + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + kfree(ah); + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return ERR_PTR(err); +} +#endif + +#ifdef CREATE_AH_VER_0 
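+/* compat variant for kernels that only provide the old ib_ah_attr-based create_ah signature: the GID is looked up via ib_get_cached_gid and the CQP AH create is issued without sleeping */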
+struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +{ + struct zxdh_pd *pd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_ah *ah; + struct zxdh_pci_f *rf = iwdev->rf; + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + struct zxdh_sc_ah *sc_ah; + u32 ah_id; + struct zxdh_ah_info *ah_info; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return ERR_PTR(err); + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) { + zxdh_free_rsrc(rf, rf->allocated_ahs, ah_id); + return ERR_PTR(-ENOMEM); + } + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + rcu_read_lock(); + + err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, + attr->grh.sgid_index, &sgid, &sgid_attr); + rcu_read_unlock(); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: GID lookup at idx=%d with port=%d failed\n", + attr->grh.sgid_index, attr->port_num); + err = -EINVAL; + goto error; + } + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + if (kc_deref_sgid_attr(sgid_attr)) + dev_put(kc_deref_sgid_attr(sgid_attr)); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + + ether_addr_copy(ah_info->mac_addr, iwdev->netdev->dev_addr); + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + zxdh_ether_copy(ah_info->dmac, attr); + + zxdh_fill_ah_info(ah_info, &sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr); + if (err) + goto error; + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, false, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto error; + } + + err = zxdh_create_ah_wait(rf, sc_ah, false); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto error; + } + + return &ah->ibah; +error: + kfree(ah); + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + return ERR_PTR(err); +} + +#endif +#ifdef CREATE_QP_VER_2 +/** + * zxdh_free_qp_rsrc - free up memory resources for qp + * @iwqp: qp ptr (user or kernel) + */ +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_pci_f *rf = iwdev->rf; + int qp_index; + + if (iwqp->ibqp.qp_num <= 1) + qp_index = iwqp->ibqp.qp_num; + else + qp_index = iwqp->ibqp.qp_num - rf->sc_dev.base_qpn; + + if (qp_index > 0 && qp_index < rf->max_qp) { + if (iwqp->sc_qp.dev) + zxdh_qp_rem_qos(&iwqp->sc_qp); + zxdh_free_rsrc(rf, rf->allocated_qps, qp_index); + if (!iwqp->user_mode) { + dma_free_coherent(rf->sc_dev.hw->device, + iwqp->kqp.dma_mem.size, + iwqp->kqp.dma_mem.va, + iwqp->kqp.dma_mem.pa); + iwqp->kqp.dma_mem.va = NULL; + kfree(iwqp->kqp.sq_wrid_mem); + kfree(iwqp->kqp.rq_wrid_mem); + 
} + } + + if (iwqp->host_ctx.va) { + dma_free_coherent(rf->sc_dev.hw->device, iwqp->host_ctx.size, + iwqp->host_ctx.va, iwqp->host_ctx.pa); + iwqp->host_ctx.va = NULL; + } + kfree(iwqp->sg_list); +} + +/** + * zxdh_create_qp - create qp + * @ibqp: ptr of qp + * @init_attr: attributes for qp + * @udata: user data for create qp + */ +int zxdh_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ib_pd *ibpd = ibqp->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_create_qp_req req; + struct zxdh_create_qp_resp uresp = {}; + u32 qp_num = 0; + u32 qp_ctx_num = 0; + int ret; + int err_code; + int sq_size; + int rq_size; + struct zxdh_srq *iwsrq; + struct zxdh_sc_qp *qp; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_qp_init_info init_info = {}; + struct zxdh_qp_host_ctx_info *ctx_info; + unsigned long flags; + + err_code = zxdh_validate_qp_attrs(init_attr, iwdev); + if (err_code) + return err_code; + + sq_size = init_attr->cap.max_send_wr; + rq_size = init_attr->cap.max_recv_wr; +#ifdef Z_CONFIG_RDMA_VSI + init_info.vsi = &iwdev->vsi; +#endif + init_info.dev = dev; + init_info.qp_uk_init_info.uk_attrs = uk_attrs; + init_info.qp_uk_init_info.sq_size = sq_size; + init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; + init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; + init_info.qp_uk_init_info.max_inline_data = + init_attr->cap.max_inline_data; + + qp = &iwqp->sc_qp; + qp->dev = NULL; + qp->qp_uk.back_qp = iwqp; + qp->qp_uk.lock = &iwqp->lock; + + iwqp->is_srq = false; + if (init_attr->srq != NULL) { + iwqp->is_srq = true; + iwsrq = to_iwsrq(init_attr->srq); + iwqp->iwsrq = iwsrq; + iwqp->sc_qp.srq = &iwsrq->sc_srq; + } + qp->is_srq = iwqp->is_srq; + + iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, + sizeof(*iwqp->sg_list), GFP_KERNEL); + if (!iwqp->sg_list) + return -ENOMEM; + + iwqp->iwdev = iwdev; + iwqp->host_ctx.va = NULL; + iwqp->host_ctx.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + iwqp->host_ctx.va = dma_alloc_coherent(dev->hw->device, + iwqp->host_ctx.size, + &iwqp->host_ctx.pa, GFP_KERNEL); + if (!iwqp->host_ctx.va) { + kfree(iwqp->sg_list); + return -ENOMEM; + } + + init_info.host_ctx = iwqp->host_ctx.va; + init_info.host_ctx_pa = iwqp->host_ctx.pa; + + err_code = zxdh_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, + &qp_ctx_num, &rf->next_qp); + if (err_code) + goto error; + qp_ctx_num += dev->base_qpn; + if (init_attr->qp_type == IB_QPT_GSI) + qp_num = 1; + else + qp_num = qp_ctx_num; + + iwqp->iwpd = iwpd; + iwqp->ibqp.qp_num = qp_num; + qp = &iwqp->sc_qp; + iwqp->sc_qp.qp_ctx_num = qp_ctx_num; + iwqp->iwscq = to_iwcq(init_attr->send_cq); + iwqp->iwrcq = to_iwcq(init_attr->recv_cq); + + init_info.pd = &iwpd->sc_pd; + init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; + init_waitqueue_head(&iwqp->mod_qp_waitq); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto error; + } + + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + iwqp->user_mode = 1; + if (req.user_wqe_bufs) { + struct zxdh_ucontext *ucontext = + kc_rdma_udata_to_drv_context(ibpd, 
udata); + + init_info.qp_uk_init_info.legacy_mode = + ucontext->legacy_mode; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, + flags); + iwqp->iwpbl = + zxdh_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, + flags); + + if (!iwqp->iwpbl) { + err_code = -ENODATA; + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: no pbl info\n"); + goto error; + } + } + init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; + zxdh_setup_virt_qp(iwdev, iwqp, &init_info); + } else { + INIT_DELAYED_WORK(&iwqp->dwork_flush, zxdh_flush_worker); + init_info.qp_uk_init_info.abi_ver = ZXDH_ABI_VER; + err_code = + zxdh_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup qp failed\n"); + goto error; + } + + if (init_attr->qp_type == IB_QPT_RC) { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_RC; + init_info.qp_uk_init_info.qp_caps = + ZXDH_SEND_WITH_IMM | ZXDH_WRITE_WITH_IMM | ZXDH_ROCE; + } else { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_UD; + init_info.qp_uk_init_info.qp_caps = ZXDH_SEND_WITH_IMM | + ZXDH_ROCE; + } + + ret = zxdh_sc_qp_init(qp, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: qp_init fail\n"); + goto error; + } + + ctx_info = &iwqp->ctx_info; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + + if (iwqp->is_srq == true) + ctx_info->use_srq = true; + else + ctx_info->use_srq = false; + + zxdh_roce_fill_and_set_qpctx_info(iwqp, ctx_info); + + err_code = zxdh_cqp_create_qp_cmd(iwqp); + if (err_code) + goto error; + + refcount_set(&iwqp->refcnt, 1); + spin_lock_init(&iwqp->lock); + spin_lock_init(&iwqp->sc_qp.pfpdu.lock); + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
1 : 0; + rf->qp_table[qp_ctx_num - dev->base_qpn] = iwqp; + iwqp->max_send_wr = sq_size; + iwqp->max_recv_wr = rq_size; + + zxdh_qp_add_qos(&iwqp->sc_qp); + + if (udata) { + uresp.lsmm = 1; + uresp.actual_sq_size = sq_size; + uresp.actual_rq_size = rq_size; + uresp.qp_id = qp_num; + uresp.qp_caps = qp->qp_uk.qp_caps; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); + kc_zxdh_destroy_qp(&iwqp->ibqp, udata); + return err_code; + } + } + if (refcount_read(&iwdev->trace_switch.t_switch)) { + ibdev_notice( + &iwdev->ibdev, + "create new QP, type %d, ib qpn 0x%X, max_send_wr %d, max_recv_wr %d\n", + iwqp->ibqp.qp_type, iwqp->ibqp.qp_num, + iwqp->max_send_wr, iwqp->max_recv_wr); + } + init_completion(&iwqp->free_qp); + return 0; + +error: + zxdh_free_qp_rsrc(iwqp); + + return err_code; +} +#endif /* CREATE_QP_VER_2 */ +#ifdef CREATE_QP_VER_1 +/** + * zxdh_free_qp_rsrc - free up memory resources for qp + * @iwqp: qp ptr (user or kernel) + */ +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_pci_f *rf = iwdev->rf; + int qp_index; + + if (iwqp->ibqp.qp_num <= 1) + qp_index = iwqp->ibqp.qp_num; + else + qp_index = iwqp->ibqp.qp_num - rf->sc_dev.base_qpn; + + if (qp_index > 0 && qp_index < rf->max_qp) { + if (iwqp->sc_qp.dev) + zxdh_qp_rem_qos(&iwqp->sc_qp); + zxdh_free_rsrc(rf, rf->allocated_qps, qp_index); + if (!iwqp->user_mode) { + dma_free_coherent(rf->sc_dev.hw->device, + iwqp->kqp.dma_mem.size, + iwqp->kqp.dma_mem.va, + iwqp->kqp.dma_mem.pa); + iwqp->kqp.dma_mem.va = NULL; + kfree(iwqp->kqp.sq_wrid_mem); + kfree(iwqp->kqp.rq_wrid_mem); + } + } + if (iwqp->host_ctx.va) { + dma_free_coherent(rf->sc_dev.hw->device, iwqp->host_ctx.size, + iwqp->host_ctx.va, iwqp->host_ctx.pa); + iwqp->host_ctx.va = NULL; + } + kfree(iwqp->sg_list); + kfree(iwqp); +} + +/** + * zxdh_create_qp - create qp + * @ibpd: ptr of pd + * @init_attr: attributes for qp + * @udata: user data for create qp + */ +struct ib_qp *zxdh_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_qp *iwqp; + struct zxdh_srq *iwsrq; + struct zxdh_create_qp_req req; + struct zxdh_create_qp_resp uresp = {}; + u32 qp_num = 0; + u32 qp_ctx_num = 0; + int ret; + int err_code; + int sq_size; + int rq_size; + struct zxdh_sc_qp *qp; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_qp_init_info init_info = {}; + struct zxdh_qp_host_ctx_info *ctx_info; + unsigned long flags; + + err_code = zxdh_validate_qp_attrs(init_attr, iwdev); + if (err_code) + return ERR_PTR(err_code); + + sq_size = init_attr->cap.max_send_wr; + rq_size = init_attr->cap.max_recv_wr; + +#ifdef Z_CONFIG_RDMA_VSI + init_info.vsi = &iwdev->vsi; +#endif + init_info.dev = dev; + init_info.qp_uk_init_info.uk_attrs = uk_attrs; + init_info.qp_uk_init_info.sq_size = sq_size; + init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; + init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; + init_info.qp_uk_init_info.max_inline_data = + init_attr->cap.max_inline_data; + + iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL); + if (!iwqp) + return ERR_PTR(-ENOMEM); + + iwqp->sg_list = 
kcalloc(uk_attrs->max_hw_wq_frags, + sizeof(*iwqp->sg_list), GFP_KERNEL); + if (!iwqp->sg_list) { + kfree(iwqp); + return ERR_PTR(-ENOMEM); + } + + qp = &iwqp->sc_qp; + qp->dev = NULL; + qp->qp_uk.back_qp = iwqp; + qp->qp_uk.lock = &iwqp->lock; + + iwqp->is_srq = false; + if (init_attr->srq != NULL) { + iwqp->is_srq = true; + iwsrq = to_iwsrq(init_attr->srq); + iwqp->iwsrq = iwsrq; + iwqp->sc_qp.srq = &iwsrq->sc_srq; + } + + qp->is_srq = iwqp->is_srq; + qp->qp_uk.is_srq = qp->is_srq; + iwqp->iwdev = iwdev; + iwqp->host_ctx.va = NULL; + iwqp->host_ctx.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + iwqp->host_ctx.va = dma_alloc_coherent(dev->hw->device, + iwqp->host_ctx.size, + &iwqp->host_ctx.pa, GFP_KERNEL); + if (!iwqp->host_ctx.va) { + kfree(iwqp->sg_list); + kfree(iwqp); + return ERR_PTR(-ENOMEM); + } + + init_info.host_ctx = iwqp->host_ctx.va; + init_info.host_ctx_pa = iwqp->host_ctx.pa; + + err_code = zxdh_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, + &qp_ctx_num, &rf->next_qp); + if (err_code) + goto error; + qp_ctx_num += dev->base_qpn; + if (init_attr->qp_type == IB_QPT_GSI) + qp_num = 1; + else + qp_num = qp_ctx_num; + + iwqp->iwpd = iwpd; + iwqp->ibqp.qp_num = qp_num; + iwqp->ibqp.qp_type = init_attr->qp_type; + qp = &iwqp->sc_qp; + iwqp->sc_qp.qp_ctx_num = qp_ctx_num; + iwqp->iwscq = to_iwcq(init_attr->send_cq); + iwqp->iwrcq = to_iwcq(init_attr->recv_cq); + + init_info.pd = &iwpd->sc_pd; + init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; + init_waitqueue_head(&iwqp->mod_qp_waitq); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto error; + } + + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + iwqp->user_mode = 1; + if (req.user_wqe_bufs) { + struct zxdh_ucontext *ucontext = + kc_rdma_udata_to_drv_context(ibpd, udata); + + init_info.qp_uk_init_info.legacy_mode = + ucontext->legacy_mode; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, + flags); + iwqp->iwpbl = + zxdh_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, + flags); + + if (!iwqp->iwpbl) { + err_code = -ENODATA; + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: no pbl info\n"); + goto error; + } + } + init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; + zxdh_setup_virt_qp(iwdev, iwqp, &init_info); + } else { + INIT_DELAYED_WORK(&iwqp->dwork_flush, zxdh_flush_worker); + init_info.qp_uk_init_info.abi_ver = ZXDH_ABI_VER; + err_code = + zxdh_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup qp failed\n"); + goto error; + } + + if (init_attr->qp_type == IB_QPT_RC) { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_RC; + init_info.qp_uk_init_info.qp_caps = + ZXDH_SEND_WITH_IMM | ZXDH_WRITE_WITH_IMM | ZXDH_ROCE; + } else { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_UD; + init_info.qp_uk_init_info.qp_caps = ZXDH_SEND_WITH_IMM | + ZXDH_ROCE; + } + + ret = zxdh_sc_qp_init(qp, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: qp_init fail\n"); + goto error; + } + + ctx_info = &iwqp->ctx_info; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + + if (iwqp->is_srq == true) + ctx_info->use_srq = true; + else + ctx_info->use_srq = false; + + 
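+ /* program the QP context with the send/recv CQ numbers and SRQ usage selected above (descriptive note, not in the original driver) */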
zxdh_roce_fill_and_set_qpctx_info(iwqp, ctx_info); + + err_code = zxdh_cqp_create_qp_cmd(iwqp); + if (err_code) + goto error; + + refcount_set(&iwqp->refcnt, 1); + spin_lock_init(&iwqp->lock); + spin_lock_init(&iwqp->sc_qp.pfpdu.lock); + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; + rf->qp_table[qp_ctx_num - dev->base_qpn] = iwqp; + iwqp->max_send_wr = sq_size; + iwqp->max_recv_wr = rq_size; + + zxdh_qp_add_qos(&iwqp->sc_qp); + + if (udata) { + uresp.lsmm = 1; + uresp.actual_sq_size = sq_size; + uresp.actual_rq_size = rq_size; + uresp.qp_id = qp_num; + uresp.qp_caps = qp->qp_uk.qp_caps; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); + kc_zxdh_destroy_qp(&iwqp->ibqp, udata); + return ERR_PTR(err_code); + } + } + + init_completion(&iwqp->free_qp); + if (refcount_read(&iwdev->trace_switch.t_switch)) { + ibdev_notice( + &iwdev->ibdev, + "create new QP, type %d, ib qpn 0x%X, max_send_wr %d, max_recv_wr %d\n", + iwqp->ibqp.qp_type, iwqp->ibqp.qp_num, + iwqp->max_send_wr, iwqp->max_recv_wr); + } + return &iwqp->ibqp; + +error: + zxdh_free_qp_rsrc(iwqp); + + return ERR_PTR(err_code); +} + +#endif /* CREATE_QP_VER_1 */ +/** + * zxdh_destroy_qp - destroy qp + * @ibqp: qp's ib pointer also to get to device's qp address + * @udata: user data + */ +#ifdef DESTROY_QP_VER_2 +int zxdh_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +#endif +#if defined(DESTROY_QP_VER_1) + int zxdh_destroy_qp(struct ib_qp *ibqp) +#endif +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + + if (iwqp->sc_qp.qp_uk.destroy_pending) + goto free_rsrc; + iwqp->sc_qp.qp_uk.destroy_pending = true; + + zxdh_modify_qp_to_err(&iwqp->sc_qp); + + if (!iwqp->user_mode) + cancel_delayed_work_sync(&iwqp->dwork_flush); + + zxdh_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + + zxdh_sc_qp_resetctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va); + + if (!iwdev->rf->reset && + zxdh_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp)) + return iwqp->user_mode ? 
-ENOTRECOVERABLE : 0; +free_rsrc: + if (!iwqp->user_mode) { + if (iwqp->iwscq) { + zxdh_clean_cqes(iwqp, iwqp->iwscq); + if (iwqp->iwrcq != iwqp->iwscq) + zxdh_clean_cqes(iwqp, iwqp->iwrcq); + } + } + if (refcount_read(&iwdev->trace_switch.t_switch)) { + ibdev_notice( + &iwdev->ibdev, + "destroy QP, type %d, ib qpn 0x%X, max_send_wr %d, max_recv_wr %d\n", + iwqp->ibqp.qp_type, iwqp->ibqp.qp_num, + iwqp->max_send_wr, iwqp->max_recv_wr); + } + zxdh_free_qp_rsrc(iwqp); + return 0; +} + +/** + * zxdh_cq_round_up - return round up cq wq depth + * @wqdepth: wq depth in quanta to round up + */ +int zxdh_cq_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +/** + * zxdh_create_cq - create cq + * @ibcq: CQ allocated + * @attr: attributes for cq + * @udata: user data + */ +#ifdef CREATE_CQ_VER_3 +int zxdh_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +#elif defined(CREATE_CQ_VER_2) +struct ib_cq *zxdh_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +#elif defined(CREATE_CQ_VER_1) +struct ib_cq *zxdh_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) +#endif +{ +#ifdef CREATE_CQ_VER_3 + struct ib_device *ibdev = ibcq->device; +#endif + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_pci_f *rf = iwdev->rf; +#ifdef CREATE_CQ_VER_3 + struct zxdh_cq *iwcq = to_iwcq(ibcq); +#else + struct zxdh_cq *iwcq; +#endif + u32 cq_num = 0; + struct zxdh_sc_cq *cq; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cq_init_info info = {}; + int status; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; + unsigned long flags; + int err_code; + int entries = attr->cqe; +#ifdef CREATE_CQ_VER_3 + err_code = + cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); + if (err_code) + return err_code; +#else + err_code = + cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); + if (err_code) + return ERR_PTR(err_code); + + iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); + if (!iwcq) + return ERR_PTR(-ENOMEM); +#endif + err_code = zxdh_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, + &rf->next_cq); + if (err_code) +#ifdef CREATE_CQ_VER_3 + return err_code; +#else + goto error; +#endif + cq_num += dev->base_cqn; + cq = &iwcq->sc_cq; + cq->back_cq = iwcq; + iwcq->cq_num = cq_num; + refcount_set(&iwcq->refcnt, 1); + spin_lock_init(&iwcq->lock); + INIT_LIST_HEAD(&iwcq->resize_list); + INIT_LIST_HEAD(&iwcq->cmpl_generated); + info.dev = dev; + ukinfo->cq_size = max(entries, 4); /* Depth of CQ */ + ukinfo->cq_size = zxdh_cq_round_up(ukinfo->cq_size); + ukinfo->cq_id = cq_num; + ukinfo->cqe_size = ZXDH_CQE_SIZE_64; + ukinfo->cq_log_size = zxdh_num_to_log(ukinfo->cq_size); + iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; + info.ceq_id = dev->base_ceqn + 1; + info.ceq_index = 1; + if (attr->comp_vector < rf->ceqs_count) { + if (attr->comp_vector == 0) { + info.ceq_id = dev->base_ceqn + 1; + } else { + info.ceq_id = + dev->base_ceqn + + attr->comp_vector; /* attr->comp_vector default value is 0 */ + info.ceq_index = attr->comp_vector; + } + } + info.ceq_id_valid = true; + info.ceqe_mask = 1; + info.type = ZXDH_CQ_TYPE_IO; + + if (udata) { + struct zxdh_ucontext *ucontext; + struct zxdh_create_cq_req req = {}; + struct zxdh_cq_mr *cqmr; + struct 
zxdh_pbl *iwpbl; + struct zxdh_pbl *iwpbl_shadow; + struct zxdh_cq_mr *cqmr_shadow; + + iwcq->user_mode = true; + ucontext = kc_get_ucontext(udata); + if (ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen))) { + err_code = -EFAULT; + goto cq_free_rsrc; + } + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl = zxdh_get_pbl((unsigned long)req.user_cq_buf, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + if (!iwpbl) { + err_code = -EPROTO; + goto cq_free_rsrc; + } + iwcq->iwpbl = iwpbl; + iwcq->cq_mem_size = 0; + cqmr = &iwpbl->cq_mr; + + if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE && + !ucontext->legacy_mode) { + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, + flags); + iwpbl_shadow = zxdh_get_pbl( + (unsigned long)req.user_shadow_area, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, + flags); + + if (!iwpbl_shadow) { + err_code = -EPROTO; + goto cq_free_rsrc; + } + iwcq->iwpbl_shadow = iwpbl_shadow; + cqmr_shadow = &iwpbl_shadow->cq_mr; + info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; + cqmr->split = true; + } else { + info.shadow_area_pa = cqmr->shadow; + } + if (iwpbl->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = cqmr->cq_pbl.idx; + } else { + info.cq_base_pa = cqmr->cq_pbl.addr; + } + } else { + /* Kmode allocations */ + int rsize; + + if (entries < 1 || entries > rf->max_cqe) { + err_code = -EINVAL; + goto cq_free_rsrc; + } + + entries++; + ukinfo->cq_size = zxdh_cq_round_up(entries); + ukinfo->cq_log_size = zxdh_num_to_log(ukinfo->cq_size); + + rsize = info.cq_uk_init_info.cq_size * + sizeof(struct zxdh_extended_cqe); + + iwcq->kmem.size = ALIGN(round_up(rsize, ZXDH_HW_PAGE_SIZE), + ZXDH_HW_PAGE_SIZE); + iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, + iwcq->kmem.size, + &iwcq->kmem.pa, GFP_KERNEL); + if (!iwcq->kmem.va) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + + iwcq->kmem_shadow.size = ALIGN(ZXDH_SHADOW_AREA_SIZE << 3, 64); + iwcq->kmem_shadow.va = dma_alloc_coherent( + dev->hw->device, iwcq->kmem_shadow.size, + &iwcq->kmem_shadow.pa, GFP_KERNEL); + + if (!iwcq->kmem_shadow.va) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + info.shadow_area_pa = iwcq->kmem_shadow.pa; + ukinfo->shadow_area = iwcq->kmem_shadow.va; + ukinfo->cq_base = iwcq->kmem.va; + info.cq_base_pa = iwcq->kmem.pa; + } + + info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, + (u32)ZXDH_MAX_CQ_READ_THRESH); + if (zxdh_sc_cq_init(cq, &info)) { + pr_err("VERBS: init cq fail\n"); + err_code = -EPROTO; + goto cq_free_rsrc; + } + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_CQ_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_create.cq = cq; + cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + + if (udata) { + struct zxdh_create_cq_resp resp = {}; + + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.cq_size = info.cq_uk_init_info.cq_size; + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy to user data\n"); + err_code = -EPROTO; + goto cq_destroy; + } + } + + rf->cq_table[cq_num - dev->base_cqn] = 
iwcq; + init_completion(&iwcq->free_cq); + +#ifdef CREATE_CQ_VER_3 + return 0; +#else + return &iwcq->ibcq; +#endif +cq_destroy: + zxdh_cq_wq_destroy(rf, cq); +cq_free_rsrc: + zxdh_cq_free_rsrc(rf, iwcq); +#ifdef CREATE_CQ_VER_3 + return err_code; +#else +error: + kfree(iwcq); + return ERR_PTR(err_code); +#endif +} + +/** + * zxdh_copy_user_pgaddrs - copy user page address to pble's os locally + * @iwmr: iwmr for IB's user page addresses + * @pblpar: ple pointer to save 1 level or 0 level pble + * @pbleinfo: pble info + * @level: indicated level 0, 1 or 2 + * @use_pbles: ple pointer to save 1 level or 0 level pble + * @pble_type: ple pointer to save 1 level or 0 level pble + */ +#ifdef COPY_USER_PGADDR_VER_4 +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type) +{ + struct ib_umem *region = NULL; + struct zxdh_pbl *iwpbl = NULL; + struct ib_block_iter biter; + struct zxdh_pble_alloc *palloc = NULL; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_sc_dev *dev = NULL; + + u32 idx = 0; + u32 pbl_cnt = 0; + u64 *pbl = NULL; + u32 l2_pinfo_cnt = 0; + int j; + + region = iwmr->region; + iwpbl = &iwmr->iwpbl; + palloc = &iwpbl->pble_alloc; + + if (use_pbles) { + if (!(*pbleinfo)) + return; + dev = (*pbleinfo)->chunkinfo.pchunk->dev; + pbl = (*pbleinfo)->addr; + } else { + pbl = pblpar; + } + + pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf; + if (iwmr->type == ZXDH_MEMREG_TYPE_QP) + iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl); + rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { + *pbl = rdma_block_iter_dma_address(&biter); + if (++pbl_cnt == palloc->total_cnt) + break; + pbl = zxdh_next_pbl_addr(pbl, &pinfo, &idx, &l2_pinfo_cnt); + } + + if (use_pbles) { + if (true == (*pbleinfo)->pble_copy) { + if (level == PBLE_LEVEL_1) { + zxdh_cqp_config_pble_table_cmd( + dev, (*pbleinfo), + palloc->total_cnt << 3, pble_type); + } else if (level == PBLE_LEVEL_2) { + if ((palloc->total_cnt % 512) == 0) { + l2_pinfo_cnt = palloc->total_cnt >> 9; + } else { + l2_pinfo_cnt = + (palloc->total_cnt >> 9) + 1; + } + + pinfo = palloc->level2.leaf; + for (j = 0; j < l2_pinfo_cnt; j++) { + zxdh_cqp_config_pble_table_cmd( + dev, pinfo, pinfo->cnt << 3, + pble_type); + pinfo++; + } + } + } + } +} +#endif + +#ifdef COPY_USER_PGADDR_VER_3 +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type) +{ + struct ib_umem *region = NULL; + struct zxdh_pbl *iwpbl = NULL; + struct ib_block_iter biter; + struct zxdh_pble_alloc *palloc = NULL; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_sc_dev *dev = NULL; + int j; + u32 idx = 0; + u32 pbl_cnt = 0; + u64 *pbl = NULL; + u32 l2_pinfo_cnt = 0; + + region = iwmr->region; + iwpbl = &iwmr->iwpbl; + palloc = &iwpbl->pble_alloc; + + if (use_pbles) { + if (!(*pbleinfo)) + return; + dev = (*pbleinfo)->chunkinfo.pchunk->dev; + pbl = (*pbleinfo)->addr; + } else { + pbl = pblpar; + } + + pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; + if (iwmr->type == ZXDH_MEMREG_TYPE_QP) + iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl); +#ifdef rdma_umem_for_each_dma_block + rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { +#else + rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap, + iwmr->page_size) { +#endif + *pbl = rdma_block_iter_dma_address(&biter); + if (++pbl_cnt == palloc->total_cnt) + break; + pbl = zxdh_next_pbl_addr(pbl, &pinfo, &idx, &l2_pinfo_cnt); + } + + if (use_pbles) { + if (true == (*pbleinfo)->pble_copy) { + if (level == PBLE_LEVEL_1) { + zxdh_cqp_config_pble_table_cmd( + dev, (*pbleinfo), + palloc->total_cnt << 3, pble_type); + } else if (level == PBLE_LEVEL_2) { + if ((palloc->total_cnt % 512) == 0) { + l2_pinfo_cnt = palloc->total_cnt >> 9; + } else { + l2_pinfo_cnt = + (palloc->total_cnt >> 9) + 1; + } + + pinfo = palloc->level2.leaf; + for (j = 0; j < l2_pinfo_cnt; j++) { + zxdh_cqp_config_pble_table_cmd( + dev, pinfo, pinfo->cnt << 3, + pble_type); + pinfo++; + } + } + } + } +} +#endif + +#ifdef COPY_USER_PGADDR_VER_2 +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type) +{ + struct ib_umem *region = NULL; + struct zxdh_pbl *iwpbl = NULL; + struct sg_dma_page_iter sg_iter; + bool first_pg = true; + u64 pg_addr = 0; + struct zxdh_pble_alloc *palloc = NULL; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_sc_dev *dev = NULL; + u32 idx = 0; + u32 pbl_cnt = 0; + u64 *pbl = NULL; + u32 l2_pinfo_cnt = 0; + int j; + + region = iwmr->region; + iwpbl = &iwmr->iwpbl; + palloc = &iwpbl->pble_alloc; + + if (use_pbles) { + if (!(*pbleinfo)) + return; + dev = (*pbleinfo)->chunkinfo.pchunk->dev; + pbl = (*pbleinfo)->addr; + } else { + pbl = pblpar; + } + + pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; + if (iwmr->type == ZXDH_MEMREG_TYPE_QP) + iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl); + + for_each_sg_dma_page(region->sg_head.sgl, &sg_iter, region->nmap, 0) { + pg_addr = sg_page_iter_dma_address(&sg_iter); + if (first_pg) + *pbl = cpu_to_le64(pg_addr & iwmr->page_msk); + else if (!(pg_addr & ~iwmr->page_msk)) + *pbl = cpu_to_le64(pg_addr); + else + continue; + + first_pg = false; + if (++pbl_cnt == palloc->total_cnt) + break; + pbl = zxdh_next_pbl_addr(pbl, &pinfo, &idx, &l2_pinfo_cnt); + } + + if (use_pbles) { + if (true == (*pbleinfo)->pble_copy) { + if (level == PBLE_LEVEL_1) { + zxdh_cqp_config_pble_table_cmd( + dev, (*pbleinfo), + palloc->total_cnt << 3, pble_type); + } else if (level == PBLE_LEVEL_2) { + if ((palloc->total_cnt % 512) == 0) { + l2_pinfo_cnt = palloc->total_cnt >> 9; + } else { + l2_pinfo_cnt = + (palloc->total_cnt >> 9) + 1; + } + + pinfo = palloc->level2.leaf; + for (j = 0; j < l2_pinfo_cnt; j++) { + zxdh_cqp_config_pble_table_cmd( + dev, pinfo, pinfo->cnt << 3, + pble_type); + pinfo++; + } + } + } + } +} +#endif + +#ifdef COPY_USER_PGADDR_VER_1 +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type) +{ + struct ib_umem *region = NULL; + struct zxdh_pbl *iwpbl = NULL; + int chunk_pages, entry, i, j; + struct scatterlist *sg; + u64 pg_addr = 0; + struct zxdh_pble_alloc *palloc = NULL; + struct zxdh_pble_info *pinfo = NULL; + u32 idx = 0; + u32 pbl_cnt = 0; + u32 l2_pinfo_cnt = 0; + struct zxdh_sc_dev *dev = NULL; + u64 *pbl = NULL; + + region = iwmr->region; + iwpbl = &iwmr->iwpbl; + palloc = &iwpbl->pble_alloc; + + if (use_pbles) { + if (!(*pbleinfo)) + return; + dev = (*pbleinfo)->chunkinfo.pchunk->dev; + pbl = (*pbleinfo)->addr; + } else { + pbl = pblpar; + } + + pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; + for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { + chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size); + if (iwmr->type == ZXDH_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page) + iwpbl->qp_mr.sq_page = sg_page(sg); + for (i = 0; i < chunk_pages; i++) { + pg_addr = sg_dma_address(sg) + (i * iwmr->page_size); + if ((entry + i) == 0) + *pbl = pg_addr & iwmr->page_msk; + else if (!(pg_addr & ~iwmr->page_msk)) + *pbl = pg_addr; + else + continue; + if (++pbl_cnt == palloc->total_cnt) + break; + pbl = zxdh_next_pbl_addr(pbl, &pinfo, &idx, + &l2_pinfo_cnt); + } + } + + if (use_pbles) { + if (true == (*pbleinfo)->pble_copy) { + if (level == PBLE_LEVEL_1) { + zxdh_cqp_config_pble_table_cmd( + dev, (*pbleinfo), + palloc->total_cnt << 3, pble_type); + } else if (level == PBLE_LEVEL_2) { + if ((palloc->total_cnt % 512) == 0) { + l2_pinfo_cnt = palloc->total_cnt >> 9; + } else { + l2_pinfo_cnt = + (palloc->total_cnt >> 9) + 1; + } + + pinfo = palloc->level2.leaf; + for (j = 0; j < l2_pinfo_cnt; j++) { + zxdh_cqp_config_pble_table_cmd( + dev, pinfo, pinfo->cnt << 3, + pble_type); + pinfo++; + } + } + } + } +} +#endif + +/** + * zxdh_destroy_ah - Destroy address handle + * @ibah: pointer to address handle + * @ah_flags: destroy flags + */ +#if defined(DESTROY_AH_VER_4) +int zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct zxdh_device *iwdev = to_iwdev(ibah->device); + struct zxdh_ah *ah = to_iwah(ibah); + + if (ah->parent_ah) { + mutex_lock(&iwdev->ah_list_lock); + if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { + mutex_unlock(&iwdev->ah_list_lock); + return 0; + } + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + mutex_unlock(&iwdev->ah_list_lock); + } + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, ZXDH_OP_AH_DESTROY, false, NULL, + ah); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); + + return 0; +} +#endif + +#if defined(DESTROY_AH_VER_3) +void zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct zxdh_device *iwdev = to_iwdev(ibah->device); + struct zxdh_ah *ah = to_iwah(ibah); + + if (ah->parent_ah) { + mutex_lock(&iwdev->ah_list_lock); + if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { + mutex_unlock(&iwdev->ah_list_lock); + return; + } + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + mutex_unlock(&iwdev->ah_list_lock); + } + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, ZXDH_OP_AH_DESTROY, false, NULL, + ah); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); +} +#endif + +#if defined(DESTROY_AH_VER_2) +int zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct zxdh_device *iwdev = to_iwdev(ibah->device); + struct zxdh_ah *ah = to_iwah(ibah); + + if (ah->parent_ah) { + mutex_lock(&iwdev->ah_list_lock); + if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { + mutex_unlock(&iwdev->ah_list_lock); + goto done; + } + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + mutex_unlock(&iwdev->ah_list_lock); + } + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, ZXDH_OP_AH_DESTROY, false, NULL, + ah); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); + +done: + kfree(ah); + return 0; +} +#endif + +#if defined(DESTROY_AH_VER_1) +int zxdh_destroy_ah(struct ib_ah *ibah) +{ + struct zxdh_device *iwdev = to_iwdev(ibah->device); + struct zxdh_ah *ah = to_iwah(ibah); + + if (ah->parent_ah) { + mutex_lock(&iwdev->ah_list_lock); + if 
(!refcount_dec_and_test(&ah->parent_ah->refcnt)) { + mutex_unlock(&iwdev->ah_list_lock); + goto done; + } + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + mutex_unlock(&iwdev->ah_list_lock); + } + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, ZXDH_OP_AH_DESTROY, false, NULL, + ah); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); + +done: + kfree(ah); + return 0; +} +#endif + +#ifdef DEREG_MR_VER_2 +int zxdh_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) +#else +int zxdh_dereg_mr(struct ib_mr *ib_mr) +#endif +{ + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + if (iwmr->region) { + struct zxdh_ucontext *ucontext; +#ifdef DEREG_MR_VER_2 + + ucontext = rdma_udata_to_drv_context( + udata, struct zxdh_ucontext, ibucontext); +#else + struct ib_pd *ibpd = ib_mr->pd; + + ucontext = to_ucontext(ibpd->uobject->context); +#endif + zxdh_del_memlist(iwmr, ucontext); + } + goto done; + } + + ret = zxdh_hwdereg_mr(ib_mr); + if (ret) + return ret; + + zxdh_free_stag(iwdev, iwmr->stag); +done: + if (iwpbl->pbl_allocated) { + if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + if (iwmr->region) + zxdh_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + } else { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, + &iwpbl->pble_alloc); + } + } + + if (iwmr->region) + ib_umem_release(iwmr->region); + + kfree(iwmr); + + return 0; +} + +#ifdef REREG_MR_VER_1 +/* + * zxdh_rereg_user_mr - Re-Register a user memory region + * @ibmr: ib mem to access iwarp mr pointer + * @flags: bit mask to indicate which of the attr's of MR modified + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @new access flags: bit mask of access flags + * @new_pd: ptr of pd + * @udata: user data + */ +int zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, + u64 virt, int new_access, struct ib_pd *new_pd, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) + return -EINVAL; + + if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) + return -EOPNOTSUPP; + + ret = zxdh_hwdereg_mr(ib_mr); + if (ret) + return ret; + + if (flags & IB_MR_REREG_ACCESS) + iwmr->access = new_access; + + if (flags & IB_MR_REREG_PD) { + iwmr->ibmr.pd = new_pd; + iwmr->ibmr.device = new_pd->device; + } + + if (flags & IB_MR_REREG_TRANS) { + if (iwpbl->pbl_allocated) { + zxdh_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + iwpbl->pbl_allocated = false; + } + if (iwmr->region) { + ib_umem_release(iwmr->region); + iwmr->region = NULL; + } + + ib_mr = zxdh_rereg_mr_trans(iwmr, start, len, virt, udata); + if (IS_ERR(ib_mr)) + return PTR_ERR(ib_mr); + + } else { + ret = zxdh_hwreg_mr(iwdev, iwmr, iwmr->access); + if (ret) + return ret; + } + + return 0; +} +#endif +#ifdef REREG_MR_VER_2 +/* + * zxdh_rereg_user_mr - Re-Register a user memory region + * @ibmr: ib mem to access iwarp mr pointer + * @flags: bit mask to indicate which of the attr's of MR modified + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @new access flags: bit mask of access flags + * @new_pd: ptr of pd + * @udata: user data + */ +struct ib_mr *zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + 
u64 len, u64 virt, int new_access, + struct ib_pd *new_pd, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) + return ERR_PTR(-EINVAL); + + if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) + return ERR_PTR(-EOPNOTSUPP); + + ret = zxdh_hwdereg_mr(ib_mr); + if (ret) + return ERR_PTR(ret); + + if (flags & IB_MR_REREG_ACCESS) + iwmr->access = new_access; + + if (flags & IB_MR_REREG_PD) { + iwmr->ibmr.pd = new_pd; + iwmr->ibmr.device = new_pd->device; + } + + if (flags & IB_MR_REREG_TRANS) { + if (iwpbl->pbl_allocated) { + zxdh_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + iwpbl->pbl_allocated = false; + } + if (iwmr->region) { + ib_umem_release(iwmr->region); + iwmr->region = NULL; + } + + ib_mr = zxdh_rereg_mr_trans(iwmr, start, len, virt, udata); + } else { + ret = zxdh_hwreg_mr(iwdev, iwmr, iwmr->access); + if (ret) + return ERR_PTR(ret); + } + + return ib_mr; +} +#endif +#ifdef SET_ROCE_CM_INFO_VER_3 +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id) +{ + const struct ib_gid_attr *sgid_attr; + int ret; + struct zxdh_av *av = &iwqp->roce_ah.av; + + sgid_attr = attr->ah_attr.grh.sgid_attr; + if (kc_deref_sgid_attr(sgid_attr)) { + ret = rdma_read_gid_l2_fields( + sgid_attr, vlan_id, iwqp->ctx_info.roce_info->mac_addr); + if (ret) + return ret; + } + + rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); + return 0; +} +#endif + +#ifdef SET_ROCE_CM_INFO_VER_2 +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id) +{ + const struct ib_gid_attr *sgid_attr; + struct zxdh_av *av = &iwqp->roce_ah.av; + + sgid_attr = attr->ah_attr.grh.sgid_attr; + if (kc_deref_sgid_attr(sgid_attr)) { + *vlan_id = rdma_vlan_dev_vlan_id(kc_deref_sgid_attr(sgid_attr)); + ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, + kc_deref_sgid_attr(sgid_attr)->dev_addr); + } + rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); + + return 0; +} +#endif + +#ifdef SET_ROCE_CM_INFO_VER_1 +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id) +{ + int ret; + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + struct zxdh_av *av = &iwqp->roce_ah.av; + + ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num, + attr->ah_attr.grh.sgid_index, &sgid, + &sgid_attr); + if (ret) + return ret; + + if (kc_deref_sgid_attr(sgid_attr)) { + *vlan_id = rdma_vlan_dev_vlan_id(kc_deref_sgid_attr(sgid_attr)); + ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, + kc_deref_sgid_attr(sgid_attr)->dev_addr); + } + + rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid); + + dev_put(kc_deref_sgid_attr(sgid_attr)); + + return 0; +} + +#endif +#ifdef ZXDH_DESTROY_CQ_VER_4 +/** + * zxdh_destroy_cq - destroy cq + * @ib_cq: cq pointer + * @udata: user data + */ +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_cq->device); + struct zxdh_cq *iwcq = to_iwcq(ib_cq); + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_sc_ceq *ceq = dev->ceq[cq->ceq_index]; + struct zxdh_ceq *iwceq = container_of(ceq, struct zxdh_ceq, sc_ceq); + unsigned long flags; + + cq->cq_type = 0; + cq->back_cq = NULL; + + spin_lock_irqsave(&iwcq->lock, flags); + if (!list_empty(&iwcq->cmpl_generated)) + 
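+ /* clear out any completion entries still queued on the cmpl_generated list before teardown (descriptive note, not in the original driver) */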
zxdh_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + zxdh_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, flags); + + zxdh_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + +#ifdef Z_DH_DEBUG + zxdh_query_cqc(cq); +#endif + + zxdh_cq_wq_destroy(iwdev->rf, cq); + zxdh_cq_free_rsrc(iwdev->rf, iwcq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + zxdh_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + + return 0; +} + +#endif /* ZXDH_DESTROY_CQ_VER_4 */ +#ifdef ZXDH_DESTROY_CQ_VER_3 +/** + * zxdh_destroy_cq - destroy cq + * @ib_cq: cq pointer + * @udata: user data + */ +void zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_cq->device); + struct zxdh_cq *iwcq = to_iwcq(ib_cq); + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_sc_ceq *ceq = dev->ceq[cq->ceq_index]; + struct zxdh_ceq *iwceq = container_of(ceq, struct zxdh_ceq, sc_ceq); + unsigned long flags; + + cq->cq_type = 0; + cq->back_cq = NULL; + + spin_lock_irqsave(&iwcq->lock, flags); + if (!list_empty(&iwcq->cmpl_generated)) + zxdh_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + zxdh_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, flags); + + zxdh_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + + zxdh_cq_wq_destroy(iwdev->rf, cq); + zxdh_cq_free_rsrc(iwdev->rf, iwcq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + zxdh_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); +} + +#endif /* ZXDH_DESTROY_CQ_VER_3 */ +#ifdef ZXDH_DESTROY_CQ_VER_2 +/** + * zxdh_destroy_cq - destroy cq + * @ib_cq: cq pointer + * @udata: user data + */ +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_cq->device); + struct zxdh_cq *iwcq = to_iwcq(ib_cq); + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_sc_ceq *ceq = dev->ceq[cq->ceq_index]; + struct zxdh_ceq *iwceq = container_of(ceq, struct zxdh_ceq, sc_ceq); + unsigned long flags; + + cq->cq_type = 0; + cq->back_cq = NULL; + + spin_lock_irqsave(&iwcq->lock, flags); + if (!list_empty(&iwcq->cmpl_generated)) + zxdh_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + zxdh_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, flags); + + zxdh_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + + zxdh_cq_wq_destroy(iwdev->rf, cq); + zxdh_cq_free_rsrc(iwdev->rf, iwcq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + zxdh_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + + kfree(iwcq); + + return 0; +} + +#endif /* ZXDH_DESTROY_CQ_VER_2 */ +#ifdef ZXDH_DESTROY_CQ_VER_1 +/** + * zxdh_destroy_cq - destroy cq + * @ib_cq: cq pointer + */ +int zxdh_destroy_cq(struct ib_cq *ib_cq) +{ + struct zxdh_device *iwdev = to_iwdev(ib_cq->device); + struct zxdh_cq *iwcq = to_iwcq(ib_cq); + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_sc_ceq *ceq = dev->ceq[cq->ceq_index]; + struct zxdh_ceq *iwceq = container_of(ceq, struct zxdh_ceq, sc_ceq); + unsigned long flags; + + cq->cq_type = 0; + cq->back_cq = NULL; + + spin_lock_irqsave(&iwcq->lock, flags); + if (!list_empty(&iwcq->cmpl_generated)) + zxdh_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + zxdh_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, 
flags); + + zxdh_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + + zxdh_cq_wq_destroy(iwdev->rf, cq); + zxdh_cq_free_rsrc(iwdev->rf, iwcq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + zxdh_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + + kfree(iwcq); + + return 0; +} + +#endif /* ZXDH_DESTROY_CQ_VER_1 */ +#ifdef ZXDH_ALLOC_MW_VER_2 +/** + * zxdh_alloc_mw - Allocate memory window + * @ibmw: Memory Window + * @udata: user data pointer + */ +int zxdh_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibmw->device); + struct zxdh_mr *iwmr = to_iwmw(ibmw); + int err_code; + u32 stag; + + stag = zxdh_create_stag(iwdev); + if (!stag) + return -ENOMEM; + + iwmr->stag = stag; + ibmw->rkey = stag; + + err_code = zxdh_hw_alloc_mw(iwdev, iwmr); + if (err_code) { + zxdh_free_stag(iwdev, stag); + return err_code; + } + + return 0; +} + +#endif /* ZXDH_ALLOC_MW_VER_2 */ +#ifdef ZXDH_ALLOC_MW_VER_1 +/** + * zxdh_alloc_mw - Allocate memory window + * @pd: Protection domain + * @type: Window type + * @udata: user data pointer + */ +struct ib_mw *zxdh_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_mr *iwmr; + int err_code; + u32 stag; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + stag = zxdh_create_stag(iwdev); + if (!stag) { + kfree(iwmr); + return ERR_PTR(-ENOMEM); + } + + iwmr->stag = stag; + iwmr->ibmw.rkey = stag; + iwmr->ibmw.pd = pd; + iwmr->ibmw.type = type; + iwmr->ibmw.device = pd->device; + + err_code = zxdh_hw_alloc_mw(iwdev, iwmr); + if (err_code) { + zxdh_free_stag(iwdev, stag); + kfree(iwmr); + return ERR_PTR(err_code); + } + + return &iwmr->ibmw; +} + +#endif /* ZXDH_ALLOC_MW_VER_1 */ + +#ifdef VMA_DATA +struct zxdh_vma_data { + struct list_head list; + struct vm_area_struct *vma; + struct mutex *vma_list_mutex; /* protect the vma_list */ +}; + +/** + * zxdh_vma_open - + * @vma: User VMA + */ +static void zxdh_vma_open(struct vm_area_struct *vma) +{ + vma->vm_ops = NULL; +} + +/** + * zxdh_vma_close - Remove vma data from vma list + * @vma: User VMA + */ +static void zxdh_vma_close(struct vm_area_struct *vma) +{ + struct zxdh_vma_data *vma_data; + + vma_data = vma->vm_private_data; + vma->vm_private_data = NULL; + vma_data->vma = NULL; + mutex_lock(vma_data->vma_list_mutex); + list_del(&vma_data->list); + mutex_unlock(vma_data->vma_list_mutex); + kfree(vma_data); +} + +static const struct vm_operations_struct zxdh_vm_ops = { + .open = zxdh_vma_open, + .close = zxdh_vma_close +}; + +/** + * zxdh_set_vma_data - Save vma data in context list + * @vma: User VMA + * @context: ib user context + */ +static int zxdh_set_vma_data(struct vm_area_struct *vma, + struct zxdh_ucontext *context) +{ + struct list_head *vma_head = &context->vma_list; + struct zxdh_vma_data *vma_entry; + + vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL); + if (!vma_entry) + return -ENOMEM; + + vma->vm_private_data = vma_entry; + vma->vm_ops = &zxdh_vm_ops; + + vma_entry->vma = vma; + vma_entry->vma_list_mutex = &context->vma_list_mutex; + + mutex_lock(&context->vma_list_mutex); + list_add(&vma_entry->list, vma_head); + mutex_unlock(&context->vma_list_mutex); + + return 0; +} + +/** + * zxdh_disassociate_ucontext - Disassociate user context + * @context: ib user context + */ +void zxdh_disassociate_ucontext(struct ib_ucontext *context) +{ + struct zxdh_ucontext *ucontext = 
to_ucontext(context); + + struct zxdh_vma_data *vma_data, *n; + struct vm_area_struct *vma; + + mutex_lock(&ucontext->vma_list_mutex); + list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) { + vma = vma_data->vma; + zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE); + + vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); + vma->vm_ops = NULL; + list_del(&vma_data->list); + kfree(vma_data); + } + mutex_unlock(&ucontext->vma_list_mutex); +} + +int rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) + return -EAGAIN; + + return zxdh_set_vma_data(vma, to_ucontext(context)); +} +#else +/** + * zxdh_disassociate_ucontext - Disassociate user context + * @context: ib user context + */ +void zxdh_disassociate_ucontext(struct ib_ucontext *context) +{ +} +#endif /* RDMA_MMAP_DB_SUPPORT */ + +#ifndef NETDEV_TO_IBDEV_SUPPORT +struct ib_device *ib_device_get_by_netdev(struct net_device *netdev, + int driver_id) +{ + struct zxdh_device *iwdev; + struct zxdh_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_for_each_entry(hdl, &zxdh_handlers, list) { + iwdev = hdl->iwdev; + if (netdev == iwdev->netdev) { + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + return &iwdev->ibdev; + } + } + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + + return NULL; +} + +void ib_unregister_device_put(struct ib_device *device) +{ + ib_unregister_device(device); +} + +#endif /* NETDEV_TO_IBDEV_SUPPORT */ +/** + * zxdh_query_gid_roce - Query port GID for Roce + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +#ifdef QUERY_GID_ROCE_V2 +int zxdh_query_gid_roce(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +#elif defined(QUERY_GID_ROCE_V1) +int zxdh_query_gid_roce(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +#endif +{ + int ret; + + ret = rdma_query_gid(ibdev, port, index, gid); + if (ret == -EAGAIN) { + memcpy(gid, &zgid, sizeof(*gid)); + return 0; + } + + return ret; +} + +/** + * zxdh_modify_port - modify port attributes + * @ibdev: device pointer from stack + * @port: port number for query + * @mask: Property mask + * @props: returning device attributes + */ +#ifdef MODIFY_PORT_V2 +int zxdh_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props) +#elif defined(MODIFY_PORT_V1) +int zxdh_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +#endif +{ + if (port > 1) + return -EINVAL; + + return 0; +} + +/** + * zxdh_query_pkey - Query partition key + * @ibdev: device pointer from stack + * @port: port number + * @index: index of pkey + * @pkey: pointer to store the pkey + */ +#ifdef QUERY_PKEY_V2 +int zxdh_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) +#elif defined(QUERY_PKEY_V1) +int zxdh_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +#endif +{ + if (index >= ZXDH_PKEY_TBL_SZ) + return -EINVAL; + + *pkey = ZXDH_DEFAULT_PKEY; + return 0; +} + +#ifdef ROCE_PORT_IMMUTABLE_V2 +int zxdh_roce_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +#elif defined(ROCE_PORT_IMMUTABLE_V1) +int zxdh_roce_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +#endif +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = 
RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + +#ifdef IW_PORT_IMMUTABLE_V2 +int zxdh_iw_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +#elif defined(IW_PORT_IMMUTABLE_V1) +int zxdh_iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +#endif +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + immutable->gid_tbl_len = 1; + + return 0; +} + +/** + * zxdh_get_eth_speed_and_width - Get IB port speed and width from netdev speed + * @link_speed: netdev phy link speed + * @active_speed: IB port speed + * @active_width: IB port width + */ +#ifdef GET_ETH_SPEED_AND_WIDTH_V1 +void zxdh_get_eth_speed_and_width(u32 link_speed, u8 *active_speed, + u8 *active_width) +#elif defined(GET_ETH_SPEED_AND_WIDTH_V2) +void zxdh_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, + u8 *active_width) +#endif +{ + if (link_speed <= SPEED_1000) { + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_SDR; + } else if (link_speed <= SPEED_10000) { + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_FDR10; + } else if (link_speed <= SPEED_20000) { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_DDR; + } else if (link_speed <= SPEED_25000) { + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_EDR; + } else if (link_speed <= SPEED_40000) { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_FDR10; + } else { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_EDR; + } +} + +/** + * zxdh_query_port - get port attributes + * @ibdev: device pointer from stack + * @port: port number for query + * @props: returning device attributes + */ +#ifdef QUERY_PORT_V2 +int zxdh_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +#elif defined(QUERY_PORT_V1) +int zxdh_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct net_device *netdev = iwdev->netdev; + u32 val = 0; + /* no need to zero out pros here. 
done by caller */ + + props->max_mtu = IB_MTU_4096; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); + props->lid = 0; + props->lmc = 0; + props->sm_lid = 0; + props->sm_sl = 0; + if (netif_carrier_ok(netdev) && netif_running(netdev)) { + props->state = IB_PORT_ACTIVE; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; + } else { + props->state = IB_PORT_DOWN; + props->phys_state = IB_PORT_PHYS_STATE_DISABLED; + } + zxdh_get_eth_speed(ibdev, netdev, port, &props->active_speed, + &props->active_width); + if (rdma_protocol_roce(ibdev, 1)) { + props->gid_tbl_len = 32; + kc_set_props_ip_gid_caps(props); + props->pkey_tbl_len = ZXDH_PKEY_TBL_SZ; + } else { + props->gid_tbl_len = 1; + } + props->qkey_viol_cntr = 0; + props->port_cap_flags |= IB_PORT_CM_SUP; + props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; + val = rd32(iwdev->rf->sc_dev.hw, RDMARX_PRI_BASE_RD); + props->qkey_viol_cntr = + (u32)FIELD_GET(ZXDH_PRI_BASE_RD_BAD_QKEY_COUNTER, val); + return 0; +} + +#ifdef ALLOC_HW_STATS_STRUCT_V2 +extern const struct rdma_stat_desc zxdh_hw_stat_descs[]; + +#endif +#ifdef ALLOC_HW_STATS_STRUCT_V1 +const char *const zxdh_hw_stat_names[] = { + /*32-bit */ + [HW_STAT_DUPLICATE_REQUEST] = "duplicate_request", + [HW_STAT_NP_CNP_SENT] = "np_cnp_sent", + [HW_STAT_NP_ECN_MARKED_ROCE_PACKETS] = "np_ecn_marked_roce_packets", + [HW_STAT_OUT_OF_SEQUENCE] = "out_of_sequence", + [HW_STAT_PACKET_SEQ_ERR] = "packet_seq_err", + [HW_STAT_REQ_CQE_ERROR] = "req_cqe_error", + [HW_STAT_REQ_REMOTE_ACCESS_ERRORS] = "req_remote_access_errors", + [HW_STAT_REQ_REMOTE_INVALID_REQUEST] = "req_remote_invalid_request", + [HW_STAT_REQ_REMOTE_OPERATION_ERRORS] = "req_remote_operation_errors", + [HW_STAT_REQ_LOCAL_LENGTH_ERROR] = "req_local_length_error", + [HW_STAT_RESP_CQE_ERROR] = "resp_cqe_error", + [HW_STAT_RESP_REMOTE_ACCESS_ERRORS] = "resp_remote_access_errors", + [HW_STAT_RESP_REMOTE_INVALID_REQUEST] = "resp_remote_invalid_request", + [HW_STAT_RESP_REMOTE_OPERATION_ERRORS] = "resp_remote_operation_errors", + [HW_STAT_RESP_RNR_NAK] = "resp_rnr_nak", + [HW_STAT_RNR_NAK_RETRY_ERR] = "rnr_nak_retry_err", + [HW_STAT_RP_CNP_HANDLED] = "rp_cnp_handled", + [HW_STAT_RX_READ_REQUESTS] = "rx_read_requests", + [HW_STAT_RX_WRITE_REQUESTS] = "rx_write_requests", + [HW_STAT_RX_ICRC_ENCAPSULATED] = "rx_icrc_encapsulated", + [HW_STAT_ROCE_SLOW_RESTART_CNPS] = "roce_slow_restart_cnps", + [HW_STAT_RDMA_TX_PKTS] = "rdma_tx_pkts", + [HW_STAT_RDMA_TX_BYTES] = "rdma_tx_bytes", + [HW_STAT_RDMA_RX_PKTS] = "rdma_rx_pkts", + [HW_STAT_RDMA_RX_BYTES] = "rdma_rx_bytes", +}; + +#endif /* ALLOC_HW_STATS_STRUCT_V1 */ +#ifdef ALLOC_HW_STATS_V3 +/** + * zxdh_alloc_hw_port_stats - Allocate a hw stats structure + * @ibdev: device pointer from stack + * @port_num: port number + */ +struct rdma_hw_stats *zxdh_alloc_hw_port_stats(struct ib_device *ibdev, + u32 port_num) +#elif defined(ALLOC_HW_STATS_V2) +struct rdma_hw_stats *zxdh_alloc_hw_stats(struct ib_device *ibdev, u32 port_num) +#elif defined(ALLOC_HW_STATS_V1) +struct rdma_hw_stats *zxdh_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + + int num_counters = dev->hw_attrs.max_stat_idx; + unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; + + /* We support only per port stats */ + if (port_num == 0) + return NULL; + + if (!dev->privileged) + lifespan = 1000; +#ifdef ALLOC_HW_STATS_STRUCT_V2 + return rdma_alloc_hw_stats_struct(zxdh_hw_stat_descs, num_counters, + 
lifespan); +#else + return rdma_alloc_hw_stats_struct(zxdh_hw_stat_names, num_counters, + lifespan); +#endif +} + +/** + * zxdh_get_hw_stats - Populates the rdma_hw_stats structure + * @ibdev: device pointer from stack + * @stats: stats pointer from stack + * @port_num: port number + * @index: which hw counter the stack is requesting we update + */ +#ifdef GET_HW_STATS_V2 +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index) +#elif defined(GET_HW_STATS_V1) +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port_num, int index) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + int i; + struct zxdh_sc_dev *dev; + struct zxdh_rdma_stats_get rdma_stats; + struct zxdh_hw_stats *stats_entry; + + stats_entry = &iwdev->rf->sc_dev.stats_entry; + dev = &iwdev->rf->sc_dev; + memset(&rdma_stats, 0, sizeof(struct zxdh_rdma_stats_get)); + + zxdh_rdma_stats_read(dev, &rdma_stats); + for (i = 0; i < ZXDH_HW_STAT_INDEX_MAX; i++) { + if (rdma_stats.rdma_stats_entry_sta[i] == ZXDH_HW_STATS_VALID) + stats_entry->rdma_stats_entry[i] = + rdma_stats.rdma_stats_entry[i]; + } + memcpy(&stats->value[0], &stats_entry->rdma_stats_entry, + sizeof(u64) * stats->num_counters); + return stats->num_counters; +} + +#ifdef PROCESS_MAD_VER_3 +/* + * zxdh_process_mad - process an incoming MAD packet + * @ibdev: the infiniband device this packet came in on + * @mad_flags: MAD flags + * @port_num: the port number this packet came in on + * @in_wc: the work completion entry for this packet + * @in_grh: the global route header for this packet + * @in_mad: the incoming MAD + * @out_mad: any outgoing MAD reply + * @out_mad_size:outgoing MAD size + * @out_mad_pkey_index:outgoing MAD pkey index + */ +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev; + u8 mgmt_class; + int ret; + + ret = IB_MAD_RESULT_FAILURE; + dev = &iwdev->rf->sc_dev; + mgmt_class = in_mad->mad_hdr.mgmt_class; + pr_debug( + "%s %d vhca_id:%d mgmt_class:%d base_version:0x%x method:0x%x\n", + __func__, __LINE__, dev->vhca_id, mgmt_class, + in_mad->mad_hdr.base_version, in_mad->mad_hdr.method); + if (in_mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) + return -EINVAL; + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET) + return -EINVAL; + switch (mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + ret = zxdh_process_pma_cmd(dev, port_num, in_mad, out_mad); + break; + default: + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + break; + } + return ret; +} +#endif + +#ifdef PROCESS_MAD_VER_2 +/* + * zxdh_process_mad - process an incoming MAD packet + * @ibdev: the infiniband device this packet came in on + * @mad_flags: MAD flags + * @port_num: the port number this packet came in on + * @in_wc: the work completion entry for this packet + * @in_grh: the global route header for this packet + * @in_mad: the incoming MAD + * @out_mad: any outgoing MAD reply + * @out_mad_size:outgoing MAD size + * @out_mad_pkey_index:outgoing MAD pkey index + */ +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +{ + struct zxdh_device *iwdev = 
to_iwdev(ibdev); + struct zxdh_sc_dev *dev; + u8 mgmt_class; + int ret; + + ret = IB_MAD_RESULT_FAILURE; + dev = &iwdev->rf->sc_dev; + mgmt_class = in_mad->mad_hdr.mgmt_class; + pr_debug( + "%s %d vhca_id:%d mgmt_class:%d base_version:0x%x method:0x%x\n", + __func__, __LINE__, dev->vhca_id, mgmt_class, + in_mad->mad_hdr.base_version, in_mad->mad_hdr.method); + if (in_mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) + return -EINVAL; + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET) + return -EINVAL; + switch (mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + ret = zxdh_process_pma_cmd(dev, port_num, in_mad, out_mad); + break; + default: + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + break; + } + return ret; +} +#endif + +#ifdef PROCESS_MAD_VER_1 +/* + * zxdh_process_mad - process an incoming MAD packet + * @ibdev: the infiniband device this packet came in on + * @mad_flags: MAD flags + * @port_num: the port number this packet came in on + * @in_wc: the work completion entry for this packet + * @in_grh: the global route header for this packet + * @in: the incoming hdr MAD + * @in_mad_size:incoming MAD size + * @out: any outgoing MAD hdr reply + * @out_mad_size:outgoing MAD size + * @out_mad_pkey_index:outgoing MAD pkey index + */ +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev; + u8 mgmt_class; + int ret; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; + + ret = IB_MAD_RESULT_FAILURE; + dev = &iwdev->rf->sc_dev; + mgmt_class = in_mad->mad_hdr.mgmt_class; + pr_debug( + "%s %d vhca_id:%d mgmt_class:%d base_version:0x%x method:0x%x\n", + __func__, __LINE__, dev->vhca_id, mgmt_class, + in_mad->mad_hdr.base_version, in_mad->mad_hdr.method); + if (in_mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) + return -EINVAL; + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET) + return -EINVAL; + switch (mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + ret = zxdh_process_pma_cmd(dev, port_num, in_mad, out_mad); + break; + default: + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + break; + } + return ret; +} +#endif +/** + * zxdh_query_gid - Query port GID + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +#ifdef QUERY_GID_V2 +int zxdh_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +#elif defined(QUERY_GID_V1) +int zxdh_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + memset(gid->raw, 0, sizeof(gid->raw)); + ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); + + return 0; +} + +/** + * zxdh_query_qpc - query qpc + * @qp: points to qp + * @qpc_buf: qpc buffer + */ +int zxdh_query_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf) +{ + struct zxdh_sc_dev *dev = qp->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = 
-ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_QPC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_qpc.qp = qp; + cqp_info->in.u.query_qpc.qpc_buf_pa = qpc_buf->pa; + cqp_info->in.u.query_qpc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + return 0; + +free_rsrc: + return err_code; +} + +void zxdh_print_hw_qpc(__le64 *qp_ctx) +{ + u64 temp; + u64 lsn_bit0, rnr_retry_time_bits_l30, ssn_bits_low20; + u64 hw_sq_tail_bits_low11, rdwqe_pyld_length_bits_low5; + u64 vhca_id_bits_low6; + + pr_info("******TX Part******\n"); + + get_64bit_val(qp_ctx, 0, &temp); + pr_info("txwindow_waddr[7:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(7, 0), temp)); + pr_info("Retry_Count:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 8), temp)); + pr_info("Cur_Retry_Count:0x%llx\n", + FIELD_GET(GENMASK_ULL(13, 11), temp)); + pr_info("read_retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(14), temp)); + pr_info("tx_Last_Ack_PSN:0x%llx\n", + FIELD_GET(GENMASK_ULL(38, 15), temp)); + pr_info("ACK_MSN:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 39), temp)); + lsn_bit0 = (u64)FIELD_GET(BIT_ULL(63), temp); + + get_64bit_val(qp_ctx, 8, &temp); + pr_info("LSN:0x%llx\n", + (FIELD_GET(GENMASK_ULL(22, 0), temp) << 1) + lsn_bit0); + pr_info("tx_Ack_Credits:0x%llx\n", + FIELD_GET(GENMASK_ULL(27, 23), temp)); + pr_info("rnr_retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(28), temp)); + pr_info("rnr_retry_threshold:0x%llx\n", + FIELD_GET(GENMASK_ULL(33, 29), temp)); + rnr_retry_time_bits_l30 = (u64)FIELD_GET(GENMASK_ULL(63, 34), temp); + + get_64bit_val(qp_ctx, 16, &temp); + pr_info("rnr_retry_time:0x%llx\n", + (FIELD_GET(GENMASK_ULL(1, 0), temp) << 30) + + rnr_retry_time_bits_l30); + pr_info("wqe_offset:0x%llx\n", FIELD_GET(GENMASK_ULL(33, 2), temp)); + pr_info("fence flag:0x%llx\n", FIELD_GET(GENMASK_ULL(35, 34), temp)); + pr_info("cur_ord_cnt:0x%llx\n", FIELD_GET(GENMASK_ULL(43, 36), temp)); + ssn_bits_low20 = (u64)FIELD_GET(GENMASK_ULL(63, 44), temp); + + get_64bit_val(qp_ctx, 24, &temp); + pr_info("SSN:0x%llx\n", + (FIELD_GET(GENMASK_ULL(3, 0), temp) << 20) + ssn_bits_low20); + pr_info("first_packet_done_flag:0x%llx\n", FIELD_GET(BIT_ULL(4), temp)); + pr_info("PSN MAX:0x%llx\n", FIELD_GET(GENMASK_ULL(28, 5), temp)); + pr_info("PSN_Next:0x%llx\n", FIELD_GET(GENMASK_ULL(52, 29), temp)); + hw_sq_tail_bits_low11 = (u64)FIELD_GET(GENMASK_ULL(63, 53), temp); + + get_64bit_val(qp_ctx, 32, &temp); + pr_info("HW_SQ_Tail:0x%llx\n", + (FIELD_GET(GENMASK_ULL(6, 0), temp) << 11) + + hw_sq_tail_bits_low11); + pr_info("last_packet_time:0x%llx\n", + FIELD_GET(GENMASK_ULL(38, 7), temp)); + pr_info("incast_fragment_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(56, 39), temp)); + pr_info("local_ack_timeout:0x%llx\n", + FIELD_GET(GENMASK_ULL(61, 57), temp)); + pr_info("retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(62), temp)); + + get_64bit_val(qp_ctx, 40, &temp); + pr_info("HW_SQ_Tail_una:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("last_ack_wqe_offset:0x%llx\n", + FIELD_GET(GENMASK_ULL(46, 16), temp)); + pr_info("err_flag:0x%llx\n", FIELD_GET(BIT_ULL(47), temp)); + pr_info("ack_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(48), temp)); + pr_info("in_flight:0x%llx\n", FIELD_GET(GENMASK_ULL(58, 49), temp)); + rdwqe_pyld_length_bits_low5 = (u64)FIELD_GET(GENMASK_ULL(63, 59), temp); + + get_64bit_val(qp_ctx, 48, &temp); + pr_info("rdwqe_pyld_length:0x%llx\n", + (FIELD_GET(GENMASK_ULL(26, 0), temp) << 5) 
+ + rdwqe_pyld_length_bits_low5); + pr_info("package_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(27), temp)); + pr_info("txwindow_waddr[9:8]:0x%llx\n", + FIELD_GET(GENMASK_ULL(29, 28), temp)); + pr_info("txwindow_raddr:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 30), temp)); + pr_info("rd_msg_loss_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(40), temp)); + pr_info("pktchk_rd_msg_loss_err_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(42, 41), temp)); + pr_info("recv_rd_msg_loss_err_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(44, 43), temp)); + pr_info("recv_rd_msg_loss_err_flag:0x%llx\n", + FIELD_GET(BIT_ULL(45), temp)); + pr_info("recv_err_flag:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 46), temp)); + pr_info("recv_read_flag:0x%llx\n", FIELD_GET(BIT_ULL(48), temp)); + + get_64bit_val(qp_ctx, 56, &temp); + pr_info("retry_cqe_sq_opcode:0x%llx\n", + FIELD_GET(GENMASK_ULL(5, 0), temp)); + + get_64bit_val(qp_ctx, 64, &temp); + pr_info("Service_Type:0x%llx\n", FIELD_GET(GENMASK_ULL(2, 0), temp)); + pr_info("SQ_Virtually_Mapped:0x%llx\n", FIELD_GET(BIT_ULL(3), temp)); + pr_info("SQ_Leaf_PBL_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(5, 4), temp)); + pr_info("is_QP1:0x%llx\n", FIELD_GET(BIT_ULL(6), temp)); + pr_info("IPv4:0x%llx\n", FIELD_GET(BIT_ULL(7), temp)); + pr_info("FastRegisterEnable:0x%llx\n", FIELD_GET(BIT_ULL(8), temp)); + pr_info("BindEnable:0x%llx\n", FIELD_GET(BIT_ULL(9), temp)); + pr_info("Insert_VLAN_Tag:0x%llx\n", FIELD_GET(BIT_ULL(10), temp)); + pr_info("VLAN_Tag:0x%llx\n", FIELD_GET(GENMASK_ULL(26, 11), temp)); + pr_info("PD_Index:0x%llx\n", FIELD_GET(GENMASK_ULL(50, 27), temp)); + pr_info("rev_l_key_en:0x%llx\n", FIELD_GET(BIT_ULL(51), temp)); + pr_info("ECN_enable:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 72, &temp); + pr_info("SQ_Address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 80, &temp); + pr_info("Dest_IP_Address_lo:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 88, &temp); + pr_info("Dest_IP_Address_hi:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 96, &temp); + pr_info("Source_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(31, 16), temp)); + pr_info("Flow_Label:0x%llx\n", FIELD_GET(GENMASK_ULL(51, 32), temp)); + pr_info("Hop_Limit_or_TTL:0x%llx\n", + FIELD_GET(GENMASK_ULL(59, 52), temp)); + pr_info("ROCE_Tver:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 60), temp)); + + get_64bit_val(qp_ctx, 104, &temp); + pr_info("Q_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 0), temp)); + pr_info("Dest_QPN:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("ORD_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 112, &temp); + pr_info("P_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 16), temp)); + + get_64bit_val(qp_ctx, 120, &temp); + pr_info("QP_Completion_Context:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 128, &temp); + pr_info("S_IP_low:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 136, &temp); + pr_info("S_IP_high:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 144, &temp); + pr_info("Src_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 0), temp)); + pr_info("PMTU:0x%llx\n", FIELD_GET(GENMASK_ULL(50, 48), temp)); + pr_info("ack_timeout:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 51), temp)); + pr_info("Log_SQ_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(59, 56), 
temp)); + + get_64bit_val(qp_ctx, 152, &temp); + pr_info("TxCmpQueueNum:0x%llx\n", FIELD_GET(GENMASK_ULL(20, 0), temp)); + pr_info("NVME_OF_QID:0x%llx\n", FIELD_GET(GENMASK_ULL(30, 21), temp)); + pr_info("Is_NVME_OF_Target:0x%llx\n", FIELD_GET(BIT_ULL(31), temp)); + pr_info("Is_NVME_OF_IOQ:0x%llx\n", FIELD_GET(BIT_ULL(32), temp)); + pr_info("GQP_id:0x%llx\n", FIELD_GET(GENMASK_ULL(43, 33), temp)); + pr_info("DCQCN_enable:0x%llx\n", FIELD_GET(BIT_ULL(49), temp)); + pr_info("queue_Tc:0x%llx\n", FIELD_GET(GENMASK_ULL(52, 50), temp)); + + get_64bit_val(qp_ctx, 160, &temp); + pr_info("QPN:0x%llx\n", FIELD_GET(GENMASK_ULL(19, 0), temp)); + pr_info("rtt_first_index:0x%llx\n", + FIELD_GET(GENMASK_ULL(35, 22), temp)); + pr_info("rtt_last_index:0x%llx\n", + FIELD_GET(GENMASK_ULL(49, 36), temp)); + pr_info("Traffic_Class_or_TOS:0x%llx\n", + FIELD_GET(GENMASK_ULL(57, 50), temp)); + vhca_id_bits_low6 = (u64)FIELD_GET(GENMASK_ULL(63, 58), temp); + + get_64bit_val(qp_ctx, 168, &temp); + pr_info("VHCA_ID:0x%llx\n", + (FIELD_GET(GENMASK_ULL(3, 0), temp) << 6) + vhca_id_bits_low6); + pr_info("8k_index:0x%llx\n", FIELD_GET(GENMASK_ULL(16, 4), temp)); + pr_info("RDMA_State:0x%llx\n", FIELD_GET(GENMASK_ULL(19, 17), temp)); + pr_info("debug_set:0x%llx\n", FIELD_GET(GENMASK_ULL(29, 20), temp)); + pr_info("qp_link_in:0x%llx\n", FIELD_GET(BIT_ULL(30), temp)); + pr_info("128k_index:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 31), temp)); + + pr_info("******RX Part******\n"); + + get_64bit_val(qp_ctx, 256, &temp); + pr_info("Wr_Dma_Len:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 0), temp)); + pr_info("Wr_R_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("Last_Opcode:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 264, &temp); + pr_info("Wr_Virt_Addr/Q_Key:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 272, &temp); + pr_info("send_psn:0x%llx\n", FIELD_GET(GENMASK_ULL(23, 0), temp)); + pr_info("HW_RQ_Tail/Rnr_Wqe_Index:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 24), temp)); + pr_info("E_PSN/Rnr_Nak_Psn:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 40), temp)); + + get_64bit_val(qp_ctx, 280, &temp); + pr_info("HW_RQ_Tail_credit[14:14]:0x%llx\n", + FIELD_GET(BIT_ULL(0), temp)); + pr_info("nof_check_state:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("qp_check_state:0x%llx\n", FIELD_GET(BIT_ULL(2), temp)); + pr_info("R_MSN:0x%llx\n", FIELD_GET(GENMASK_ULL(26, 3), temp)); + pr_info("ack_nack_flag:0x%llx\n", FIELD_GET(BIT_ULL(27), temp)); + pr_info("HW_RQ_Tail_credit[15:15]:0x%llx\n", + FIELD_GET(BIT_ULL(28), temp)); + pr_info("nak_syn:0x%llx\n", FIELD_GET(GENMASK_ULL(36, 29), temp)); + pr_info("ird_tx_num0/ird_tx_num1:0x%llx\n", + FIELD_GET(GENMASK_ULL(45, 37), temp)); + pr_info("ird_rx_num0/ird_rx_num1:0x%llx\n", + FIELD_GET(GENMASK_ULL(54, 46), temp)); + pr_info("cnp_pending:0x%llx\n", FIELD_GET(BIT_ULL(55), temp)); + pr_info("is_in_list:0x%llx\n", FIELD_GET(BIT_ULL(56), temp)); + pr_info("mr_hit_flag:0x%llx\n", FIELD_GET(BIT_ULL(57), temp)); + pr_info("ack_nak_rsv:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 58), temp)); + pr_info("Rnr_Nak_Signal:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 288, &temp); + pr_info("SW_RQ_Tail:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("psn_seq_error_signal:0x%llx\n", FIELD_GET(BIT_ULL(21), temp)); + pr_info("prifield_check_error_signal:0x%llx\n", + FIELD_GET(BIT_ULL(22), temp)); + pr_info("read_tail[0:0]:0x%llx\n", FIELD_GET(BIT_ULL(23), temp)); + pr_info("tx_send_length:0x%llx\n", + 
FIELD_GET(GENMASK_ULL(63, 24), temp)); + + get_64bit_val(qp_ctx, 296, &temp); + pr_info("read_tail[8:1]:0x%llx\n", FIELD_GET(GENMASK_ULL(7, 0), temp)); + pr_info("last_read_psn:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 8), temp)); + pr_info("ird_send_offset:0x%llx\n", + FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("HW_RQ_Tail_credit[13:6]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 304, &temp); + pr_info("Comm Esta sig:0x%llx\n", FIELD_GET(BIT_ULL(0), temp)); + pr_info("rtt:0x%llx\n", FIELD_GET(GENMASK_ULL(16, 1), temp)); + pr_info("cq_overflow:0x%llx\n", FIELD_GET(BIT_ULL(17), temp)); + pr_info("rq:sec_index[27:12] /\n"); + pr_info("srq:wqe_index[15:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(33, 18), temp)); + pr_info("last_expected_sent_read_psn:0x%llx\n", + FIELD_GET(GENMASK_ULL(57, 34), temp)); + pr_info("HW_RQ_Tail_credit[5:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 58), temp)); + + get_64bit_val(qp_ctx, 312, &temp); + pr_info("rq:sec_index[11:0]+first index[63:12] /\n"); + pr_info("srq:wqe_addr:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 320, &temp); + pr_info("S_IP_low:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 328, &temp); + pr_info("Src_MAC[47:32]:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 16), temp)); + + get_64bit_val(qp_ctx, 336, &temp); + pr_info("Is_NVME_OF_IOQ:0x%llx\n", FIELD_GET(BIT_ULL(0), temp)); + pr_info("Insert_VLAN_Tag:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("PMTU:0x%llx\n", FIELD_GET(GENMASK_ULL(4, 2), temp)); + pr_info("Service_Type:0x%llx\n", FIELD_GET(GENMASK_ULL(7, 5), temp)); + pr_info("IPv4:0x%llx\n", FIELD_GET(BIT_ULL(8), temp)); + pr_info("PD_Index:0x%llx\n", FIELD_GET(GENMASK_ULL(28, 9), temp)); + pr_info("RDMA_State:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 29), temp)); + pr_info("Src_MAC[31:0]:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 32), temp)); + + get_64bit_val(qp_ctx, 344, &temp); + pr_info("Dest_QPN[23:12]:0x%llx\n", + FIELD_GET(GENMASK_ULL(11, 0), temp)); + pr_info("Flow_Label:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 12), temp)); + pr_info("Hop_Limit_or_TTL:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 32), temp)); + pr_info("Traffic_Class_or_TOS:0x%llx\n", + FIELD_GET(GENMASK_ULL(47, 40), temp)); + pr_info("VLAN_Tag:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 48), temp)); + + get_64bit_val(qp_ctx, 352, &temp); + pr_info("srqn[18:0]:0x%llx /\n", FIELD_GET(GENMASK_ULL(18, 0), temp)); + pr_info("is_nvme_of_target[10:10]+nvme_of_qid[9:0] /\n"); + pr_info("rq_address[63:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 360, &temp); + pr_info("db_address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 368, &temp); + pr_info("header length:0x%llx\n", FIELD_GET(GENMASK_ULL(9, 0), temp)); + pr_info("P_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 32), temp)); + pr_info("Source_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 48), temp)); + + get_64bit_val(qp_ctx, 376, &temp); + pr_info("wqe_sign_enbale:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("RQ_Virtually_Mapped:0x%llx\n", FIELD_GET(BIT_ULL(2), temp)); + pr_info("IRD_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(6, 3), temp)); + pr_info("Log_RQ_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 7), temp)); + pr_info("Rse_Enable:0x%llx\n", FIELD_GET(BIT_ULL(11), temp)); + pr_info("Rwr_Enable:0x%llx\n", FIELD_GET(BIT_ULL(12), temp)); + pr_info("Rre_Enable:0x%llx\n", FIELD_GET(BIT_ULL(13), temp)); + 
pr_info("Log_RQ_WQE_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(16, 14), temp)); + pr_info("rq_type:0x%llx\n", FIELD_GET(BIT_ULL(17), temp)); + pr_info("RxCmpQueueNum:0x%llx\n", FIELD_GET(GENMASK_ULL(38, 18), temp)); + pr_info("Dest_QPN[11:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(50, 39), temp)); + pr_info("RQ_Leaf_PBL_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(52, 51), temp)); + pr_info("rsv_lkey_enable:0x%llx\n", FIELD_GET(BIT_ULL(53), temp)); + pr_info("t_ver:0x%llx\n", FIELD_GET(GENMASK_ULL(57, 54), temp)); + pr_info("RQ_Rnr_Nak_Timer:0x%llx\n", + FIELD_GET(GENMASK_ULL(62, 58), temp)); + pr_info("rx_Ack_Credits:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 384, &temp); + pr_info("global_qp_num:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 0), temp)); + pr_info("8k_qp_index:0x%llx\n", FIELD_GET(GENMASK_ULL(23, 11), temp)); + pr_info("debug_set:0x%llx\n", FIELD_GET(GENMASK_ULL(49, 40), temp)); + pr_info("vHCA:0x%llx\n", FIELD_GET(GENMASK_ULL(59, 50), temp)); + pr_info("queue_tc:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 59), temp)); + + get_64bit_val(qp_ctx, 392, &temp); + pr_info("cq_context:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 400, &temp); + pr_info("Dest_IP_Address_hi:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 408, &temp); + pr_info("Dest_IP_Address_lo:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 416, &temp); + pr_info("S_IP_high:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); +} + +#ifdef Z_DH_DEBUG +#if 0 +static void zxdh_print_hw_cqc(__le64 *cq_ctx) +{ + u64 temp; + + get_64bit_val(cq_ctx, 0, &temp); + pr_info("scqe_break_moderation_en:0x%llx", FIELD_GET(BIT_ULL(6), temp)); + pr_info("cq_period:0x%llx", FIELD_GET(GENMASK_ULL(17, 7), temp)); + pr_info("cq_max:0x%llx", FIELD_GET(GENMASK_ULL(33, 18), temp)); + pr_info("vHCA:0x%llx", FIELD_GET(GENMASK_ULL(43, 34), temp)); + pr_info("cqc_debug_set:0x%llx", FIELD_GET(GENMASK_ULL(53, 44), temp)); + pr_info("CQ_OVERFLOW_CHECK_En:0x%llx", FIELD_GET(BIT_ULL(54), temp)); + pr_info("Leaf_PBL_Size:0x%llx", FIELD_GET(GENMASK_ULL(56, 55), temp)); + pr_info("cq_pble_axi_error_locked_flag:0x%llx", + FIELD_GET(BIT_ULL(57), temp)); + pr_info("cqe_size:0x%llx", FIELD_GET(BIT_ULL(58), temp)); + pr_info("cq_overflow_locked_flag:0x%llx", FIELD_GET(BIT_ULL(59), temp)); + pr_info("CQ_State:0x%llx", FIELD_GET(GENMASK_ULL(63, 60), temp)); + + get_64bit_val(cq_ctx, 8, &temp); + pr_info("Doorbell_Shadow_Addr:0x%llx", + FIELD_GET(GENMASK_ULL(57, 0), temp)); + + get_64bit_val(cq_ctx, 16, &temp); + pr_info("CQ_Shadow_Read_Threshold:0x%llx", + FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("log_CQE_NUM:0x%llx", FIELD_GET(GENMASK_ULL(28, 24), temp)); + pr_info("is_in_list_cnt:0x%llx", FIELD_GET(GENMASK_ULL(45, 33), temp)); + pr_info("cq_st:0x%llx", FIELD_GET(GENMASK_ULL(47, 46), temp)); + pr_info("CEQ_ID:0x%llx", FIELD_GET(GENMASK_ULL(59, 48), temp)); + + get_64bit_val(cq_ctx, 24, &temp); + pr_info("arm_valid:0x%llx", FIELD_GET(BIT_ULL(4), temp)); + pr_info("hw_arm_next:0x%llx", FIELD_GET(BIT_ULL(5), temp)); + pr_info("hw_arm_seq_num:0x%llx", FIELD_GET(GENMASK_ULL(7, 6), temp)); + pr_info("hw_cq_select:0x%llx", FIELD_GET(GENMASK_ULL(17, 12), temp)); + pr_info("hw_cq_tail:0x%llx", FIELD_GET(GENMASK_ULL(40, 18), temp)); + pr_info("hw_cq_head:0x%llx", FIELD_GET(GENMASK_ULL(63, 41), temp)); + + get_64bit_val(cq_ctx, 32, &temp); + pr_info("CQ_Address:0x%llx", FIELD_GET(GENMASK_ULL(55, 0), temp)); + + get_64bit_val(cq_ctx, 40, &temp); + 
pr_info("CQ_Context_Value:0x%llx", FIELD_GET(GENMASK_ULL(62, 0), temp)); + + get_64bit_val(cq_ctx, 48, &temp); + pr_info("Leaf_PBLE:0x%llx", FIELD_GET(GENMASK_ULL(27, 0), temp)); + pr_info("cqe_next_cnt:0x%llx", FIELD_GET(GENMASK_ULL(43, 28), temp)); + pr_info("cqe_se_cnt:0x%llx", FIELD_GET(GENMASK_ULL(59, 44), temp)); + + get_64bit_val(cq_ctx, 56, &temp); + pr_info("Root_PBLE:0x%llx", FIELD_GET(GENMASK_ULL(51, 0), temp)); +} +#endif +int zxdh_query_cqc(struct zxdh_sc_cq *cq) +{ + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_dma_mem cqc_buf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + u64 temp; + + cqc_buf.va = NULL; + cqc_buf.size = ALIGN(ZXDH_CQ_CTX_SIZE, ZXDH_CQC_ALIGNMENT); + cqc_buf.va = dma_alloc_coherent(dev->hw->device, cqc_buf.size, + &cqc_buf.pa, GFP_KERNEL); + if (!cqc_buf.va) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_CQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_cqc.cq = cq; + cqp_info->in.u.query_cqc.cqc_buf_pa = cqc_buf.pa; + cqp_info->in.u.query_cqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + + get_64bit_val(cqc_buf.va, 0, &temp); + cq->cq_overflow_locked_flag = FIELD_GET(BIT_ULL(59), temp); +#if 0 + pr_info("***CQ %d HW CQC info print start***\n", cq->cq_uk.cq_id); + zxdh_print_hw_cqc(cqc_buf.va); + pr_info("****CQ %d HW CQC info print end****\n", cq->cq_uk.cq_id); +#endif +free_rsrc: + dma_free_coherent(dev->hw->device, cqc_buf.size, cqc_buf.va, + cqc_buf.pa); + cqc_buf.va = NULL; + return err_code; +} + +static void zxdh_print_hw_ceqc(__le64 *ceq_ctx) +{ + u64 temp; + + get_64bit_val(ceq_ctx, 0, &temp); + pr_info("ceq_period[2:0]:0x%llx", FIELD_GET(GENMASK_ULL(2, 0), temp)); + pr_info("vHCA:0x%llx", FIELD_GET(GENMASK_ULL(12, 3), temp)); + pr_info("INT_num:0x%llx", FIELD_GET(GENMASK_ULL(30, 13), temp)); + pr_info("INT_type:0x%llx", FIELD_GET(BIT_ULL(31), temp)); + pr_info("CEQE_Head:0x%llx", FIELD_GET(GENMASK_ULL(52, 32), temp)); + pr_info("CEQE_Valid:0x%llx", FIELD_GET(BIT_ULL(53), temp)); + pr_info("Leaf_PBL_Size:0x%llx", FIELD_GET(GENMASK_ULL(55, 54), temp)); + pr_info("CEQ_size:0x%llx", FIELD_GET(GENMASK_ULL(57, 56), temp)); + pr_info("log_CEQ_num:0x%llx", FIELD_GET(GENMASK_ULL(62, 58), temp)); + pr_info("CEQ_State:0x%llx", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(ceq_ctx, 8, &temp); + pr_info("CEQ_Address:0x%llx", FIELD_GET(GENMASK_ULL(56, 0), temp)); + pr_info("ceq_period[9:3]:0x%llx", FIELD_GET(GENMASK_ULL(63, 57), temp)); + + get_64bit_val(ceq_ctx, 16, &temp); + pr_info("ceq_max_cnt:0x%llx", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("ceq_axi_rsp_err_flag:0x%llx", FIELD_GET(BIT_ULL(16), temp)); +} + +int zxdh_query_ceqc(struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_dev *dev = ceq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_dma_mem ceqc_buf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + ceqc_buf.va = NULL; + ceqc_buf.size = ALIGN(ZXDH_CEQ_CTX_SIZE, ZXDH_CEQC_ALIGNMENT); + ceqc_buf.va = dma_alloc_coherent(dev->hw->device, ceqc_buf.size, + &ceqc_buf.pa, GFP_KERNEL); + if 
(!ceqc_buf.va) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_CEQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_ceqc.ceq = ceq; + cqp_info->in.u.query_ceqc.ceqc_buf_pa = ceqc_buf.pa; + cqp_info->in.u.query_ceqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + pr_info("***CEQ %d HW CEQC info print start***\n", ceq->ceq_id); + zxdh_print_hw_ceqc(ceqc_buf.va); + pr_info("****CEQ %d HW CEQC info print end****\n", ceq->ceq_id); + +free_rsrc: + dma_free_coherent(dev->hw->device, ceqc_buf.size, ceqc_buf.va, + ceqc_buf.pa); + ceqc_buf.va = NULL; + return err_code; +} + +static void zxdh_print_hw_aeqc(__le64 *aeq_ctx) +{ + u64 temp; + + get_64bit_val(aeq_ctx, 0, &temp); + pr_info("Intr_ID:0x%llx", FIELD_GET(GENMASK_ULL(11, 0), temp)); + pr_info("AEQ_Head:0x%llx", FIELD_GET(GENMASK_ULL(34, 13), temp)); + pr_info("Leaf_PBL_Size:0x%llx", FIELD_GET(GENMASK_ULL(36, 35), temp)); + pr_info("Virtually_Mapped:0x%llx", FIELD_GET(BIT_ULL(37), temp)); + pr_info("AEQ_Size:0x%llx", FIELD_GET(GENMASK_ULL(59, 38), temp)); + pr_info("AEQ_State:0x%llx", FIELD_GET(GENMASK_ULL(63, 60), temp)); + + get_64bit_val(aeq_ctx, 8, &temp); + pr_info("AEQ_Address:0x%llx", FIELD_GET(GENMASK_ULL(63, 0), temp)); +} + +int zxdh_query_aeqc(struct zxdh_sc_aeq *aeq) +{ + struct zxdh_sc_dev *dev = aeq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_dma_mem aeqc_buf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + aeqc_buf.va = NULL; + aeqc_buf.size = ALIGN(ZXDH_CEQ_CTX_SIZE, ZXDH_CEQC_ALIGNMENT); + aeqc_buf.va = dma_alloc_coherent(dev->hw->device, aeqc_buf.size, + &aeqc_buf.pa, GFP_KERNEL); + if (!aeqc_buf.va) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_AEQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_aeqc.aeq = aeq; + cqp_info->in.u.query_aeqc.aeqc_buf_pa = aeqc_buf.pa; + cqp_info->in.u.query_aeqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + pr_info("***VHCA %d HW AEQC info print start***\n", dev->vhca_id); + zxdh_print_hw_aeqc(aeqc_buf.va); + pr_info("****VHCA %d HW AEQC info print end****\n", dev->vhca_id); + +free_rsrc: + dma_free_coherent(dev->hw->device, aeqc_buf.size, aeqc_buf.va, + aeqc_buf.pa); + aeqc_buf.va = NULL; + return err_code; +} +#endif + +#ifdef GET_LINK_LAYER_V2 +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u32 port_num) +#elif defined(GET_LINK_LAYER_V1) +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u8 port_num) +#endif +{ + return IB_LINK_LAYER_ETHERNET; +} + +#ifdef IB_MTU_CONVERSIONS + +inline enum ib_mtu ib_mtu_int_to_enum(int mtu) +{ + if (mtu >= 4096) + return IB_MTU_4096; + else if (mtu >= 2048) + return IB_MTU_2048; + else if (mtu >= 1024) + return IB_MTU_1024; + else if (mtu >= 512) + return IB_MTU_512; + else + return IB_MTU_256; +} +#endif + +#ifdef UVERBS_CMD_MASK +inline void kc_set_roce_uverbs_cmd_mask(struct zxdh_device
*iwdev) +{ + iwdev->ibdev.uverbs_cmd_mask |= + BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) | + BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST); +} + +inline void kc_set_rdma_uverbs_cmd_mask(struct zxdh_device *iwdev) +{ + iwdev->ibdev.uverbs_cmd_mask = + BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) | + BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) | + BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) | + BIT_ULL(IB_USER_VERBS_CMD_REG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) | + BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) | + BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) | + BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) | + BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) | + BIT_ULL(IB_USER_VERBS_CMD_POST_SEND); + iwdev->ibdev.uverbs_ex_cmd_mask = + BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) | + BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | + BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_CQ); + + if (iwdev->rf->rdma_ver >= ZXDH_GEN_2) + iwdev->ibdev.uverbs_ex_cmd_mask |= + BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ); +} +#endif + +int zxdh_fill_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf) +{ + struct zxdh_sc_dev *dev = qp->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_QPC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_qpc.qp = qp; + cqp_info->in.u.query_qpc.qpc_buf_pa = qpc_buf->pa; + cqp_info->in.u.query_qpc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + return err_code; + } + return 0; +} + +int zxdh_fill_cqc(struct zxdh_sc_cq *cq, struct zxdh_dma_mem *cqc_buf) +{ + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_CQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_cqc.cq = cq; + cqp_info->in.u.query_cqc.cqc_buf_pa = cqc_buf->pa; + cqp_info->in.u.query_cqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) + err_code = -ENOMEM; + + return err_code; +} diff --git a/src/rdma/src/zrdma_kcompat.h b/src/rdma/src/zrdma_kcompat.h new file mode 100644 index 
0000000000000000000000000000000000000000..b1c7ac57bc458228b9f4da4c02baa58ea2461a80 --- /dev/null +++ b/src/rdma/src/zrdma_kcompat.h @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZRDMA_KCOMPAT_H +#define ZRDMA_KCOMPAT_H + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE +#include +#else +#include +#endif + +#include "distro_ver.h" + +#if defined(__OFED_BUILD__) || defined(__OFED_4_8__) +#include "ofed_kcompat.h" +#elif defined(RHEL_RELEASE_CODE) +#include "rhel_kcompat.h" +#elif defined(CONFIG_SUSE_KERNEL) +#include "suse_kcompat.h" +#elif defined(UTS_UBUNTU_RELEASE_ABI) +#include "ubuntu_kcompat.h" +#else +#include "linux_kcompat.h" +#endif + +#ifndef RDMA_DRIVER_ZXDH +#define RDMA_DRIVER_ZXDH (RDMA_DRIVER_MLX5 + 18) +#endif + +#ifndef IB_QP_ATTR_STANDARD_BITS +#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0) +#endif + +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) +#define TASKLET_DATA_TYPE unsigned long +#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE) + +#define tasklet_setup(tasklet, callback) \ + tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \ + (TASKLET_DATA_TYPE)(tasklet)) + +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */ + +#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + +#if !defined(__OFED_BUILD__) && !defined(__OFED_4_8__) +#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE +#define dma_alloc_coherent dma_zalloc_coherent +#endif +#endif + +#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE +#define IB_GET_NETDEV_OP_NOT_DEPRECATED +#endif + +#ifdef USE_KMAP +#define kmap_local_page kmap +#define kunmap_local(sq_base) kunmap(iwqp->page) +#endif + +#ifdef IB_IW_PKEY +static inline int zxdh_iw_query_pkey(struct ib_device *ibdev, u8 port, + u16 index, u16 *pkey) +{ + *pkey = 0; + return 0; +} +#endif +/*******************************************************************************/ +struct zxdh_mr; +struct zxdh_cq; +struct zxdh_cq_buf; +struct zxdh_ucontext; +u32 zxdh_create_stag(struct zxdh_device *iwdev); +void zxdh_free_stag(struct zxdh_device *iwdev, u32 stag); +int zxdh_hw_alloc_mw(struct zxdh_device *iwdev, struct zxdh_mr *iwmr); +void zxdh_cq_free_rsrc(struct zxdh_pci_f *rf, struct zxdh_cq *iwcq); +int zxdh_process_resize_list(struct zxdh_cq *iwcq, struct zxdh_device *iwdev, + struct zxdh_cq_buf *lcqe_buf); +#if 
KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width); +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width); +#endif + +#ifdef ZXDH_SET_DRIVER_ID +#define kc_set_driver_id(ibdev) ibdev.driver_id = RDMA_DRIVER_ZXDH +#else +#define kc_set_driver_id(x) +#endif /* ZXDH_SET_DRIVER_ID */ +/*****************************************************************************/ + +/*********************************************************/ +#ifndef ether_addr_copy +#define ether_addr_copy(mac_addr, new_mac_addr) \ + memcpy(mac_addr, new_mac_addr, ETH_ALEN) +#endif +#ifndef eth_zero_addr +#define eth_zero_addr(mac_addr) memset(mac_addr, 0x00, ETH_ALEN) +#endif + +#if KERNEL_VERSION(2, 6, 35) <= LINUX_VERSION_CODE +#define zxdh_for_each_ipv6_addr(ifp, tmp, idev) \ + list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) +#else +#define zxdh_for_each_ipv6_addr(ifp, tmp, idev) \ + for (ifp = idev->addr_list; ifp != NULL; ifp = ifp->if_next) +#endif /* >= 2.6.35 */ + +#ifdef IB_FW_VERSION_NAME_MAX +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str); +#else +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len); +#endif /* IB_FW_VERSION_NAME_MAX */ + +/*****************************************************************************/ +#ifdef CREATE_AH_VER_5 +int zxdh_create_ah_v2(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata); +int zxdh_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, + struct ib_udata *udata); +#endif + +#ifdef CREATE_AH_VER_4 +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + struct ib_udata *udata); +#endif + +#ifdef CREATE_AH_VER_3 +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + u32 flags, struct ib_udata *udata); +#endif + +#ifdef CREATE_AH_VER_2 +int zxdh_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata); +#endif + +#ifdef CREATE_AH_VER_1_1 +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata); +void zxdh_ether_copy(u8 *dmac, struct ib_ah_attr *attr); +#endif +#if defined(CREATE_AH_VER_1_2) +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + struct ib_udata *udata); +void zxdh_ether_copy(u8 *dmac, struct rdma_ah_attr *attr); +#endif + +#if defined(CREATE_AH_VER_0) +struct ib_ah *zxdh_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr); +#endif + +#ifdef DESTROY_AH_VER_4 +int zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags); +#endif + +#ifdef DESTROY_AH_VER_3 +void zxdh_destroy_ah(struct ib_ah *ibah, u32 flags); +#endif + +#ifdef DESTROY_AH_VER_2 +int zxdh_destroy_ah(struct ib_ah *ibah, u32 flags); +#endif + +#ifdef DESTROY_AH_VER_1 +int zxdh_destroy_ah(struct ib_ah *ibah); +#endif + +#ifdef CREATE_CQ_VER_3 +int zxdh_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +#endif + +#ifdef CREATE_CQ_VER_2 +struct ib_cq *zxdh_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +#endif + +#ifdef CREATE_CQ_VER_1 +struct ib_cq *zxdh_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata); +#endif + +/* functions called by zxdh_create_qp and zxdh_free_qp_rsrc */ +int zxdh_validate_qp_attrs(struct 
ib_qp_init_attr *init_attr, + struct zxdh_device *iwdev); + +void zxdh_setup_virt_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *init_info); + +int zxdh_setup_kmode_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *info, + struct ib_qp_init_attr *init_attr); + +void zxdh_roce_fill_and_set_qpctx_info(struct zxdh_qp *iwqp, + struct zxdh_qp_host_ctx_info *ctx_info); + +int zxdh_cqp_create_qp_cmd(struct zxdh_qp *iwqp); + +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp); + +#ifdef ZXDH_ALLOC_MW_VER_2 +int zxdh_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); +#endif + +#ifdef ZXDH_ALLOC_MW_VER_1 +struct ib_mw *zxdh_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata); +#endif + +#ifdef CREATE_QP_VER_2 +int zxdh_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +#endif + +#ifdef CREATE_QP_VER_1 +struct ib_qp *zxdh_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +#endif + +int zxdh_hw_alloc_stag(struct zxdh_device *iwdev, struct zxdh_mr *iwmr); + +#ifdef ZXDH_ALLOC_MR_VER_1 +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata); +#endif + +#ifdef ZXDH_ALLOC_MR_VER_0 +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +#endif + +#ifdef ALLOC_UCONTEXT_VER_2 +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +#endif + +#ifdef ALLOC_UCONTEXT_VER_1 +struct ib_ucontext *zxdh_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata); +#endif + +#ifdef DEALLOC_UCONTEXT_VER_2 +void zxdh_dealloc_ucontext(struct ib_ucontext *context); +#endif + +#ifdef DEALLOC_UCONTEXT_VER_1 +int zxdh_dealloc_ucontext(struct ib_ucontext *context); +#endif + +#if defined(ETHER_COPY_VER_2) +void zxdh_ether_copy(u8 *dmac, struct rdma_ah_attr *attr); +#endif + +#if defined(ETHER_COPY_VER_1) +void zxdh_ether_copy(u8 *dmac, struct ib_ah_attr *attr); +#endif + +#ifdef ALLOC_PD_VER_3 +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#endif + +#ifdef ALLOC_PD_VER_2 +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context, + struct ib_udata *udata); +#endif + +#ifdef ALLOC_PD_VER_1 +struct ib_pd *zxdh_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +#endif + +#ifdef DEALLOC_PD_VER_4 +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +#endif + +#ifdef DEALLOC_PD_VER_3 +void zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +#endif + +#ifdef DEALLOC_PD_VER_2 +void zxdh_dealloc_pd(struct ib_pd *ibpd); +#endif + +#ifdef DEALLOC_PD_VER_1 +int zxdh_dealloc_pd(struct ib_pd *ibpd); +#endif + +int zxdh_add_gid(struct ib_device *device, u8 port_num, unsigned int index, + const union ib_gid *gid, const struct ib_gid_attr *attr, + void **context); + +int zxdh_del_gid(struct ib_device *device, u8 port_num, unsigned int index, + void **context); + +#ifdef ZXDH_DESTROY_CQ_VER_4 +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +#endif + +#ifdef ZXDH_DESTROY_CQ_VER_3 +void zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +#endif + +#ifdef ZXDH_DESTROY_CQ_VER_2 +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +#endif + +#ifdef ZXDH_DESTROY_CQ_VER_1 +int zxdh_destroy_cq(struct ib_cq *ib_cq); +#endif + +#ifdef DESTROY_QP_VER_2 +int zxdh_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); +#define 
kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp, udata) +#endif + +#ifdef DESTROY_QP_VER_1 +int zxdh_destroy_qp(struct ib_qp *ibqp); +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp) +#endif + +#ifdef DEREG_MR_VER_2 +int zxdh_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); +#endif + +#ifdef DEREG_MR_VER_1 +int zxdh_dereg_mr(struct ib_mr *ib_mr); +#endif + +int zxdh_hwdereg_mr(struct ib_mr *ib_mr); + +#ifdef REREG_MR_VER_1 +int zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, + u64 virt, int new_access, struct ib_pd *new_pd, + struct ib_udata *udata); +#endif + +#ifdef REREG_MR_VER_2 +struct ib_mr *zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 len, u64 virt, int new_access, + struct ib_pd *new_pd, struct ib_udata *udata); +#endif + +int zxdh_hwreg_mr(struct zxdh_device *iwdev, struct zxdh_mr *iwmr, u16 access); + +struct ib_mr *zxdh_rereg_mr_trans(struct zxdh_mr *iwmr, u64 start, u64 len, + u64 virt, struct ib_udata *udata); + +struct zxdh_pbl *zxdh_get_pbl(unsigned long va, struct list_head *pbl_list); + +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type); + +void zxdh_del_memlist(struct zxdh_mr *iwmr, struct zxdh_ucontext *ucontext); + +void zxdh_unregister_rdma_device(struct ib_device *ibdev); +#ifndef RDMA_MMAP_DB_SUPPORT +int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot); +#endif +void zxdh_disassociate_ucontext(struct ib_ucontext *context); +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id); +int kc_zxdh_create_sysfs_file(struct ib_device *ibdev); +struct zxdh_device *kc_zxdh_get_device(struct net_device *netdev); +void kc_zxdh_put_device(struct zxdh_device *iwdev); +void kc_set_roce_uverbs_cmd_mask(struct zxdh_device *iwdev); +void kc_set_rdma_uverbs_cmd_mask(struct zxdh_device *iwdev); + +#ifdef QUERY_GID_ROCE_V2 +int zxdh_query_gid_roce(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); +#elif defined(QUERY_GID_ROCE_V1) +int zxdh_query_gid_roce(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +#endif + +#ifdef MODIFY_PORT_V2 +int zxdh_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props); +#elif defined(MODIFY_PORT_V1) +int zxdh_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props); +#endif + +#ifdef QUERY_PKEY_V2 +int zxdh_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); +#elif defined(QUERY_PKEY_V1) +int zxdh_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey); +#endif + +#ifdef ROCE_PORT_IMMUTABLE_V2 +int zxdh_roce_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +#elif defined(ROCE_PORT_IMMUTABLE_V1) +int zxdh_roce_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable); +#endif + +#ifdef IW_PORT_IMMUTABLE_V2 +int zxdh_iw_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +#elif defined(IW_PORT_IMMUTABLE_V1) +int zxdh_iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable); +#endif + +#ifdef ALLOC_HW_STATS_V3 +struct rdma_hw_stats *zxdh_alloc_hw_port_stats(struct ib_device *ibdev, + u32 port_num); +#endif +#ifdef ALLOC_HW_STATS_V2 +struct rdma_hw_stats 
*zxdh_alloc_hw_stats(struct ib_device *ibdev, + u32 port_num); +#endif +#ifdef ALLOC_HW_STATS_V1 +struct rdma_hw_stats *zxdh_alloc_hw_stats(struct ib_device *ibdev, u8 port_num); +#endif + +#ifdef GET_HW_STATS_V2 +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index); +#elif defined(GET_HW_STATS_V1) +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port_num, int index); +#endif +#ifdef PROCESS_MAD_VER_3 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); +#endif +#ifdef PROCESS_MAD_VER_2 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); +#endif + +#ifdef PROCESS_MAD_VER_1 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, size_t in_mad_size, + struct ib_mad_hdr *out_mad, size_t *out_mad_size, + u16 *out_mad_pkey_index); +#endif +#ifdef QUERY_GID_V2 +int zxdh_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); +#elif defined(QUERY_GID_V1) +int zxdh_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +#endif + +int zxdh_query_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf); +void zxdh_print_hw_qpc(__le64 *qp_ctx); +#ifdef Z_DH_DEBUG +int zxdh_query_cqc(struct zxdh_sc_cq *cq); +int zxdh_query_ceqc(struct zxdh_sc_ceq *ceq); +int zxdh_query_aeqc(struct zxdh_sc_aeq *aeq); +#endif + +#ifdef GET_LINK_LAYER_V2 +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u32 port_num); +#elif defined(GET_LINK_LAYER_V1) +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u8 port_num); +#endif + +#ifdef QUERY_PORT_V2 +int zxdh_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); +#elif defined(QUERY_PORT_V1) +int zxdh_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +#endif + +#ifdef GET_ETH_SPEED_AND_WIDTH_V1 +void zxdh_get_eth_speed_and_width(u32 link_speed, u8 *active_speed, + u8 *active_width); +#elif defined(GET_ETH_SPEED_AND_WIDTH_V2) +void zxdh_get_eth_speed_and_width(u32 link_speed, u16 *active_speed, + u8 *active_width); +#endif + +void zxdh_clean_cqes(struct zxdh_qp *iwqp, struct zxdh_cq *iwcq); +#ifndef NETDEV_TO_IBDEV_SUPPORT +struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, + int driver_id); +void ib_unregister_device_put(struct ib_device *device); +#endif + +#if defined(DEREG_MR_VER_2) && defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.ops.dereg_mr((iwqp)->lsmm_mr, NULL)) +#elif defined(DEREG_MR_VER_2) && !defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL)) +#elif !defined(DEREG_MR_VER_2) && defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.ops.dereg_mr((iwqp)->lsmm_mr)) +#else +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr)) +#endif + +static inline int cq_validate_flags(u32 flags, u8 hw_rev) +{ + /* GEN1 does not support CQ create flags */ + if (hw_rev == ZXDH_GEN_1) + 
return flags ? -EOPNOTSUPP : 0; + + return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : + 0; +} + +static inline u64 *zxdh_next_pbl_addr(u64 *pbl, struct zxdh_pble_info **pinfo, + u32 *idx, u32 *l2_pinfo_cnt) +{ + *idx += 1; + if (!(*pinfo) || *idx != (*pinfo)->cnt) + return ++pbl; + *idx = 0; + (*pinfo)++; + *l2_pinfo_cnt += 1; + return (*pinfo)->addr; +} + +/* Introduced in this series https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/ + * A zrdma version of the helper that does the same on older kernels, with the difference that the + * iova is passed in rather than derived from umem->iova. + */ +static inline size_t zxdh_ib_umem_num_dma_blocks(struct ib_umem *umem, + unsigned long pgsz, u64 iova) +{ + /* some older OFED distros do not have ALIGN_DOWN */ +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a)-1), (a)) +#endif + + return (size_t)((ALIGN(iova + umem->length, pgsz) - + ALIGN_DOWN(iova, pgsz))) / + pgsz; +} + +int zxdh_fill_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf); +int zxdh_fill_cqc(struct zxdh_sc_cq *cq, struct zxdh_dma_mem *cqc_buf); +#endif /* ZRDMA_KCOMPAT_H */ diff --git a/src/rdma/src/zxdh_user_ioctl_cmds.h b/src/rdma/src/zxdh_user_ioctl_cmds.h new file mode 100644 index 0000000000000000000000000000000000000000..ba21be7b25e6e49fa5192c594614287139cde37e --- /dev/null +++ b/src/rdma/src/zxdh_user_ioctl_cmds.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_IOCTL_CMDS_H +#define ZXDH_USER_IOCTL_CMDS_H + +#include +#include + +enum zxdh_ib_dev_get_log_trace_attrs { + ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_set_log_trace_attrs { + ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_cap_start { + ZXDH_IB_ATTR_DEV_CAP_START = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_CAP_START_RESP, +}; + +enum zxdh_ib_dev_cap_stop { + ZXDH_IB_ATTR_DEV_CAP_STOP = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_cap_free { + ZXDH_IB_ATTR_DEV_CAP_FREE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_mp_cap { + ZXDH_IB_ATTR_DEV_MP_CAP = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_MP_CAP_RESP, +}; + +enum zxdh_ib_dev_mp_get_data { + ZXDH_IB_ATTR_DEV_MP_GET_DATA = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_mp_cap_clear { + ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_methods { + ZXDH_IB_METHOD_DEV_GET_LOG_TRACE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_METHOD_DEV_SET_LOG_TRACE, + ZXDH_IB_METHOD_DEV_CAP_START, + ZXDH_IB_METHOD_DEV_CAP_STOP, + ZXDH_IB_METHOD_DEV_CAP_FREE, + ZXDH_IB_METHOD_DEV_MP_CAP, + ZXDH_IB_METHOD_DEV_MP_GET_DATA, + ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR, +}; + +enum zxdh_ib_qp_modify_udp_sport_attrs { + ZXDH_IB_ATTR_QP_UDP_PORT = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_QPN, +}; + +enum zxdh_ib_qp_query_qpc_attrs { + ZXDH_IB_ATTR_QP_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_QUERY_RESP, +}; + +enum zxdh_ib_qp_modify_qpc_attrs { + ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ, + ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK, +}; + +enum zxdh_ib_qp_reset_qp_attrs { + ZXDH_IB_ATTR_QP_RESET_QP_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_RESET_OP_CODE, +}; + +enum zxdh_ib_qp_methods { + ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_METHOD_QP_QUERY_QPC, +
ZXDH_IB_METHOD_QP_MODIFY_QPC, + ZXDH_IB_METHOD_QP_RESET_QP, +}; + +enum zxdh_ib_objects { + ZXDH_IB_OBJECT_DEV = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_OBJECT_QP_OBJ, +}; + +#endif diff --git a/src/rdma/src/zxdh_user_ioctl_verbs.h b/src/rdma/src/zxdh_user_ioctl_verbs.h new file mode 100644 index 0000000000000000000000000000000000000000..a62f926fd4939ad238f380afe269d15f4693bca3 --- /dev/null +++ b/src/rdma/src/zxdh_user_ioctl_verbs.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_IOCTL_VERBS_H +#define ZXDH_USER_IOCTL_VERBS_H + +#include + +struct zxdh_query_qpc_resp { + __u8 retry_flag; + __u8 rnr_retry_flag; + __u8 read_retry_flag; + __u8 cur_retry_count; + __u8 retry_cqe_sq_opcode; + __u8 err_flag; + __u8 ack_err_flag; + __u8 package_err_flag; + __u8 recv_err_flag; + __u8 retry_count; + __u32 tx_last_ack_psn; +}; + +struct zxdh_modify_qpc_req { + __u8 retry_flag; + __u8 rnr_retry_flag; + __u8 read_retry_flag; + __u8 cur_retry_count; + __u8 retry_cqe_sq_opcode; + __u8 err_flag; + __u8 ack_err_flag; + __u8 package_err_flag; +}; + +struct zxdh_cap_start_resp { + __u64 cap_pa_node0; + __u64 cap_pa_node1; +}; + +struct zxdh_mp_cap_resp { + __u8 mcode_type; + __u8 cap_gqp_num; + __u16 cap_gqpid[4]; + __u64 cap_pa; +}; +#endif