diff --git a/kmod-dinghai.spec b/kmod-dinghai.spec
index 834d8d06b0152d19ecf6db3b60be14a1a02c80aa..5a75361dc8bc8120a0a08668446f5415517392df 100644
--- a/kmod-dinghai.spec
+++ b/kmod-dinghai.spec
@@ -47,10 +47,9 @@ pushd src/crypto/zsda
popd
pushd src/net/build
./build.pl -t clean --ksrc /usr/src/kernels/%{kernel}.%{_arch}
- ./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF \
- -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH \
- -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP \
- -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_TSN \
+ ./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX \
+ -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST \
+ -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP \
--ksrc /usr/src/kernels/%{kernel}.%{_arch}
popd
diff --git a/src/net/.gitignore b/src/net/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8693e5a585f27f14fc6408236b104771039e92c6
--- /dev/null
+++ b/src/net/.gitignore
@@ -0,0 +1,16 @@
+autoconf.h
+# Compiled files
+*.ko
+*.mod.c
+*.mod.o
+*.o
+*.o.cmd
+*.cmd
+*.mod
+*.o.d
+
+# Generated files
+Module.symvers
+Module.markers
+modules.order
+.vscode/settings.json
diff --git a/src/net/Makefile b/src/net/Makefile
index c7829d27c2a905903efe225a2df440811fb04a9b..880aae0c127127acb466fc6d83c08497115f9768 100755
--- a/src/net/Makefile
+++ b/src/net/Makefile
@@ -2,6 +2,6 @@ EXTRA_CFLAGS += -I$(CWD)/include
EXTRA_CFLAGS += -I$(CWD)/drivers/net/ethernet/dinghai/zf_mpf/epc
subdir-ccflags-y += -include $(CWD)/autoconf.h
-obj-$(CONFIG_DINGHAI_ETH) += drivers/net/ethernet/dinghai/
-obj-$(CONFIG_ZXDH_AUXILIARY) += drivers/base/
+obj-$(CONFIG_DINGHAI_ETH) += drivers/net/ethernet/dinghai/
+obj-m += drivers/base/
obj-m += drivers/pcie/zxdh_pcie/
\ No newline at end of file
diff --git a/src/net/README.MD b/src/net/README.MD
index 7c31da72bb8971492d2a21a3c4cb38ed9ebafc50..dedeb67987fe12e80536be8e77b3d6a81b651ac1 100644
--- a/src/net/README.MD
+++ b/src/net/README.MD
@@ -1,7 +1,7 @@
# 源码编译驱动
## build
cd ./build
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_TSN
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP
## build clean
cd ./build
@@ -12,26 +12,26 @@ cd ./build
2. You can also use the "build.sh" compilation script under the root directory of the source code: ./build.sh
3. If you are using ZF server or your architecture is aarch64, and you want to compile the mpf.ko driver to run on ZF.
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_DINGHAI_ZF_MPF -m PCIE_ZF_EPC_OPEN
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_DINGHAI_ZF_MPF -m PCIE_ZF_EPC_OPEN
4. **Note: CONFIG_DINGHAI_ZF_MPF is used for the ZF platform(aarch64), CONFIG_DINGHAI_MPF is used for the x86_64 platform, and the two cannot be used at the same time.**
5. If you want to test 1588, you need to add the compile option CONFIG_ZXDH_1588 and open macro TIME_STAMP_1588, otherwise do not add the compile option CONFIG_ZXDH_1588.
example,
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_TSN
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP
6. ## Modify the queue pairs
If you want to modify the queue pairs, you should add the "max_pairs=*" parameter after "insmod zxdh_en_aux.ko". If you do not add the "max_pairs=*" parameter after "insmod zxdh_en_aux.ko", the default queue pairs in the driver are used for initialization. For example:
insmod zxdh_en_aux.ko max_pairs=16
7. If you want to test Hot-Plug PF(hpf), you need to add the compile option CONFIG_DINGHAI_HPF. For example:
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_HPF
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_HPF
8. rpm包相关
## Compile and Install driver module
```
cd ./build
-./build.pl -t {all|install} -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP
+./build.pl -t {all|install} -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP
```
- These drivers will be compiled and they will not only appear in the local directory, but also be copied to **/lib/modules/\<kernel version\>/updates/drivers/net/ethernet/zte/zxdh/**
- The install location listed above is the default location. This may differ for various Linux distributions.
@@ -58,7 +58,7 @@ cd ./build
## cross compile
If you want to use source code to cross-compile, for example, the host is the x86_64 platform and the target is the aarch64 platform, you need to add 3 additional parameters.
``` shell
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_DINGHAI_ZF_MPF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP --ksrc /path/kernel/source --cross_compile /path/cross/compile/tools/aarch64-pc-linux-gnu- --target_arch aarch64
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_DINGHAI_ZF_MPF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP --ksrc /path/kernel/source --cross_compile /path/cross/compile/tools/aarch64-pc-linux-gnu- --target_arch aarch64
```
- --ksrc kernel source path
- --cross_compile cross compile tools path
@@ -92,7 +92,7 @@ zxdh_zf_mpf_rpm_build.sh -h
- `zxdh_np.ko`
- `zxdh_pf.ko`
- `zxdh_en_aux.ko`
-- `zxdh_auxiliary.ko`(内核是否支持`auxiliary`总线,都会生成该驱动)
+- `zxdh_auxiliary.ko`(如果内核不支持`auxiliary`总线,那么就会生成该驱动)
其中`zxdh-smartnic-config-*******.rpm`包里包含配置文件
- `dpu_init.cfg`或者`smart-nic_init.cfg`
@@ -106,7 +106,7 @@ zxdh_zf_mpf_rpm_build.sh -h
- `zxdh_cmd.ko`
- `zxdh_np.ko`
- `zxdh_mpf.ko`
-- `zxdh_auxiliary.ko`(内核是否支持`auxiliary`总线,都会生成该驱动)
+- `zxdh_auxiliary.ko`(如果内核不支持`auxiliary`总线,那么就会生成该驱动)
## `zxdh_zf_mpf_rpm_build.sh`脚本
该脚本支持交叉编译, 但是目标架构必须是`aarch64`, 和使用不同内核源码编译, 具体参数见`zxdh_zf_mpf_rpm_build.sh -h`
diff --git a/src/net/build.sh b/src/net/build.sh
index 3ff31dc77bc288226d633c5f69655181c0fd7180..abd28fbe8dd036eff14e7a3f2de959ed5879846e 100755
--- a/src/net/build.sh
+++ b/src/net/build.sh
@@ -7,7 +7,6 @@ unload_drivers() {
echo "卸载驱动"
rmmod zxdh_en_aux
rmmod zxdh_pf
- rmmod zxdh_tsn
rmmod zxdh_ptp
rmmod zxdh_np
rmmod zxdh_cmd
@@ -18,10 +17,7 @@ compile_zxdh_kernel() {
echo "编译zxdh_kernel"
cd $path/build
./build.pl -t clean
- ./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF \
- -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH \
- -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP \
- -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_TSN
+ ./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP
}
load_zxdh_kernel() {
@@ -31,7 +27,6 @@ load_zxdh_kernel() {
insmod net/ethernet/dinghai/zxdh_cmd.ko
insmod net/ethernet/dinghai/zxdh_np.ko
insmod net/ethernet/dinghai/zxdh_ptp.ko
- insmod net/ethernet/dinghai/zxdh_tsn.ko
insmod net/ethernet/dinghai/zxdh_pf.ko
insmod net/ethernet/dinghai/zxdh_en_aux.ko
}
diff --git a/src/net/build/Makefile b/src/net/build/Makefile
index b32d748768e01ecb28d6a0412d51071cca8da530..262d1714d970a874207a9df497b5a07d231eabf2 100755
--- a/src/net/build/Makefile
+++ b/src/net/build/Makefile
@@ -50,7 +50,7 @@ LINUXINCLUDE=\
zxdh_en_aux_udev_rule_file:
@echo 'ACTION!="add", GOTO="drivers_end"' > ${ZXDH_EN_AUX_UDEV_RULE}.rules
- @echo 'ENV{MODALIAS}=="zxdh_auxiliary:zxdh_pf.en_aux", RUN{builtin}+="kmod load zxdh_en_aux"' >> ${ZXDH_EN_AUX_UDEV_RULE}.rules
+ @echo 'ENV{MODALIAS}=="auxiliary:zxdh_pf.en_aux", RUN{builtin}+="kmod load zxdh_en_aux"' >> ${ZXDH_EN_AUX_UDEV_RULE}.rules
@echo 'LABEL="drivers_end"' >> ${ZXDH_EN_AUX_UDEV_RULE}.rules
@@ -83,11 +83,13 @@ kernel:
modules_install:
@echo "Installing kernel modules..."
+$(call kernelbuild, modules_install)
+ ${auxiliary_post_install}
+
##############################
# Build and install kernel #
##############################
-all: kernel modules_install zxdh_en_aux_udev_rule_install
+all: kernel modules_install
@echo "Running depmod..."
$(call cmd_depmod)
$(call cmd_initramfs)
@@ -106,8 +108,9 @@ clean:
##############################
modules_uninstall:
rm -rf ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}
+ ${auxiliary_post_uninstall}
-uninstall: clean modules_uninstall zxdh_en_aux_udev_rule_uninstall
+uninstall: clean modules_uninstall
$(call cmd_depmod)
$(call cmd_initramfs)
diff --git a/src/net/build/build.pl b/src/net/build/build.pl
index 42bd19790dee0990398b4fa64ed5b4d35429dc56..f5d5bd4bc4b5d07358db9ac571768b7cc99ead1d 100755
--- a/src/net/build/build.pl
+++ b/src/net/build/build.pl
@@ -10,20 +10,17 @@ use File::Temp;
use Cwd;
use Term::ANSIColor qw(:constants);
use Data::Dumper;
-use Scalar::Util 'reftype';
my $CWD = getcwd;
my $compile_arg = "";
-my $num_cores = scalar(`cat /proc/cpuinfo | grep -c '^processor'`);
-$num_cores -= 1 unless $num_cores == 1;
+
my $autoconf_h = $CWD.'/../autoconf.h';
my $target_name;
my $auto_config= "";
my $ksrc = "";
my $cross_compile = "";
my $target_arch = "";
-my $driver_version = "";
print $autoconf_h."\n";
while ( $#ARGV >= 0 ) {
@@ -39,9 +36,6 @@ while ( $#ARGV >= 0 ) {
my $mod = shift(@ARGV);
$auto_config = $auto_config."\n"."#define ".$mod." 2";
$compile_arg = $compile_arg." ".$mod."=y";
- } elsif ( $cmd_flag eq "--dri_ver" ) {
- my $mod = shift(@ARGV);
- $compile_arg = $compile_arg." CONFIG_DRIVER_VERSION=".$mod;
} elsif ( $cmd_flag eq "--ksrc" ) {
$ksrc = shift(@ARGV);
$compile_arg = $compile_arg." "."KSRC=".$ksrc;
@@ -51,21 +45,17 @@ while ( $#ARGV >= 0 ) {
} elsif ( $cmd_flag eq "--target_arch" ) {
$target_arch = shift(@ARGV);
$compile_arg = $compile_arg." "."TARGET_ARCH=".$target_arch;
- } elsif ( $cmd_flag eq "-j" ) {
- $num_cores = shift(@ARGV);
} elsif ( $cmd_flag eq "--help" or $cmd_flag eq "-h" ) {
usage();
exit 0;
}
}
-
system("cat >$autoconf_h</dev/null 2>&1; echo $$?)
+endif # check_aux_bus exists
+
+# The out-of-tree auxiliary module we ship should be moved into this
+# directory as part of installation.
+export INSTALL_AUX_DIR ?= updates/drivers/net/ethernet/zte/auxiliary
+
+# If we're installing auxiliary bus out-of-tree, the following steps are
+# necessary to ensure the relevant files get put in place.
+ifeq (${NEED_AUX_BUS},2)
+define auxiliary_post_install
+ install -D -m 644 ../Module.symvers ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/zxdh_auxiliary.symvers
+ install -d ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}
+ mv -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/drivers/base/zxdh_auxiliary.ko \
+ ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/zxdh_auxiliary.ko
+ install -D -m 644 ../include/linux/dinghai/auxiliary_bus.h ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h
+endef
+else
+auxiliary_post_install =
+endif
+
+ifeq (${NEED_AUX_BUS},2)
+define auxiliary_post_uninstall
+ rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/extern-symvers/zxdh_auxiliary.symvers
+ rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_AUX_DIR}/zxdh_auxiliary.ko
+ rm -f ${INSTALL_MOD_PATH}/${KSRC}/include/linux/auxiliary_bus.h
+endef
+else
+auxiliary_post_uninstall =
+endif
+
######################
# Kernel Build Macro #
######################
@@ -383,4 +424,5 @@ kernelbuild = $(call warn_signed_modules) \
$(if ${NEED_CROSS_COMPILE}, CROSS_COMPILE=$(CROSS_COMPILE)) \
$(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG=n) \
$(if ${DISABLE_MODULE_SIGNING},CONFIG_MODULE_SIG_ALL=) \
+ $(if ${NEED_AUX_BUS},NEED_AUX_BUS="${NEED_AUX_BUS}") \
${2} ${1}
diff --git a/src/net/build/spec/zxdh-eth.spec.example b/src/net/build/spec/zxdh-eth.spec.example
index 5a526665fcfab744e33e47b82f93c7294c4c766e..cac77fa5c2715daf9481011b1a8c60ff7ea2cfe1 100644
--- a/src/net/build/spec/zxdh-eth.spec.example
+++ b/src/net/build/spec/zxdh-eth.spec.example
@@ -8,8 +8,6 @@
%{!?_config_version: %global _config_version 1.0}
%{!?_config_release: %global _config_release 1}
-%{!?ETH_DRI_VER: %global ETH_DRI_VER 1.0-1}
-
%{!?KSRC: %global KSRC /lib/modules/%(uname -r)/source}
%{!?target: %global target %(uname -m)}
@@ -84,28 +82,24 @@ cd ./build
%if 0%{?CROSS_COMPILE:1}
# cross compile
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF \
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF \
-m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX \
-m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH \
-m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD \
-m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ \
-m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP \
- -m CONFIG_DINGHAI_TSN\
--ksrc %{KSRC} \
--cross_compile %{CROSS_COMPILE} \
- --target_arch %{target} \
- --dri_ver %{ETH_DRI_VER}
+ --target_arch %{target}
%else
# no cross compile
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF \
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF \
-m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX \
-m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH \
-m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD \
-m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ \
-m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP \
- -m CONFIG_DINGHAI_TSN\
- --ksrc %{KSRC} \
- --dri_ver %{ETH_DRI_VER}
+ --ksrc %{KSRC}
%endif
%install
@@ -113,17 +107,27 @@ cd ./build
export INSTALL_MOD_PATH=%{buildroot}
export INSTALL_MOD_DIR=%{install_mod_dir}
+export INSTALL_AUX_DIR=%{install_mod_dir}/auxiliary
export KSRC=%{KSRC}
make modules_install INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_DIR=%{install_mod_dir} KSRC=%{KSRC}
# Remove modules files that we do not want to include
find %{buildroot}/lib/modules/ -name 'modules.*' -exec rm -f {} \;
+find %{buildroot}/lib/modules/ -name 'zxdh_auxiliary.symvers' -exec rm -f {} \;
+find %{buildroot}/lib/modules/ -name 'auxiliary_bus.h' -exec rm -f {} \;
+%if 0%{?NEED_AUX_BUS:1}
+if [ -d %{buildroot}%{KSRC} ]; then
+ rm -rf %{buildroot}%{KSRC};
+fi
+%endif
cd %{buildroot}
find lib -name "*.ko" -printf "/%p\n" \
>%{_builddir}/zxdh_kernel/file.list
export _ksrc=%{KSRC}
+
+
# Add config files
%if %{IS_RHEL_VENDOR}
%if ! 0%{?fedora}
@@ -140,12 +144,11 @@ done
%endif
%endif
-# Add zxdh_en_aux udev conf
-
-%{__install} -d %{buildroot}%{_sysconfdir}/udev/rules.d/
-echo 'ACTION!="add", GOTO="drivers_end"' > %{buildroot}%{_sysconfdir}/udev/rules.d/80-%{name}-zxdh_en_aux-drivers.rules
-echo 'ENV{MODALIAS}=="zxdh_auxiliary:zxdh_pf.en_aux", RUN{builtin}+="kmod load zxdh_en_aux"' >> %{buildroot}%{_sysconfdir}/udev/rules.d/80-%{name}-zxdh_en_aux-drivers.rules
-echo 'LABEL="drivers_end"' >> %{buildroot}%{_sysconfdir}/udev/rules.d/80-%{name}-zxdh_en_aux-drivers.rules
+# Add zxdh_en_aux modprobe conf
+%if 0%{?NEED_AUX_BUS:1}
+%{__install} -d %{buildroot}%{_sysconfdir}/modprobe.d/
+echo "install zxdh_pf /sbin/modprobe --ignore-install zxdh_pf && /sbin/modprobe zxdh_en_aux" > %{buildroot}%{_sysconfdir}/modprobe.d/zxdh_en_aux.conf
+%endif
cd $HOME/rpmbuild/BUILD/zxdh_kernel/build/zxdh_config
%{__install} -m 644 ./%{_config_file}.cfg %{buildroot}%{_sysconfdir}
@@ -160,7 +163,9 @@ rm -rf %{buildroot}
%endif
%endif
-%{_sysconfdir}/udev/rules.d/80-%{name}-zxdh_en_aux-drivers.rules
+%if 0%{?NEED_AUX_BUS:1}
+%config(noreplace) %{_sysconfdir}/modprobe.d/zxdh_en_aux.conf
+%endif
diff --git a/src/net/build/spec/zxdh-hpf.spec.example b/src/net/build/spec/zxdh-hpf.spec.example
index 3dd09b05c3fbd12e3d0f92dea33cd32eb1838e7a..da93c71957d6dccc6de28752c617358661d1ccaa 100644
--- a/src/net/build/spec/zxdh-hpf.spec.example
+++ b/src/net/build/spec/zxdh-hpf.spec.example
@@ -4,8 +4,6 @@
%{!?_version: %global _version 1.0}
%{!?_release: %global _release 1}
-%{!?HPF_DRI_VER: %global HPF_DRI_VER 1.0-1}
-
%{!?KSRC: %global KSRC /lib/modules/%(uname -r)/source}
%{!?target: %global target %(uname -m)}
@@ -65,15 +63,17 @@ cd ./build
%if 0%{?CROSS_COMPILE:1}
# cross compile
./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_HPF -m CONFIG_DINGHAI_DH_CMD -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS\
+ -m HAVE_DEV_PM_DOMAIN_ATTACH \
+ -m HAVE_BUS_FIND_DEVICE_GET_CONST \
--ksrc %{KSRC} \
--cross_compile %{CROSS_COMPILE} \
- --target_arch %{target} \
- --dri_ver %{HPF_DRI_VER}
+ --target_arch %{target}
%else
# no cross compile
./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_HPF -m CONFIG_DINGHAI_DH_CMD -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS\
- --ksrc %{KSRC} \
- --dri_ver %{HPF_DRI_VER}
+ -m HAVE_DEV_PM_DOMAIN_ATTACH \
+ -m HAVE_BUS_FIND_DEVICE_GET_CONST \
+ --ksrc %{KSRC}
%endif
%install
@@ -81,11 +81,21 @@ cd ./build
export INSTALL_MOD_PATH=%{buildroot}
export INSTALL_MOD_DIR=%{install_mod_dir}
+export INSTALL_AUX_DIR=%{install_mod_dir}/auxiliary
export KSRC=%{KSRC}
make modules_install INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_DIR=%{install_mod_dir} KSRC=%{KSRC}
# Remove modules files that we do not want to include
find %{buildroot}/lib/modules/ -name 'modules.*' -exec rm -f {} \;
+find %{buildroot}/lib/modules/ -name 'zxdh_auxiliary.symvers' -exec rm -f {} \;
+find %{buildroot}/lib/modules/ -name 'auxiliary_bus.h' -exec rm -f {} \;
+
+%if 0%{?NEED_AUX_BUS:1}
+find %{buildroot}/lib/modules/ -name '*auxiliary.ko' -exec rm -f {} \;
+if [ -d %{buildroot}%{KSRC} ]; then
+ rm -rf %{buildroot}%{KSRC};
+fi
+%endif
cd %{buildroot}
find lib -name "*.ko" -printf "/%p\n" \
diff --git a/src/net/build/spec/zxdh-zf-mpf.spec.example b/src/net/build/spec/zxdh-zf-mpf.spec.example
index d918da7e707c96beffb2a92aa2acc23e0714567d..fa8eacdacedd1486a84949521cc6cafd1f32a457 100644
--- a/src/net/build/spec/zxdh-zf-mpf.spec.example
+++ b/src/net/build/spec/zxdh-zf-mpf.spec.example
@@ -4,8 +4,6 @@
%{!?_version: %global _version 1.0}
%{!?_release: %global _release 1}
-%{!?ZF_MPF_DRI_VER: %global ZF_MPF_DRI_VER 1.0-1}
-
%{!?KSRC: %global KSRC /lib/modules/%(uname -r)/source}
%{!?target: %global target aarch64}
@@ -65,24 +63,22 @@ cd ./build
%if 0%{?CROSS_COMPILE:1}
# cross compile
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_ZF_MPF \
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_ZF_MPF \
-m PCIE_ZF_EPC_OPEN -m CONFIG_DINGHAI_DH_CMD \
-m CONFIG_ZXDH_SF -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS \
-m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST \
-m CONFIG_ZF_GDMA \
--ksrc %{KSRC} \
--cross_compile %{CROSS_COMPILE} \
- --target_arch %{target} \
- --dri_ver %{ZF_MPF_DRI_VER}
+ --target_arch %{target}
%else
# no cross compile
-./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_ZF_MPF \
+./build.pl -t kernel -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_ZF_MPF \
-m PCIE_ZF_EPC_OPEN -m CONFIG_DINGHAI_DH_CMD \
-m CONFIG_ZXDH_SF -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS \
-m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST \
-m CONFIG_ZF_GDMA \
- --ksrc %{KSRC} \
- --dri_ver %{ZF_MPF_DRI_VER}
+ --ksrc %{KSRC}
%endif
%install
@@ -97,6 +93,13 @@ make modules_install INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_DIR=%{install_mod
find %{buildroot}/lib/modules/ -name 'modules.*' -exec rm -f {} \;
find %{buildroot}/lib/modules/ -name '*zf_epf.ko' -exec rm -f {} \;
+%if 0%{?NEED_AUX_BUS:1}
+find %{buildroot}/lib/modules/ -name '*auxiliary.ko' -exec rm -f {} \;
+if [ -d %{buildroot}%{KSRC} ]; then
+ rm -rf %{buildroot}%{KSRC};
+fi
+%endif
+
cd %{buildroot}
find lib -name "*.ko" -printf "/%p\n" \
>%{_builddir}/zxdh_kernel/zf_mpf_file.list
diff --git a/src/net/ci_cmd.sh b/src/net/ci_cmd.sh
old mode 100755
new mode 100644
diff --git a/src/net/compat/config.h b/src/net/compat/config.h
old mode 100755
new mode 100644
diff --git a/src/net/compile.sh b/src/net/compile.sh
index c249b036254db3ab58d06f376b49cc9d3e341089..864e607f7b281412f15f4287eb642eea73b91466 100755
--- a/src/net/compile.sh
+++ b/src/net/compile.sh
@@ -4,7 +4,6 @@
compile_stage=$1 #四种取值:verifyci、gateci、postci、versionci
build_no=$2
chg_no=$3
-rpm_ver_no="2.24.20.02"
#ver_path不变,各组件只需修改root_dir
@@ -20,13 +19,13 @@ if [ "$compile_stage" = "verifyci" ]; then
echo "NXI_NXE_DPU_host_x86_file:"$NXI_NXE_DPU_host_x86_file
DPU_zf_aarch64_file=$(ls /lib/modules/ | grep "aarch64_${DPU_CGS_kernel_aarch64}$")
echo "DPU_zf_aarch64_file:"$DPU_zf_aarch64_file
-
+
echo "start check x86 pf driver"
compile_path=${root_dir}/build
rm -f $compile_path/zxdh_kernel_compile.txt $compile_path/zxdh_kernel_compile_fail_result.txt
cd ${compile_path}
./build.pl -t clean --ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file
- ./build.pl -t kernel --ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP -m CONFIG_DINGHAI_TSN \
+ ./build.pl -t kernel --ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file -m CONFIG_DINGHAI_ETH -m CONFIG_DINGHAI_PF -m CONFIG_ZXDH_SF -m CONFIG_DINGHAI_EN_AUX -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_DINGHAI_NP -m CONFIG_ZXDH_MSGQ -m CONFIG_ZXDH_1588 -m CONFIG_DINGHAI_PTP \
>>$compile_path/zxdh_kernel_compile.txt 2>$compile_path/zxdh_kernel_compile_fail_result.txt
status=$?
echo "***************print x86 en_pf_compile.txt***************"
@@ -65,7 +64,7 @@ if [ "$compile_stage" = "verifyci" ]; then
./build.pl -t kernel --ksrc /lib/modules/$DPU_zf_aarch64_file \
--cross_compile /opt/aarch64_cgslv6.01_gcc8.3.1_glibc2.28/bin/aarch64-pc-linux-gnu- \
--target_arch aarch64 \
- -m CONFIG_DINGHAI_ZF_MPF -m PCIE_ZF_EPC_OPEN -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_AUXILIARY -m CONFIG_ZXDH_SF -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_ZF_GDMA \
+ -m CONFIG_DINGHAI_ZF_MPF -m PCIE_ZF_EPC_OPEN -m CONFIG_DINGHAI_ETH -m CONFIG_ZXDH_SF -m HAVE_DEVLINK_ALLOC_GET_1_PARAMS -m HAVE_DEV_PM_DOMAIN_ATTACH -m HAVE_BUS_FIND_DEVICE_GET_CONST -m CONFIG_DINGHAI_DH_CMD -m CONFIG_ZF_GDMA \
>>$compile_path/zxdh_kernel_compile.txt 2>$compile_path/zxdh_kernel_compile_fail_result.txt
status=$?
echo "***************print arm zf_mpf_compile.txt***************"
@@ -96,12 +95,9 @@ if [ $compile_stage = "postci" ]; then
cd $root_dir/kernel-src
rm -f *\.tgz
date=$(date +%Y%m%d%H%M%S)
- version=${rpm_ver_no}
- dri_version=${rpm_ver_no}-${date}
-
- pf_tgz_name=$root_dir/kernel-src/zxdh-eth-${version}-${date}.src.tgz
- host_hpf_tgz_name=$root_dir/kernel-src/zxdh-neo-host-hpf-${version}-${date}.src.tgz
- zf_mpf_tgz_name=$root_dir/kernel-src/zxdh-neo-mpf-${version}-${date}.src.tgz
+ pf_tgz_name=$root_dir/kernel-src/zxdh-eth-${build_no}.${chg_no}.${date}.src.tgz
+ host_hpf_tgz_name=$root_dir/kernel-src/zxdh-neo-host-hpf-${build_no}.${chg_no}.${date}.src.tgz
+ zf_mpf_tgz_name=$root_dir/kernel-src/zxdh-neo-mpf-${build_no}.${chg_no}.${date}.src.tgz
tar czvf $pf_tgz_name --exclude=$root_dir/kernel-src $root_dir
result=$?
#判断结果
@@ -117,6 +113,7 @@ if [ $compile_stage = "postci" ]; then
#开始生成rpm包
cd $root_dir
rm -f *\.rpm
+ version=${build_no}.${chg_no}
echo "[NXI/NXE]交叉编译生成host-arm下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_aarch64_file \
@@ -124,16 +121,14 @@ if [ $compile_stage = "postci" ]; then
--target-arch aarch64 \
--dist .cgsl${NXI_Host_CGS_kernel_aarch64:1} \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" --rpm-config-name zxdh-dpu-config \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}" --rpm-config-name zxdh-dpu-config
echo "[NXI/NXE]生成host-x86下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file \
--dist .cgsl${NXI_Host_CGS_kernel_X86:1} \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}"
echo "[DPU]交叉编译生成host-arm下的rpm包,使用[function_hotplug]源码"
./zxdh_hpf_rpm_build.sh \
@@ -141,15 +136,13 @@ if [ $compile_stage = "postci" ]; then
--cross-compile /opt/aarch64_cgslv6.01_gcc8.3.1_glibc2.28/bin/aarch64-pc-linux-gnu- \
--target-arch aarch64 \
--dist .cgsl${NXI_Host_CGS_kernel_aarch64:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --hpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
echo "[DPU]生成host-x86下的rpm包,使用[function_hotplug]源码"
./zxdh_hpf_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file \
--dist .cgsl${NXI_Host_CGS_kernel_X86:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --hpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
echo "[DPU]交叉编译生成zf-arm下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
@@ -159,21 +152,18 @@ if [ $compile_stage = "postci" ]; then
--dist .cgsl${DPU_CGS_kernel_aarch64:1} \
--rpm-driver-name "zxdh-eth" \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}"
echo "[DPU]生成zf-arm下的rpm包,使用[zf_mpf]源码"
./zxdh_zf_mpf_rpm_build.sh \
--ksrc /lib/modules/$DPU_zf_aarch64_file \
--cross-compile /opt/aarch64_cgslv6.01_gcc8.3.1_glibc2.28/bin/aarch64-pc-linux-gnu- \
--dist .cgsl${DPU_CGS_kernel_aarch64:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --zf-mpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
#TODO
echo "查看全部rpm包"
ls
-
exit 0
fi
@@ -192,12 +182,9 @@ if [ $compile_stage = "versionci" ]; then
cd $root_dir/kernel-src
rm -f *\.tgz
date=$(date +%Y%m%d%H%M%S)
- version=${rpm_ver_no}
- dri_version=${rpm_ver_no}-${date}
-
- pf_tgz_name=$root_dir/kernel-src/zxdh-eth-${version}-${date}-daily.src.tgz
- host_hpf_tgz_name=$root_dir/kernel-src/zxdh-neo-host-hpf-${version}-${date}-daily.src.tgz
- zf_mpf_tgz_name=$root_dir/kernel-src/zxdh-neo-mpf-${version}-${date}-daily.src.tgz
+ pf_tgz_name=$root_dir/kernel-src/zxdh-eth-${build_no}.${chg_no}.${date}-daily.src.tgz
+ host_hpf_tgz_name=$root_dir/kernel-src/zxdh-neo-host-hpf-${build_no}.${chg_no}.${date}-daily.src.tgz
+ zf_mpf_tgz_name=$root_dir/kernel-src/zxdh-neo-mpf-${build_no}.${chg_no}.${date}-daily.src.tgz
tar czvf $pf_tgz_name --exclude=$root_dir/kernel-src $root_dir
result=$?
#判断结果
@@ -213,6 +200,7 @@ if [ $compile_stage = "versionci" ]; then
#开始生成rpm包
cd $root_dir
rm -f *\.rpm
+ version=${build_no}.${chg_no}
echo "[NXI/NXE]交叉编译生成host-arm下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_aarch64_file \
@@ -220,16 +208,14 @@ if [ $compile_stage = "versionci" ]; then
--target-arch aarch64 \
--dist .cgsl${NXI_Host_CGS_kernel_aarch64:1} \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" --rpm-config-name zxdh-dpu-config \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}" --rpm-config-name zxdh-dpu-config
echo "[NXI/NXE]生成host-x86下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file \
--dist .cgsl${NXI_Host_CGS_kernel_X86:1} \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}"
echo "[DPU]交叉编译生成host-arm下的rpm包,使用[function_hotplug]源码"
./zxdh_hpf_rpm_build.sh \
@@ -237,15 +223,13 @@ if [ $compile_stage = "versionci" ]; then
--cross-compile /opt/aarch64_cgslv6.01_gcc8.3.1_glibc2.28/bin/aarch64-pc-linux-gnu- \
--target-arch aarch64 \
--dist .cgsl${NXI_Host_CGS_kernel_aarch64:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --hpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
echo "[DPU]生成host-x86下的rpm包,使用[function_hotplug]源码"
./zxdh_hpf_rpm_build.sh \
--ksrc /lib/modules/$NXI_NXE_DPU_host_x86_file \
--dist .cgsl${NXI_Host_CGS_kernel_X86:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --hpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
echo "[DPU]交叉编译生成zf-arm下的rpm包,使用[en_pf]源码"
./zxdh_eth_rpm_build.sh \
@@ -255,16 +239,14 @@ if [ $compile_stage = "versionci" ]; then
--dist .cgsl${DPU_CGS_kernel_aarch64:1} \
--rpm-driver-name "zxdh-eth" \
--rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --rpm-config-version "${version}" --rpm-config-release "${date}" \
- --eth-driver-version "${dri_version}"
+ --rpm-config-version "${version}" --rpm-config-release "${date}"
echo "[DPU]生成zf-arm下的rpm包,使用[zf_mpf]源码"
./zxdh_zf_mpf_rpm_build.sh \
--ksrc /lib/modules/$DPU_zf_aarch64_file \
--cross-compile /opt/aarch64_cgslv6.01_gcc8.3.1_glibc2.28/bin/aarch64-pc-linux-gnu- \
--dist .cgsl${DPU_CGS_kernel_aarch64:1} \
- --rpm-driver-version "${version}" --rpm-driver-release "${date}" \
- --zf-mpf-driver-version "${dri_version}"
+ --rpm-driver-version "${version}" --rpm-driver-release "${date}"
#TODO
echo "查看全部rpm包"
diff --git a/src/net/drivers/base/Makefile b/src/net/drivers/base/Makefile
old mode 100755
new mode 100644
index 4f40a2eed03bdc40f4ebf270086e77d90ef6761f..3457ecf804c8ff764140d1c87b8510194b1c407c
--- a/src/net/drivers/base/Makefile
+++ b/src/net/drivers/base/Makefile
@@ -1,6 +1,9 @@
subdir-ccflags-y += -I$(CWD)/include
subdir-ccflags-y += -include $(CWD)/autoconf.h
-ccflags-y += -Werror
+
+ifeq (${NEED_AUX_BUS},2)
+EXTRA_CFLAGS += -DAUX_BUS_NO_SUPPORT
obj-m += zxdh_auxiliary.o
-zxdh_auxiliary-y += en_auxiliary.o
\ No newline at end of file
+zxdh_auxiliary-y += auxiliary.o
+endif
\ No newline at end of file
diff --git a/src/net/drivers/base/auxiliary.c b/src/net/drivers/base/auxiliary.c
new file mode 100644
index 0000000000000000000000000000000000000000..b1f3e01ddc044fb016408f1c3086bbfef46a856a
--- /dev/null
+++ b/src/net/drivers/base/auxiliary.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef AUX_BUS_NO_SUPPORT
+#include
+#else
+#include
+#endif
+#include
+#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT
+#include "base.h"
+#endif
+
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. SIOV Physical Function export
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices, it is not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules, who share a common header file with the driver, need a mechanism to
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such devices that creates its own one or more class
+ * devices. A PCI sub function auxiliary device is likely to be contained in a
+ * struct with additional attributes such as user defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev; and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain- pecific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is be used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
+static const struct auxiliary_device_id *
+auxiliary_match_id(const struct auxiliary_device_id *id,
+ const struct auxiliary_device *auxdev)
+{
+ for (; id->name[0]; id++) {
+ const char *p = strrchr(dev_name(&auxdev->dev), '.');
+ int32_t match_size;
+
+ if (!p) {
+ continue;
+ }
+ match_size = p - dev_name(&auxdev->dev);
+
+ /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
+ if (strlen(id->name) == match_size &&
+ !strncmp(dev_name(&auxdev->dev), id->name, match_size)) {
+ return id;
+ }
+ }
+
+ return NULL;
+}
+
+static int32_t auxiliary_match(struct device *dev, struct device_driver *drv)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+
+ return !!auxiliary_match_id(auxdrv->id_table, auxdev);
+}
+
+static int32_t auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const char *name;
+ const char *p;
+
+ name = dev_name(dev);
+ p = strrchr(name, '.');
+
+ return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX,
+ (int32_t)(p - name), name);
+}
+
+static const struct dev_pm_ops auxiliary_dev_pm_ops = { SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend, pm_generic_runtime_resume,
+ NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) };
+
+static int32_t auxiliary_bus_probe(struct device *dev)
+{
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ int32_t ret = 0;
+
+#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
+ ret = dev_pm_domain_attach(dev, true);
+
+ /* In case of old kernels 4.17 and below do nothing in case of
+ * failure of ENODEV */
+ if (ret == -ENODEV) {
+ ret = 0;
+ }
+
+ if (ret != 0) {
+ LOG_WARN("Failed to attach to PM Domain : %d\n", ret);
+ return ret;
+ }
+#else
+ acpi_dev_pm_attach(dev, true);
+#endif
+
+ ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
+ if (ret != 0)
+#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
+ dev_pm_domain_detach(dev, true);
+#else
+ acpi_dev_pm_detach(dev, true);
+#endif
+
+ return ret;
+}
+
+#ifdef HAVE_BUS_TYPE_REMOVE_RETURN_VOID
+static void auxiliary_bus_remove(struct device *dev)
+#else
+static int32_t auxiliary_bus_remove(struct device *dev)
+#endif
+{
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ if (auxdrv->remove) {
+ auxdrv->remove(auxdev);
+ }
+#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
+ dev_pm_domain_detach(dev, true);
+#else
+ acpi_dev_pm_detach(dev, true);
+#endif
+
+#ifndef HAVE_BUS_TYPE_REMOVE_RETURN_VOID
+ return 0;
+#endif
+}
+
+static void auxiliary_bus_shutdown(struct device *dev)
+{
+ struct auxiliary_driver *auxdrv = NULL;
+ struct auxiliary_device *auxdev = NULL;
+
+ if (dev->driver) {
+ auxdrv = to_auxiliary_drv(dev->driver);
+ auxdev = to_auxiliary_dev(dev);
+ }
+
+ if (auxdrv && auxdrv->shutdown) {
+ auxdrv->shutdown(auxdev);
+ }
+}
+
+static struct bus_type auxiliary_bus_type = {
+ .name = "auxiliary",
+ .probe = auxiliary_bus_probe,
+ .remove = auxiliary_bus_remove,
+ .shutdown = auxiliary_bus_shutdown,
+ .match = auxiliary_match,
+ .uevent = auxiliary_uevent,
+ .pm = &auxiliary_dev_pm_ops,
+};
+
+/**
+ * auxiliary_device_init - check auxiliary_device and initialize
+ * @auxdev: auxiliary device struct
+ *
+ * This is the second step in the three-step process to register an
+ * auxiliary_device.
+ *
+ * When this function returns an error code, then the device_initialize will
+ * *not* have been performed, and the caller will be responsible to free any
+ * memory allocated for the auxiliary_device in the error path directly.
+ *
+ * It returns 0 on success. On success, the device_initialize has been
+ * performed. After this point any error unwinding will need to include a call
+ * to auxiliary_device_uninit(). In this post-initialize error scenario, a call
+ * to the device's .release callback will be triggered, and all memory clean-up
+ * is expected to be handled there.
+ */
+int32_t auxiliary_device_init(struct auxiliary_device *auxdev)
+{
+ struct device *dev = &auxdev->dev;
+
+ if (!dev->parent) {
+ LOG_ERR("auxiliary_device has a NULL dev->parent\n");
+ return -EINVAL;
+ }
+
+ if (!auxdev->name) {
+ LOG_ERR("auxiliary_device has a NULL name\n");
+ return -EINVAL;
+ }
+
+ dev->bus = &auxiliary_bus_type;
+ device_initialize(&auxdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_init);
+
+/**
+ * __auxiliary_device_add - add an auxiliary bus device
+ * @auxdev: auxiliary bus device to add to the bus
+ * @modname: name of the parent device's driver module
+ *
+ * This is the third step in the three-step process to register an
+ * auxiliary_device.
+ *
+ * This function must be called after a successful call to
+ * auxiliary_device_init(), which will perform the device_initialize. This
+ * means that if this returns an error code, then a call to
+ * auxiliary_device_uninit() must be performed so that the .release callback
+ * will be triggered to free the memory associated with the auxiliary_device.
+ *
+ * The expectation is that users will call the "auxiliary_device_add" macro so
+ * that the caller's KBUILD_MODNAME is automatically inserted for the modname
+ * parameter. Only if a user requires a custom name would this version be
+ * called directly.
+ */
+int32_t __auxiliary_device_add(struct auxiliary_device *auxdev,
+ const char *modname)
+{
+ struct device *dev = &auxdev->dev;
+ int32_t ret = 0;
+
+ if (!modname) {
+ LOG_ERR("auxiliary device modname is NULL\n");
+ return -EINVAL;
+ }
+
+ ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
+ if (ret != 0) {
+ LOG_ERR("auxiliary device dev_set_name failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_add(dev);
+ if (ret != 0) {
+ LOG_ERR("adding auxiliary device failed!: %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__auxiliary_device_add);
+
+/**
+ * auxiliary_find_device - auxiliary device iterator for locating a particular
+ * device.
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This function returns a reference to a device that is 'found'
+ * for later use, as determined by the @match callback.
+ *
+ * The reference returned should be released with put_device().
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+#if defined(HAVE_LINUX_DEVICE_BUS_H) || defined(HAVE_BUS_FIND_DEVICE_GET_CONST)
+struct auxiliary_device *
+auxiliary_find_device(struct device *start, const void *data,
+ int32_t (*match)(struct device *dev, const void *data))
+#else
+struct auxiliary_device *
+auxiliary_find_device(struct device *start, void *data,
+ int32_t (*match)(struct device *dev, void *data))
+#endif /* HAVE_BUS_FIND_DEVICE_GET_CONST || HAVE_LINUX_DEVICE_BUS_H */
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&auxiliary_bus_type, start, data, match);
+ if (dev == NULL) {
+ return NULL;
+ }
+
+ return to_auxiliary_dev(dev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_find_device);
+
+/**
+ * __auxiliary_driver_register - register a driver for auxiliary bus devices
+ * @auxdrv: auxiliary_driver structure
+ * @owner: owning module/driver
+ * @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
+ */
+int32_t __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
+ struct module *owner, const char *modname)
+{
+ int32_t ret = 0;
+
+ if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) {
+ return -EINVAL;
+ }
+
+ if (auxdrv->name) {
+ auxdrv->driver.name =
+ kasprintf(GFP_KERNEL, "%s.%s", modname, auxdrv->name);
+ } else {
+ auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname);
+ }
+ if (!auxdrv->driver.name) {
+ return -ENOMEM;
+ }
+
+ auxdrv->driver.owner = owner;
+ auxdrv->driver.bus = &auxiliary_bus_type;
+ auxdrv->driver.mod_name = modname;
+
+ ret = driver_register(&auxdrv->driver);
+ if (ret) {
+ kfree(auxdrv->driver.name);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
+
+/**
+ * auxiliary_driver_unregister - unregister a driver
+ * @auxdrv: auxiliary_driver structure
+ */
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
+{
+ driver_unregister(&auxdrv->driver);
+ kfree(auxdrv->driver.name);
+}
+EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+
+#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT
+void __init auxiliary_bus_init(void)
+{
+ WARN_ON(bus_register(&auxiliary_bus_type));
+}
+#else
+static int32_t __init auxiliary_bus_init(void)
+{
+ return bus_register(&auxiliary_bus_type);
+}
+
+static void __exit auxiliary_bus_exit(void)
+{
+ bus_unregister(&auxiliary_bus_type);
+}
+
+module_init(auxiliary_bus_init);
+module_exit(auxiliary_bus_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Auxiliary Bus");
+MODULE_INFO(supported, "external");
+MODULE_AUTHOR("David Ertman ");
+MODULE_AUTHOR("Kiran Patil ");
+#endif
diff --git a/src/net/drivers/base/en_auxiliary.c b/src/net/drivers/base/en_auxiliary.c
deleted file mode 100644
index fb7232b8f67f54bc6da3e505ac89eec72602d35c..0000000000000000000000000000000000000000
--- a/src/net/drivers/base/en_auxiliary.c
+++ /dev/null
@@ -1,368 +0,0 @@
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT
-#include "base.h"
-#endif
-
-
-static const struct zxdh_auxiliary_device_id *zxdh_auxiliary_match_id(const struct zxdh_auxiliary_device_id *id,
- const struct zxdh_auxiliary_device *auxdev)
-{
- for (; id->name[0]; id++)
- {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int32_t match_size;
-
- if (!p)
- {
- continue;
- }
- match_size = p - dev_name(&auxdev->dev);
-
- /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
- if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
- {
- return id;
- }
- }
-
- return NULL;
-}
-
-static int32_t zxdh_auxiliary_match(struct device *dev, struct device_driver *drv)
-{
- struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev);
- struct zxdh_auxiliary_driver *auxdrv = zxdh_to_auxiliary_drv(drv);
-
- return !!zxdh_auxiliary_match_id(auxdrv->id_table, auxdev);
-}
-
-static int32_t zxdh_auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- const char *name;
- const char *p;
-
- name = dev_name(dev);
- p = strrchr(name, '.');
-
- return add_uevent_var(env, "MODALIAS=%s%.*s", ZXDH_AUXILIARY_MODULE_PREFIX, (int32_t)(p - name), name);
-}
-
-static const struct dev_pm_ops zxdh_auxiliary_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
-};
-
-static int32_t zxdh_auxiliary_bus_probe(struct device *dev)
-{
- struct zxdh_auxiliary_driver *auxdrv = zxdh_to_auxiliary_drv(dev->driver);
- struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev);
- int32_t ret = 0;
-
-#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
- ret = dev_pm_domain_attach(dev, true);
-
- /* In case of old kernels 4.17 and below do nothing in case of
- * failure of ENODEV */
- if (ret == -ENODEV)
- {
- ret = 0;
- }
-
- if (ret != 0)
- {
- LOG_WARN("Failed to attach to PM Domain : %d\n", ret);
- return ret;
- }
-#else
- acpi_dev_pm_attach(dev, true);
-#endif
-
- ret = auxdrv->probe(auxdev, zxdh_auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret != 0)
-#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
- dev_pm_domain_detach(dev, true);
-#else
- acpi_dev_pm_detach(dev, true);
-#endif
-
- return ret;
-}
-
-#ifdef HAVE_BUS_TYPE_REMOVE_RETURN_VOID
-static void zxdh_auxiliary_bus_remove(struct device *dev)
-#else
-static int32_t zxdh_auxiliary_bus_remove(struct device *dev)
-#endif
-{
- struct zxdh_auxiliary_driver *auxdrv = zxdh_to_auxiliary_drv(dev->driver);
- struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev);
-
- if (auxdrv->remove)
- {
- auxdrv->remove(auxdev);
- }
-#ifdef HAVE_DEV_PM_DOMAIN_ATTACH
- dev_pm_domain_detach(dev, true);
-#else
- acpi_dev_pm_detach(dev, true);
-#endif
-
-#ifndef HAVE_BUS_TYPE_REMOVE_RETURN_VOID
- return 0;
-#endif
-}
-
-static void zxdh_auxiliary_bus_shutdown(struct device *dev)
-{
- struct zxdh_auxiliary_driver *auxdrv = NULL;
- struct zxdh_auxiliary_device *auxdev = NULL;
-
- if (dev->driver)
- {
- auxdrv = zxdh_to_auxiliary_drv(dev->driver);
- auxdev = zxdh_to_auxiliary_dev(dev);
- }
-
- if (auxdrv && auxdrv->shutdown)
- {
- auxdrv->shutdown(auxdev);
- }
-}
-
-static struct bus_type zxdh_auxiliary_bus_type = {
- .name = "zxdh_auxiliary",
- .probe = zxdh_auxiliary_bus_probe,
- .remove = zxdh_auxiliary_bus_remove,
- .shutdown = zxdh_auxiliary_bus_shutdown,
- .match = zxdh_auxiliary_match,
- .uevent = zxdh_auxiliary_uevent,
- .pm = &zxdh_auxiliary_dev_pm_ops,
-};
-
-/**
- * zxdh_auxiliary_device_init - check zxdh_auxiliary_device and initialize
- * @auxdev: auxiliary device struct
- *
- * This is the second step in the three-step process to register an
- * zxdh_auxiliary_device.
- *
- * When this function returns an error code, then the device_initialize will
- * *not* have been performed, and the caller will be responsible to free any
- * memory allocated for the zxdh_auxiliary_device in the error path directly.
- *
- * It returns 0 on success. On success, the device_initialize has been
- * performed. After this point any error unwinding will need to include a call
- * to zxdh_auxiliary_device_uninit(). In this post-initialize error scenario, a call
- * to the device's .release callback will be triggered, and all memory clean-up
- * is expected to be handled there.
- */
-int32_t zxdh_auxiliary_device_init(struct zxdh_auxiliary_device *auxdev)
-{
- struct device *dev = &auxdev->dev;
-
- if (!dev->parent)
- {
- LOG_ERR("zxdh_auxiliary_device has a NULL dev->parent\n");
- return -EINVAL;
- }
-
- if (!auxdev->name)
- {
- LOG_ERR("zxdh_auxiliary_device has a NULL name\n");
- return -EINVAL;
- }
-
- dev->bus = &zxdh_auxiliary_bus_type;
- device_initialize(&auxdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(zxdh_auxiliary_device_init);
-
-/**
- * zxdh_aux_dev_add - add an auxiliary bus device
- * @auxdev: auxiliary bus device to add to the bus
- * @modname: name of the parent device's driver module
- *
- * This is the third step in the three-step process to register an
- * zxdh_auxiliary_device.
- *
- * This function must be called after a successful call to
- * zxdh_auxiliary_device_init(), which will perform the device_initialize. This
- * means that if this returns an error code, then a call to
- * zxdh_auxiliary_device_uninit() must be performed so that the .release callback
- * will be triggered to free the memory associated with the zxdh_auxiliary_device.
- *
- * The expectation is that users will call the "zxdh_auxiliary_device_add" macro so
- * that the caller's KBUILD_MODNAME is automatically inserted for the modname
- * parameter. Only if a user requires a custom name would this version be
- * called directly.
- */
-int32_t zxdh_aux_dev_add(struct zxdh_auxiliary_device *auxdev, const char *modname)
-{
- struct device *dev = &auxdev->dev;
- int32_t ret = 0;
-
- if (!modname)
- {
- LOG_ERR( "zxdh auxiliary device modname is NULL\n");
- return -EINVAL;
- }
-
- ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
- if (ret != 0)
- {
- LOG_ERR( "zxdh auxiliary device dev_set_name failed: %d\n", ret);
- return ret;
- }
-
- ret = device_add(dev);
- if (ret != 0)
- {
- LOG_ERR( "adding zxdh auxiliary device failed!: %d\n", ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(zxdh_aux_dev_add);
-
-/**
- * zxdh_auxiliary_find_device - auxiliary device iterator for locating a particular device.
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This function returns a reference to a device that is 'found'
- * for later use, as determined by the @match callback.
- *
- * The reference returned should be released with put_device().
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-#if defined(HAVE_LINUX_DEVICE_BUS_H) || defined(HAVE_BUS_FIND_DEVICE_GET_CONST)
-struct zxdh_auxiliary_device *
-zxdh_auxiliary_find_device(struct device *start,
- const void *data,
- int32_t (*match)(struct device *dev, const void *data))
-#else
-struct zxdh_auxiliary_device *
-zxdh_auxiliary_find_device(struct device *start,
- void *data,
- int32_t (*match)(struct device *dev, void *data))
-#endif /* HAVE_BUS_FIND_DEVICE_GET_CONST || HAVE_LINUX_DEVICE_BUS_H */
-{
- struct device *dev = NULL;
-
- dev = bus_find_device(&zxdh_auxiliary_bus_type, start, data, match);
- if (dev == NULL)
- {
- return NULL;
- }
-
- return zxdh_to_auxiliary_dev(dev);
-}
-EXPORT_SYMBOL_GPL(zxdh_auxiliary_find_device);
-
-/**
- * zxdh_aux_drv_register - register a driver for auxiliary bus devices
- * @auxdrv: zxdh_auxiliary_driver structure
- * @owner: owning module/driver
- * @modname: KBUILD_MODNAME for parent driver
- *
- * The expectation is that users will call the "zxdh_auxiliary_driver_register"
- * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
- * modname parameter. Only if a user requires a custom name would this version
- * be called directly.
- */
-int32_t zxdh_aux_drv_register(struct zxdh_auxiliary_driver *auxdrv,
- struct module *owner, const char *modname)
-{
- int32_t ret = 0;
-
- if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
- {
- return -EINVAL;
- }
-
- if (auxdrv->name)
- {
- auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, auxdrv->name);
- }
- else
- {
- auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname);
- }
- if (!auxdrv->driver.name)
- {
- return -ENOMEM;
- }
-
- auxdrv->driver.owner = owner;
- auxdrv->driver.bus = &zxdh_auxiliary_bus_type;
- auxdrv->driver.mod_name = modname;
-
- ret = driver_register(&auxdrv->driver);
- if (ret)
- {
- kfree(auxdrv->driver.name);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(zxdh_aux_drv_register);
-
-/**
- * zxdh_auxiliary_driver_unregister - unregister a driver
- * @auxdrv: zxdh_auxiliary_driver structure
- */
-void zxdh_auxiliary_driver_unregister(struct zxdh_auxiliary_driver *auxdrv)
-{
- driver_unregister(&auxdrv->driver);
- kfree(auxdrv->driver.name);
-}
-EXPORT_SYMBOL_GPL(zxdh_auxiliary_driver_unregister);
-
-#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT
-void __init zxdh_auxiliary_bus_init(void)
-{
- WARN_ON(bus_register(&zxdh_auxiliary_bus_type));
-}
-#else
-static int32_t __init zxdh_auxiliary_bus_init(void)
-{
- return bus_register(&zxdh_auxiliary_bus_type);
-}
-
-static void __exit zxdh_auxiliary_bus_exit(void)
-{
- bus_unregister(&zxdh_auxiliary_bus_type);
-}
-
-module_init(zxdh_auxiliary_bus_init);
-module_exit(zxdh_auxiliary_bus_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Auxiliary Bus");
-MODULE_INFO(supported, "external");
-MODULE_AUTHOR("David Ertman ");
-MODULE_AUTHOR("Kiran Patil ");
-#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/Kconfig b/src/net/drivers/net/ethernet/dinghai/Kconfig
old mode 100755
new mode 100644
diff --git a/src/net/drivers/net/ethernet/dinghai/Makefile b/src/net/drivers/net/ethernet/dinghai/Makefile
old mode 100755
new mode 100644
index 2f40270519ad16d50e0cbf0fd151457229f53afe..4e5173101c4a5121b458300df9e6599be4ee2417
--- a/src/net/drivers/net/ethernet/dinghai/Makefile
+++ b/src/net/drivers/net/ethernet/dinghai/Makefile
@@ -1,8 +1,10 @@
subdir-ccflags-y += -I$(src)
subdir-ccflags-y += -I$(CWD)/include/
subdir-ccflags-y += -include $(CWD)/autoconf.h
-ccflags-y += -Werror
+ifeq (${NEED_AUX_BUS},2)
+EXTRA_CFLAGS += -DAUX_BUS_NO_SUPPORT
+endif
ifeq ($(CONFIG_ZXDH_MSGQ),m)
EXTRA_CFLAGS += -DZXDH_MSGQ
@@ -12,14 +14,6 @@ ifeq ($(CONFIG_DINGHAI_SEC),m)
EXTRA_CFLAGS += -DZXDH_SEC
endif
-ifeq ($(CONFIG_DRIVER_VERSION),)
-EXTRA_CFLAGS += -DDRIVER_VERSION_VAL=\"1.0-1\"
-$(info CONFIG_PF_MPF_DRIVER_VERSION is null, EXTRA_CFLAGS=$(EXTRA_CFLAGS))
-else
-EXTRA_CFLAGS += -DDRIVER_VERSION_VAL=\"$(CONFIG_DRIVER_VERSION)\"
-$(info CONFIG_PF_MPF_DRIVER_VERSION is not null, EXTRA_CFLAGS=$(EXTRA_CFLAGS))
-endif
-
obj-$(CONFIG_DINGHAI_DH_CMD) += zxdh_cmd.o
zxdh_cmd-y := dh_cmd.o cmd/msg_main.o cmd/msg_chan_lock.o cmd/msg_chan_test.o
@@ -34,14 +28,14 @@ ifdef PCIE_ZF_EPC_OPEN
zxdh_zf_mpf-y := events.o eq.o pci_irq.o devlink.o \
zf_mpf/zf_mpf.o zf_mpf/zf_events.o zf_mpf/irq.o zf_mpf/eq.o zf_mpf/devlink.o zf_mpf/cfg_sf.o \
zf_mpf/epc/pcie-zte-zf-epc.o zf_mpf/epc/pcie-zte-zf-hdma.o zf_mpf/epc/virt-dma.o zf_mpf/zxdh_reset_zf.o \
- zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.o zf_mpf/fuc_hotplug/fuc_hotplug.o zf_mpf/zf_reset_finish_flag.o zf_mpf/zf_chan_ioctl.o
+ zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.o zf_mpf/fuc_hotplug/fuc_hotplug.o zf_mpf/zf_reset_finish_flag.o
obj-m += zxdh_zf_epf.o
my_api-objs := zf_mpf/epc/pcie-zte-zf-epc.o
zxdh_zf_epf-y := zf_mpf/epf/pcie-zte-zf-epf.o
else
zxdh_zf_mpf-y := events.o eq.o pci_irq.o devlink.o \
zf_mpf/zf_mpf.o zf_mpf/zf_events.o zf_mpf/irq.o zf_mpf/eq.o zf_mpf/devlink.o zf_mpf/cfg_sf.o zf_mpf/zxdh_reset_zf.o \
- zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.o zf_mpf/fuc_hotplug/fuc_hotplug.o zf_mpf/zf_reset_finish_flag.o zf_mpf/zf_chan_ioctl.o
+ zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.o zf_mpf/fuc_hotplug/fuc_hotplug.o zf_mpf/zf_reset_finish_flag.o
endif
zxdh_zf_mpf-$(CONFIG_ZXDH_SF) += irq_affinity.o en_sf.o en_sf/eq.o en_sf/irq.o en_sf/devlink.o
zxdh_zf_mpf-$(CONFIG_ZF_GDMA) += zf_mpf/gdma.o
@@ -55,17 +49,15 @@ zxdh_pf-$(CONFIG_ZXDH_SF) += irq_affinity.o en_sf.o en_sf/eq.o en_sf/irq.o en_sf
obj-$(CONFIG_DINGHAI_EN_AUX) += zxdh_en_aux.o
zxdh_en_aux-y := en_aux.o eq.o pci_irq.o irq_affinity.o en_aux/queue.o en_aux/en_cmd.o en_aux/eq.o \
en_aux/events.o en_ethtool/ethtool.o en_aux/en_ioctl.o \
- en_aux/dcbnl/en_dcbnl.o en_aux/dcbnl/en_dcbnl_api.o \
+ en_aux/dcbnl/en_dcbnl.o en_aux/dcbnl/en_dcbnl_api.o en_plcr.o \
zxdh_tools/zxdh_tools_ioctl.o zxdh_tools/zxdh_tools_netlink.o
zxdh_en_aux-$(CONFIG_ZXDH_1588) += en_aux/en_1588_pkt_proc.o en_aux/en_1588_pkt_proc_func.o
zxdh_en_aux-$(CONFIG_DINGHAI_SEC) += en_aux/drs_sec_dtb.o
zxdh_en_aux-$(CONFIG_ZXDH_MSGQ) += en_aux/priv_queue.o
obj-$(CONFIG_DINGHAI_PTP) += zxdh_ptp.o
-zxdh_ptp-y :=en_ptp/tod_driver.o en_ptp/tod_driver_stub.o en_ptp/zxdh_ptp.o
-obj-$(CONFIG_DINGHAI_TSN) += zxdh_tsn.o
-zxdh_tsn-y :=en_tsn/zxdh_tsn.o en_tsn/zxdh_tsn_reg.o en_tsn/zxdh_tsn_ioctl.o
+zxdh_ptp-y :=en_ptp/tod_driver.o en_ptp/tod_driver_stub.o en_ptp/zxdh_ptp.o
include $(CWD)/drivers/net/ethernet/dinghai/en_np/Makefile
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd.c b/src/net/drivers/net/ethernet/dinghai/cmd.c
old mode 100755
new mode 100644
index a8e46fc19f2e625ba93207c46a4848529998e0db..fc2e8ae861f4c3d088ff11f179bcb2eb355ddef1
--- a/src/net/drivers/net/ethernet/dinghai/cmd.c
+++ b/src/net/drivers/net/ethernet/dinghai/cmd.c
@@ -1,35 +1,38 @@
-#include
-#include
-#include
-#include
-
-static int32_t cmd_status_err(struct dh_core_dev *dev, int32_t err, uint16_t opcode, void *out)
-{
- u8 status = DH_GET(mbox_out, out, status);
-
- return err;
-}
-static int32_t cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size, void *out,
- int32_t out_size, zxdh_cmd_cbk_t callback, void *context,
- bool force_polling)
-{
- return 0;
-}
-
-int32_t zxdh_cmd_do(struct dh_core_dev *dev, void *in, int32_t in_size, void *out, int32_t out_size)
-{
- int32_t err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- uint16_t opcode = DH_GET(mbox_in, in, opcode);
-
- err = cmd_status_err(dev, err, opcode, out);
-
- return err;
-}
-EXPORT_SYMBOL(zxdh_cmd_do);
-
-int32_t zxdh_cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size, void *out, int32_t out_size)
-{
- int32_t err = zxdh_cmd_do(dev, in, in_size, out, out_size);
-
- return zxdh_cmd_check(dev, err, in, out);
+#include
+#include
+#include
+#include
+
+static int32_t cmd_status_err(struct dh_core_dev *dev, int32_t err,
+ uint16_t opcode, void *out)
+{
+ u8 status = DH_GET(mbox_out, out, status);
+
+ return err;
+}
+static int32_t cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size,
+ void *out, int32_t out_size, zxdh_cmd_cbk_t callback,
+ void *context, bool force_polling)
+{
+ return 0;
+}
+
+int32_t zxdh_cmd_do(struct dh_core_dev *dev, void *in, int32_t in_size,
+ void *out, int32_t out_size)
+{
+ int32_t err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+ uint16_t opcode = DH_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+
+ return err;
+}
+EXPORT_SYMBOL(zxdh_cmd_do);
+
+int32_t zxdh_cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size,
+ void *out, int32_t out_size)
+{
+ int32_t err = zxdh_cmd_do(dev, in, in_size, out, out_size);
+
+ return zxdh_cmd_check(dev, err, in, out);
}
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c
index daf77d40b2072ae6909220c9bc19e868c6dcee13..541c91a17b363df3728215751481c9a68b3c710c 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c
@@ -1,6 +1,7 @@
#include
#include "msg_chan_lock.h"
#include "msg_chan_priv.h"
+
/*****************************************
[src/dst]时应该将消息发到硬件锁还是软件所
src/dst: TO_RISC, TO_PFVF, TO_MPF
@@ -10,13 +11,12 @@ VF: 0 0 1
******************************************/
/*/PF0-7 DIRECT_CHNA/(PF0)VF0-VF32/(PF1)VF0-VF32/...*/
-struct mutex lock_array[LOCK_ARR_LENGTH] = {0};
+struct mutex lock_array[LOCK_ARR_LENGTH] = { 0 };
-uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] =
-{
- {LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD},
- {LOCK_TYPE_SOFT, LOCK_TYPE_SOFT, LOCK_TYPE_HARD},
- {LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD}
+uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+ { LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD },
+ { LOCK_TYPE_SOFT, LOCK_TYPE_SOFT, LOCK_TYPE_HARD },
+ { LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD }
};
/**
@@ -24,79 +24,73 @@ uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] =
* @src_pcieid: pcie_id
* @result: 软件数组的索引值
*/
-uint16_t pcieid_to_lockid(uint16_t src_pcieid, uint8_t dst)
+uint16_t pcieid_to_lockid(uint16_t src_pcieid, uint8_t dst)
{
- uint16_t lock_id = 0;
- uint16_t pf_idx = 0;
- uint16_t vf_idx = 0;
- uint16_t ep_idx = 0;
-
- pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET;
- vf_idx = (src_pcieid & PCIEID_VF_IDX_MASK);
- ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET ;
- switch (dst)
- {
- case MSG_CHAN_END_RISC:
- {
- if (src_pcieid & PCIEID_IS_PF_MASK)
- {
- lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx;
- }
- else
- {
- lock_id = MULTIPLY_BY_256(ep_idx) + MULTIPLY_BY_32(pf_idx) + vf_idx + MULTIPLY_BY_32(1);
- }
- break;
- }
- case MSG_CHAN_END_VF:
- {
- lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + MULTIPLY_BY_32(1 + VF_NUM_PER_PF);
- break;
- }
- case MSG_CHAN_END_PF:
- {
- lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + MULTIPLY_BY_32(2 + VF_NUM_PER_PF);
- break;
- }
- default:
- {
- lock_id = 0;
- break;
- }
- }
-
- if (lock_id >= LOCK_ARR_LENGTH)
- {
- lock_id = 0;
- }
-
- return lock_id;
+ uint16_t lock_id = 0;
+ uint16_t pf_idx = 0;
+ uint16_t vf_idx = 0;
+ uint16_t ep_idx = 0;
+
+ pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET;
+ vf_idx = (src_pcieid & PCIEID_VF_IDX_MASK);
+ ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET;
+ switch (dst) {
+ case MSG_CHAN_END_RISC: {
+ if (src_pcieid & PCIEID_IS_PF_MASK) {
+ lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx;
+ } else {
+ lock_id = MULTIPLY_BY_256(ep_idx) + MULTIPLY_BY_32(pf_idx) +
+ vf_idx + MULTIPLY_BY_32(1);
+ }
+ break;
+ }
+ case MSG_CHAN_END_VF: {
+ lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx +
+ MULTIPLY_BY_32(1 + VF_NUM_PER_PF);
+ break;
+ }
+ case MSG_CHAN_END_PF: {
+ lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx +
+ MULTIPLY_BY_32(2 + VF_NUM_PER_PF);
+ break;
+ }
+ default: {
+ lock_id = 0;
+ break;
+ }
+ }
+
+ if (lock_id >= LOCK_ARR_LENGTH) {
+ lock_id = 0;
+ }
+
+ return lock_id;
}
void bar_soft_lock(uint16_t src_pcieid, uint8_t dst)
{
- uint16_t lockid = 0;
+ uint16_t lockid = 0;
- lockid = pcieid_to_lockid(src_pcieid, dst);
- mutex_lock(&lock_array[lockid]);
+ lockid = pcieid_to_lockid(src_pcieid, dst);
+ mutex_lock(&lock_array[lockid]);
}
void bar_soft_unlock(uint16_t src_pcieid, uint8_t dst)
{
- uint16_t lockid = 0;
+ uint16_t lockid = 0;
- lockid = pcieid_to_lockid(src_pcieid, dst);
- mutex_unlock(&lock_array[lockid]);
+ lockid = pcieid_to_lockid(src_pcieid, dst);
+ mutex_unlock(&lock_array[lockid]);
}
void bar_hard_lock(void)
{
- return;
+ return;
}
void bar_hard_unlock(void)
{
- return;
+ return;
}
/**
@@ -105,12 +99,11 @@ void bar_hard_unlock(void)
*/
void bar_init_lock_arr(void)
{
- int idx = 0;
+ int idx = 0;
- for (idx = 0; idx < ARR_LEN(lock_array); idx++)
- {
- mutex_init(&lock_array[idx]);
- }
+ for (idx = 0; idx < ARR_LEN(lock_array); idx++) {
+ mutex_init(&lock_array[idx]);
+ }
}
/**
@@ -122,28 +115,24 @@ void bar_init_lock_arr(void)
*/
int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid)
{
- uint16_t idx = 0;
- uint8_t src_index = 0;
- uint8_t dst_index = 0;
-
- src_index = bar_msg_row_index_trans(src);
- dst_index = bar_msg_col_index_trans(dst);
- if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR)
- {
- BAR_LOG_ERR("lock ERR: chan doesn't exist.\n");
- return BAR_MSG_ERR_TYPE;
- }
- idx = lock_type_tbl[src_index][dst_index];
- if (idx == LOCK_TYPE_SOFT)
- {
- bar_soft_lock(src_pcieid, dst);
- }
- else
- {
- bar_hard_lock();
- BAR_LOG_INFO("hard_lock.\n");
- }
- return BAR_MSG_OK;
+ uint16_t idx = 0;
+ uint8_t src_index = 0;
+ uint8_t dst_index = 0;
+
+ src_index = __bar_msg_row_index_trans(src);
+ dst_index = __bar_msg_col_index_trans(dst);
+ if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+ BAR_LOG_ERR("lock ERR: chan doesn't exist.\n");
+ return BAR_MSG_ERR_TYPE;
+ }
+ idx = lock_type_tbl[src_index][dst_index];
+ if (idx == LOCK_TYPE_SOFT) {
+ bar_soft_lock(src_pcieid, dst);
+ } else {
+ bar_hard_lock();
+ BAR_LOG_INFO("hard_lock.\n");
+ }
+ return BAR_MSG_OK;
}
/**
@@ -155,25 +144,21 @@ int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid)
*/
int bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid)
{
- uint16_t idx = 0;
- uint8_t src_index = 0;
- uint8_t dst_index = 0;
-
- src_index = bar_msg_row_index_trans(src);
- dst_index = bar_msg_col_index_trans(dst);
- if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR)
- {
- BAR_LOG_ERR("unlock ERR: chan doesn't exist.\n");
- return BAR_MSG_ERR_TYPE;
- }
- idx = lock_type_tbl[src_index][dst_index];
- if (idx == LOCK_TYPE_SOFT)
- {
- bar_soft_unlock(src_pcieid, dst);
- }
- else
- {
- bar_hard_unlock();
- }
- return BAR_MSG_OK;
+ uint16_t idx = 0;
+ uint8_t src_index = 0;
+ uint8_t dst_index = 0;
+
+ src_index = __bar_msg_row_index_trans(src);
+ dst_index = __bar_msg_col_index_trans(dst);
+ if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+ BAR_LOG_ERR("unlock ERR: chan doesn't exist.\n");
+ return BAR_MSG_ERR_TYPE;
+ }
+ idx = lock_type_tbl[src_index][dst_index];
+ if (idx == LOCK_TYPE_SOFT) {
+ bar_soft_unlock(src_pcieid, dst);
+ } else {
+ bar_hard_unlock();
+ }
+ return BAR_MSG_OK;
}
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h
index beea9b0406170750ff71d0534f456a4d5653f514..93f6ad5aaddb21eec117216e2a9227bf266774a5 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h
@@ -5,35 +5,32 @@
extern "C" {
#endif
-#define ARR_LEN(arr) (sizeof(arr)/sizeof(arr[0]))
+#define ARR_LEN(arr) (sizeof(arr) / sizeof(arr[0]))
/* PCIEID位域掩码*/
-#define PCIEID_IS_PF_MASK (0x0800)
-#define PCIEID_PF_IDX_MASK (0x0700)
-#define PCIEID_VF_IDX_MASK (0x00ff)
-#define PCIEID_EP_IDX_MASK (0x7000)
-#define PF0_PCIEID (0x0800)
+#define PCIEID_IS_PF_MASK (0x0800)
+#define PCIEID_PF_IDX_MASK (0x0700)
+#define PCIEID_VF_IDX_MASK (0x00ff)
+#define PCIEID_EP_IDX_MASK (0x7000)
+#define PF0_PCIEID (0x0800)
/* PCIEID位域偏移*/
-#define PCIEID_PF_IDX_OFFSET (8)
-#define PCIEID_EP_IDX_OFFSET (12)
-
-
-
+#define PCIEID_PF_IDX_OFFSET (8)
+#define PCIEID_EP_IDX_OFFSET (12)
/* 硬件锁软件锁*/
-#define LOCK_TYPE_HARD 0
-#define LOCK_TYPE_SOFT 0
+#define LOCK_TYPE_HARD 0
+#define LOCK_TYPE_SOFT 0
-#define MAX_EP_NUM 4
-#define PF_NUM_PER_EP 8
-#define VF_NUM_PER_PF 32
+#define MAX_EP_NUM 4
+#define PF_NUM_PER_EP 8
+#define VF_NUM_PER_PF 32
-#define MULTIPLY_BY_8(x) ((x) << 3)
-#define MULTIPLY_BY_32(x) ((x) << 5)
+#define MULTIPLY_BY_8(x) ((x) << 3)
+#define MULTIPLY_BY_32(x) ((x) << 5)
#define MULTIPLY_BY_256(x) ((x) << 8)
-#define LOCK_ARR_LENGTH (MAX_EP_NUM * PF_NUM_PER_EP * (3 + VF_NUM_PER_PF))
+#define LOCK_ARR_LENGTH (MAX_EP_NUM * PF_NUM_PER_EP * (3 + VF_NUM_PER_PF))
void bar_init_lock_arr(void);
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h
index 1533ef54d6ed770e9621b4989598e6df0e0f2919..4521a721bcf77ca986e382b264b84f294914e7f3 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h
@@ -11,78 +11,79 @@ extern "C" {
#include
#include
#include
+
/* */
-#define BAR_KFREE_PTR(ptr) { \
- if (ptr != NULL) \
- { \
- kfree(ptr); \
- } \
- ptr = NULL; \
-}
+#define BAR_KFREE_PTR(ptr) \
+ { \
+ if (ptr != NULL) { \
+ kfree(ptr); \
+ } \
+ ptr = NULL; \
+ }
-#define BAR_LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_CMD, fmt, ##arg);
-#define BAR_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_CMD, fmt, ##arg);
-#define BAR_LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_CMD, fmt, ##arg);
-#define BAR_LOG_WARN(fmt, arg...) DH_LOG_WARNING(MODULE_CMD, fmt, ##arg);
+#define BAR_LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_CMD, fmt, ##arg);
+#define BAR_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_CMD, fmt, ##arg);
+#define BAR_LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_CMD, fmt, ##arg);
+#define BAR_LOG_WARN(fmt, arg...) DH_LOG_WARNING(MODULE_CMD, fmt, ##arg);
-#define HOST_OR_ZX 0
+#define HOST_OR_ZX 0
-#define MAX_MSG_BUFF_NUM 0xffff
+#define MAX_MSG_BUFF_NUM 0xffff
-#define BAR_ALIGN_WORD_MASK 0xffffffc
-#define BAR_MSG_ADDR_CHAN_INTERVAL (1024*2)
+#define BAR_ALIGN_WORD_MASK 0xffffffc
+#define BAR_MSG_ADDR_CHAN_INTERVAL (1024 * 2)
/* 消息类型*/
-#define BAR_CHAN_MSG_SYNC 0
-#define BAR_CHAN_MSG_ASYNC 1
-#define BAR_CHAN_MSG_NO_EMEC 0
-#define BAR_CHAN_MSG_EMEC 1
-#define BAR_CHAN_MSG_NO_ACK 0
-#define BAR_CHAN_MSG_ACK 1
+#define BAR_CHAN_MSG_SYNC 0
+#define BAR_CHAN_MSG_ASYNC 1
+#define BAR_CHAN_MSG_NO_EMEC 0
+#define BAR_CHAN_MSG_EMEC 1
+#define BAR_CHAN_MSG_NO_ACK 0
+#define BAR_CHAN_MSG_ACK 1
/* payload, valid和内容的偏移*/
-#define BAR_MSG_PLAYLOAD_OFFSET (sizeof(struct bar_msg_header))
-#define BAR_MSG_LEN_OFFSET 2
-#define BAR_MSG_VALID_OFFSET 0
+#define BAR_MSG_PLAYLOAD_OFFSET (sizeof(struct bar_msg_header))
+#define BAR_MSG_LEN_OFFSET 2
+#define BAR_MSG_VALID_OFFSET 0
/* valid字段的掩码*/
-#define BAR_MSG_VALID_MASK 1
+#define BAR_MSG_VALID_MASK 1
/* reps_buff的偏移*/
-#define REPS_HEADER_VALID_OFFSET 0
-#define REPS_HEADER_LEN_OFFSET 1
-#define REPS_HEADER_PAYLOAD_OFFSET 4
+#define REPS_HEADER_VALID_OFFSET 0
+#define REPS_HEADER_LEN_OFFSET 1
+#define REPS_HEADER_PAYLOAD_OFFSET 4
-#define REPS_HEADER_REPLYED 0xff
+#define REPS_HEADER_REPLYED 0xff
/* 通道状态*/
-#define BAR_MSG_CHAN_USABLE 0
-#define BAR_MSG_CHAN_USED 1
+#define BAR_MSG_CHAN_USABLE 0
+#define BAR_MSG_CHAN_USED 1
/* 超时时间 = 100 us *30000次轮询 = 3s*/
-#define BAR_MSG_POLLING_SPAN_US 100
-#define BAR_MSG_TIMEOUT_TH 30000
+#define BAR_MSG_POLLING_SPAN_US 100
+#define BAR_MSG_TIMEOUT_TH 30000
/* vf,pf,mpf总数*/
-#define BAR_DRIVER_TOTAL_NUM (BAR_MPF_NUM + BAR_PF_NUM + BAR_VF_NUM)
+#define BAR_DRIVER_TOTAL_NUM (BAR_MPF_NUM + BAR_PF_NUM + BAR_VF_NUM)
/* bar的通道偏移*/
-#define BAR_INDEX_TO_RISC 0
-#define BAR_MPF_NUM 1
+#define BAR_INDEX_TO_RISC 0
+#define BAR_MPF_NUM 1
/* 定时器周期宏*/
-#define BAR_MSGID_FREE_THRESHOLD (jiffies + msecs_to_jiffies(2000))
+#define BAR_MSGID_FREE_THRESHOLD (jiffies + msecs_to_jiffies(2000))
/* 管理pf信息*/
-#define BAR_MSG_OFFSET (0x2000)
-#define MPF_VENDOR_ID (0x16c3)
-#define MPF_DEVICE_ID (0x8045)
+#define BAR_MSG_OFFSET (0x2000)
+#define MPF_VENDOR_ID (0x16c3)
+#define MPF_DEVICE_ID (0x8045)
enum {
- TYPE_SEND_NP = 0x0,
- TYPE_SEND_DRS = 0x01,
- TYPE_SEND_DTP = 0x10,
- TYPE_END,
+ TYPE_SEND_NP = 0x0,
+ TYPE_SEND_DRS = 0x01,
+ TYPE_SEND_DTP = 0x10,
+ TYPE_END,
};
/**************************************************************************
@@ -92,203 +93,240 @@ enum {
* 3、智能网卡带ddr: SCENE_NIC_WITH_DDR
* 4、智能网卡不带ddr: SCENE_NIC_NO_DDR
* 5、普卡: SCENE_STD_NIC
-**************************************************************************/
+ **************************************************************************/
#define SCENE_TEST
#ifdef SCENE_HOST_IN_DPU
-#define BAR_PF_NUM 31
-#define BAR_VF_NUM 1024
-#define BAR_INDEX_PF_TO_VF 1
-#define BAR_INDEX_MPF_TO_MPF 1
-#define BAR_INDEX_MPF_TO_PFVF 0xff
-#define BAR_INDEX_PFVF_TO_MPF 0xff
+#define BAR_PF_NUM 31
+#define BAR_VF_NUM 1024
+#define BAR_INDEX_PF_TO_VF 1
+#define BAR_INDEX_MPF_TO_MPF 1
+#define BAR_INDEX_MPF_TO_PFVF 0xff
+#define BAR_INDEX_PFVF_TO_MPF 0xff
#endif
-#ifdef SCENE_ZF_IN_DPU
-#define BAR_PF_NUM 7
-#define BAR_VF_NUM 128
-#define BAR_INDEX_PF_TO_VF 0xff
-#define BAR_INDEX_MPF_TO_MPF 1
-#define BAR_INDEX_MPF_TO_PFVF 0xff
-#define BAR_INDEX_PFVF_TO_MPF 0xff
+#ifdef SCENE_ZF_IN_DPU
+#define BAR_PF_NUM 7
+#define BAR_VF_NUM 128
+#define BAR_INDEX_PF_TO_VF 0xff
+#define BAR_INDEX_MPF_TO_MPF 1
+#define BAR_INDEX_MPF_TO_PFVF 0xff
+#define BAR_INDEX_PFVF_TO_MPF 0xff
#endif
-#ifdef SCENE_NIC_WITH_DDR
-#define BAR_PF_NUM 31
-#define BAR_VF_NUM 1024
-#define BAR_INDEX_PF_TO_VF 1
-#define BAR_INDEX_MPF_TO_MPF 0xff
-#define BAR_INDEX_MPF_TO_PFVF 0xff
-#define BAR_INDEX_PFVF_TO_MPF 0xff
+#ifdef SCENE_NIC_WITH_DDR
+#define BAR_PF_NUM 31
+#define BAR_VF_NUM 1024
+#define BAR_INDEX_PF_TO_VF 1
+#define BAR_INDEX_MPF_TO_MPF 0xff
+#define BAR_INDEX_MPF_TO_PFVF 0xff
+#define BAR_INDEX_PFVF_TO_MPF 0xff
#endif
-#ifdef SCENE_NIC_NO_DDR
-#define BAR_PF_NUM 31
-#define BAR_VF_NUM 1024
-#define BAR_INDEX_PF_TO_VF 1
-#define BAR_INDEX_MPF_TO_MPF 0xff
-#define BAR_INDEX_MPF_TO_PFVF 1
-#define BAR_INDEX_PFVF_TO_MPF 2
+#ifdef SCENE_NIC_NO_DDR
+#define BAR_PF_NUM 31
+#define BAR_VF_NUM 1024
+#define BAR_INDEX_PF_TO_VF 1
+#define BAR_INDEX_MPF_TO_MPF 0xff
+#define BAR_INDEX_MPF_TO_PFVF 1
+#define BAR_INDEX_PFVF_TO_MPF 2
#endif
-#ifdef SCENE_STD_NIC
-#define BAR_PF_NUM 7
-#define BAR_VF_NUM 256
-#define BAR_INDEX_PF_TO_VF 1
-#define BAR_INDEX_MPF_TO_MPF 0xff
-#define BAR_INDEX_MPF_TO_PFVF 1
-#define BAR_INDEX_PFVF_TO_MPF 2
+#ifdef SCENE_STD_NIC
+#define BAR_PF_NUM 7
+#define BAR_VF_NUM 256
+#define BAR_INDEX_PF_TO_VF 1
+#define BAR_INDEX_MPF_TO_MPF 0xff
+#define BAR_INDEX_MPF_TO_PFVF 1
+#define BAR_INDEX_PFVF_TO_MPF 2
#endif
-#ifdef SCENE_TEST
-#define BAR_PF_NUM 7
-#define BAR_VF_NUM 256
-#define BAR_INDEX_PF_TO_VF 0
-#define BAR_INDEX_MPF_TO_MPF 0xff
-#define BAR_INDEX_MPF_TO_PFVF 0
-#define BAR_INDEX_PFVF_TO_MPF 0
+#ifdef SCENE_TEST
+#define BAR_PF_NUM 7
+#define BAR_VF_NUM 256
+#define BAR_INDEX_PF_TO_VF 0
+#define BAR_INDEX_MPF_TO_MPF 0xff
+#define BAR_INDEX_MPF_TO_PFVF 0
+#define BAR_INDEX_PFVF_TO_MPF 0
#endif
/* 左边通道还是右边通道*/
-#define BAR_SUBCHAN_INDEX_SEND 0
-#define BAR_SUBCHAN_INDEX_RECV 1
+#define BAR_SUBCHAN_INDEX_SEND 0
+#define BAR_SUBCHAN_INDEX_RECV 1
/* 消息源索引*/
-#define BAR_MSG_SRC_NUM 3
-#define BAR_MSG_SRC_MPF 0
-#define BAR_MSG_SRC_PF 1
-#define BAR_MSG_SRC_VF 2
-#define BAR_MSG_SRC_ERR 0xff
+#define BAR_MSG_SRC_NUM 3
+#define BAR_MSG_SRC_MPF 0
+#define BAR_MSG_SRC_PF 1
+#define BAR_MSG_SRC_VF 2
+#define BAR_MSG_SRC_ERR 0xff
/* 消息目的索引*/
-#define BAR_MSG_DST_NUM 3
-#define BAR_MSG_DST_RISC 0
-#define BAR_MSG_DST_MPF 2
-#define BAR_MSG_DST_PFVF 1
-#define BAR_MSG_DST_ERR 0xff
+#define BAR_MSG_DST_NUM 3
+#define BAR_MSG_DST_RISC 0
+#define BAR_MSG_DST_MPF 2
+#define BAR_MSG_DST_PFVF 1
+#define BAR_MSG_DST_ERR 0xff
/* msg_id项标志位状态*/
-#define REPS_INFO_FLAG_USABLE 0
-#define REPS_INFO_FLAG_USED 1
-
-#define BAR_MSG_PAYLOAD_MAX_LEN (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct bar_msg_header))
+#define REPS_INFO_FLAG_USABLE 0
+#define REPS_INFO_FLAG_USED 1
-#define BAR_MSG_POL_MASK (0x10)
-#define BAR_MSG_POL_OFFSET (4)
+#define BAR_MSG_PAYLOAD_MAX_LEN \
+ (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct bar_msg_header))
-enum {
- CHECK_STATE_OK = 0,
- CHECK_STATE_EVENT_EXCEED = 1,
- CHECK_STATE_EVENT_NOT_EXIST = 2,
- CHECK_STATE_EVENT_ERR_RET = 4,
- CHECK_STATE_EVENT_ERR_REPS_LEN = 5,
-};
+#define BAR_MSG_POL_MASK (0x10)
+#define BAR_MSG_POL_OFFSET (4)
-struct zxdh_pcie_bar_msg_internal
-{
- uint32_t id; /**< the msg id that passing through */
- uint64_t virt_addr; /**< pcie bar mapping virtual addr */
+struct zxdh_pcie_bar_msg_internal {
+ uint32_t id; /**< the msg id that passing through */
+ uint64_t virt_addr; /**< pcie bar mapping virtual addr */
};
/* bar通道消息头 */
-struct bar_msg_header
-{
- uint8_t valid: 1; /* 消息通道状态 */
- uint8_t sync: 1; /* 同步消息or异步消息*/
- uint8_t emec: 1; /* 消息是否紧急 */
- uint8_t ack: 1; /* 是否是回复消息*/
- uint8_t poll: 1;
- uint8_t check;
- uint16_t event_id; /* 请求的消息处理函数标识 */
- uint16_t len; /* 消息长度 */
- uint16_t msg_id; /* 消息id*/
- uint16_t src_pcieid;
- uint16_t dst_pcieid; /* 用于pf给vf发消息*/
+struct bar_msg_header {
+ uint8_t valid : 1; /* 消息通道状态 */
+ uint8_t sync : 1; /* 同步消息or异步消息*/
+ uint8_t emec : 1; /* 消息是否紧急 */
+ uint8_t ack : 1; /* 是否是回复消息*/
+ uint8_t poll : 1;
+ uint8_t rsv;
+ uint16_t event_id; /* 请求的消息处理函数标识 */
+ uint16_t len; /* 消息长度 */
+ uint16_t msg_id; /* 消息id*/
+ uint16_t src_pcieid;
+ uint16_t dst_pcieid; /* 用于pf给vf发消息*/
};
/* 根据消息的msgid查询回复缓存的地址和长度*/
-struct msgid_reps_info
-{
- void *reps_buffer; /* reps的地址*/
- uint16_t id; /* msg_id*/
- uint16_t buffer_len; /* buffer的最大长度*/
- uint16_t flag; /* 该条目是否被分配,已经非配和未被分配*/
- struct timer_list id_timer; /* 该id对应的定时器*/
+struct msgid_reps_info {
+ void *reps_buffer; /* reps的地址*/
+ uint16_t id; /* msg_id*/
+ uint16_t buffer_len; /* buffer的最大长度*/
+ uint16_t flag; /* 该条目是否被分配,已经非配和未被分配*/
+ struct timer_list id_timer; /* 该id对应的定时器*/
};
-struct msix_msg
-{
- uint16_t pcie_id;
- uint16_t vector_risc;
- uint16_t vector_pfvf;
- uint16_t vector_mpf;
+struct msix_msg {
+ uint16_t pcie_id;
+ uint16_t vector_risc;
+ uint16_t vector_pfvf;
+ uint16_t vector_mpf;
};
-struct offset_get_msg
-{
- uint16_t pcie_id;
- uint16_t type;
+struct offset_get_msg {
+ uint16_t pcie_id;
+ uint16_t type;
};
-struct bar_offset_reps
-{
- uint16_t check;
- uint16_t rsv;
- uint32_t offset;
- uint32_t length;
-}__attribute__((packed));
-
-struct bar_recv_msg
-{
- uint8_t replied;
- uint16_t reps_len;
- uint8_t rsv1;
- union
- {
- struct bar_offset_reps offset_reps;
- uint8_t data[BAR_MSG_PAYLOAD_MAX_LEN - 4];
- };
-}__attribute__((packed));
-
-struct msgid_ring
-{
- uint16_t msg_id;
- spinlock_t lock;
- struct msgid_reps_info reps_info_tbl[MAX_MSG_BUFF_NUM];
+struct bar_offset_reps {
+ uint16_t check;
+ uint16_t rsv;
+ uint32_t offset;
+ uint32_t length;
+} __attribute__((packed));
+
+struct bar_recv_msg {
+ uint8_t replied;
+ uint16_t reps_len;
+ uint8_t rsv1;
+ union {
+ struct bar_offset_reps offset_reps;
+ uint8_t data[ZXDH_NET_MAX_ACK_LEN - 4];
+ };
+} __attribute__((packed));
+
+struct msgid_ring {
+ uint16_t msg_id;
+ spinlock_t lock;
+ struct msgid_reps_info reps_info_tbl[MAX_MSG_BUFF_NUM];
};
/* 异步消息相关实体*/
-struct async_msg_entity
-{
- struct task_struct *async_proc; /* 异步队列消息线程*/
- struct mutex async_qlock; /* 易怒队列入队锁*/
- struct bar_async_node *noemq_head; /* 非紧急队列头*/
- struct bar_async_node *noemq_tail; /* 非紧急队列尾部*/
- struct bar_async_node *emq_head; /* 紧急队列头*/
- struct bar_async_node *emq_tail; /* 紧急队列尾部*/
+struct async_msg_entity {
+ struct task_struct *async_proc; /* 异步队列消息线程*/
+ struct mutex async_qlock; /* 易怒队列入队锁*/
+ struct bar_async_node *noemq_head; /* 非紧急队列头*/
+ struct bar_async_node *noemq_tail; /* 非紧急队列尾部*/
+ struct bar_async_node *emq_head; /* 紧急队列头*/
+ struct bar_async_node *emq_tail; /* 紧急队列尾部*/
};
/* 异步消息队列节点*/
-struct bar_async_node
-{
- uint32_t msg_id;
- void *payload_addr; /**< 消息净荷起始地址,有用户创建并填充 */
- uint64_t payload_len; /**< 消息净荷长度. */
- uint64_t subchan_addr; /**< 消息发送到哪个2K, 由virt_addr, src, dst共同决定,计算交给common来做>**/
- uint32_t event_id; /**< 消息发送模块,描述消息哪个模块发送 */
- uint16_t src_pcieid;
- uint16_t dst_pcieid; /**< 消息目的的bdf号,适用于PF与VF公用4K的时候用>**/
- uint16_t emec; /**< 消息紧急类型,异步消息可以分为紧急消息和非紧急消息>**/
- uint16_t ack;
- uint8_t src;
- uint8_t dst;
- struct bar_async_node *next;
+struct bar_async_node {
+ uint32_t msg_id;
+ void *payload_addr; /**< 消息净荷起始地址,有用户创建并填充 */
+ uint64_t payload_len; /**< 消息净荷长度. */
+ uint64_t subchan_addr; /* < 消息发送到哪个2K, 由virt_addr, src,
+ dst共同决定,计算交给common来做> */
+ uint32_t event_id; /**< 消息发送模块,描述消息哪个模块发送 */
+ uint16_t src_pcieid;
+ uint16_t dst_pcieid; /**< 消息目的的bdf号,适用于PF与VF公用4K的时候用>**/
+ uint16_t emec; /**< 消息紧急类型,异步消息可以分为紧急消息和非紧急消息>**/
+ uint16_t ack;
+ uint8_t src;
+ uint8_t dst;
+ struct bar_async_node *next;
};
-uint8_t bar_msg_col_index_trans(uint8_t dst);
-uint8_t bar_msg_row_index_trans(uint8_t src);
+/* */
+int __bar_chan_send_para_check(struct zxdh_pci_bar_msg *in,
+ struct zxdh_msg_recviver_mem *result);
+void __bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in,
+ uint64_t *subchan_addr);
+void __bar_chan_sync_fill_header(uint32_t msg_id, struct zxdh_pci_bar_msg *in,
+ struct bar_msg_header *msg_header);
+uint16_t __bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr,
+ uint16_t payload_len,
+ struct bar_msg_header *msg_header);
+
+void __bar_msg_ack_async_msg_proc(struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff);
+void __bar_msg_noack_async_msg_proc(uint64_t reps_addr,
+ struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff, uint8_t src,
+ uint8_t dst, void *dev);
+void __bar_msg_sync_msg_proc(uint64_t reply_addr,
+ struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff, void *dev);
+uint16_t __bar_chan_msg_header_set(uint64_t subchan_addr,
+ struct bar_msg_header *msg_header);
+uint16_t __bar_chan_msg_header_get(uint64_t subchan_addr,
+ struct bar_msg_header *msg_header);
+uint16_t __bar_chan_msg_header_check(struct bar_msg_header *msg_header);
+uint16_t
+__bar_chan_callback_register_check(uint8_t event_id,
+ zxdh_bar_chan_msg_recv_callback callback);
+uint16_t __bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg,
+ uint16_t len);
+uint16_t __bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg,
+ uint16_t len);
+uint16_t __bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label);
+uint16_t __bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label);
+uint16_t __bar_msg_valid_stat_get(uint64_t subchan_addr);
+uint16_t __bar_chan_sync_msg_reps_get(uint64_t subchan_addr,
+ uint64_t recv_buffer, uint16_t buffer_len,
+ uint16_t msg_id);
+int __bar_chan_msgid_allocate(uint16_t *msgid);
+void __bar_chan_msgid_free(uint16_t msg_id);
+struct bar_async_node *
+__bar_chan_async_node_create(uint16_t msg_id, struct zxdh_pci_bar_msg *in);
+uint16_t __bar_chan_async_node_add(struct bar_async_node *node);
+uint16_t __bar_chan_async_node_del(struct bar_async_node *node);
+void __bar_msg_async_list_admin(void);
+uint16_t __bar_msg_async_list_parse(struct bar_async_node *node);
+uint16_t bar_msg_src_parse(struct zxdh_pci_bar_msg *in);
+
+uint16_t __bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result,
+ uint16_t *msg_id);
+int __bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data);
+int __bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset,
+ uint32_t *pdata);
+uint8_t __bar_msg_col_index_trans(uint8_t dst);
+uint8_t __bar_msg_row_index_trans(uint8_t src);
+void bar_chan_timer_callback(struct timer_list *timer);
#ifdef __cplusplus
}
#endif
-#endif /* _ZXDH_MSG_CHAN_PRIV_H_ */
+#endif /* _ZXDH_MSG_CHAN_PRIV_H_ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c
index 1de7b5246042cb9732aef2d11ec79e3254709313..3f22cbb38c4a214abed490aff4c93241597e4e37 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c
@@ -10,139 +10,129 @@
#ifdef BAR_MSG_TEST
/*计算方法: risc时间戳 - host时间戳*/
-#define HOST_RISC_DIFF (307762)
+#define HOST_RISC_DIFF (307762)
uint64_t print_time(char *str)
{
- ktime_t kt = ktime_get();
- s64 us_since_boot = ktime_to_us(kt);
- LOG_INFO(KERN_INFO "%s timestamp: %llu us\n", str, us_since_boot);
- return us_since_boot;
+ ktime_t kt = ktime_get();
+ s64 us_since_boot = ktime_to_us(kt);
+ LOG_INFO(KERN_INFO "%s timestamp: %llu us\n", str, us_since_boot);
+ return us_since_boot;
}
-struct msg_time_statis_reps
-{
- uint16_t sum_check;
-
- uint64_t risc_recv_msg_t;
- uint64_t risc_push_msg_t;
- uint64_t risc_pop_msg_t;
- uint64_t risc_msg_proc_t;
- uint64_t risc_notice_peer_t;
+struct msg_time_statis_reps {
+ uint16_t sum_check;
+
+ uint64_t risc_recv_msg_t;
+ uint64_t risc_push_msg_t;
+ uint64_t risc_pop_msg_t;
+ uint64_t risc_msg_proc_t;
+ uint64_t risc_notice_peer_t;
} __attribute__((packed));
-struct msg_time_host_risc
-{
- uint64_t host_send_msg_t;
- uint64_t host_recv_msg_t;
- struct msg_time_statis_reps risc_time;
-}__attribute__((packed));
+struct msg_time_host_risc {
+ uint64_t host_send_msg_t;
+ uint64_t host_recv_msg_t;
+ struct msg_time_statis_reps risc_time;
+} __attribute__((packed));
-struct msg_time_host_risc global_time_stat = {0};
+struct msg_time_host_risc global_time_stat = { 0 };
void print_risc_time_stamp(struct msg_time_host_risc *stat)
{
-#if 0
- LOF_INFO("host_send_msg_t: %llu us.\n", stat->host_send_msg_t);
- LOF_INFO("risc_recv_msg_t: %llu us.\n", stat->risc_time.risc_recv_msg_t);
- LOF_INFO("risc_push_msg_t: %llu us.\n", stat->risc_time.risc_push_msg_t);
- LOF_INFO("risc_pop_msg_t: %llu us.\n", stat->risc_time.risc_pop_msg_t);
- LOF_INFO("risc_msg_proc_t: %llu us.\n", stat->risc_time.risc_msg_proc_t);
- LOF_INFO("risc_notice_peer_t: %llu us.\n", stat->risc_time.risc_notice_peer_t);
- LOF_INFO("host_recv_msg_t: %llu us.\n", stat->host_recv_msg_t);
-#endif
- LOG_INFO("risc recv->msg push: %llu us.\n", stat->risc_time.risc_push_msg_t - stat->risc_time.risc_recv_msg_t);
- LOG_INFO("risc push->risc pop: %llu us.\n", stat->risc_time.risc_pop_msg_t - stat->risc_time.risc_push_msg_t);
- LOG_INFO("risc pop->before proc : %llu us.\n", stat->risc_time.risc_msg_proc_t- stat->risc_time.risc_pop_msg_t);
- LOG_INFO("after proc->risc set valid: %llu us.\n", stat->risc_time.risc_notice_peer_t - stat->risc_time.risc_msg_proc_t);
+ LOG_INFO("risc recv->msg push: %llu us.\n",
+ stat->risc_time.risc_push_msg_t - stat->risc_time.risc_recv_msg_t);
+ LOG_INFO("risc push->risc pop: %llu us.\n",
+ stat->risc_time.risc_pop_msg_t - stat->risc_time.risc_push_msg_t);
+ LOG_INFO("risc pop->before proc : %llu us.\n",
+ stat->risc_time.risc_msg_proc_t - stat->risc_time.risc_pop_msg_t);
+ LOG_INFO("after proc->risc set valid: %llu us.\n",
+ stat->risc_time.risc_notice_peer_t -
+ stat->risc_time.risc_msg_proc_t);
}
uint16_t sum_func(void *data, uint16_t len)
{
- uint64_t result = 0;
- int idx = 0;
- uint16_t ret = 0;
-
- if (data == NULL)
- {
- return 0;
- }
-
- for (idx = 0; idx < len; idx++)
- {
- result += *((uint8_t *)data + idx);
- }
-
- ret = (uint16_t)result;
- return ret;
+ uint64_t result = 0;
+ int idx = 0;
+ uint16_t ret = 0;
+
+ if (data == NULL) {
+ return 0;
+ }
+
+ for (idx = 0; idx < len; idx++) {
+ result += *((uint8_t *)data + idx);
+ }
+
+ ret = (uint16_t)result;
+ return ret;
}
uint16_t test_sync_send(void)
{
- struct zxdh_pci_bar_msg in = {0};
- struct zxdh_msg_recviver_mem result = {0};
- uint16_t payload_len = 0;
- uint64_t bar_base_addr = 0;
- void *payload_addr = NULL;
- uint8_t recv_buffer[200] = {0};
- uint16_t reps = 0;
- uint16_t ret = 0;
-
- payload_len = 100;
- payload_addr = kmalloc(payload_len, GFP_KERNEL);
- if (!payload_addr)
- {
- LOG_ERR("malloca failed");
- return 0xaa;
- }
- get_random_bytes(payload_addr, payload_len);
- LOG_INFO("sync send msg len: %x", payload_len);
-
- in.src_pcieid = 0x900;
- in.virt_addr = 0;
- in.payload_addr = payload_addr;
- in.payload_len = payload_len;
- in.src = MSG_CHAN_END_MPF;
- in.dst = MSG_CHAN_END_RISC;
- in.event_id = 50;
-
- /* 构造result接收参数*/
- result.recv_buffer = recv_buffer;
- result.buffer_len = sizeof(recv_buffer);
-
- /* 调用发送接口*/
- LOG_INFO("start to sync send test.");
- global_time_stat.host_send_msg_t = print_time("before send.") + HOST_RISC_DIFF;
- ret = zxdh_bar_chan_sync_msg_send(&in, &result);
- global_time_stat.host_recv_msg_t = print_time("after send.") + HOST_RISC_DIFF;
-
- if (ret != BAR_MSG_OK)
- {
- LOG_ERR("sync send failed");
- ret = 0xAA;
- goto out;
- }
-
- struct msg_time_statis_reps *reps_ptr = (struct msg_time_statis_reps*)((uint8_t *)result.recv_buffer + 4);
- if (reps_ptr->sum_check == sum_func(payload_addr, payload_len))
- {
- memcpy(&global_time_stat.risc_time, (void*)reps_ptr, sizeof(struct msg_time_statis_reps));
- print_risc_time_stamp(&global_time_stat);
- ret = 0;
- LOG_ERR("reps validate success: %d", reps);
- goto out;
- }
- else
- {
- LOG_ERR("reps valide failed: %d", reps);
- ret = 0xAA;
- }
-
-out:
- if (!payload_addr)
- {
- kfree(payload_addr);
- payload_addr = NULL;
- }
- return ret;
+ struct zxdh_pci_bar_msg in = { 0 };
+ struct zxdh_msg_recviver_mem result = { 0 };
+ uint16_t payload_len = 0;
+ uint64_t bar_base_addr = 0;
+ void *payload_addr = NULL;
+ uint8_t recv_buffer[200] = { 0 };
+ uint16_t reps = 0;
+ uint16_t ret = 0;
+
+ payload_len = 100;
+ payload_addr = kmalloc(payload_len, GFP_KERNEL);
+ if (!payload_addr) {
+ LOG_ERR("malloca failed");
+ return 0xaa;
+ }
+ get_random_bytes(payload_addr, payload_len);
+ LOG_INFO("sync send msg len: %x", payload_len);
+
+ in.src_pcieid = 0x900;
+ in.virt_addr = 0;
+ in.payload_addr = payload_addr;
+ in.payload_len = payload_len;
+ in.src = MSG_CHAN_END_MPF;
+ in.dst = MSG_CHAN_END_RISC;
+ in.event_id = 50;
+
+ /* 构造result接收参数*/
+ result.recv_buffer = recv_buffer;
+ result.buffer_len = sizeof(recv_buffer);
+
+ /* 调用发送接口*/
+ LOG_INFO("start to sync send test.");
+ global_time_stat.host_send_msg_t =
+ print_time("before send.") + HOST_RISC_DIFF;
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ global_time_stat.host_recv_msg_t =
+ print_time("after send.") + HOST_RISC_DIFF;
+
+ if (ret != BAR_MSG_OK) {
+ LOG_ERR("sync send failed");
+ ret = 0xAA;
+ goto exit;
+ }
+
+ struct msg_time_statis_reps *reps_ptr =
+ (struct msg_time_statis_reps *)((uint8_t *)result.recv_buffer + 4);
+ if (reps_ptr->sum_check == sum_func(payload_addr, payload_len)) {
+ memcpy(&global_time_stat.risc_time, (void *)reps_ptr,
+ sizeof(struct msg_time_statis_reps));
+ print_risc_time_stamp(&global_time_stat);
+ ret = 0;
+ LOG_ERR("reps validate success: %d", reps);
+ goto exit;
+ } else {
+ LOG_ERR("reps valide failed: %d", reps);
+ ret = 0xAA;
+ }
+
+exit:
+ if (!payload_addr) {
+ kfree(payload_addr);
+ payload_addr = NULL;
+ }
+ return ret;
}
#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h
index b3575d2d3ef763680a274b66d10239b6e3cd2ebb..485300c445a8a95f22289382c97f418fa86a394e 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h
@@ -1,16 +1,11 @@
#ifndef _ZXDH_MSG_CHAN_VERSION_H_
#define _ZXDH_MSG_CHAN_VERSION_H_
-#ifdef DRIVER_VERSION_VAL
- #define DRV_VERSION DRIVER_VERSION_VAL
-#else
- #define DRV_VERSION "1.0-1"
-#endif
-
-#define DRV_RELDATE "December 1, 2022"
-#define DRV_NAME "msg_chan"
-#define DRV_DESCRIPTION "DPU MSG Channel Driver"
+#define DRV_VERSION "1.0.1"
+#define DRV_RELDATE "December 1, 2022"
+#define DRV_NAME "msg_chan"
+#define DRV_DESCRIPTION "DPU MSG Channel Driver"
#define hbond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-#endif /* _ZXDH_MSG_CHAN_VERSION_H_ */
\ No newline at end of file
+#endif /* _ZXDH_MSG_CHAN_VERSION_H_ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/cmd/msg_main.c b/src/net/drivers/net/ethernet/dinghai/cmd/msg_main.c
index 599e6e5c1179c5fdc7246edd0498b1d4eb2d4fcd..c84cb27771e3474ae5bc8aed0e5bfb75e21fa5c7 100644
--- a/src/net/drivers/net/ethernet/dinghai/cmd/msg_main.c
+++ b/src/net/drivers/net/ethernet/dinghai/cmd/msg_main.c
@@ -1,31 +1,30 @@
#include
#include
#include
-
#include "msg_chan_ver.h"
#include "msg_chan_priv.h"
static int __init msg_chan_init(void)
{
- BAR_LOG_INFO("%s init. version %s\n", DRV_DESCRIPTION, DRV_VERSION);
- zxdh_bar_msg_chan_init();
+ zxdh_bar_msg_chan_init();
#ifdef TEST
- BAR_TestApp();
+ BAR_TestApp();
#endif
+ BAR_LOG_INFO("%s init. version %s\n", DRV_DESCRIPTION, DRV_VERSION);
- return 0;
+ return 0;
}
static void __exit msg_chan_exit(void)
{
- zxdh_bar_msg_chan_remove();
- BAR_LOG_INFO("%s exit.\n", DRV_DESCRIPTION);
+ BAR_LOG_INFO("%s exit.\n", DRV_DESCRIPTION);
+ zxdh_bar_msg_chan_remove();
}
module_init(msg_chan_init);
module_exit(msg_chan_exit);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
-MODULE_AUTHOR("ZTE Corporation");
+MODULE_AUTHOR("ZTE Corporation");
diff --git a/src/net/drivers/net/ethernet/dinghai/devlink.c b/src/net/drivers/net/ethernet/dinghai/devlink.c
old mode 100755
new mode 100644
index f244c9027c30e6bd10db6124711edaffeab231f6..03453bfa17e2e4058b5170757a89a6bcefad31ba
--- a/src/net/drivers/net/ethernet/dinghai/devlink.c
+++ b/src/net/drivers/net/ethernet/dinghai/devlink.c
@@ -1,50 +1,52 @@
-#include
-#include
-
-#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
- int32_t zxdh_devlink_register(struct devlink *devlink)
-#else
- int32_t zxdh_devlink_register(struct devlink *devlink, struct device *dev)
-#endif
-{
- struct dh_core_dev *dh_dev = devlink_priv(devlink);
- int32_t err = 0;
-
-#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
- devlink_register(devlink);
-#else
- devlink_register(devlink, dev);
-#endif
-
- err = dh_dev->devlink_ops->params_register(devlink);
- if (err != 0)
- {
- LOG_ERR("params_register failed: %d\n", err);
- return err;
- }
-
- return err;
-}
-
-struct devlink *zxdh_devlink_alloc(struct device *dev, struct devlink_ops *dh_devlink_ops, size_t priv_size)
-{
-#ifdef HAVE_DEVLINK_ALLOC_GET_1_PARAMS
- return devlink_alloc(dh_devlink_ops, sizeof(struct dh_core_dev) + priv_size);
-#else
- return devlink_alloc(dh_devlink_ops, sizeof(struct dh_core_dev) + priv_size, dev);
-#endif
-}
-
-void zxdh_devlink_free(struct devlink *devlink)
-{
- devlink_free(devlink);
-}
-
-void zxdh_devlink_unregister(struct devlink *devlink)
-{
- struct dh_core_dev *dev = devlink_priv(devlink);
-
- dev->devlink_ops->params_unregister(devlink);
-
- devlink_unregister(devlink);
-}
+#include
+#include
+
+#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
+int32_t zxdh_devlink_register(struct devlink *devlink)
+#else
+int32_t zxdh_devlink_register(struct devlink *devlink, struct device *dev)
+#endif
+{
+ struct dh_core_dev *dh_dev = devlink_priv(devlink);
+ int32_t err = 0;
+
+#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
+ devlink_register(devlink);
+#else
+ devlink_register(devlink, dev);
+#endif
+
+ err = dh_dev->devlink_ops->params_register(devlink);
+ if (err != 0) {
+ return err;
+ }
+
+ return err;
+}
+
+struct devlink *zxdh_devlink_alloc(struct device *dev,
+ struct devlink_ops *dh_devlink_ops,
+ size_t priv_size)
+{
+#ifdef HAVE_DEVLINK_ALLOC_GET_1_PARAMS
+ return devlink_alloc(dh_devlink_ops,
+ sizeof(struct dh_core_dev) + priv_size);
+#else
+ return devlink_alloc(dh_devlink_ops, sizeof(struct dh_core_dev) + priv_size,
+ dev);
+#endif
+}
+
+void zxdh_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void zxdh_devlink_unregister(struct devlink *devlink)
+{
+ struct dh_core_dev *dev = devlink_priv(devlink);
+
+ dev->devlink_ops->params_unregister(devlink);
+
+ devlink_unregister(devlink);
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/dh_cmd.c b/src/net/drivers/net/ethernet/dinghai/dh_cmd.c
index 704cf4afc6576fea3ca58cdf74b5cf1e16a976ca..8163e9858c2acd993af3b023cb52f3ba0bf7dbd6 100644
--- a/src/net/drivers/net/ethernet/dinghai/dh_cmd.c
+++ b/src/net/drivers/net/ethernet/dinghai/dh_cmd.c
@@ -16,24 +16,24 @@ MPF: 0 0 0
PF: 0 0 1
VF: 0 1 1
******************************************/
-uint8_t subchan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] =
-{
- {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND},
- {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV},
- {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV, BAR_SUBCHAN_INDEX_RECV}
+uint8_t subchan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+ { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND },
+ { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV },
+ { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV, BAR_SUBCHAN_INDEX_RECV }
};
-uint8_t chan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] =
-{
- {BAR_INDEX_TO_RISC, BAR_INDEX_MPF_TO_PFVF, BAR_INDEX_MPF_TO_MPF},
- {BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF},
- {BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF}
+uint8_t chan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {
+ { BAR_INDEX_TO_RISC, BAR_INDEX_MPF_TO_PFVF, BAR_INDEX_MPF_TO_MPF },
+ { BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF },
+ { BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF }
};
void *internal_addr;
bool is_mpf_scaned = FALSE;
+struct async_msg_entity async_en;
+
static struct msgid_ring g_msgid_ring;
/* 消息处理函数表*/
@@ -41,730 +41,967 @@ zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[MSG_MODULE_NUM];
void bar_chan_check_chan_stats(int ret, uint64_t addr)
{
- struct bar_msg_header *hdr = (struct bar_msg_header*)addr;
-
- if (ret == 0)
- {
- return;
- }
- /* check bar msg_header*/
- BAR_LOG_ERR("bar msg err, ret: %d, valid: %u, msg_id: %u, event_id: %u, "
- "ack: %u, src_pcieid: 0x%x, dst_pcieid: 0x%x, chan_addr: 0x%llx.\n",
- ret, hdr->valid, hdr->msg_id, hdr->event_id, hdr->ack, hdr->src_pcieid, hdr->dst_pcieid, addr);
+ struct bar_msg_header *hdr = (struct bar_msg_header *)addr;
+
+	if (ret == 0 || addr == 0) {
+ return;
+ }
+ /* check bar msg_header*/
+	BAR_LOG_ERR(
+ "bar msg err, ret: %d, valid: %u, msg_id: %u, event_id: %u, "
+ "ack: %u, src_pcieid: 0x%x, dst_pcieid: 0x%x, chan_addr: 0x%llx.\n",
+ ret, hdr->valid, hdr->msg_id, hdr->event_id, hdr->ack,
+ hdr->src_pcieid, hdr->dst_pcieid, addr);
}
-uint16_t bar_msg_src_parse(struct zxdh_pci_bar_msg *in)
-{
- if (in == NULL)
- {
- return BAR_MSG_ERR_NULL;
- }
-
- if (in->src == MSG_CHAN_END_MPF)
- {
- if (!is_mpf_scaned)
- {
- return BAR_MSG_ERR_MPF_NOT_SCANED;
- }
- in->virt_addr = (uint64_t)internal_addr + BAR_MSG_OFFSET;
- in->src_pcieid = PF0_PCIEID;
- }
- return BAR_MSG_OK;
-}
-
-void bar_chan_sync_fill_header(uint32_t msg_id, struct zxdh_pci_bar_msg *in, struct bar_msg_header *msg_header)
+/**
+ * zxdh_bar_chan_sync_msg_send - 通过PCIE BAR空间发送同步消息
+ * @in: 消息发送信息
+ * @result: 消息结果反馈
+ * @return: 0 成功,其他失败
+ */
+int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in,
+ struct zxdh_msg_recviver_mem *result)
{
- memset(msg_header, 0, sizeof(*msg_header));
- msg_header->sync = BAR_CHAN_MSG_SYNC;
- msg_header->event_id = in->event_id;
- msg_header->len = in->payload_len;
- msg_header->msg_id = msg_id;
- msg_header->dst_pcieid = in->dst_pcieid;
- msg_header->src_pcieid = in->src_pcieid;
+ int ret = 0;
+ uint16_t valid = 0;
+ uint16_t time_out_cnt = 0;
+ uint16_t msg_id = 0;
+ uint64_t subchan_addr = 0;
+ struct bar_msg_header msg_header = { 0 };
+
+ ret = bar_msg_src_parse(in);
+ if (ret != BAR_MSG_OK) {
+ goto exit;
+ }
+
+ ret = __bar_chan_send_para_check(in, result);
+ if (ret != BAR_MSG_OK) {
+ BAR_LOG_ERR("para check failed, %d.", ret);
+ goto exit;
+ }
+
+ /* 申请msg_id,并将缓存信息存放到表中*/
+ ret = __bar_chan_save_recv_info(result, &msg_id);
+ if (ret != BAR_MSG_OK) {
+ BAR_LOG_ERR("msg_id allocated failed.");
+ goto exit;
+ }
+ /* 计算2K通道的地址*/
+ __bar_chan_subchan_addr_get(in, &subchan_addr);
+ /* 填充消息头*/
+ __bar_chan_sync_fill_header(msg_id, in, &msg_header);
+ /* 给通道上锁,根据src和dst判断是分配硬件锁还是软件锁*/
+ bar_chan_lock(in->src, in->dst, in->src_pcieid);
+
+ /* 消息头、消息体发送到bar空间, valid置位*/
+ __bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len,
+ &msg_header);
+ /* 轮询等待消息回复*/
+ do {
+ usleep_range(BAR_MSG_POLLING_SPAN_US, BAR_MSG_POLLING_SPAN_US + 10);
+ valid = __bar_msg_valid_stat_get(subchan_addr);
+ time_out_cnt++;
+ } while ((time_out_cnt < BAR_MSG_TIMEOUT_TH) &&
+ (BAR_MSG_CHAN_USED == valid));
+
+ /* 如果超时恢复标志位*/
+ if ((BAR_MSG_TIMEOUT_TH == time_out_cnt) &&
+ (BAR_MSG_CHAN_USABLE != valid)) {
+ __bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);
+ __bar_chan_msg_poltag_set(subchan_addr, 0);
+ BAR_LOG_ERR("BAR MSG ERR: msg_id: %d time out.\n", msg_header.msg_id);
+ ret = BAR_MSG_ERR_TIME_OUT;
+ } else {
+ /* 从消息头中取出回复消息的长度len,
+ * 从payload中取出消息内容,放到本地缓存reps_buff*/
+ ret = __bar_chan_sync_msg_reps_get(subchan_addr,
+ (uint64_t)result->recv_buffer,
+ result->buffer_len, msg_id);
+ }
+ __bar_chan_msgid_free(msg_id);
+ /*通道解锁*/
+ bar_chan_unlock(in->src, in->dst, in->src_pcieid);
+
+exit:
+ bar_chan_check_chan_stats(ret, subchan_addr);
+ return ret;
}
+EXPORT_SYMBOL(zxdh_bar_chan_sync_msg_send);
-int bar_chan_msgid_allocate(uint16_t *msgid)
+uint16_t bar_msg_src_parse(struct zxdh_pci_bar_msg *in)
{
- int ret = BAR_MSG_OK;
- uint16_t msg_id = 0;
- struct msgid_reps_info *msgid_reps_info = NULL;
- uint16_t count = 0;
-
- spin_lock(&g_msgid_ring.lock);
- msg_id = g_msgid_ring.msg_id;
- do
- {
- count++;
- ++msg_id;
- msg_id %= MAX_MSG_BUFF_NUM;
- msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
-
- }while(msgid_reps_info->flag != REPS_INFO_FLAG_USABLE && (count < MAX_MSG_BUFF_NUM));
-
- if (count >= MAX_MSG_BUFF_NUM)
- {
- ret = -1;
- goto out;
- }
-
- msgid_reps_info->flag = REPS_INFO_FLAG_USED;
- g_msgid_ring.msg_id = msg_id;
- *msgid = msg_id;
-
-out:
- spin_unlock(&g_msgid_ring.lock);
- return ret;
+ if (in == NULL) {
+ return BAR_MSG_ERR_NULL;
+ }
+
+ if (in->src == MSG_CHAN_END_MPF) {
+ if (!is_mpf_scaned) {
+ return BAR_MSG_ERR_MPF_NOT_SCANED;
+ }
+ in->virt_addr = (uint64_t)internal_addr + BAR_MSG_OFFSET;
+ in->src_pcieid = PF0_PCIEID;
+ }
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result, uint16_t *msg_id)
+void __bar_chan_sync_fill_header(uint32_t msg_id, struct zxdh_pci_bar_msg *in,
+ struct bar_msg_header *msg_header)
{
- int ret = 0;
- struct msgid_reps_info *reps_info = NULL;
-
- ret = bar_chan_msgid_allocate(msg_id);
- if (ret == -1)
- {
- return BAR_MSG_ERR_MSGID;
- }
- reps_info = &g_msgid_ring.reps_info_tbl[*msg_id];
- reps_info->reps_buffer = result->recv_buffer;
- reps_info->buffer_len = result->buffer_len;
-
- return BAR_MSG_OK;
+ memset(msg_header, 0, sizeof(*msg_header));
+ msg_header->sync = BAR_CHAN_MSG_SYNC;
+ msg_header->event_id = in->event_id;
+ msg_header->len = in->payload_len;
+ msg_header->msg_id = msg_id;
+ msg_header->dst_pcieid = in->dst_pcieid;
+ msg_header->src_pcieid = in->src_pcieid;
}
-void bar_chan_msgid_free(uint16_t msg_id)
+int __bar_chan_msgid_allocate(uint16_t *msgid)
{
- struct msgid_reps_info *msgid_reps_info = NULL;
- if (msg_id >= MAX_MSG_BUFF_NUM)
- {
- return;
- }
- msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
- spin_lock(&g_msgid_ring.lock);
- msgid_reps_info->flag = REPS_INFO_FLAG_USABLE;
- spin_unlock(&g_msgid_ring.lock);
- return;
+ uint16_t msg_id = 0;
+ struct msgid_reps_info *msgid_reps_info = NULL;
+ uint16_t count = 0;
+
+ spin_lock(&g_msgid_ring.lock);
+ msg_id = g_msgid_ring.msg_id;
+ do {
+ count++;
+ msg_id = (msg_id >= (MAX_MSG_BUFF_NUM - 1)) ? 0 : msg_id + 1;
+ msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
+
+ } while (msgid_reps_info->flag != REPS_INFO_FLAG_USABLE &&
+ (count < MAX_MSG_BUFF_NUM));
+
+ if (count >= MAX_MSG_BUFF_NUM) {
+ spin_unlock(&g_msgid_ring.lock);
+ return -1;
+ }
+
+ msgid_reps_info->flag = REPS_INFO_FLAG_USED;
+ g_msgid_ring.msg_id = msg_id;
+ *msgid = msg_id;
+
+ spin_unlock(&g_msgid_ring.lock);
+ return BAR_MSG_OK;
}
-uint8_t bar_msg_row_index_trans(uint8_t src)
+void __bar_chan_msgid_free(uint16_t msg_id)
{
- uint8_t src_index = 0;
-
- switch (src)
- {
- case MSG_CHAN_END_MPF:
- {
- src_index = BAR_MSG_SRC_MPF;
- break;
- }
- case MSG_CHAN_END_PF:
- {
- src_index = BAR_MSG_SRC_PF;
- break;
- }
- case MSG_CHAN_END_VF:
- {
- src_index = BAR_MSG_SRC_VF;
- break;
- }
- default:
- {
- src_index = BAR_MSG_SRC_ERR;
- break;
- }
- }
- return src_index;
-}
+	if (msg_id >= MAX_MSG_BUFF_NUM) return;
+	struct msgid_reps_info *msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
-uint8_t bar_msg_col_index_trans(uint8_t dst)
-{
- uint8_t dst_index = 0;
-
- switch (dst)
- {
- case MSG_CHAN_END_MPF:
- {
- dst_index = BAR_MSG_DST_MPF;
- break;
- }
- case MSG_CHAN_END_PF:
- {
- dst_index = BAR_MSG_DST_PFVF;
- break;
- }
- case MSG_CHAN_END_VF:
- {
- dst_index = BAR_MSG_DST_PFVF;
- break;
- }
- case MSG_CHAN_END_RISC:
- {
- dst_index = BAR_MSG_DST_RISC;
- break;
- }
- default:
- {
- dst_index = BAR_MSG_SRC_ERR;
- break;
- }
- }
- return dst_index;
+ spin_lock(&g_msgid_ring.lock);
+ msgid_reps_info->flag = REPS_INFO_FLAG_USABLE;
+ spin_unlock(&g_msgid_ring.lock);
+ return;
}
-int bar_chan_send_para_check(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result)
+int __bar_chan_send_para_check(struct zxdh_pci_bar_msg *in,
+ struct zxdh_msg_recviver_mem *result)
{
- uint8_t src_index = 0;
- uint8_t dst_index = 0;
-
- if (in == NULL || result == NULL)
- {
- BAR_LOG_ERR("send para ERR: null para.\n");
- return BAR_MSG_ERR_NULL_PARA;
- }
-
- src_index = bar_msg_row_index_trans((uint8_t)in->src);
- dst_index = bar_msg_col_index_trans((uint8_t)in->dst);
- if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR)
- {
- BAR_LOG_ERR("send para ERR: chan doesn't exist.\n");
- return BAR_MSG_ERR_TYPE;
- }
- if (in->event_id > MSG_MODULE_NUM)
- {
- BAR_LOG_ERR("send para ERR: invalid event_id: %d.\n", in->event_id);
- return BAR_MSG_ERR_MODULE;
- }
- if (in->payload_addr == NULL)
- {
- BAR_LOG_ERR("send para ERR: null message.\n");
- return BAR_MSG_ERR_BODY_NULL;
- }
- if (in->payload_len > BAR_MSG_PAYLOAD_MAX_LEN)
- {
- BAR_LOG_ERR("send para ERR: len %x is too long.\n", in->payload_len);
- return BAR_MSG_ERR_LEN;
- }
- if (in->virt_addr == 0 || result->recv_buffer == NULL)
- {
- BAR_LOG_ERR("send para ERR: virt_addr or recv_buffer is NULL.\n");
- return BAR_MSG_ERR_VIRTADDR_NULL;
- }
- if (result->buffer_len < REPS_HEADER_PAYLOAD_OFFSET)
- {
- BAR_LOG_ERR("recv buffer's len: %d is short than mininal 4 bytes\n", result->buffer_len);
- }
- return BAR_MSG_OK;
+ uint8_t src_index = 0;
+ uint8_t dst_index = 0;
+
+ if (in == NULL || result == NULL) {
+ BAR_LOG_ERR("send para ERR: null para.\n");
+ return BAR_MSG_ERR_NULL_PARA;
+ }
+
+ src_index = __bar_msg_row_index_trans(in->src);
+ dst_index = __bar_msg_col_index_trans(in->dst);
+ if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+ BAR_LOG_ERR("send para ERR: chan doesn't exist.\n");
+ return BAR_MSG_ERR_TYPE;
+ }
+ if (in->event_id > MSG_MODULE_NUM) {
+ BAR_LOG_ERR("send para ERR: invalid event_id: %d.\n", in->event_id);
+ return BAR_MSG_ERR_MODULE;
+ }
+ if (in->payload_addr == NULL) {
+ BAR_LOG_ERR("send para ERR: null message.\n");
+ return BAR_MSG_ERR_BODY_NULL;
+ }
+ if (in->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) {
+ BAR_LOG_ERR("send para ERR: len %x is too long.\n", in->payload_len);
+ return BAR_MSG_ERR_LEN;
+ }
+ if (in->virt_addr == 0 || result->recv_buffer == NULL) {
+ BAR_LOG_ERR("send para ERR: virt_addr or recv_buffer is NULL.\n");
+ return BAR_MSG_ERR_VIRTADDR_NULL;
+ }
+ if (result->buffer_len < REPS_HEADER_PAYLOAD_OFFSET) {
+		BAR_LOG_ERR("recv buffer's len: %d is shorter than minimal 4 bytes\n",
+ result->buffer_len);
+ }
+ return BAR_MSG_OK;
}
/* 根据用户提供的src和dst和当前的场景来推算2K的偏移*/
-void bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, uint64_t *subchan_addr)
+void __bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in,
+ uint64_t *subchan_addr)
{
- uint8_t src_index, dst_index;
- uint16_t chan_id, subchan_id;
+ uint8_t src_index, dst_index;
+ uint16_t chan_id, subchan_id;
- src_index = bar_msg_row_index_trans((uint8_t)in->src);
- dst_index = bar_msg_col_index_trans((uint8_t)in->dst);
+ src_index = __bar_msg_row_index_trans(in->src);
+ dst_index = __bar_msg_col_index_trans(in->dst);
- if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR)
- {
- return;
- }
+ if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {
+ return;
+ }
- chan_id = chan_id_tbl[src_index][dst_index];
- subchan_id = subchan_id_tbl[src_index][dst_index];
- *subchan_addr = in->virt_addr + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL;
- return;
+ chan_id = chan_id_tbl[src_index][dst_index];
+ subchan_id = subchan_id_tbl[src_index][dst_index];
+ *subchan_addr = in->virt_addr +
+ (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL;
+ return;
}
-int bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data)
+static uint8_t payload_temp_buf[BAR_MSG_ADDR_CHAN_INTERVAL] = { 0 };
+uint16_t __bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr,
+ uint16_t payload_len,
+ struct bar_msg_header *msg_header)
{
- uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+ uint8_t *msg = (uint8_t *)(payload_addr);
+ struct bar_msg_header hdr_read = { 0 };
+ uint16_t valid = 0;
- if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)
- {
- return -EADDRNOTAVAIL;
- }
+ __bar_chan_msg_header_set(subchan_addr, msg_header);
+ __bar_chan_msg_header_get(subchan_addr, &hdr_read);
- writel(data, (volatile void*)(subchan_addr + algin_offset));
- return 0;
-}
+ __bar_chan_msg_payload_set(subchan_addr, msg, payload_len);
+ __bar_chan_msg_payload_get(subchan_addr, payload_temp_buf, payload_len);
-int bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata)
-{
- uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+ __bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USED);
+ valid = __bar_msg_valid_stat_get(subchan_addr);
- if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)
- {
- return -EADDRNOTAVAIL;
- }
-
- *pdata = readl((const volatile void *)(subchan_addr + algin_offset));
- return 0;
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_msg_header_set(uint64_t subchan_addr, struct bar_msg_header *msg_header)
+uint16_t __bar_chan_msg_header_set(uint64_t subchan_addr,
+ struct bar_msg_header *msg_header)
{
- uint32_t *data = (uint32_t*)msg_header;
- uint16_t idx = 0;
+ uint32_t *data = (uint32_t *)msg_header;
+ uint16_t idx = 0;
- for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++)
- {
- bar_chan_reg_write(subchan_addr, idx * 4, *(data + idx));
- }
+ for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++) {
+ __bar_chan_reg_write(subchan_addr, idx * 4, *(data + idx));
+ }
- return BAR_MSG_OK;
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_msg_header_get(uint64_t subchan_addr, struct bar_msg_header *msg_header)
+uint16_t __bar_chan_msg_header_get(uint64_t subchan_addr,
+ struct bar_msg_header *msg_header)
{
- uint32_t *data = (uint32_t*)msg_header;
- uint16_t idx = 0;
+ uint32_t *data = (uint32_t *)msg_header;
+ uint16_t idx = 0;
- for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++)
- {
- bar_chan_reg_read(subchan_addr, idx * 4, data + idx);
- }
+ for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++) {
+ __bar_chan_reg_read(subchan_addr, idx * 4, data + idx);
+ }
- return BAR_MSG_OK;
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg, uint16_t len)
+uint16_t __bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg,
+ uint16_t len)
{
- uint32_t *data = (uint32_t*)msg;
- uint32_t count = (len / sizeof(uint32_t));
- uint32_t remain = (len % sizeof(uint32_t));
- uint32_t ix = 0, remain_data = 0;
-
- for (ix = 0; ix < count; ix++)
- {
- bar_chan_reg_write(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, *(data + ix));
- }
- for (ix = 0; ix < remain; ix++)
- {
- remain_data |= *((uint8_t *)(msg + (len - remain + ix))) << (8 * ix);
- }
- bar_chan_reg_write(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET, remain_data);
-
- return BAR_MSG_OK;
+ uint32_t *data = (uint32_t *)msg;
+ uint32_t count = (len / sizeof(uint32_t));
+ uint32_t remain = (len % sizeof(uint32_t));
+ uint32_t ix = 0, remain_data = 0;
+
+ for (ix = 0; ix < count; ix++) {
+ __bar_chan_reg_write(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET,
+ *(data + ix));
+ }
+ for (ix = 0; ix < remain; ix++) {
+ remain_data |= *((uint8_t *)(msg + (len - remain + ix))) << (8 * ix);
+ }
+ __bar_chan_reg_write(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET,
+ remain_data);
+
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg, uint16_t len)
+uint16_t __bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg,
+ uint16_t len)
{
- uint32_t *data = (uint32_t*)msg;
- uint32_t count = (len / sizeof(uint32_t));
- uint32_t remain = (len % sizeof(uint32_t));
- uint32_t ix = 0, remain_data = 0;
-
- for (ix = 0; ix < count; ix++)
- {
- bar_chan_reg_read(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, (data + ix));
- }
- bar_chan_reg_read(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET, &remain_data);
- for (ix = 0; ix < remain; ix++)
- {
- *((uint8_t *)(msg + (len - remain + ix))) = remain_data >> (8 * ix);
- }
- return BAR_MSG_OK;
+ uint32_t *data = (uint32_t *)msg;
+ uint32_t count = (len / sizeof(uint32_t));
+ uint32_t remain = (len % sizeof(uint32_t));
+ uint32_t ix = 0, remain_data = 0;
+
+ for (ix = 0; ix < count; ix++) {
+ __bar_chan_reg_read(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET,
+ (data + ix));
+ }
+ __bar_chan_reg_read(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET,
+ &remain_data);
+ for (ix = 0; ix < remain; ix++) {
+ *((uint8_t *)(msg + (len - remain + ix))) = remain_data >> (8 * ix);
+ }
+ return BAR_MSG_OK;
}
-uint16_t bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label)
+uint16_t __bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label)
{
- uint32_t data = 0;
+ uint32_t data = 0;
- bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
- data &= (~BAR_MSG_VALID_MASK);
- data |= (uint32_t)valid_label;
- bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);
+ __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
+ data &= (~BAR_MSG_VALID_MASK);
+ data |= (uint32_t)valid_label;
+ __bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);
- return BAR_MSG_OK;
+ return BAR_MSG_OK;
}
-uint16_t bar_msg_valid_stat_get(uint64_t subchan_addr)
+uint16_t __bar_msg_valid_stat_get(uint64_t subchan_addr)
{
- uint32_t data = 0;
+ uint32_t data = 0;
- bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
- if (BAR_MSG_CHAN_USABLE == (data & BAR_MSG_VALID_MASK))
- {
- return BAR_MSG_CHAN_USABLE;
- }
+ __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
+ if (BAR_MSG_CHAN_USABLE == (data & BAR_MSG_VALID_MASK)) {
+ return BAR_MSG_CHAN_USABLE;
+ }
- return BAR_MSG_CHAN_USED;
+ return BAR_MSG_CHAN_USED;
}
-uint16_t bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label)
+uint16_t __bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label)
{
- uint32_t data = 0;
+ uint32_t data = 0;
- bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
- data &= (~(uint32_t)BAR_MSG_POL_MASK);
- data |= ((uint32_t)label << BAR_MSG_POL_OFFSET);
- bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);
+ __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);
+ data &= (~(uint32_t)BAR_MSG_POL_MASK);
+ data |= ((uint32_t)label << BAR_MSG_POL_OFFSET);
+ __bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);
- return BAR_MSG_OK;
+ return BAR_MSG_OK;
}
-static uint8_t payload_temp_buf[BAR_MSG_ADDR_CHAN_INTERVAL] = {0};
-uint16_t bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr, uint16_t payload_len, struct bar_msg_header *msg_header)
+uint16_t __bar_chan_sync_msg_reps_get(uint64_t subchan_addr,
+ uint64_t recv_buffer, uint16_t buffer_len,
+ uint16_t send_msg_id)
{
- uint8_t *msg = (uint8_t*)(payload_addr);
- struct bar_msg_header hdr_read = {0};
- uint16_t valid = 0;
-
- bar_chan_msg_header_set(subchan_addr, msg_header);
- bar_chan_msg_header_get(subchan_addr, &hdr_read);
-
- bar_chan_msg_payload_set(subchan_addr, msg, payload_len);
- bar_chan_msg_payload_get(subchan_addr, payload_temp_buf, payload_len);
-
- bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USED);
- valid = bar_msg_valid_stat_get(subchan_addr);
-
- return BAR_MSG_OK;
+ uint16_t recv_msg_id = 0;
+ uint16_t recv_len = 0;
+ uint8_t *recv_msg = (uint8_t *)recv_buffer;
+ struct bar_msg_header msg_header;
+ struct msgid_reps_info *reps_info = NULL;
+
+ /*从消息头中取出消息回复的长度,取出msg_id,如果msg_id对应的usable的话,该条同步回复作废*/
+ memset(&msg_header, 0, sizeof(msg_header));
+ __bar_chan_msg_header_get(subchan_addr, &msg_header);
+ recv_len = msg_header.len;
+ recv_msg_id = msg_header.msg_id;
+
+ if (recv_msg_id != send_msg_id) {
+ BAR_LOG_ERR("send msg id: %d, but get reply msg id: %d.\n", send_msg_id,
+ recv_msg_id);
+ return BAR_MSG_ERR_REPLY;
+ }
+
+ reps_info = &g_msgid_ring.reps_info_tbl[recv_msg_id];
+ if (reps_info->flag != REPS_INFO_FLAG_USED) {
+ BAR_LOG_ERR("msg_id: %d is release", recv_msg_id);
+ return BAR_MSG_ERR_REPLY;
+ }
+ if (recv_len > buffer_len - REPS_HEADER_PAYLOAD_OFFSET) {
+ BAR_LOG_ERR("reps_buf_len is %d, but reps_msg_len is %d", buffer_len,
+ recv_len + 4);
+ return BAR_MSG_ERR_REPSBUFF_LEN;
+ }
+
+ /* 从reps_buff + 4的位置拷贝进回复数据*/
+ __bar_chan_msg_payload_get(subchan_addr,
+ recv_msg + REPS_HEADER_PAYLOAD_OFFSET, recv_len);
+
+ /* 拷贝数据长度*/
+ *(uint16_t *)(recv_msg + REPS_HEADER_LEN_OFFSET) = recv_len;
+ /* reps头valid置位*/
+ *recv_msg = REPS_HEADER_REPLYED;
+
+ return BAR_MSG_OK;
}
-int bar_chan_recv_func_check(uint16_t check)
+uint8_t __bar_msg_row_index_trans(uint8_t src)
{
- if (CHECK_STATE_OK == check)
- {
- return BAR_MSG_OK;
- }
- else
- {
- BAR_LOG_ERR("recv func check failed, check field: 0x%x", check);
- return BAR_MSG_ERR_USR_RET_ERR;
- }
+ uint8_t src_index = 0;
+
+ switch (src) {
+ case MSG_CHAN_END_MPF: {
+ src_index = BAR_MSG_SRC_MPF;
+ break;
+ }
+ case MSG_CHAN_END_PF: {
+ src_index = BAR_MSG_SRC_PF;
+ break;
+ }
+ case MSG_CHAN_END_VF: {
+ src_index = BAR_MSG_SRC_VF;
+ break;
+ }
+ default: {
+ src_index = BAR_MSG_SRC_ERR;
+ break;
+ }
+ }
+ return src_index;
}
-int bar_chan_sync_msg_reps_get(uint64_t subchan_addr, uint64_t recv_buffer, uint16_t buffer_len, uint16_t send_msg_id)
+uint8_t __bar_msg_col_index_trans(uint8_t dst)
{
- int ret = BAR_MSG_OK;
- uint16_t recv_msg_id = 0;
- uint16_t recv_len = 0;
- uint8_t *recv_msg = (uint8_t*)recv_buffer;
- struct bar_msg_header msg_header;
- struct msgid_reps_info *reps_info = NULL;
-
- /*从消息头中取出消息回复的长度,取出msg_id,如果msg_id对应的usable的话,该条同步回复作废*/
- memset(&msg_header, 0, sizeof(msg_header));
- bar_chan_msg_header_get(subchan_addr, &msg_header);
- recv_len = msg_header.len;
- recv_msg_id = msg_header.msg_id;
-
- if (recv_msg_id != send_msg_id)
- {
- BAR_LOG_ERR("send msg id: %d, but get reply msg id: %d.\n", send_msg_id, recv_msg_id);
- ret = BAR_MSG_ERR_REPLY;
- goto out;
- }
-
- reps_info = &g_msgid_ring.reps_info_tbl[recv_msg_id];
- if (reps_info->flag != REPS_INFO_FLAG_USED)
- {
- BAR_LOG_ERR("msg_id: %d is release", recv_msg_id);
- ret = BAR_MSG_ERR_REPLY;
- goto out;
- }
- if (recv_len > buffer_len - REPS_HEADER_PAYLOAD_OFFSET)
- {
- BAR_LOG_ERR("reps_buf_len is %d, but reps_msg_len is %d", buffer_len, recv_len + 4);
- ret = BAR_MSG_ERR_REPSBUFF_LEN;
- goto out;
- }
-
- /* 从reps_buff + 4的位置拷贝进回复数据*/
- bar_chan_msg_payload_get(subchan_addr, recv_msg + REPS_HEADER_PAYLOAD_OFFSET, recv_len);
-
- ret = bar_chan_recv_func_check(msg_header.check);
- if (ret != BAR_MSG_OK)
- {
- goto out;
- }
-
- /* 拷贝数据长度*/
- *(uint16_t*)(recv_msg + REPS_HEADER_LEN_OFFSET) = recv_len;
- /* reps头valid置位*/
- *recv_msg = REPS_HEADER_REPLYED;
-
-out:
- return ret;
+ uint8_t dst_index = 0;
+
+ switch (dst) {
+ case MSG_CHAN_END_MPF: {
+ dst_index = BAR_MSG_DST_MPF;
+ break;
+ }
+ case MSG_CHAN_END_PF: {
+ dst_index = BAR_MSG_DST_PFVF;
+ break;
+ }
+ case MSG_CHAN_END_VF: {
+ dst_index = BAR_MSG_DST_PFVF;
+ break;
+ }
+ case MSG_CHAN_END_RISC: {
+ dst_index = BAR_MSG_DST_RISC;
+ break;
+ }
+ default: {
+ dst_index = BAR_MSG_SRC_ERR;
+ break;
+ }
+ }
+ return dst_index;
}
-uint64_t subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id, uint8_t subchan_id)
+int __bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data)
{
- return virt_addr + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL;
+ uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+
+ if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL) {
+ return -EADDRNOTAVAIL;
+ }
+
+ writel(data, (volatile void *)(subchan_addr + algin_offset));
+ return 0;
}
-uint64_t recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
+int __bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata)
{
- uint8_t chan_id = 0;
- uint8_t subchan_id = 0;
- uint8_t src = bar_msg_col_index_trans(src_type);
- uint8_t dst = bar_msg_row_index_trans(dst_type);
-
- if (src >= BAR_MSG_SRC_NUM || dst >= BAR_MSG_DST_NUM)
- {
- return 0;
- }
- /* 接收通道id和发送通道id相同*/
- chan_id = chan_id_tbl[dst][src];
- /* 接收子通道id和发送子通道相反*/
- subchan_id = (!!subchan_id_tbl[dst][src])? BAR_SUBCHAN_INDEX_SEND : BAR_SUBCHAN_INDEX_RECV;
- return subchan_addr_cal(virt_addr, chan_id, subchan_id);
+ uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK);
+
+ if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL) {
+ return -EADDRNOTAVAIL;
+ }
+
+ *pdata = readl((const volatile void *)(subchan_addr + algin_offset));
+ return 0;
}
-uint64_t reply_addr_get(uint8_t sync, uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
+uint64_t subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id,
+ uint8_t subchan_id)
{
- uint8_t chan_id = 0;
- uint8_t subchan_id = 0;
- uint64_t recv_rep_addr = 0;
- uint8_t src = bar_msg_col_index_trans(src_type);
- uint8_t dst = bar_msg_row_index_trans(dst_type);
-
- if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)
- {
- return 0;
- }
-
- chan_id = chan_id_tbl[dst][src];
- subchan_id = (!!subchan_id_tbl[dst][src])? BAR_SUBCHAN_INDEX_SEND : BAR_SUBCHAN_INDEX_RECV;
- if (sync == BAR_CHAN_MSG_SYNC) //同步消息
- {
- recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, subchan_id);
- }
- else
- {
- recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);
- }
- return recv_rep_addr;
+ return virt_addr + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL;
}
-uint16_t bar_chan_msg_header_check(struct bar_msg_header *msg_header)
+uint64_t recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
{
- uint8_t event_id = 0;
- uint16_t len = 0;
-
- if (msg_header == NULL)
- {
- return BAR_MSG_ERR_NULL;
- }
- if (msg_header->valid != BAR_MSG_CHAN_USED)
- {
- BAR_LOG_ERR("recv header ERR: valid label is not used.\n");
- return BAR_MSG_ERR_MODULE;
- }
- event_id = msg_header->event_id;
- if (event_id >= (uint8_t)MSG_MODULE_NUM)
- {
- BAR_LOG_ERR("recv header ERR: invalid event_id: %d.\n", event_id);
- return BAR_MSG_ERR_MODULE;
- }
- len = msg_header->len;
- if (len > BAR_MSG_PAYLOAD_MAX_LEN)
- {
- BAR_LOG_ERR("recv header ERR: invalid mesg len: %d.\n", len);
- return BAR_MSG_ERR_LEN;
- }
- if (msg_header->ack == BAR_CHAN_MSG_NO_ACK && msg_recv_func_tbl[msg_header->event_id] == NULL)
- {
- BAR_LOG_DEBUG("recv header ERR: module:%d doesn't register", event_id);
- return BAR_MSG_ERR_MODULE_NOEXIST;
- }
- return BAR_MSG_OK;
+ uint8_t chan_id = 0;
+ uint8_t subchan_id = 0;
+ uint8_t src = __bar_msg_col_index_trans(src_type);
+ uint8_t dst = __bar_msg_row_index_trans(dst_type);
+
+ if (src >= BAR_MSG_SRC_NUM || dst >= BAR_MSG_DST_NUM) {
+ return 0;
+ }
+ /* 接收通道id和发送通道id相同*/
+ chan_id = chan_id_tbl[dst][src];
+ /* 接收子通道id和发送子通道相反*/
+ subchan_id = (!!subchan_id_tbl[dst][src]) ? BAR_SUBCHAN_INDEX_SEND :
+ BAR_SUBCHAN_INDEX_RECV;
+ return subchan_addr_cal(virt_addr, chan_id, subchan_id);
}
+uint64_t reply_addr_get(uint8_t sync, uint8_t src_type, uint8_t dst_type,
+ uint64_t virt_addr)
+{
+ uint8_t chan_id = 0;
+ uint8_t subchan_id = 0;
+ uint64_t recv_rep_addr = 0;
+ uint8_t src = __bar_msg_col_index_trans(src_type);
+ uint8_t dst = __bar_msg_row_index_trans(dst_type);
+
+ if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR) {
+ return 0;
+ }
+
+ chan_id = chan_id_tbl[dst][src];
+ subchan_id = (!!subchan_id_tbl[dst][src]) ? BAR_SUBCHAN_INDEX_SEND :
+ BAR_SUBCHAN_INDEX_RECV;
+ if (sync == BAR_CHAN_MSG_SYNC) { //同步消息
+ recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, subchan_id);
+ } else {
+ recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);
+ }
+ return recv_rep_addr;
+}
-/* 同步消息接收处理*/
-void bar_msg_sync_msg_proc(uint64_t reply_addr, struct bar_msg_header *msg_header, uint8_t *reciver_buff, void *dev)
+void swap_src_dst(struct bar_msg_header *msg_header)
{
- uint16_t reps_len = 0;
- uint8_t *reps_buffer = NULL;
- zxdh_bar_chan_msg_recv_callback recv_func = NULL;
-
- reps_buffer = kmalloc(BAR_MSG_PAYLOAD_MAX_LEN, GFP_KERNEL);
- if (reps_buffer == NULL)
- {
- return;
- }
- /* 查询消息处理函数,处理消息,消息处理的结果放到reps_buffer中, 长度放到reps_len中*/
- recv_func = msg_recv_func_tbl[msg_header->event_id];
- recv_func(reciver_buff, msg_header->len, reps_buffer, &reps_len, dev);
- msg_header->ack = BAR_CHAN_MSG_ACK;
- msg_header->len = reps_len;
- /* 计算回复消息2K的地址*/
- bar_chan_msg_header_set(reply_addr, msg_header);
- bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len);
- bar_chan_msg_valid_set(reply_addr, BAR_MSG_CHAN_USABLE);
-
- BAR_KFREE_PTR(reps_buffer);
- return;
+ uint16_t temp = 0;
+
+ if (!msg_header) {
+ return;
+ }
+ temp = msg_header->src_pcieid;
+ msg_header->src_pcieid = msg_header->dst_pcieid;
+ msg_header->dst_pcieid = temp;
}
/* 统一的中断处理函数*/
int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)
{
- uint64_t recv_addr = 0;
- uint64_t reps_addr = 0;
- struct bar_msg_header msg_header = {0};
- uint8_t *recved_msg = NULL;
- uint16_t ret = 0;
-
- /*1 接收消息地址*/
- recv_addr = recv_addr_get(src, dst, virt_addr);
- BAR_LOG_DEBUG("recv_addr: 0x%llx, \nvirt_addr: 0x%llx", recv_addr, virt_addr);
- if (recv_addr == 0)
- {
- BAR_LOG_DEBUG("invalid driver type");
- return BAR_MSG_ERR_NULL;
- }
- /*2 取消息头并检查是否合法*/
- bar_chan_msg_header_get(recv_addr, &msg_header);
- ret = bar_chan_msg_header_check(&msg_header);
- if (ret != BAR_MSG_OK)
- {
- bar_chan_check_chan_stats(ret, recv_addr);
- return ret;
- }
- /*3 创建消息payload buf,取出消息暂存*/
- recved_msg = kmalloc(msg_header.len, GFP_KERNEL);
- if (recved_msg == NULL)
- {
- BAR_LOG_DEBUG("create temp buff failed");
- return BAR_MSG_ERR_NULL;
- }
- bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);
-
- /*4 根据来的是同步消息还是异步消息计算回复消息地址*/
- reps_addr = reply_addr_get(msg_header.sync, src, dst, virt_addr);
-
- /*5 如果是同步消息,走同步消息流程*/
- if (msg_header.sync == BAR_CHAN_MSG_SYNC)
- {
- bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);
- goto out;
- }
-
- /*6 不应该为异步消息,先置位告诉对方已收到消息*/
- BAR_LOG_DEBUG("%d end set valid", dst);
- //TODO set 错误码
- bar_chan_msg_poltag_set(recv_addr, 0);
- bar_chan_msg_valid_set(recv_addr, BAR_MSG_CHAN_USABLE);
-
-out:
- kfree(recved_msg);
- return BAR_MSG_OK;
+ uint64_t recv_addr = 0;
+ uint64_t reps_addr = 0;
+ struct bar_msg_header msg_header = { 0 };
+ uint8_t *recved_msg = NULL;
+ uint16_t ret = 0;
+
+ /*1 接收消息地址*/
+ recv_addr = recv_addr_get(src, dst, virt_addr);
+ BAR_LOG_DEBUG("recv_addr: 0x%llx, \nvirt_addr: 0x%llx", recv_addr,
+ virt_addr);
+ if (recv_addr == 0) {
+ BAR_LOG_DEBUG("invalid driver type");
+ return BAR_MSG_ERR_NULL;
+ }
+ /*2 取消息头并检查是否合法*/
+ __bar_chan_msg_header_get(recv_addr, &msg_header);
+ ret = __bar_chan_msg_header_check(&msg_header);
+ if (ret != BAR_MSG_OK) {
+ bar_chan_check_chan_stats(ret, recv_addr);
+ return ret;
+ }
+ /*3 创建消息payload buf,取出消息暂存*/
+ recved_msg = kmalloc(msg_header.len, GFP_KERNEL);
+ if (recved_msg == NULL) {
+ BAR_LOG_DEBUG("create temp buff failed");
+ return BAR_MSG_ERR_NULL;
+ }
+ __bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);
+
+ /*4 根据来的是同步消息还是异步消息计算回复消息地址*/
+ reps_addr = reply_addr_get(msg_header.sync, src, dst, virt_addr);
+
+ /*5 如果是同步消息,走同步消息流程*/
+ if (msg_header.sync == BAR_CHAN_MSG_SYNC) {
+ __bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);
+ goto exit;
+ }
+
+ /*6 异步消息,先置位告诉对方已收到消息*/
+ BAR_LOG_DEBUG("%d end set valid", dst);
+ __bar_chan_msg_valid_set(recv_addr, BAR_MSG_CHAN_USABLE);
+ __bar_chan_msg_poltag_set(recv_addr, 0);
+ if (msg_header.ack == BAR_CHAN_MSG_ACK) {
+ /* 7 如果是回复消息,按照回复消息处理流程*/
+ __bar_msg_ack_async_msg_proc(&msg_header, recved_msg);
+ goto exit;
+ } else {
+ /* 8 异步非回复消息*/
+ swap_src_dst(&msg_header);
+ __bar_msg_noack_async_msg_proc(reps_addr, &msg_header, recved_msg, src,
+ dst, dev);
+ }
+
+exit:
+ kfree(recved_msg);
+ return BAR_MSG_OK;
}
EXPORT_SYMBOL(zxdh_bar_irq_recv);
-int32_t call_msg_recv_func_tbl(uint16_t event_id, void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev)
+int32_t call_msg_recv_func_tbl(uint16_t event_id, void *pay_load, uint16_t len,
+ void *reps_buffer, uint16_t *reps_len, void *dev)
{
- zxdh_bar_chan_msg_recv_callback recv_func = NULL;
+ zxdh_bar_chan_msg_recv_callback recv_func = NULL;
- recv_func = msg_recv_func_tbl[event_id];
- if (unlikely(recv_func == NULL))
- {
- BAR_LOG_ERR("event_id[%d] unregister\n", event_id);
- return BAR_MSG_ERR_MODULE_NOEXIST;
- }
+ recv_func = msg_recv_func_tbl[event_id];
+ if (unlikely(recv_func == NULL)) {
+ BAR_LOG_ERR("event_id[%d] unregister\n", event_id);
+ return BAR_MSG_ERR_MODULE_NOEXIST;
+ }
- return recv_func(pay_load, len, reps_buffer, reps_len, dev);
+ return recv_func(pay_load, len, reps_buffer, reps_len, dev);
}
EXPORT_SYMBOL(call_msg_recv_func_tbl);
+/* 同步消息接收处理*/
+void __bar_msg_sync_msg_proc(uint64_t reply_addr,
+ struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff, void *dev)
+{
+ uint16_t reps_len = 0;
+ uint8_t *reps_buffer = NULL;
+ zxdh_bar_chan_msg_recv_callback recv_func = NULL;
+
+ reps_buffer = kmalloc(BAR_MSG_PAYLOAD_MAX_LEN, GFP_KERNEL);
+ if (reps_buffer == NULL) {
+ return;
+ }
+ /* 查询消息处理函数,处理消息,消息处理的结果放到reps_buffer中,
+ * 长度放到reps_len中*/
+ recv_func = msg_recv_func_tbl[msg_header->event_id];
+ recv_func(reciver_buff, msg_header->len, reps_buffer, &reps_len, dev);
+ msg_header->ack = BAR_CHAN_MSG_ACK;
+ msg_header->len = reps_len;
+ /* 计算回复消息2K的地址*/
+ __bar_chan_msg_header_set(reply_addr, msg_header);
+ __bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len);
+ __bar_chan_msg_valid_set(reply_addr, BAR_MSG_CHAN_USABLE);
+
+ BAR_KFREE_PTR(reps_buffer);
+ return;
+}
+
+/* 异步回复消息处理*/
+void __bar_msg_ack_async_msg_proc(struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff)
+{
+ struct msgid_reps_info *reps_info =
+ &g_msgid_ring.reps_info_tbl[msg_header->msg_id];
+ uint8_t *reps_buffer = (uint8_t *)reps_info->reps_buffer;
+
+ if (reps_info->buffer_len < 4 || msg_header->len > reps_info->buffer_len - 4) {
+ BAR_LOG_ERR(
+ "reciver ERR: recv_buffer is too short to store reply msg.");
+ goto free_id;
+ }
+ memcpy(reps_buffer + 4, reciver_buff, msg_header->len);
+ *(uint16_t *)(reps_buffer + 1) = msg_header->len;
+ *(uint8_t *)(reps_info->reps_buffer) = REPS_HEADER_REPLYED;
+
+free_id:
+ __bar_chan_msgid_free(msg_header->msg_id);
+ return;
+}
+
+/* 异步非回复消息处理*/
+void __bar_msg_noack_async_msg_proc(uint64_t reps_addr,
+ struct bar_msg_header *msg_header,
+ uint8_t *reciver_buff, uint8_t src,
+ uint8_t dst, void *dev)
+{
+ uint16_t reps_len = 0;
+ uint8_t *temp_buffer = NULL;
+ zxdh_bar_chan_msg_recv_callback recv_func = NULL;
+ struct bar_async_node *node = NULL;
+
+ /* 查询消息处理函数,处理消息,消息处理的结果放到reps_buff中,
+ * 长度放到reps_len中*/
+ recv_func = msg_recv_func_tbl[msg_header->event_id];
+ temp_buffer = kmalloc(BAR_MSG_PAYLOAD_MAX_LEN, GFP_KERNEL);
+ if (temp_buffer == NULL) {
+ BAR_LOG_ERR("msg reply err: fail to allocate temp_buffer.\n");
+ return;
+ }
+ recv_func(reciver_buff, msg_header->len, temp_buffer, &reps_len, dev);
+
+ node = (struct bar_async_node *)kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ BAR_LOG_ERR("msg reply err: fail to allocate node.\n");
+ BAR_KFREE_PTR(temp_buffer);
+ return;
+ }
+
+ node->subchan_addr = reps_addr;
+ node->payload_addr = (void *)temp_buffer;
+ node->payload_len = reps_len;
+ node->ack = BAR_CHAN_MSG_ACK;
+
+ node->msg_id = msg_header->msg_id;
+ node->event_id = msg_header->event_id;
+ node->dst_pcieid = msg_header->dst_pcieid;
+ node->src_pcieid = msg_header->src_pcieid;
+ node->emec = msg_header->emec;
+ node->src = src;
+ node->dst = dst;
+
+ __bar_chan_async_node_add(node);
+ return;
+}
+
+uint16_t __bar_chan_msg_header_check(struct bar_msg_header *msg_header)
+{
+ uint8_t event_id = 0;
+ uint16_t len = 0;
+
+ if (msg_header == NULL) {
+ return BAR_MSG_ERR_NULL;
+ }
+ if (msg_header->valid != BAR_MSG_CHAN_USED) {
+ BAR_LOG_ERR("recv header ERR: valid label is not used.\n");
+ return BAR_MSG_ERR_MODULE;
+ }
+ event_id = msg_header->event_id;
+ if (event_id >= (uint8_t)MSG_MODULE_NUM) {
+ BAR_LOG_ERR("recv header ERR: invalid event_id: %d.\n", event_id);
+ return BAR_MSG_ERR_MODULE;
+ }
+ len = msg_header->len;
+ if (len > BAR_MSG_PAYLOAD_MAX_LEN) {
+ BAR_LOG_ERR("recv header ERR: invalid mesg len: %d.\n", len);
+ return BAR_MSG_ERR_LEN;
+ }
+ if (msg_header->ack == BAR_CHAN_MSG_NO_ACK &&
+ msg_recv_func_tbl[msg_header->event_id] == NULL) {
+ BAR_LOG_DEBUG("recv header ERR: module:%d doesn't register", event_id);
+ return BAR_MSG_ERR_MODULE_NOEXIST;
+ }
+ return BAR_MSG_OK;
+}
/**
- * zxdh_bar_chan_sync_msg_send - 通过PCIE BAR空间发送同步消息
+ * zxdh_bar_chan_async_msg_send - 通过PCIE BAR空间发送异步消息
* @in: 消息发送信息
* @result: 消息结果反馈
* @return: 0 成功,其他失败
*/
-int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result)
+int zxdh_bar_chan_async_msg_send(struct zxdh_pci_bar_msg *in,
+ struct zxdh_msg_recviver_mem *result)
{
- int ret = 0;
- uint16_t valid = 0;
- uint16_t time_out_cnt = 0;
- uint16_t msg_id = 0;
- uint64_t subchan_addr = 0;
- struct bar_msg_header msg_header = {0};
-
- ret = bar_msg_src_parse(in);
- if (ret != BAR_MSG_OK)
- {
- goto out;
- }
-
- ret = bar_chan_send_para_check(in, result);
- if (ret != BAR_MSG_OK)
- {
- BAR_LOG_ERR("para check failed, %d.", ret);
- goto out;
- }
-
- /* 申请msg_id,并将缓存信息存放到表中*/
- ret = bar_chan_save_recv_info(result, &msg_id);
- if (ret != BAR_MSG_OK)
- {
- BAR_LOG_ERR("msg_id allocated failed.");
- goto out;
- }
- /* 计算2K通道的地址*/
- bar_chan_subchan_addr_get(in, &subchan_addr);
- /* 填充消息头*/
- bar_chan_sync_fill_header(msg_id, in, &msg_header);
- /* 给通道上锁,根据src和dst判断是分配硬件锁还是软件锁*/
- bar_chan_lock((uint8_t)in->src, (uint8_t)in->dst, in->src_pcieid);
-
- /* 消息头、消息体发送到bar空间, valid置位*/
- bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len, &msg_header);
- /* 轮询等待消息回复*/
- do
- {
- usleep_range(BAR_MSG_POLLING_SPAN_US, BAR_MSG_POLLING_SPAN_US + 10);
- valid = bar_msg_valid_stat_get(subchan_addr);
- time_out_cnt++;
- }while((time_out_cnt < BAR_MSG_TIMEOUT_TH) && (BAR_MSG_CHAN_USED == valid));
-
- /* 如果超时恢复标志位*/
- if ((BAR_MSG_TIMEOUT_TH == time_out_cnt) && (BAR_MSG_CHAN_USABLE != valid))
- {
- bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);
- bar_chan_msg_poltag_set(subchan_addr, 0);
- BAR_LOG_ERR("BAR MSG ERR: msg_id: %d time out.\n", msg_header.msg_id);
- ret = BAR_MSG_ERR_TIME_OUT;
- }
- else
- {
- /* 从消息头中取出回复消息的长度len, 从payload中取出消息内容,放到本地缓存reps_buff*/
- ret = bar_chan_sync_msg_reps_get(subchan_addr, (uint64_t)result->recv_buffer, result->buffer_len, msg_id);
- }
- bar_chan_msgid_free(msg_id);
- /*通道解锁*/
- bar_chan_unlock((uint8_t)in->src, (uint8_t)in->dst, in->src_pcieid);
- bar_chan_check_chan_stats(ret, subchan_addr);
-out:
- return ret;
+ struct bar_async_node *node = NULL;
+ uint16_t ret = 0;
+ uint16_t msg_id = 0;
+
+ ret = bar_msg_src_parse(in);
+ if (ret != BAR_MSG_OK) {
+ goto exit;
+ }
+
+ ret = __bar_chan_send_para_check(in, result);
+ if (ret != BAR_MSG_OK) {
+ BAR_LOG_ERR("para check failed, %d.", ret);
+ goto exit;
+ }
+
+ ret = __bar_chan_save_recv_info(result, &msg_id);
+ if (ret != BAR_MSG_OK) {
+ BAR_LOG_DEBUG("msg_id allocate failed");
+ goto exit;
+ }
+
+ /* 创造节点, 并将节点插入到列表中去*/
+ node = __bar_chan_async_node_create(msg_id, in);
+ if (!node) {
+ BAR_LOG_ERR("creating node failed.\n");
+ return BAR_MSG_ERR_NULL;
+ }
+ __bar_chan_async_node_add(node);
+
+exit:
+ return ret;
+}
+EXPORT_SYMBOL(zxdh_bar_chan_async_msg_send);
+
+uint16_t __bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result,
+ uint16_t *msg_id)
+{
+ int ret = 0;
+ struct msgid_reps_info *reps_info = NULL;
+
+ ret = __bar_chan_msgid_allocate(msg_id);
+ if (ret == -1) {
+ return BAR_MSG_ERR_MSGID;
+ }
+ reps_info = &g_msgid_ring.reps_info_tbl[*msg_id];
+ reps_info->reps_buffer = result->recv_buffer;
+ reps_info->buffer_len = result->buffer_len;
+
+ return BAR_MSG_OK;
+}
+
+struct bar_async_node *__bar_chan_async_node_create(uint16_t msg_id,
+ struct zxdh_pci_bar_msg *in)
+{
+ struct bar_async_node *node = NULL;
+ uint8_t *payload_addr = NULL;
+ uint64_t subchan_addr = 0;
+
+ node = (struct bar_async_node *)kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ BAR_LOG_ERR("ERR: mallocation for node failed\n");
+ return NULL;
+ }
+ node->msg_id = msg_id;
+ payload_addr = (uint8_t *)kmalloc(in->payload_len, GFP_KERNEL);
+ if (!payload_addr) {
+ kfree(node);
+ BAR_LOG_ERR("ERR: malloc for payload failed\n");
+ return NULL;
+ }
+
+ __bar_chan_subchan_addr_get(in, &subchan_addr);
+
+ memcpy(payload_addr, in->payload_addr, in->payload_len);
+ node->payload_addr = payload_addr;
+ node->payload_len = in->payload_len;
+ node->subchan_addr = subchan_addr;
+ node->event_id = in->event_id;
+ node->msg_id = msg_id;
+ node->dst_pcieid = in->dst_pcieid;
+ node->src_pcieid = in->src_pcieid;
+ node->emec = in->emec;
+ node->ack = BAR_CHAN_MSG_NO_ACK;
+ node->next = NULL;
+ node->src = in->src;
+ node->dst = in->dst;
+
+ return node;
+}
+
+/* 送入node, 插入到head, tail指向的队列中*/
+void async_list_tail_insert(struct bar_async_node **head,
+ struct bar_async_node *node,
+ struct bar_async_node **tail)
+{
+ if (*tail == NULL) {
+ *head = node;
+ } else {
+ (*tail)->next = node;
+ }
+ *tail = node;
+ (*tail)->next = NULL;
+ return;
+}
+
+uint16_t __bar_chan_async_node_add(struct bar_async_node *node)
+{
+ if (node == NULL) {
+ return BAR_MSG_ERR_NULL;
+ }
+ mutex_lock(&async_en.async_qlock);
+ if (node->emec == BAR_CHAN_MSG_EMEC) {
+ async_list_tail_insert(&async_en.emq_head, node, &async_en.emq_tail);
+ } else {
+ async_list_tail_insert(&async_en.noemq_head, node,
+ &async_en.noemq_tail);
+ }
+ mutex_unlock(&async_en.async_qlock);
+ return 0;
+}
+
+uint16_t async_list_head_del(struct bar_async_node **head,
+ struct bar_async_node *node,
+ struct bar_async_node **tail)
+{
+ struct bar_async_node *temp_node = NULL;
+ if (node == NULL || *head == NULL || *head != node) {
+ return 0xaa;
+ }
+
+ temp_node = *head;
+ *head = (*head)->next;
+
+ if (*head == NULL) {
+ *tail = NULL;
+ }
+ if (temp_node->payload_addr != NULL) {
+ kfree(temp_node->payload_addr);
+ temp_node->payload_addr = NULL;
+ }
+ kfree(temp_node);
+ temp_node = NULL;
+ return 0;
+}
+
+/* 删除节点函数*/
+uint16_t __bar_chan_async_node_del(struct bar_async_node *node)
+{
+ uint16_t ret = 0;
+
+ if (node->emec == BAR_CHAN_MSG_EMEC) {
+ ret = async_list_head_del(&async_en.emq_head, node, &async_en.emq_tail);
+ } else {
+ ret = async_list_head_del(&async_en.noemq_head, node,
+ &async_en.noemq_tail);
+ }
+
+ return ret;
}
-EXPORT_SYMBOL(zxdh_bar_chan_sync_msg_send);
-static int bar_chan_callback_register_check(uint8_t event_id, zxdh_bar_chan_msg_recv_callback callback)
+/* 消息队列消费者*/
+void __bar_msg_async_list_admin(void)
{
- if (event_id >= (uint8_t)MSG_MODULE_NUM)
- {
- BAR_LOG_ERR("register ERR: invalid event_id: %d.\n", event_id);
- return BAR_MSG_ERR_MODULE;
- }
- if (callback == NULL)
- {
- BAR_LOG_ERR("register ERR: null callback.\n");
- return BAR_MEG_ERR_NULL_FUNC;
- }
- if (msg_recv_func_tbl[event_id] != NULL)
- {
- BAR_LOG_ERR("register ERR: repeat register.\n");
- return BAR_MSG_ERR_REPEAT_REGISTER;
- }
- return BAR_MSG_OK;
+ while (1) {
+ if (kthread_should_stop()) {
+ break;
+ }
+ if (NULL != async_en.emq_head) {
+ __bar_msg_async_list_parse(async_en.emq_head);
+ } else if (NULL != async_en.noemq_head) {
+ __bar_msg_async_list_parse(async_en.noemq_head);
+ } else {
+ msleep(1);
+ }
+ }
+ return;
+}
+
+/* 送入两个队列的头指针进行消费*/
+uint16_t __bar_msg_async_list_parse(struct bar_async_node *node)
+{
+ struct bar_msg_header msg_header;
+ uint16_t time_out_cnt = 0, valid = 0;
+ struct msgid_reps_info *reps_info =
+ &g_msgid_ring.reps_info_tbl[node->msg_id];
+
+ /* 填充消息头*/
+ memset(&msg_header, 0, sizeof(msg_header));
+ msg_header.sync = BAR_CHAN_MSG_ASYNC;
+ msg_header.event_id = node->event_id;
+ msg_header.len = node->payload_len;
+ msg_header.src_pcieid = node->src_pcieid; // pf跟vf通信需要
+ msg_header.dst_pcieid = node->dst_pcieid; // pf跟vf通信需要
+ msg_header.msg_id = node->msg_id;
+ msg_header.ack = node->ack;
+
+ /* TODO:给通道上锁*/
+ bar_chan_lock(node->src, node->dst, node->src_pcieid);
+
+ /* 设置消息头和消息体, 将消息头、消息体填充, valid置位封装成一步*/
+ __bar_chan_msg_send(node->subchan_addr, node->payload_addr,
+ node->payload_len, &msg_header);
+
+ if (node->ack == BAR_CHAN_MSG_NO_ACK) {
+ mod_timer(&reps_info->id_timer, jiffies + BAR_MSGID_FREE_THRESHOLD);
+ }
+
+ do {
+ usleep_range(BAR_MSG_POLLING_SPAN_US, BAR_MSG_POLLING_SPAN_US + 10);
+ valid = __bar_msg_valid_stat_get(node->subchan_addr);
+ time_out_cnt++;
+ } while ((time_out_cnt < BAR_MSG_TIMEOUT_TH) &&
+ (BAR_MSG_CHAN_USED == valid));
+
+ /* 如果超时恢复标志位*/
+ if ((BAR_MSG_TIMEOUT_TH == time_out_cnt) &&
+ (BAR_MSG_CHAN_USABLE != valid)) {
+ __bar_chan_msg_valid_set(node->subchan_addr, BAR_MSG_CHAN_USABLE);
+ __bar_chan_msg_poltag_set(node->subchan_addr, 0);
+ BAR_LOG_ERR("BAR MSG ERR: chan reply time out.\n");
+ }
+
+ bar_chan_unlock(node->src, node->dst, node->src_pcieid);
+ __bar_chan_async_node_del(node);
+
+ return 0;
}
/**
@@ -774,22 +1011,41 @@ static int bar_chan_callback_register_check(uint8_t event_id, zxdh_bar_chan_msg_
* @return: 0 成功,其他失败
* 一般在驱动初始化时调用
*/
-int zxdh_bar_chan_msg_recv_register(uint8_t event_id, zxdh_bar_chan_msg_recv_callback callback)
+int zxdh_bar_chan_msg_recv_register(uint8_t event_id,
+ zxdh_bar_chan_msg_recv_callback callback)
{
- int ret = 0;
+ uint16_t ret = 0;
- ret = bar_chan_callback_register_check(event_id, callback);
+ ret = __bar_chan_callback_register_check(event_id, callback);
- if (BAR_MSG_OK == ret)
- {
- msg_recv_func_tbl[event_id] = callback;
- BAR_LOG_DEBUG("register module: %d success.\n", event_id);
- }
+ if (BAR_MSG_OK == ret) {
+ msg_recv_func_tbl[event_id] = callback;
+ BAR_LOG_DEBUG("register module: %d success.\n", event_id);
+ }
- return ret;
+ return ret;
}
EXPORT_SYMBOL(zxdh_bar_chan_msg_recv_register);
+uint16_t
+__bar_chan_callback_register_check(uint8_t event_id,
+ zxdh_bar_chan_msg_recv_callback callback)
+{
+ if (event_id >= (uint8_t)MSG_MODULE_NUM) {
+ BAR_LOG_ERR("register ERR: invalid event_id: %d.\n", event_id);
+ return BAR_MSG_ERR_MODULE;
+ }
+ if (callback == NULL) {
+ BAR_LOG_ERR("register ERR: null callback.\n");
+ return BAR_MEG_ERR_NULL_FUNC;
+ }
+ if (msg_recv_func_tbl[event_id] != NULL) {
+ BAR_LOG_ERR("register ERR: repeat register.\n");
+ return BAR_MSG_ERR_REPEAT_REGISTER;
+ }
+ return BAR_MSG_OK;
+}
+
/**
* zxdh_bar_chan_msg_recv_unregister - PCIE BAR空间消息方式,解注册消息接收回调
* @event_id: 内核PCIE设备地址
@@ -798,157 +1054,178 @@ EXPORT_SYMBOL(zxdh_bar_chan_msg_recv_register);
*/
int zxdh_bar_chan_msg_recv_unregister(uint8_t event_id)
{
- if (event_id >= (uint8_t)MSG_MODULE_NUM)
- {
- BAR_LOG_ERR("unregister ERR: invalid event_id :%d.\n", event_id);
- return BAR_MSG_ERR_MODULE;
- }
- if (msg_recv_func_tbl[event_id] == NULL)
- {
- BAR_LOG_ERR("unregister ERR: null proccess func.\n");
- return BAR_MSG_ERR_UNGISTER;
- }
- msg_recv_func_tbl[event_id] = NULL;
- BAR_LOG_DEBUG("unregister module %d success.\n", event_id);
- return BAR_MSG_OK;
+ if (event_id >= (uint8_t)MSG_MODULE_NUM) {
+ BAR_LOG_ERR("unregister ERR: invalid event_id :%d.\n", event_id);
+ return BAR_MSG_ERR_MODULE;
+ }
+ if (msg_recv_func_tbl[event_id] == NULL) {
+ BAR_LOG_ERR("unregister ERR: null proccess func.\n");
+ return BAR_MSG_ERR_UNGISTER;
+ }
+ msg_recv_func_tbl[event_id] = NULL;
+ BAR_LOG_DEBUG("unregister module %d success.\n", event_id);
+ return BAR_MSG_OK;
}
EXPORT_SYMBOL(zxdh_bar_chan_msg_recv_unregister);
int bar_mpf_addr_ioremap(void)
{
- uint64_t addr;
- uint64_t len;
- struct pci_dev *pdev = NULL;
-
- pdev = pci_get_device(MPF_VENDOR_ID, MPF_DEVICE_ID, NULL);
-
- if (pdev == NULL)
- {
- BAR_LOG_DEBUG("not found device: deviceID %x, VendorID: %x", MPF_DEVICE_ID, MPF_VENDOR_ID);
- return -EINVAL;
- }
-
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- if (addr == 0 || len == 0)
- {
- BAR_LOG_ERR("pci resouce addr or len is 0\n");
- return -EINVAL;
- }
-
- internal_addr = ioremap(addr, len);
- if (IS_ERR_OR_NULL(internal_addr))
- {
- BAR_LOG_ERR("ioremap failed, internal_addr=0x%p\n", internal_addr);
- return -ENOMEM;
- }
- is_mpf_scaned = TRUE;
-
- return BAR_MSG_OK;
+ uint64_t addr;
+ uint64_t len;
+ struct pci_dev *pdev = NULL;
+
+ pdev = pci_get_device(MPF_VENDOR_ID, MPF_DEVICE_ID, NULL);
+
+ if (pdev == NULL) {
+ BAR_LOG_DEBUG("not found device: deviceID %x, VendorID: %x",
+ MPF_DEVICE_ID, MPF_VENDOR_ID);
+ return -EINVAL;
+ }
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ if (addr == 0 || len == 0) {
+ BAR_LOG_ERR("pci resouce addr or len is 0\n");
+ return -EINVAL;
+ }
+
+ internal_addr = ioremap(addr, len);
+ if (IS_ERR_OR_NULL(internal_addr)) {
+ BAR_LOG_ERR("ioremap failed, internal_addr=0x%p\n", internal_addr);
+ return -ENOMEM;
+ }
+ is_mpf_scaned = TRUE;
+
+ return BAR_MSG_OK;
}
void bar_mpf_addr_iounmap(void)
{
- if (internal_addr != NULL)
- {
- iounmap(internal_addr);
- }
- internal_addr = NULL;
- is_mpf_scaned = FALSE;
- return;
+ if (internal_addr != NULL) {
+ iounmap(internal_addr);
+ }
+ internal_addr = NULL;
+ is_mpf_scaned = FALSE;
+ return;
}
int bar_msgid_ring_init(void)
{
- uint16_t msg_id = 0;
- struct msgid_reps_info *reps_info = NULL;
-
- spin_lock_init(&g_msgid_ring.lock);
- for( msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++)
- {
- reps_info = &(g_msgid_ring.reps_info_tbl[msg_id]);
- reps_info->id = msg_id;
- reps_info->flag = REPS_INFO_FLAG_USABLE;
- }
- return BAR_MSG_OK;
+ uint16_t msg_id = 0;
+ struct msgid_reps_info *reps_info = NULL;
+
+ spin_lock_init(&g_msgid_ring.lock);
+ for (msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++) {
+ reps_info = &(g_msgid_ring.reps_info_tbl[msg_id]);
+ timer_setup(&reps_info->id_timer, bar_chan_timer_callback, 0);
+ reps_info->id = msg_id;
+ reps_info->flag = REPS_INFO_FLAG_USABLE;
+ }
+ return BAR_MSG_OK;
}
void bar_msgid_ring_free(void)
{
- uint16_t msg_id = 0;
- struct msgid_reps_info *reps_info = NULL;
-
- for (msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++)
- {
- reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
- del_timer_sync(&reps_info->id_timer);
- }
+ uint16_t msg_id = 0;
+ struct msgid_reps_info *reps_info = NULL;
+
+ for (msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++) {
+ reps_info = &g_msgid_ring.reps_info_tbl[msg_id];
+ del_timer_sync(&reps_info->id_timer);
+ }
+}
+
+void bar_async_queue_free(void)
+{
+ uint16_t ret = 0;
+
+ while (NULL != async_en.noemq_head) {
+ ret = __bar_chan_async_node_del(async_en.noemq_head);
+ if (ret != BAR_MSG_OK) {
+ break;
+ }
+ }
+ while (NULL != async_en.emq_head) {
+ ret = __bar_chan_async_node_del(async_en.emq_head);
+ if (ret != BAR_MSG_OK) {
+ break;
+ }
+ }
}
extern uint16_t test_sync_send(void);
int zxdh_bar_msg_chan_init(void)
{
- int16_t ret = 0;
-
- /* msg_id锁初始化*/
- bar_init_lock_arr();
- bar_msgid_ring_init();
-
- /* 管理pf地址映射*/
- ret = bar_mpf_addr_ioremap();
- if (ret != BAR_MSG_OK)
- {
- BAR_LOG_DEBUG("mpf do not exit, but do not impact the msg chan.\n");
- }
+ int16_t ret = 0;
+
+ /* 异步消息处理线程*/
+ async_en.async_proc = kthread_run((void *)&__bar_msg_async_list_admin, NULL,
+ "msg-pro-thread");
+ if (IS_ERR(async_en.async_proc)) {
+ BAR_LOG_ERR("async-msg_proc pthread: msg process create failed.\n");
+ return BAR_MSG_ERR_NULL;
+ }
+
+ /* msg_id锁初始化*/
+ bar_init_lock_arr();
+ bar_msgid_ring_init();
+
+ /* 管理pf地址映射*/
+ ret = bar_mpf_addr_ioremap();
+ if (ret != BAR_MSG_OK) {
+ BAR_LOG_DEBUG("mpf do not exit, but do not impact the msg chan.\n");
+ }
#ifdef BAR_MSG_TEST
- test_sync_send();
+ test_sync_send();
#endif
- return BAR_MSG_OK;
+ return BAR_MSG_OK;
}
void bar_chan_timer_callback(struct timer_list *timer)
{
- struct msgid_reps_info *reps_info = NULL;
-
- reps_info = container_of(timer, struct msgid_reps_info, id_timer);
- if (reps_info->flag == REPS_INFO_FLAG_USED)
- {
- reps_info->reps_buffer = NULL;
- reps_info->buffer_len = 0;
- reps_info->flag = REPS_INFO_FLAG_USABLE;
- BAR_LOG_ERR("RECV ERR: get async reply time out, free msg_id: %u.\n", reps_info->id);
- }
- else
- {
- BAR_LOG_DEBUG("RECV NOTICE: get async reply message success.\n");
- }
- return;
+ struct msgid_reps_info *reps_info = NULL;
+
+ reps_info = container_of(timer, struct msgid_reps_info, id_timer);
+ if (reps_info->flag == REPS_INFO_FLAG_USED) {
+ reps_info->reps_buffer = NULL;
+ reps_info->buffer_len = 0;
+ reps_info->flag = REPS_INFO_FLAG_USABLE;
+ BAR_LOG_ERR("RECV ERR: get async reply time out, free msg_id: %d.\n",
+ reps_info->id);
+ } else {
+ BAR_LOG_DEBUG("RECV NOTICE: get async reply message success.\n");
+ }
+ return;
}
int zxdh_bar_msg_chan_remove(void)
{
-
- bar_msgid_ring_free();
- /* mpf解ioremap*/
- bar_mpf_addr_iounmap();
- /* 消息链表资源释放*/
-
- BAR_LOG_DEBUG("zxdh_msg_chan_bar remove success");
-
- return 0;
+ /* 异步消息处理线程退出*/
+ if (async_en.async_proc != NULL) {
+ kthread_stop(async_en.async_proc);
+ async_en.async_proc = NULL;
+ }
+ bar_msgid_ring_free();
+ /* mpf解ioremap*/
+ bar_mpf_addr_iounmap();
+ /* 消息链表资源释放*/
+ bar_async_queue_free();
+
+ BAR_LOG_DEBUG("zxdh_msg_chan_bar remove success");
+
+ return 0;
}
uint16_t bar_get_sum(uint8_t *ptr, uint8_t len)
{
- int idx = 0;
- uint64_t sum = 0;
- for (idx = 0; idx < len; idx++)
- {
- sum += *(ptr + idx);
- }
- return (uint16_t)sum;
+ int idx = 0;
+ uint64_t sum = 0;
+ for (idx = 0; idx < len; idx++) {
+ sum += *(ptr + idx);
+ }
+ return (uint16_t)sum;
}
/**
@@ -959,250 +1236,212 @@ uint16_t bar_get_sum(uint8_t *ptr, uint8_t len)
*/
int zxdh_bar_enable_chan(struct msix_para *_msix_para, uint16_t *vport)
{
- int ret = 0;
- uint8_t recv_buf[12] = {0};
- uint16_t check_token, sum_res;
-#if 0
- uint32_t domain, bus, devid, function;
-#endif
- struct msix_msg msix_msg = {0};
- struct zxdh_pci_bar_msg in = {0};
- struct zxdh_msg_recviver_mem result = {0};
-
- if (!_msix_para || !_msix_para->pdev)
- {
- return -BAR_MSG_ERR_NULL;
- }
-#if 0
- sscanf(pci_name(_msix_para->pdev), "%x:%x:%x.%u", &domain, &bus, &devid, &function);
- msix_msg.bdf = BDF_ECAM(bus, devid, function);
-#endif
- msix_msg.pcie_id = _msix_para->pcie_id;
- msix_msg.vector_risc = _msix_para->vector_risc;
- msix_msg.vector_pfvf = _msix_para->vector_pfvf;
- msix_msg.vector_mpf = _msix_para->vector_mpf;
-
- in.payload_addr = &msix_msg;
- in.payload_len = sizeof(msix_msg);
- in.virt_addr = _msix_para->virt_addr;
- in.src = _msix_para->driver_type;
- in.dst = MSG_CHAN_END_RISC;
- in.event_id = MODULE_MSIX;
- in.src_pcieid = _msix_para->pcie_id;
-
- result.recv_buffer = recv_buf;
- result.buffer_len = sizeof(recv_buf);
-
- ret = zxdh_bar_chan_sync_msg_send(&in, &result);
- if (ret != BAR_MSG_OK)
- {
- return -ret;
- }
-
- check_token = *(uint16_t *)(recv_buf + 6);
- sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg));
- if (check_token != sum_res)
- {
- BAR_LOG_DEBUG("expect token: 0x%x, get token: 0x%x.\n", sum_res, check_token);
- return -BAR_MSG_ERR_NOT_MATCH;
- }
- *vport = *(uint16_t *)(recv_buf + 8);
- BAR_LOG_DEBUG("vport of %s get success.\n", pci_name(_msix_para->pdev));
- return BAR_MSG_OK;
+ int ret = 0;
+ uint8_t recv_buf[12] = { 0 };
+ uint16_t check_token, sum_res;
+ struct msix_msg msix_msg = { 0 };
+ struct zxdh_pci_bar_msg in = { 0 };
+ struct zxdh_msg_recviver_mem result = { 0 };
+
+ if (!_msix_para || !_msix_para->pdev) {
+ return -BAR_MSG_ERR_NULL;
+ }
+ msix_msg.pcie_id = _msix_para->pcie_id;
+ msix_msg.vector_risc = _msix_para->vector_risc;
+ msix_msg.vector_pfvf = _msix_para->vector_pfvf;
+ msix_msg.vector_mpf = _msix_para->vector_mpf;
+
+ in.payload_addr = &msix_msg;
+ in.payload_len = sizeof(msix_msg);
+ in.virt_addr = _msix_para->virt_addr;
+ in.src = _msix_para->driver_type;
+ in.dst = MSG_CHAN_END_RISC;
+ in.event_id = MODULE_MSIX;
+ in.src_pcieid = _msix_para->pcie_id;
+
+ result.recv_buffer = recv_buf;
+ result.buffer_len = sizeof(recv_buf);
+
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ if (ret != BAR_MSG_OK) {
+ return -ret;
+ }
+
+ check_token = *(uint16_t *)(recv_buf + 6);
+ sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg));
+ if (check_token != sum_res) {
+ BAR_LOG_DEBUG("expect token: 0x%x, get token: 0x%x.\n", sum_res,
+ check_token);
+ return -BAR_MSG_ERR_NOT_MATCH;
+ }
+ *vport = *(uint16_t *)(recv_buf + 8);
+ BAR_LOG_DEBUG("vport of %s get success.\n", pci_name(_msix_para->pdev));
+ return BAR_MSG_OK;
}
EXPORT_SYMBOL(zxdh_bar_enable_chan);
-int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res)
+int zxdh_get_bar_offset(struct bar_offset_params *paras,
+ struct bar_offset_res *res)
{
- int ret = 0;
- uint16_t check_token, sum_res;
- struct offset_get_msg send_msg = {0};
- struct bar_recv_msg *recv_msg = NULL;
-
- struct zxdh_pci_bar_msg in = {0};
- struct zxdh_msg_recviver_mem result = {0};
-
- if (!paras || !res)
- {
- return BAR_MSG_ERR_NULL;
- }
-
- send_msg.pcie_id = paras->pcie_id;
- send_msg.type = paras->type;
-
- in.payload_addr = &send_msg;
- in.payload_len = sizeof(send_msg);
- in.virt_addr = paras->virt_addr;
- in.src = MSG_CHAN_END_PF;
- in.dst = MSG_CHAN_END_RISC;
- in.event_id = MODULE_OFFSET_GET;
-
- recv_msg = kzalloc(sizeof(struct bar_recv_msg), GFP_KERNEL);
- if (recv_msg == NULL)
- {
- LOG_ERR("NULL ptr\n");
- return -1;
- }
- result.recv_buffer = recv_msg;
- result.buffer_len = sizeof(struct bar_recv_msg);
-
- ret = zxdh_bar_chan_sync_msg_send(&in, &result);
- if (ret != BAR_MSG_OK)
- {
- ret = -ret;
- goto free_msg;
- }
-
- check_token = recv_msg->offset_reps.check;
- sum_res = bar_get_sum((uint8_t*)&send_msg, sizeof(send_msg));
- if (check_token != sum_res)
- {
- BAR_LOG_ERR("expect token: 0x%x, get token: 0x%x.\n", sum_res, check_token);
- ret = -BAR_MSG_ERR_NOT_MATCH;
- goto free_msg;
- }
- res->bar_offset = recv_msg->offset_reps.offset;
- res->bar_length = recv_msg->offset_reps.length;
-
-free_msg:
- kfree(recv_msg);
- return ret;
+ int ret = 0;
+ uint16_t check_token, sum_res;
+ struct offset_get_msg send_msg = { 0 };
+ struct bar_recv_msg recv_msg = { 0 };
+
+ struct zxdh_pci_bar_msg in = { 0 };
+ struct zxdh_msg_recviver_mem result = { 0 };
+
+ if (!paras || !res) {
+ return BAR_MSG_ERR_NULL;
+ }
+
+ send_msg.pcie_id = paras->pcie_id;
+ send_msg.type = paras->type;
+
+ in.payload_addr = &send_msg;
+ in.payload_len = sizeof(send_msg);
+ in.virt_addr = paras->virt_addr;
+ in.src = MSG_CHAN_END_PF;
+ in.dst = MSG_CHAN_END_RISC;
+ in.event_id = MODULE_OFFSET_GET;
+
+ result.recv_buffer = &recv_msg;
+ result.buffer_len = sizeof(recv_msg);
+
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ if (ret != BAR_MSG_OK) {
+ return -ret;
+ }
+
+ check_token = recv_msg.offset_reps.check;
+ sum_res = bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg));
+ if (check_token != sum_res) {
+ BAR_LOG_ERR("expect token: 0x%x, get token: 0x%x.\n", sum_res,
+ check_token);
+ return BAR_MSG_ERR_NOT_MATCH;
+ }
+ res->bar_offset = recv_msg.offset_reps.offset;
+ res->bar_length = recv_msg.offset_reps.length;
+ return BAR_MSG_OK;
}
EXPORT_SYMBOL(zxdh_get_bar_offset);
void zxdh_bar_reset_valid(uint64_t subchan_addr)
{
- struct bar_msg_header msg_header = {0};
+ struct bar_msg_header msg_header = { 0 };
- bar_chan_msg_header_get(subchan_addr, &msg_header);
+ __bar_chan_msg_header_get(subchan_addr, &msg_header);
- subchan_addr += BAR_MSG_ADDR_CHAN_INTERVAL;
- bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);
- bar_chan_msg_poltag_set(subchan_addr, 0);
+ subchan_addr += BAR_MSG_ADDR_CHAN_INTERVAL;
+ __bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);
+ __bar_chan_msg_poltag_set(subchan_addr, 0);
}
EXPORT_SYMBOL(zxdh_bar_reset_valid);
-uint16_t zxdh_get_event_id(uint64_t subchan_addr, uint8_t src_type, uint8_t dst_type)
+uint16_t zxdh_get_event_id(uint64_t subchan_addr, uint8_t src_type,
+ uint8_t dst_type)
{
- uint8_t subchan_id = 0;
- struct bar_msg_header msg_header = {0};
- uint8_t src = bar_msg_col_index_trans(src_type);
- uint8_t dst = bar_msg_row_index_trans(dst_type);
-
- if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)
- {
- return 0;
- }
- /* 接收子通道id和发送子通道相反*/
- subchan_id = (!!subchan_id_tbl[dst][src])? BAR_SUBCHAN_INDEX_SEND : BAR_SUBCHAN_INDEX_RECV;
- subchan_addr += subchan_id * BAR_MSG_ADDR_CHAN_INTERVAL;
- bar_chan_msg_header_get(subchan_addr, &msg_header);
- return msg_header.event_id;
+ uint8_t subchan_id = 0;
+ struct bar_msg_header msg_header = { 0 };
+ uint8_t src = __bar_msg_col_index_trans(src_type);
+ uint8_t dst = __bar_msg_row_index_trans(dst_type);
+
+ if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR) {
+ return 0;
+ }
+	/* The receive sub-channel id is the opposite of the send sub-channel id */
+ subchan_id = (!!subchan_id_tbl[dst][src]) ? BAR_SUBCHAN_INDEX_SEND :
+ BAR_SUBCHAN_INDEX_RECV;
+ subchan_addr += subchan_id * BAR_MSG_ADDR_CHAN_INTERVAL;
+ __bar_chan_msg_header_get(subchan_addr, &msg_header);
+ return msg_header.event_id;
}
EXPORT_SYMBOL(zxdh_get_event_id);
-int32_t zxdh_send_command(uint64_t vaddr, uint16_t pcie_id, uint16_t module_id, \
- void *msg, void *ack, bool is_sync_msg)
+int32_t zxdh_send_command(uint64_t vaddr, uint16_t pcie_id, uint16_t module_id,
+ void *msg, void *ack, bool is_sync_msg)
{
- struct zxdh_pci_bar_msg in = {0};
- struct zxdh_msg_recviver_mem result = {0};
- struct bar_recv_msg *bar_reps = NULL;
- int32_t ret = 0;
-
- if ((msg == NULL) || (ack == NULL))
- {
- LOG_ERR("NULL ptr\n");
- return -1;
- }
-
- in.payload_addr = msg;
- in.payload_len = sizeof(union zxdh_msg);
-
- if (((pcie_id >> PFVF_FLAG_OFFSET) & 1) == 1)
- {
- in.src = MSG_CHAN_END_PF;
- }
- else
- {
- in.src = MSG_CHAN_END_VF;
- }
-
- bar_reps = kzalloc(sizeof(struct bar_recv_msg), GFP_KERNEL);
- if (bar_reps == NULL)
- {
- LOG_ERR("NULL ptr\n");
- return -1;
- }
- in.dst = MSG_CHAN_END_RISC;
- in.event_id = module_id;
- in.virt_addr = vaddr;
- in.src_pcieid = pcie_id;
- result.recv_buffer = bar_reps;
- result.buffer_len = BAR_MSG_PAYLOAD_MAX_LEN;
-
- switch (module_id)
- {
- case MODULE_VF_BAR_MSG_TO_PF:
- {
- in.dst = MSG_CHAN_END_PF;
- in.dst_pcieid = FIND_PF_PCIE_ID(pcie_id);
- in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET;
- break;
- }
- case MODULE_PF_BAR_MSG_TO_VF:
- {
- in.dst = MSG_CHAN_END_VF;
- in.dst_pcieid = ((zxdh_msg_info *)msg)->hdr_vf.dst_pcie_id;
- in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET;
- break;
- }
- case MODULE_TBL:
- {
- in.payload_len = MSG_STRUCT_HD_LEN + ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes;
- break;
- }
- case MODULE_PF_TIMER_TO_RISC_MSG:
- {
- in.payload_len = MSG_STRUCT_HD_LEN + ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes;
- break;
- }
- case MODULE_PHYPORT_QUERY:
- {
- in.payload_len = sizeof(struct zxdh_port_msg);
- break;
- }
- case MODULE_NPSDK:
- {
- in.payload_len = sizeof(zxdh_cfg_np_msg);
- break;
- }
- }
-
- ret = zxdh_bar_chan_sync_msg_send(&in, &result);
- if (ret != ZXDH_NET_ACK_OK)
- {
- LOG_ERR("zxdh_bar_chan_sync_msg_send failed: %d\n", ret);
- ret = -ret;
- goto free_reps;
- }
-
- if (is_sync_msg && bar_reps->replied != BAR_MSG_REPS_OK)
- {
- LOG_ERR("reps get failed\n");
- ret = -1;
- goto free_reps;
- }
-
- if (bar_reps->reps_len > BAR_MSG_PAYLOAD_MAX_LEN)
- {
- LOG_ERR("reps len too long\n");
- ret = -1;
- goto free_reps;
- }
- memcpy(ack, bar_reps->data, bar_reps->reps_len);
-
-free_reps:
- kfree(bar_reps);
- return ret;
+ struct zxdh_pci_bar_msg in = { 0 };
+ struct zxdh_msg_recviver_mem result = { 0 };
+ struct bar_recv_msg bar_reps = { 0 };
+ int32_t ret = 0;
+
+ if ((msg == NULL) || (ack == NULL)) {
+ LOG_ERR("NULL ptr\n");
+ return -1;
+ }
+
+ in.payload_addr = msg;
+ in.payload_len = sizeof(zxdh_msg_info);
+
+ if (((pcie_id >> PFVF_FLAG_OFFSET) & 1) == 1) {
+ in.src = MSG_CHAN_END_PF;
+ } else {
+ in.src = MSG_CHAN_END_VF;
+ }
+
+ in.dst = MSG_CHAN_END_RISC;
+ in.event_id = module_id;
+ in.virt_addr = vaddr;
+ in.src_pcieid = pcie_id;
+ result.recv_buffer = &bar_reps;
+ result.buffer_len = ZXDH_NET_MAX_ACK_LEN;
+
+ switch (module_id) {
+ case MODULE_VF_BAR_MSG_TO_PF: {
+ in.dst = MSG_CHAN_END_PF;
+ in.dst_pcieid = FIND_PF_PCIE_ID(pcie_id);
+ in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET;
+ break;
+ }
+ case MODULE_PF_BAR_MSG_TO_VF: {
+ in.dst = MSG_CHAN_END_VF;
+ in.dst_pcieid = ((zxdh_msg_info *)msg)->hdr_vf.dst_pcie_id;
+ in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET;
+ break;
+ }
+ case MODULE_TBL: {
+ in.payload_len = MSG_STRUCT_HD_LEN +
+ ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes;
+ break;
+ }
+ case MODULE_PF_TIMER_TO_RISC_MSG: {
+ in.payload_len = MSG_STRUCT_HD_LEN +
+ ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes;
+ break;
+ }
+ case MODULE_PHYPORT_QUERY: {
+ in.payload_len = sizeof(struct zxdh_port_msg);
+ break;
+ }
+ case MODULE_NPSDK: {
+ in.payload_len = sizeof(zxdh_cfg_np_msg);
+ break;
+ }
+ }
+
+ if (is_sync_msg) {
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ } else {
+ ret = zxdh_bar_chan_async_msg_send(&in, &result);
+ }
+
+ if (ret != ZXDH_NET_ACK_OK) {
+		LOG_ERR("bar chan msg send failed: %d\n", ret);
+ return -ret;
+ }
+
+ if (is_sync_msg && bar_reps.replied != BAR_MSG_REPS_OK) {
+ LOG_ERR("reps get failed\n");
+ return -1;
+ }
+
+ if (bar_reps.reps_len > ZXDH_NET_MAX_ACK_LEN) {
+ LOG_ERR("reps len too long\n");
+ return -1;
+ }
+ memcpy(ack, bar_reps.data, bar_reps.reps_len);
+
+ return ret;
}
EXPORT_SYMBOL(zxdh_send_command);
diff --git a/src/net/drivers/net/ethernet/dinghai/dh_procfs.c b/src/net/drivers/net/ethernet/dinghai/dh_procfs.c
index bcd6fba1936ff0928a4d8b018d86fa9a2645c4f2..30abb921e0427515d23a76f666b538c559ae6fcd 100644
--- a/src/net/drivers/net/ethernet/dinghai/dh_procfs.c
+++ b/src/net/drivers/net/ethernet/dinghai/dh_procfs.c
@@ -4,82 +4,71 @@
#define DRV_NAME "dinghai"
-static struct fs_entry_desc fs_entry_table[] = {
- { FS_ENTRY_BOND, "lag" },
- {0}
-};
+static struct fs_entry_desc fs_entry_table[] = { { FS_ENTRY_BOND, "lag" },
+ { 0 } };
uint32_t find_fs_entry(uint32_t type)
{
- uint32_t idx = 0;
+ uint32_t idx = 0;
- while (fs_entry_table[idx].file_name)
- {
- if (type == fs_entry_table[idx].type)
- {
- break;
- }
- idx++;
- }
+ while (fs_entry_table[idx].file_name) {
+ if (type == fs_entry_table[idx].type) {
+ break;
+ }
+ idx++;
+ }
- return idx;
+ return idx;
}
void zxdh_create_proc_dir(struct zxdh_proc_fs *procfs)
{
- if (!procfs->proc_dir)
- {
- procfs->proc_dir = proc_mkdir(DRV_NAME, NULL);
- if (!procfs->proc_dir)
- {
- pr_warn("Warning: Cannot create /proc/%s\n", DRV_NAME);
- }
- }
+ if (!procfs->proc_dir) {
+ procfs->proc_dir = proc_mkdir(DRV_NAME, NULL);
+ if (!procfs->proc_dir) {
+ pr_warn("Warning: Cannot create /proc/%s\n", DRV_NAME);
+ }
+ }
}
void zxdh_destroy_proc_dir(struct zxdh_proc_fs *procfs)
{
- if (procfs->proc_dir)
- {
- remove_proc_entry(DRV_NAME, NULL);
- procfs->proc_dir = NULL;
- }
+ if (procfs->proc_dir) {
+ remove_proc_entry(DRV_NAME, NULL);
+ procfs->proc_dir = NULL;
+ }
}
-void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs,
- uint32_t type, struct seq_operations *seq_ops, void *data)
+void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type,
+ struct seq_operations *seq_ops, void *data)
{
- uint32_t idx = 0;
- char *file_name = NULL;
+ uint32_t idx = 0;
+ char *file_name = NULL;
- if (procfs->proc_dir)
- {
- idx = find_fs_entry(type);
- file_name = fs_entry_table[idx].file_name;
- if (file_name && (idx < PROC_ENTRY_MAX))
- {
- procfs->proc_entry[idx] = proc_create_seq_data(file_name, 0444, procfs->proc_dir, seq_ops, data);
- if (procfs->proc_entry[idx] == NULL)
- {
- pr_info("Cannot create /proc/%s/%s\n", DRV_NAME, file_name);
- }
- }
- }
+ if (procfs->proc_dir) {
+ idx = find_fs_entry(type);
+ file_name = fs_entry_table[idx].file_name;
+ if (file_name && (idx < PROC_ENTRY_MAX)) {
+ procfs->proc_entry[idx] = proc_create_seq_data(
+ file_name, 0444, procfs->proc_dir, seq_ops, data);
+ if (procfs->proc_entry[idx] == NULL) {
+ pr_info("Cannot create /proc/%s/%s\n", DRV_NAME, file_name);
+ }
+ }
+ }
}
void zxdh_remove_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type)
{
- uint32_t idx = 0;
- char *file_name = NULL;
+ uint32_t idx = 0;
+ char *file_name = NULL;
- if (procfs->proc_dir)
- {
- idx = find_fs_entry(type);
- file_name = fs_entry_table[idx].file_name;
- if (file_name && (idx < PROC_ENTRY_MAX))
- {
- remove_proc_entry(file_name, procfs->proc_dir);
- procfs->proc_entry[idx] = NULL;
- }
- }
+ if (procfs->proc_dir) {
+ idx = find_fs_entry(type);
+ file_name = fs_entry_table[idx].file_name;
+ if (file_name && (idx < PROC_ENTRY_MAX)) {
+ remove_proc_entry(file_name, procfs->proc_dir);
+ procfs->proc_entry[idx] = NULL;
+ }
+ }
}
diff --git a/src/net/drivers/net/ethernet/dinghai/dh_procfs.h b/src/net/drivers/net/ethernet/dinghai/dh_procfs.h
index 6197957474073ab17c0da41dffe9c2fd846c6157..fb7b5f09afc96c9bd1f06cf95dbd5bcf7da2de1f 100644
--- a/src/net/drivers/net/ethernet/dinghai/dh_procfs.h
+++ b/src/net/drivers/net/ethernet/dinghai/dh_procfs.h
@@ -1,30 +1,26 @@
#ifndef _ZXDH_PROC_FS_H_
#define _ZXDH_PROC_FS_H_
-#define PROC_ENTRY_MAX (16)
+#define PROC_ENTRY_MAX (16)
-struct zxdh_proc_fs
-{
- struct proc_dir_entry *proc_dir;
- struct proc_dir_entry *proc_entry[PROC_ENTRY_MAX];
+struct zxdh_proc_fs {
+ struct proc_dir_entry *proc_dir;
+ struct proc_dir_entry *proc_entry[PROC_ENTRY_MAX];
};
-struct fs_entry_desc
-{
- uint32_t type;
- char *file_name;
+struct fs_entry_desc {
+ uint32_t type;
+ char *file_name;
};
-enum
-{
- FS_ENTRY_BOND = 0,
+enum {
+ FS_ENTRY_BOND = 0,
};
void zxdh_create_proc_dir(struct zxdh_proc_fs *procfs);
void zxdh_destroy_proc_dir(struct zxdh_proc_fs *procfs);
-void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs,
- uint32_t type, struct seq_operations *seq_ops, void *data);
+void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type,
+ struct seq_operations *seq_ops, void *data);
void zxdh_remove_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type);
-
#endif /* _ZXDH_PROC_FS_H_ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux.c b/src/net/drivers/net/ethernet/dinghai/en_aux.c
old mode 100755
new mode 100644
index b5c64e85438c3f695735df51622eb4a5f35989f5..16cf2ea16a69f1b3dd71d48a7630f2551ddc7e0b
--- a/src/net/drivers/net/ethernet/dinghai/en_aux.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux.c
@@ -1,4046 +1,3702 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "en_aux.h"
-#include "en_ethtool/ethtool.h"
-#include "en_np/table/include/dpp_tbl_api.h"
-#include "en_np/table/include/dpp_tbl_comm.h"
-#include "en_aux/events.h"
-#include "en_aux/eq.h"
-#include "en_aux/en_cmd.h"
-#include "msg_common.h"
-#include "en_pf.h"
-#include "en_aux/en_ioctl.h"
-#include
-#ifdef ZXDH_MSGQ
-#include "en_aux/priv_queue.h"
-#endif
-#include "en_aux/en_1588_pkt_proc.h"
-#include "en_aux/en_cmd.h"
-#include "zxdh_tools/zxdh_tools_netlink.h"
-#include
-
-#ifdef ZXDH_DCBNL_OPEN
-#include "en_aux/dcbnl/en_dcbnl.h"
-#endif
-
-uint32_t max_pairs = ZXDH_MQ_PAIRS_NUM;
-module_param(max_pairs, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(max_pairs, "Max queue pairs");
-
-MODULE_LICENSE("Dual BSD/GPL");
-
-/* WARNING Do not use netif_carrier_on/off(),
- it may affect the ethtool function */
-int32_t zxdh_en_open(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- LOG_INFO("zxdh_en_open start\n");
- mutex_lock(&en_priv->lock);
-
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- /* Make sure we have some buffers: if oom use wq */
- if (!try_fill_recv(netdev, &en_dev->rq[i], GFP_KERNEL))
- {
- schedule_delayed_work(&en_dev->refill, 0);
- }
-
- virtnet_napi_enable(en_dev->rq[i].vq, &en_dev->rq[i].napi);
- virtnet_napi_tx_enable(netdev, en_dev->sq[i].vq, &en_dev->sq[i].napi);
- }
-
- mutex_unlock(&en_priv->lock);
-
- if (!en_dev->link_up)
- {
- return 0;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP, 1, 0);
- }
- return dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 1);
- }
-
- /* 给bond-pf的端口属性表配置为up */
- err = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 1);
- if (err != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set bond pf failed\n");
- return err;
- }
-
- return dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 1);
-}
-
-int32_t zxdh_en_close(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- LOG_INFO("zxdh_en_close start\n");
- /* Make sure refill_work doesn't re-enable napi! */
- cancel_delayed_work_sync(&en_dev->refill);
-
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- napi_disable(&en_dev->rq[i].napi);
- virtnet_napi_tx_disable(&en_dev->sq[i].napi);
- }
-
- if (!en_dev->link_up)
- {
- return 0;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP, 0, 0);
- }
- return dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 0);
- }
-
- /* 给bond-pf的端口属性表配置为down */
- err = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 0);
- if (err != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set bond pf failed\n");
- return err;
- }
-
- return dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 0);
-}
-
-void pkt_transport_protocol_parse(int8_t next_protocol, struct zxdh_net_hdr *hdr)
-{
- if (next_protocol == IPPROTO_UDP)
- {
- hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_UDP;
- }
- else if (next_protocol == IPPROTO_TCP)
- {
- hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_TCP;
- }
- else
- {
- hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_IP;
- }
-
- return;
-}
-
-void pkt_protocol_parse(struct sk_buff *skb, struct zxdh_net_hdr *hdr, int32_t flag)
-{
- struct ethhdr *mach = NULL;
- struct iphdr *ipv4h = NULL;
- struct ipv6hdr *ipv6h = NULL;
-
- if (flag == 0)
- {
- if (skb->protocol == htons(ETH_P_IP))
- {
- ipv4h = (struct iphdr *)skb_network_header(skb);
- hdr->pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE;
- pkt_transport_protocol_parse(ipv4h->protocol, hdr);
- }
- else if (skb->protocol == htons(ETH_P_IPV6))
- {
- ipv6h = (struct ipv6hdr *)skb_network_header(skb);
- hdr->pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE;
- pkt_transport_protocol_parse(ipv6h->nexthdr, hdr);
- }
- else
- {
- hdr->pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE;
- }
- }
- else
- {
- mach = (struct ethhdr *)skb_inner_mac_header(skb);
- if (mach->h_proto == htons(ETH_P_IP))
- {
- ipv4h = (struct iphdr *)skb_inner_network_header(skb);
- hdr->pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE;
- pkt_transport_protocol_parse(ipv4h->protocol, hdr);
- }
- else if (mach->h_proto == htons(ETH_P_IPV6))
- {
- ipv6h = (struct ipv6hdr *)skb_inner_network_header(skb);
- hdr->pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE;
- pkt_transport_protocol_parse(ipv6h->nexthdr, hdr);
-
- }
- else
- {
- hdr->pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE;
- }
- }
-}
-
-int32_t pkt_is_vxlan(struct sk_buff *skb)
-{
- switch (skb->protocol)
- {
- case htons(ETH_P_IP):
- if (ip_hdr(skb)->protocol != IPPROTO_UDP)
- {
- return -1;
- }
- break;
-
- case htons(ETH_P_IPV6):
- if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
- {
- return -1;
- }
- break;
-
- default:
- return -1;
- }
-
- if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || skb->inner_protocol != htons(ETH_P_TEB) ||
- (skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
- {
- return -1;
- }
-
- return 0;
-}
-
-int32_t zxdh_tx_checksum_offload(struct zxdh_en_device *edev, struct sk_buff *skb, struct zxdh_net_hdr *hdr)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- {
- return 0;
- }
-
- if ((skb->inner_protocol != 0) && (pkt_is_vxlan(skb) == 0))
- {
- skb->encapsulation = 0x1;
- }
-
- if(skb->encapsulation == 0x1)
- {
- if ((edev->netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) == 0)
- {
- return 0;
- }
- hdr->pi_hdr.bttl_pi_len = ENABLE_PI_FLAG_32B;
- hdr->pd_hdr.ol_flag |= htons(0x1 << OUTER_IP_CHECKSUM_OFFSET);
- pkt_protocol_parse(skb, hdr, 1);
- hdr->pi_hdr.hdr_l3_offset = htons(edev->hdr_len + skb_inner_network_offset(skb));
- hdr->pi_hdr.hdr_l4_offset = htons(edev->hdr_len + skb_inner_transport_offset(skb));
- }
-
- hdr->pi_hdr.pkt_action_flag1 |= htons(0x1 << INNER_IP_CHECKSUM_OFFSET);
- hdr->pi_hdr.pkt_action_flag2 |= 0x1 << INNER_L4_CHECKSUM_OFFSET;
- return 0;
-}
-
-static int pd_hdr_validate_vlan(struct zxdh_en_device *edev, struct sk_buff *skb, struct zxdh_net_hdr *hdr)
-{
- /* pf set vf vlan is done*/
- if (edev->vlan_dev.vlan_id)
- {
- if (!skb_vlan_tag_present(skb))
- {
- hdr->pd_hdr.cvlan.tci = htons(edev->vlan_dev.vlan_id);
- hdr->pd_hdr.cvlan.tpid = (edev->vlan_dev.protcol);
- hdr->pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT);
- return 0;
- }
- else
- {
- hdr->pd_hdr.svlan.tci = htons(edev->vlan_dev.vlan_id);
- hdr->pd_hdr.svlan.tpid = (edev->vlan_dev.protcol);
- hdr->pd_hdr.ol_flag |= htons(TXCAP_STAG_INSERT_EN_BIT);
- }
- }
-
- /* insert vlan hard-accellate when skb is taged to be inserted, eg. in vlan interface case*/
- if (skb && skb_vlan_tag_present(skb))
- {
- hdr->pd_hdr.cvlan.tci = htons(skb_vlan_tag_get(skb));
- hdr->pd_hdr.cvlan.tpid = (skb->vlan_proto);
- hdr->pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT);
- }
- return 0;
-}
-
-
-int32_t pi_net_hdr_from_skb(struct zxdh_en_device *edev, struct sk_buff *skb, struct zxdh_net_hdr *hdr)
-{
- uint32_t gso_type = 0;
- uint16_t mss = 0;
-#ifdef TIME_STAMP_1588
- int32_t ret = 0;
-#endif
-
- memset(hdr, 0, sizeof(*hdr)); /* no info leak */
- hdr->pd_len = edev->hdr_len / HDR_2B_UNIT;
- hdr->pi_hdr.bttl_pi_len = DISABLE_PI_FIELD_PARSE + ENABLE_PI_FLAG_32B;
- hdr->tx_port = TX_PORT_DTP;
- hdr->pi_hdr.pt.type_ctx.pkt_src = PKT_SRC_CPU;
- hdr->pi_hdr.eth_port_id = INVALID_ETH_PORT_ID;
-
- if(edev->delay_statistics_enable)
- {
- pkt_delay_statistics_proc(skb, hdr, edev);
- }
-
-// #ifdef TIME_STAMP_1588
-// ret = pkt_1588_proc_xmit(skb, hdr, edev->clock_no, edev);
-// switch (ret)
-// {
-// case PTP_SUCCESS:
-// {
-// LOG_DEBUG("pkt_1588_proc_xmit success!!!\n");
-// return 0;
-// }
-// case IS_NOT_PTP_MSG:
-// {
-// LOG_DEBUG("not ptp msg!!\n");
-// break;
-// }
-// default:
-// {
-// LOG_ERR("pkt_1588_proc_xmit err!!!\n");
-// return ret;
-// }
-// }
-// #endif
-
- pd_hdr_validate_vlan(edev, skb, hdr);
-
- mss = skb_shinfo(skb)->gso_size;
- gso_type = skb_shinfo(skb)->gso_type;
- if(gso_type & SKB_GSO_TCPV4)
- {
- mss = (mss > 0) ? min(skb_shinfo(skb)->gso_size, (uint16_t)(edev->netdev->mtu - IP_BASE_HLEN - TCP_BASE_HLEN))
- : (uint16_t)(edev->netdev->mtu - IP_BASE_HLEN - TCP_BASE_HLEN);
- hdr->pi_hdr.pkt_action_flag1 |= htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
- hdr->pi_hdr.pkt_action_flag2 |= TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */
- }
- else if(gso_type & SKB_GSO_TCPV6)
- {
- mss = (mss > 0) ? min(skb_shinfo(skb)->gso_size, (uint16_t)(edev->netdev->mtu - IPV6_BASE_HLEN - TCP_BASE_HLEN))
- : (uint16_t)(edev->netdev->mtu - IPV6_BASE_HLEN - TCP_BASE_HLEN);
- hdr->pi_hdr.pkt_action_flag1 |= htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
- hdr->pi_hdr.pkt_action_flag2 |= TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */
- }
- else if(gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))
- {
- hdr->pi_hdr.pkt_action_flag1 = htons((uint16_t)(edev->netdev->mtu / ETH_MTU_4B_UNIT) + IP_FRG_CSUM_FLAG);
- hdr->pi_hdr.pkt_action_flag2 |= NOT_TCP_FRG_CSUM_FLAG;
- }
- else
- {
- hdr->pi_hdr.pkt_action_flag1 |= htons((edev->netdev->mtu / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
- hdr->pi_hdr.pkt_action_flag2 |= NOT_TCP_FRG_CSUM_FLAG;
- }
-
- if (edev->netdev->features & NETIF_F_HW_CSUM)
- {
- zxdh_tx_checksum_offload(edev, skb, hdr);
- }
-
- if ((edev->ops->is_bond(edev->parent)) &&
- (skb->protocol == htons(ETH_P_SLOW) || skb->protocol == htons(ETH_P_PAUSE)))
- {
- hdr->pd_hdr.ol_flag |= htons(PANELID_EN);
- hdr->pd_hdr.panel_id = edev->phy_port;
- }
-
- #ifdef ZXDH_DCBNL_OPEN
- if (NULL != skb->sk)
- {
- hdr->pd_hdr.ol_flag |= htons(ZXDH_DCBNL_SET_SK_PRIO(skb->sk->sk_priority));
- }
- #endif
-
-
-#ifdef TIME_STAMP_1588
- ret = pkt_1588_proc_xmit(skb, hdr, edev->clock_no, edev);
- switch (ret)
- {
- case PTP_SUCCESS:
- {
- LOG_DEBUG("pkt_1588_proc_xmit success!!!\n");
- return 0;
- }
- case IS_NOT_PTP_MSG:
- {
- LOG_DEBUG("not ptp msg!!\n");
- break;
- }
- default:
- {
- LOG_ERR("pkt_1588_proc_xmit err!!!\n");
- return ret;
- }
- }
-#endif
- return 0;
-}
-
-int32_t xmit_skb(struct net_device *netdev, struct send_queue *sq, struct sk_buff *skb)
-{
- struct zxdh_net_hdr *hdr = NULL;
- //const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t num_sg = 0;
- unsigned hdr_len = en_dev->hdr_len;
- bool can_push = false;
- uint8_t *hdr_buf = sq->hdr_buf;
-
- can_push = en_dev->any_header_sg &&
- !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
- !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
- /* Even if we can, don't push here yet as this would skew
- * csum_start offset below. */
- if (can_push)
- {
- hdr = (struct zxdh_net_hdr *)(skb->data - hdr_len);
- }
- else
- {
- memset(hdr_buf, 0, HDR_BUFFER_LEN);
- hdr = (struct zxdh_net_hdr *)hdr_buf;
- }
-
- if (pi_net_hdr_from_skb(en_dev, skb, hdr))
- {
- return -EPROTO;
- }
-
- sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
- if (can_push)
- {
- __skb_push(skb, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
- if (unlikely(num_sg < 0))
- {
- return num_sg;
- }
- /* Pull header back to avoid skew in tx bytes calculations. */
- __skb_pull(skb, hdr_len);
- }
- else
- {
- sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
- if (unlikely(num_sg < 0))
- {
- return num_sg;
- }
- num_sg++;
- }
-
- return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
-}
-
-netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t qnum = skb_get_queue_mapping(skb);
- struct send_queue *sq = &en_dev->sq[qnum];
- int32_t err = 0;
- struct netdev_queue *txq = netdev_get_tx_queue(netdev, qnum);
- bool kick = !netdev_xmit_more();
- bool use_napi = sq->napi.weight;
-
- /* Free up any pending old buffers before queueing new ones. */
- do {
- if (use_napi)
- {
- virtqueue_disable_cb(sq->vq);
- }
-
- free_old_xmit_skbs(netdev, sq, false);
-
- } while (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
-
- /* timestamp packet in software */
- skb_tx_timestamp(skb);
-
- /* Try to transmit */
- err = xmit_skb(netdev, sq, skb);
-
- /* This should not happen! */
- if (unlikely(err))
- {
- netdev->stats.tx_fifo_errors++;
- netdev->stats.tx_errors++;
- if (net_ratelimit())
- {
- LOG_WARN("unexpected TXQ (%d) queue failure: %d\n", qnum, err);
- }
- netdev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- /* If running out of space, stop queue to avoid getting packets that we
- * are then unable to transmit.
- * An alternative would be to force queuing layer to requeue the skb by
- * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
- * returned in a normal path of operation: it means that driver is not
- * maintaining the TX queue stop/start state properly, and causes
- * the stack to do a non-trivial amount of useless work.
- * Since most packets only take 1 or 2 ring slots, stopping the queue
- * early means 16 slots are typically wasted.
- */
- if (sq->vq->num_free < 2 + MAX_SKB_FRAGS)
- {
- netif_stop_subqueue(netdev, qnum);
- en_dev->hw_stats.q_stats[qnum].q_tx_stopped++;
- if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
- {
- /* More just got used, free them then recheck. */
- free_old_xmit_skbs(netdev, sq, false);
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
- {
- netif_start_subqueue(netdev, qnum);
- virtqueue_disable_cb(sq->vq);
- }
- }
- }
-
- if (kick || netif_xmit_stopped(txq))
- {
- if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq))
- {
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.kicks++;
- u64_stats_update_end(&sq->stats.syncp);
- }
- }
-
- return NETDEV_TX_OK;
-}
-
-#ifdef HAVE_NDO_GET_STATS64
-#ifdef HAVE_VOID_NDO_GET_STATS64
-static void zxdh_en_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats)
-#else
-static struct rtnl_link_stats64 *zxdh_en_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats)
-#endif
-{
-#ifdef HAVE_VOID_NDO_GET_STATS64
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
- struct receive_queue *rq = NULL;
- struct send_queue *sq = NULL;
- uint32_t start = 0;
- uint32_t i = 0;
- uint64_t tpackets = 0;
- uint64_t tbytes = 0;
- uint64_t rpackets = 0;
- uint64_t rbytes = 0;
- uint64_t rdrops = 0;
- uint32_t loop_cnt = en_dev->max_queue_pairs;
- int32_t ret = 0;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- ret = zxdh_mac_stats_get(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
- return;
- }
-
- stats->rx_packets = en_dev->hw_stats.phy_stats.rx_packets_phy;
- stats->rx_bytes = en_dev->hw_stats.phy_stats.rx_bytes_phy;
- stats->rx_errors = en_dev->hw_stats.phy_stats.rx_errors;
- stats->rx_dropped = en_dev->hw_stats.phy_stats.rx_discards;
- stats->tx_packets = en_dev->hw_stats.phy_stats.tx_packets_phy;
- stats->tx_bytes = en_dev->hw_stats.phy_stats.tx_bytes_phy;
- stats->tx_errors = en_dev->hw_stats.phy_stats.tx_errors;
- stats->tx_dropped = en_dev->hw_stats.phy_stats.tx_drop;
- return;
- }
-
-#ifdef ZXDH_MSGQ
- NEED_MSGQ(en_dev)
- {
- loop_cnt--;
- }
-#endif
-
- for (i = 0; i < loop_cnt; ++i)
- {
- sq = &en_dev->sq[i];
- rq = &en_dev->rq[i];
- do
- {
- start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
- tpackets = sq->stats.packets;
- tbytes = sq->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
-
- do
- {
- start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
- rpackets = rq->stats.packets;
- rbytes = rq->stats.bytes;
- rdrops = rq->stats.drops;
- } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
-
- stats->rx_packets += rpackets;
- stats->rx_bytes += rbytes;
- stats->rx_dropped += rdrops;
- stats->tx_packets += tpackets;
- stats->tx_bytes += tbytes;
- }
-
- stats->rx_errors = netdev->stats.rx_errors;
- stats->tx_errors = netdev->stats.tx_errors;
- stats->tx_dropped = netdev->stats.tx_dropped;
- stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
- return;
-#else
- return stats;
-#endif
-}
-#endif/* HAVE_VOID_NDO_GET_STATS_64 */
-
-static void zxdh_en_set_rx_mode(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- queue_work(en_priv->events->wq, &en_dev->rx_mode_set_work);
-}
-
-void rx_mode_set_handler(struct work_struct *work)
-{
- struct zxdh_en_device *en_dev = container_of(work, struct zxdh_en_device, rx_mode_set_work);
- bool promisc_changed = false;
- bool allmulti_changed = false;
- int32_t err = 0;
- uint8_t fow = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- return;
- }
-
- promisc_changed = en_dev->netdev->flags & IFF_PROMISC;
- allmulti_changed = en_dev->netdev->flags & IFF_ALLMULTI;
- if (en_dev->promisc_enabled != promisc_changed)
- {
- LOG_INFO("promisc_changed: %d", promisc_changed);
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- dpp_vport_uc_promisc_set(&pf_info, promisc_changed);
- if (!en_dev->allmulti_enabled)
- {
- dpp_vport_mc_promisc_set(&pf_info, promisc_changed);
- }
- }
- else
- {
- if (!en_dev->allmulti_enabled)
- {
- fow = 1;
- }
- err = zxdh_vf_port_promisc_set(en_dev, ZXDH_PROMISC_MODE, promisc_changed, fow);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_port_promisc_set failed\n");
- return;
- }
- }
- en_dev->promisc_enabled = promisc_changed;
- }
-
- if (en_dev->allmulti_enabled != allmulti_changed)
- {
- LOG_INFO("allmulti_changed: %d", allmulti_changed);
- if (!en_dev->promisc_enabled)
- {
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- dpp_vport_mc_promisc_set(&pf_info, allmulti_changed);
- }
- else
- {
- err = zxdh_vf_port_promisc_set(en_dev, ZXDH_ALLMULTI_MODE, allmulti_changed, fow);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_port_promisc_set failed\n");
- return;
- }
- }
- }
- en_dev->allmulti_enabled = allmulti_changed;
- }
-}
-
-static int zxdh_en_bar_cfg_mac(struct net_device *netdev, const char *mac)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- memcpy(msg.payload.mac_cfg_msg.ifname, netdev->name, IFNAMSIZ);
- memcpy(msg.payload.mac_cfg_msg.mac, mac, ETH_ALEN);
- msg.payload.mac_cfg_msg.pannel_id = en_dev->panel_id;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- msg.payload.mac_cfg_msg.pannel_id = en_dev->pannel_id;
- }
- msg.payload.mac_cfg_msg.ctl = 1;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_CFG_MAC, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("config mac info failed\n");
- return -msg.reps.flag;
- }
- return 0;
-}
-
-static int zxdh_en_bar_del_mac(struct net_device *netdev)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- msg.payload.mac_cfg_msg.pannel_id = en_dev->panel_id;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- msg.payload.mac_cfg_msg.pannel_id = en_dev->pannel_id;
- }
- msg.payload.mac_cfg_msg.ctl = 0;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_CFG_MAC, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("del mac info failed.\n");
- return -msg.reps.flag;
- }
- return 0;
-}
-
-static int zxdh_en_set_mac(struct net_device *netdev, void *p)
-{
- struct sockaddr *addr = (struct sockaddr *)p;
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_priv *en_priv = NULL;
- struct netdev_hw_addr *ha = NULL;
- bool delete_flag = true;
- bool add_flag = true;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- if (!is_valid_ether_addr(addr->sa_data))
- {
- LOG_INFO("invalid mac address %pM\n", addr->sa_data);
- return -EADDRNOTAVAIL;
- }
-
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
- {
- LOG_INFO("already using mac address %pM\n", addr->sa_data);
- return 0;
- }
-
- list_for_each_entry(ha, &netdev->uc.list, list)
- {
- if (!memcmp(ha->addr, netdev->dev_addr, netdev->addr_len))
- {
- delete_flag = false;
- }
-
- if (!memcmp(ha->addr, addr->sa_data, netdev->addr_len))
- {
- add_flag = false;
- }
- }
-
- en_priv = netdev_priv(netdev);
- en_dev = &en_priv->edev;
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
- zxdh_en_bar_del_mac(netdev);
- zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
- return 0;
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- if (delete_flag)
- {
- ret = dpp_del_mac(&pf_info, netdev->dev_addr);
- if (ret != 0)
- {
- LOG_ERR("pf del mac failed, retval: %d\n", ret);
- return ret;
- }
- }
-
- if (add_flag)
- {
- ret = dpp_add_mac(&pf_info, addr->sa_data);
- if (ret != 0)
- {
- LOG_ERR("pf add mac failed: %d\n", ret);
- return ret;
- }
- }
-
- LOG_INFO("set pf new mac address %pM\n", addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
- if (!en_dev->ops->is_upf(en_dev->parent))
- {
- zxdh_en_bar_del_mac(netdev);
- zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
- }
- }
- else
- {
- ret = zxdh_vf_dpp_del_mac(en_dev, netdev->dev_addr, UNFILTER_MAC, delete_flag);
- if (ret != 0)
- {
- LOG_ERR("zxdh vf dpp del mac failed: %d\n", ret);
- return ret;
- }
-
- if (add_flag)
- {
- ret = zxdh_vf_dpp_add_mac(en_dev, addr->sa_data, UNFILTER_MAC);
- if (ret != 0)
- {
- LOG_ERR("zxdh vf dpp add mac failed: %d\n", ret);
- return ret;
- }
- en_dev->ops->set_mac(en_dev->parent, addr->sa_data);
- }
-
- LOG_INFO("set vf new mac address %pM\n", addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
- }
-
- return ret;
-}
-
-int32_t zxdh_en_config_mtu_to_np(struct net_device *netdev, int32_t mtu_value)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_MTU_OFFLOAD_ENABLE, 1);
- dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_MTU, mtu_value);
- dpp_egr_port_attr_set(&pf_info, EGR_FLAG_MTU_OFFLOAD_EN_OFF, 1);
- dpp_egr_port_attr_set(&pf_info, EGR_FLAG_MTU, mtu_value);
- }
- else
- {
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_MTU_OFFLOAD_EN_OFF, 1, 0);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_egr_port_attr_set config mtu enable failed: %d\n", ret);
- return ret;
- }
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_MTU, mtu_value, 0);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_egr_port_attr_set config mut value failed: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int zxdh_en_change_mtu(struct net_device *netdev, int new_mtu)
-{
- int32_t ret = 0;
-
- if ((new_mtu < ETH_MIN_MTU) || (new_mtu > ZXDH_MAX_MTU))
- {
- return -EINVAL;
- }
- LOG_INFO("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
-
- netdev->mtu = new_mtu;
-
- ret = zxdh_en_config_mtu_to_np(netdev, new_mtu);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_config_mtu_to_np failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-#ifdef HAVE_TX_TIMEOUT_TXQUEUE
-static void zxdh_en_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- return;
-}
-#else
-static void zxdh_en_tx_timeout(struct net_device *netdev)
-{
- return;
-}
-#endif
-
-#ifdef HAVE_VLAN_RX_REGISTER
-static void zxdh_en_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
-{
- return;
-}
-#endif
-
-static int __attribute__((unused)) vf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_VLAN_FILTER_ADD;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.rx_vid_add_msg.vlan_id = vid;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("pcieid:0x%x send msg to pf add vlan:%d failed! ret = %d, flag = 0x%x\n",
- en_dev->pcie_id,
- vid,
- ret,
- msg.reps.flag);
- return -1;
- }
-
- return 0;
-}
-
-#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX)
-static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid)
-{
- int retval = 0;
-#if 0
- struct zxdh_en_priv *zxdev = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &zxdev->edev;
- uint16_t pcieid = en_dev->pcie_id;
-
- if (vid > MAX_VLAN_ID)
- {
- LOG_ERR("vlan id:%d input is err!\n", vid);
- return -EINVAL;
- }
-
- if ((pcieid & PF_AC_MASK) == 0) /* VF */
- {
- retval = vf_vlan_rx_add_vid(netdev, vid);
- goto exit;
- }
-
- retval = dpp_add_vlan_filter(zxdev->edev.vport, vid);
- if (0 != retval)
- {
- LOG_ERR("failed to add vlan: %d\n",vid);
- goto exit;
- }
- LOG_INFO("pf add vlan %d succeed.\n", vid);
-
-exit:
-#endif
- return retval;
-}
-#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX)
-static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- return 0;
-}
-#else
-static void zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- return;
-}
-#endif
-
-static int vf_vlan_rx_del_vid(struct net_device *netdev, u16 vid)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_VLAN_FILTER_DEL;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.rx_vid_del_msg.vlan_id = vid;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("pcieid:0x%x send msg to pf del vlan:%d failed! ret = %d, flag = 0x%x\n",
- en_dev->pcie_id,
- vid,
- ret,
- msg.reps.flag);
- return -1;
- }
-
- return 0;
-}
-
-
-#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX)
-static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid)
-{
- int retval = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint16_t pcieid = en_dev->pcie_id;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (vid > MAX_VLAN_ID)
- {
- LOG_ERR("vlan id:%d input is err!\n", vid);
- return -EINVAL;
- }
-
- if ((pcieid & PF_AC_MASK) == 0) /* VF */
- {
- retval = vf_vlan_rx_del_vid(netdev, vid);
- goto exit;
- }
-
- retval = dpp_del_vlan_filter(&pf_info, vid);
- if (0 != retval)
- {
- LOG_ERR("failed to del vlan: %d\n", vid);
- goto exit;
- }
- LOG_INFO("pf del vlan %d succeed.\n", vid);
-
-exit:
- return retval;
-}
-#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX)
-static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- return 0;
-}
-#else
-static void zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- return;
-}
-#endif
-
-static void zxdh_en_netpoll(struct net_device *netdev)
-{
- return;
-}
-
-#ifdef HAVE_SETUP_TC
-int zxdh_en_setup_tc(struct net_device *netdev, u8 tc)
-{
- return 0;
-}
-
-#ifdef NETIF_F_HW_TC
-#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
-static int __zxdh_en_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data)
-#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX)
-static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle,
- u32 chain_index, __be16 proto,
- struct tc_to_netdev *tc)
-#else
-static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
-#endif
-{
- return 0;
-}
-#endif
-#endif
-
-#ifdef HAVE_NDO_GET_PHYS_PORT_ID
-static int zxdh_en_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid)
-{
- return 0;
-}
-#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
-
-
-static void zxdh_set_en_device(struct net_device *netdev, netdev_features_t features)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if ((features & NETIF_F_GSO) || (features & NETIF_F_GSO_UDP) || (features & NETIF_F_GSO_UDP_L4))
- {
- en_dev->drs_offload = true;
- }
- else if ((features & NETIF_F_TSO) || (features & NETIF_F_HW_CSUM))
- {
- en_dev->dtp_offload = true;
- }
- else
- {
- en_dev->np_direction = true;
- }
-
- return;
-}
-
-static int zxdh_handle_feature(struct net_device *netdev,
- netdev_features_t *features,
- netdev_features_t wanted_features,
- netdev_features_t feature,
- zxdh_feature_handler feature_handler)
-{
- netdev_features_t changes = wanted_features ^ netdev->features;
- bool enable = !!(wanted_features & feature);
- int err;
-
- if (!(changes & feature) || feature_handler == NULL)
- {
- return 0;
- }
-
- err = feature_handler(netdev, enable);
- if (err)
- {
- LOG_ERR("%s feature %pNF failed, err %d\n",
- enable ? "Enable" : "Disable", &feature, err);
- return err;
- }
-
- ZXDH_SET_FEATURE(features, feature, enable);
- return 0;
-}
-
-static int32_t zxdh_dtp_offload_set(struct zxdh_en_device *en_dev, DPP_PF_INFO_T *pf_info)
-{
- ZXDH_VPORT_T port_attr_entry = {0};
- int32_t ret = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_egr_port_attr_get(pf_info, &port_attr_entry);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_get failed: %d\n", ret);
- return ret;
- }
-
- if (!port_attr_entry.lro_offload && !port_attr_entry.ip_fragment_offload &&
- !port_attr_entry.ip_checksum_offload && !port_attr_entry.tcp_udp_checksum_offload)
- {
- ret = dpp_egr_port_attr_set(pf_info, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 0);
- }
- else
- {
- ret = dpp_egr_port_attr_set(pf_info, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1);
- }
-
- return ret;
- }
-
- ret = zxdh_vf_egr_port_attr_get(en_dev, &port_attr_entry);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_get failed: %d\n", ret);
- return ret;
- }
-
- if (!port_attr_entry.lro_offload && !port_attr_entry.ip_fragment_offload &&
- !port_attr_entry.ip_checksum_offload && !port_attr_entry.tcp_udp_checksum_offload)
- {
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 0, 0);
- }
- else
- {
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1, 0);
- }
-
- return ret;
-}
-
-static int32_t set_feature_rx_checksum(struct net_device *netdev, bool enable)
-{
- int en_value = enable ? 1 : 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
- int32_t ret = 0;
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IP_CHKSUM, enable);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IP_CHKSUM set failed: %d\n", ret);
- return ret;
- }
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_TCP_UDP_CHKSUM, enable);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_TCP_UDP_CHKSUM set failed: %d\n", ret);
- return ret;
- }
- }
- else if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IP_CHKSUM, en_value, 0);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IP_CHKSUM set failed: %d\n", ret);
- return ret;
- }
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_TCP_UDP_CHKSUM, en_value, 0);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_TCP_UDP_CHKSUM set failed: %d\n", ret);
- return ret;
- }
- }
-
- return zxdh_dtp_offload_set(en_dev, &pf_info);
-}
-
-static int set_feature_tx_checksum(struct net_device *netdev, bool enable)
-{
- if (enable)
- {
- netdev->features |= NETIF_F_HW_CSUM;
- }
- else
- {
- netdev->features &= ~NETIF_F_HW_CSUM;
- }
- return 0;
-}
-
-static int set_feature_vxlan_checksum(struct net_device *netdev, bool enable)
-{
- int ret = 0;
- int en_value = enable ? 1 : 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (enable)
- {
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
- else
- {
- netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD, enable);
- if (ret != 0)
- {
- LOG_ERR("zxdh set vxlan rx checksum failed!\n");
- return ret;
- }
- }
- else if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD, en_value, 0);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_egr_port_attr_set vxlan rx checksum failed!\n");
- return ret;
- }
- }
-
- return ret;
-}
-
-static int32_t set_feature_rxhash(struct net_device *netdev, bool enable)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- return dpp_vport_rss_en_set(&pf_info, enable);
- }
-
- return zxdh_vf_rss_en_set(en_dev, enable);
-}
-
-static int32_t set_vf_cvlan_filter(struct net_device *netdev, bool enable)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- int32_t ret = 0;
-
- msg.payload.hdr.op_code = ZXDH_VLAN_FILTER_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.vlan_filter_set_msg.enable = enable;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("pcieid:0x%x send msg to pf set vlan filter enable:%s failed! ret = %d, flag = 0x%x\n",
- en_dev->pcie_id,
- enable ? "enable":"disable",
- ret,
- msg.reps.flag);
- return -1;
- }
-
- return 0;
-}
-
-static int __attribute__((unused)) set_feature_cvlan_filter(struct net_device *netdev, bool enable)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint16_t pcieid = en_dev->pcie_id;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if ((pcieid & PF_AC_MASK) == 0) /* VF */
- {
- ret = set_vf_cvlan_filter(netdev, enable);
- goto exit;
- }
-
- ret = dpp_vport_vlan_filter_en_set(&pf_info, enable);
-
-exit:
- return ret;
-}
-
-static int __attribute__((unused)) set_feature_svlan_filter(struct net_device *netdev, bool enable)
-{
- int ret = 0;
-#if 0 //TODO:STAG 暂时没有设置
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- ret = dpp_vport_vlan_qinq_en_set(en_dev->vport, enable);
-#endif
- return ret;
-}
-
-int set_vf_qinq_tpid(struct net_device *netdev, uint16_t tpid)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_SET_TPID;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.tpid_cfg_msg.tpid = tpid;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("pcieid:0x%x send msg to vfs set tpid: 0x%x failed! ret = %d.\n",
- en_dev->pcie_id,
- tpid,
- ret);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_vf_vlan_strip(struct net_device *netdev, bool enable, uint8_t flag)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_VLAN_OFFLOAD_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.vlan_strip_msg.enable = enable;
- msg.payload.vlan_strip_msg.flag = flag;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0 || msg.reps.flag != ZXDH_REPS_SUCC)
- {
- LOG_ERR("pcieid:0x%x send msg to vfs set vlan strip enable:%s failed! ret = %d, flag = 0x%x\n",
- en_dev->pcie_id,
- enable ? "enable":"disable",
- ret,
- msg.reps.flag);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_feature_vlan_strip(struct net_device *netdev, bool enable)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_vport_vlan_strip_set(&pf_info, enable);
- }
- else
- {
- ret = set_vf_vlan_strip(netdev, enable, VLAN_STRIP_MSG_TYPE);
- }
-
- return ret;
-}
-
-
-static int set_feature_qinq_strip(struct net_device *netdev, bool enable)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_vport_vlan_qinq_en_set(&pf_info, enable);
- }
- else
- {
- ret = set_vf_vlan_strip(netdev, enable, QINQ_STRIP_MSG_TYPE);
- }
-
- return ret;
-}
-
-
-static int32_t set_feature_lro(struct net_device *netdev, bool enable)
-{
- uint32_t en_value = enable ? 1 : 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
- int32_t ret = 0;
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV4_TCP_ASSEMBLE, en_value);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IPV4_TCP_ASSEMBLE set failed: %d\n", ret);
- return ret;
- }
- dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV6_TCP_ASSEMBLE, en_value);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IPV6_TCP_ASSEMBLE set failed: %d\n", ret);
- return ret;
- }
- }
- else
- {
- zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IPV4_TCP_ASSEMBLE, en_value, 0);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IPV4_TCP_ASSEMBLE set failed: %d\n", ret);
- return ret;
- }
- zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IPV6_TCP_ASSEMBLE, en_value, 0);
- if (ret != 0)
- {
- LOG_ERR("EGR_FLAG_IPV6_TCP_ASSEMBLE set failed: %d\n", ret);
- return ret;
- }
- }
-
- return zxdh_dtp_offload_set(en_dev, &pf_info);
-}
-
-int32_t zxdh_en_set_features(struct net_device *netdev, netdev_features_t wanted_features)
-{
- int32_t ret = 0;
- netdev_features_t oper_features = netdev->features;
-
- zxdh_set_en_device(netdev, wanted_features);
-
-#define ZXDH_HANDLE_FEATURE(set_feature, handler) \
- zxdh_handle_feature(netdev, &oper_features, wanted_features, set_feature, handler)
-
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_RXCSUM, set_feature_rx_checksum);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_CSUM, set_feature_tx_checksum);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_GSO_UDP_TUNNEL_CSUM, set_feature_vxlan_checksum);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_RXHASH, set_feature_rxhash);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_vlan_strip);
- ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_RX, set_feature_qinq_strip);
-
- if (ret)
- {
- netdev->features = oper_features;
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t list_hw_addr_create(struct netdev_hw_addr_list *list, \
- const uint8_t *addr, int32_t addr_len, \
- uint8_t addr_type, bool global, \
- bool sync)
-{
- struct netdev_hw_addr *ha = NULL;
-
- ha = kzalloc(sizeof(struct netdev_hw_addr), GFP_KERNEL);
- if (ha == NULL)
- {
- LOG_ERR("Kzalloc struct netdev_hw_addr failed \n");
- return 1;
- }
-
- /* 结构体赋值 */
- memcpy(ha->addr, addr, addr_len);
- ha->type = addr_type;
- ha->refcount = 1; /* 引用计数 */
- ha->global_use = global;
- ha->synced = sync ? 1 : 0;
- ha->sync_cnt = 0;
- list_add_tail_rcu(&ha->list, &list->list);
- list->count++; /* 链表节点加1 */
-
- return 0;
-}
-
-static uint32_t list_hw_addr_del(struct netdev_hw_addr_list *list, struct netdev_hw_addr *ha)
-{
- int32_t refcount = ha->refcount;
-
- /* 引用的计数大于1,则不能删除此mac地址 */
- if (--refcount)
- {
- return 1;
- }
-
- /* 从链表中删除此条目 */
- list_del_rcu(&ha->list);
-
- /* 释放ha结构体占用内存,rcu_head可以安全地释放ha占用的内存*/
- kfree_rcu(ha, rcu_head);
- list->count--;
-
- return 0;
-}
-
-bool is_this_mac_exist(struct net_device *netdev, const uint8_t *addr)
-{
- struct netdev_hw_addr *ha = NULL;
- bool isexist = false;
-
- /* 给net_device结构体上锁 */
- netif_addr_lock_bh(netdev);
-
- /* 判断此mac地址类型 */
- if (is_unicast_ether_addr(addr))
- {
- /* 遍历单播mac地址链表 */
- list_for_each_entry(ha, &netdev->uc.list, list)
- {
- /* 检查该单播地址链表中是否存在此mac,且此mac地址标志为单播 */
- if ((!memcmp(ha->addr, addr, netdev->addr_len)) \
- && (ha->type == NETDEV_HW_ADDR_T_UNICAST))
- {
- isexist = true;
- goto out;
- }
- }
- }
- else
- {
- /* 遍历组播mac地址链表 */
- list_for_each_entry(ha, &netdev->mc.list, list)
- {
- /* 检查该组播地址链表中是否存在此mac,且此mac地址类型为组播 */
- if ((!memcmp(ha->addr, addr, netdev->addr_len)) \
- && (ha->type == NETDEV_HW_ADDR_T_MULTICAST))
- {
- isexist = true;
- goto out;
- }
- }
- }
-
-out:
- /* 给net_device结构体释放锁 */
- netif_addr_unlock_bh(netdev);
-
- return isexist;
-}
-
-/**
- * zxdh_dev_list_addr_add - 在地址链表中添加此mac地址
- * @netdev: 网络设备结构体
- * @addr: 要添加的mac地址
- * @addr_type: mac地址类型
- */
-int32_t zxdh_dev_list_addr_add(struct net_device *netdev, const uint8_t *addr)
-{
- int32_t err = 0;
-
- /* 给net_device结构体上锁 */
- netif_addr_lock_bh(netdev);
-
- /* 判断此mac地址类型 */
- if (is_unicast_ether_addr(addr))
- {
- /* 将此mac地址添加到地址链表中 */
- err = list_hw_addr_create(&netdev->uc, addr, netdev->addr_len, \
- NETDEV_HW_ADDR_T_UNICAST, false, false);
- if (err != 0)
- {
- LOG_ERR("list_hw_addr_create failed\n");
- }
- }
- else
- {
- err = list_hw_addr_create(&netdev->mc, addr, netdev->addr_len, \
- NETDEV_HW_ADDR_T_MULTICAST, false, false);
- if (err != 0)
- {
- LOG_ERR("list_hw_addr_create failed\n");
- }
- }
-
- /* 给net_device结构体释放锁 */
- netif_addr_unlock_bh(netdev);
-
- return err;
-}
-
-/**
- * zxdh_dev_list_addr_del - 在地址链表中删除此mac地址
- * @netdev: 网络设备结构体
- * @addr: 要删除的mac地址
- * @addr_type: mac地址类型
- */
-int32_t zxdh_dev_list_addr_del(struct net_device *netdev, const uint8_t *addr)
-{
- struct netdev_hw_addr *ha = NULL;
- int32_t err = 0;
-
- /* 给net_device上锁 */
- netif_addr_lock_bh(netdev);
-
- if (is_unicast_ether_addr(addr))
- {
- /* 遍历单播mac地址链表 */
- list_for_each_entry(ha, &netdev->uc.list, list)
- {
- /* 检查该单播地址链表中是否存在此mac,且此mac地址标志为单播 */
- if ((!memcmp(ha->addr, addr, netdev->addr_len)) \
- && (ha->type == NETDEV_HW_ADDR_T_UNICAST))
- {
- /* 从单播地址链表中删除此mac */
- err = list_hw_addr_del(&netdev->uc, ha);
- if (err != 0)
- {
- LOG_ERR("list_hw_addr_del failed\n");
- }
- goto out;
- }
- }
- }
- else
- {
- /* 遍历组播mac地址链表 */
- list_for_each_entry(ha, &netdev->mc.list, list)
- {
- /* 检查该组播地址链表中是否存在此mac,且此mac地址标志为组播 */
- if ((!memcmp(ha->addr, addr, netdev->addr_len)) \
- && (ha->type == NETDEV_HW_ADDR_T_MULTICAST))
- {
- /* 从组播地址链表中删除此mac */
- err = list_hw_addr_del(&netdev->mc, ha);
- if (err != 0)
- {
- LOG_ERR("list_hw_addr_del failed\n");
- }
- goto out;
- }
- }
- }
-
-out:
- /* 给net_device结构体释放锁 */
- netif_addr_unlock_bh(netdev);
-
- return err;
-}
-
-#ifdef MAC_CONFIG_DEBUG
-int32_t zxdh_pf_dump_all_mac(struct zxdh_en_device *en_dev)
-{
- MAC_VPORT_INFO *unicast_mac_arry = NULL;
- MAC_VPORT_INFO *multicast_mac_arry = NULL;
- uint32_t current_unicast_num = 0;
- uint32_t current_multicast_num = 0;
- int32_t err = 1;
- int32_t i = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /* 开辟单播数组和组播数组*/
- unicast_mac_arry = (MAC_VPORT_INFO *)kzalloc(sizeof(MAC_VPORT_INFO)*UNICAST_MAX_NUM, GFP_KERNEL);
- if (unicast_mac_arry == NULL)
- {
- LOG_ERR("kzalloc unicast_mac_arry failed \n");
- return err;
- }
-
- multicast_mac_arry = (MAC_VPORT_INFO *)kzalloc(sizeof(MAC_VPORT_INFO)*MULTICAST_MAX_NUM, GFP_KERNEL);
- if (multicast_mac_arry == NULL)
- {
- LOG_ERR("kzalloc multicast_mac_arry failed \n");
- goto out1;
- }
-
- /* 从NP中dump所有单播mac地址*/
- err = dpp_unicast_mac_dump(&pf_info, unicast_mac_arry, ¤t_unicast_num);
- if (err != 0)
- {
- LOG_ERR("dpp_unicast_mac_dump failed\n");
- goto out2;
- }
-
- /* 从NP中dump所有组播mac地址*/
- err = dpp_multicast_mac_dump(&pf_info, multicast_mac_arry, ¤t_multicast_num);
- if (err != 0)
- {
- LOG_ERR("dpp_multicast_mac_dump failed\n");
- goto out2;
- }
-
- for(i = 0; i < current_unicast_num; ++i)
- {
- LOG_INFO("unicast_mac_arry[%d].vport is %#x\n", i, unicast_mac_arry[i].vport);
- LOG_INFO("unicast_mac_arry[%d].mac is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", i, \
- unicast_mac_arry[i].addr[0], unicast_mac_arry[i].addr[1], \
- unicast_mac_arry[i].addr[2], unicast_mac_arry[i].addr[3], \
- unicast_mac_arry[i].addr[4], unicast_mac_arry[i].addr[5]);
- }
- for(i = 0; i < current_multicast_num; ++i)
- {
- LOG_INFO("multicast_mac_arry[%d].vport is %#x\n", i, multicast_mac_arry[i].vport);
- LOG_INFO("multicast_mac_arry[%d].mac is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", i, \
- multicast_mac_arry[i].addr[0], multicast_mac_arry[i].addr[1], \
- multicast_mac_arry[i].addr[2], multicast_mac_arry[i].addr[3], \
- multicast_mac_arry[i].addr[4], multicast_mac_arry[i].addr[5]);
- }
-
-out2:
- if (multicast_mac_arry != NULL)
- {
- kfree(multicast_mac_arry);
- }
-
-out1:
- if (unicast_mac_arry != NULL)
- {
- kfree(unicast_mac_arry);
- }
-
- return err;
-}
-#endif /* MAC_CONFIG_DEBUG */
-
-int32_t unicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev, \
- const uint8_t* addr, uint16_t flags)
-{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /* 判断目前所配置mac地址数量是否超过上限 */
- if (en_dev->curr_unicast_num >= DEV_UNICAST_MAX_NUM-1)
- {
- LOG_ERR("curr_unicast_num is beyond maximum\n");
- return -ENOSPC;
- }
-
- /* 遍历单播地址链表,判断是否存在此单播mac */
- if (is_this_mac_exist(dev, addr))
- {
- LOG_DEBUG("Mac already exists\n");
- if (!(flags & NLM_F_EXCL))
- {
- return 0;
- }
- return -EEXIST;
- }
-
- /* 如果待配置mac和本机mac相同,则不配置到NP中, 只将此mac添加到单播地址链表中 */
- if (!memcmp(addr, dev->dev_addr, dev->addr_len))
- {
- goto out;
- }
-
- /* 将此mac地址配置到np中 */
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- err = dpp_add_mac(&pf_info, addr);
- if (err != 0)
- {
- LOG_ERR("dpp_add_mac failed\n");
- return err;
- }
- }
- else
- {
- err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_add_mac failed\n");
- return err;
- }
- }
-
-out:
- /* 将此单播mac地址添加到地址链表中 */
- err = zxdh_dev_list_addr_add(dev, addr);
- if (err != 0)
- {
- LOG_ERR("zxdh_dev_list_addr_add failed\n");
- return err;
- }
- en_dev->curr_unicast_num++;
- LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num);
- return err;
-}
-
-bool is_ipv6_mulicast_mac(const uint8_t *mac)
-{
- return ((mac[0] == 0x33) && (mac[1] == 0x33));
-}
-
-int32_t multicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev, \
- const uint8_t* addr, uint16_t flags)
-{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (is_ipv6_mulicast_mac(addr))
- {
- LOG_ERR("invlaid ipv6 mac address\n");
- return -EINVAL;
- }
-
- /* 遍历组播地址链表,判断是否存在此mac */
- if (is_this_mac_exist(dev, addr))
- {
- LOG_DEBUG("Mac already exists\n");
- if (!(flags & NLM_F_EXCL))
- {
- return 0;
- }
- return -EEXIST;
- }
-
- /* 将此组播mac地址配置到np中 */
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- err = dpp_multi_mac_add_member(&pf_info, addr);
- if (err != 0)
- {
- if (err == DPP_RC_TABLE_RANGE_INVALID)
- {
- LOG_ERR("multicast mac is beyond 32\n");
- return -ENOSPC;
- }
- LOG_ERR("dpp_multi_mac_add_member failed\n");
- return err;
- }
- }
- else
- {
- err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC);
- if (err != 0)
- {
- if(err == DPP_RC_TABLE_RANGE_INVALID)
- {
- LOG_ERR("multicast mac is beyond 32\n");
- return -ENOSPC;
- }
- LOG_ERR("zxdh_vf_dpp_add_mac failed\n");
- return err;
- }
- }
-
- /* 将此组播mac地址添加到地址链表中 */
- err = zxdh_dev_list_addr_add(dev, addr);
- if (err != 0)
- {
- LOG_ERR("zxdh_dev_list_addr_add failed\n");
- return err;
- }
- return err;
-}
-
-int32_t unicast_mac_del(struct zxdh_en_device *en_dev, struct net_device *dev, const uint8_t* addr)
-{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /* 判断目前所配置mac地址数量是否小于0 */
- if (en_dev->curr_unicast_num <= 0)
- {
- LOG_ERR("curr_unicast_num is less than 0\n");
- return -ENOENT;
- }
-
- /* 遍历单播地址链表,判断是否存在此mac */
- if(!is_this_mac_exist(dev, addr))
- {
- LOG_DEBUG("Mac is not exists\n");
- return -ENOENT;
- }
-
- /* 如果待删除mac和本机mac相同,则不从NP中删除,只从链表中删除 */
- if (!memcmp(addr, dev->dev_addr, dev->addr_len))
- {
- goto out;
- }
-
- /* 从np中删除此单播mac */
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- /* 此设备为PF */
- err = dpp_del_mac(&pf_info, addr);
- if (err != 0)
- {
- LOG_ERR("dpp_del_mac failed\n");
- return err;
- }
- LOG_DEBUG("dpp_del_mac succeed\n");
- }
- else
- {
- /* 此设备为VF */
- err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_del_mac failed\n");
- return err;
- }
- LOG_DEBUG("zxdh_vf_dpp_del_mac succeed\n");
- }
-
-out:
- /* 从链表中删除单播mac */
- err = zxdh_dev_list_addr_del(dev, addr);
- if (err != 0)
- {
- LOG_ERR("zxdh_dev_list_addr_del failed\n");
- return err;
- }
- en_dev->curr_unicast_num--;
- LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num);
- return err;
-}
-
-int32_t multicast_mac_del(struct zxdh_en_device *en_dev, struct net_device *dev, const uint8_t* addr)
-{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (is_ipv6_mulicast_mac(addr))
- {
- LOG_ERR("invlaid ipv6 mac address\n");
- return -EINVAL;
- }
-
- /* 遍历组播地址链表,判断是否存在此组播mac,如果不存在,则返回报错 */
- if(!is_this_mac_exist(dev, addr))
- {
- LOG_DEBUG("Mac is not exists\n");
- return -ENOENT;
- }
-
- /* 从np中删除此组播mac */
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- /* 此设备为PF */
- err = dpp_multi_mac_del_member(&pf_info, addr);
- if (err != 0)
- {
- LOG_ERR("dpp_multi_mac_del_member failed\n");
- return err;
- }
- LOG_DEBUG("dpp_multi_mac_del_member succeed\n");
- }
- else
- {
- /* 此设备为VF */
- err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_del_mac failed\n");
- return err;
- }
- }
-
- /* 从链表中删除组播mac */
- err = zxdh_dev_list_addr_del(dev, addr);
- if (err != 0)
- {
- LOG_ERR("zxdh_dev_list_addr_del failed\n");
- return err;
- }
-
- return err;
-}
-
-static unsigned int mac_hash(struct zxdh_ipv6_mac_tbl *mac_tbl, const uint8_t *mac_addr)
-{
- unsigned int mact_size_half = mac_tbl->ip6mact_size / 2;
- uint32_t mac_part1 = (mac_addr[0] << 24) | (mac_addr[1] << 16) | (mac_addr[2] << 8) | mac_addr[3];
- uint32_t mac_part2 = (mac_addr[4] << 8) | mac_addr[5];
-
- uint32_t xor = mac_part1 ^ mac_part2;
-
- return (jhash_1word(xor, 0) % mact_size_half);
-}
-
-int32_t zxdh_ip6mac_to_np(struct zxdh_en_device *en_dev, struct zxdh_ipv6_mac_tbl *ip6mac_tbl, const uint8_t *ip6mac, uint8_t action, bool need_lock)
-{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- switch (action)
- {
- case ADD_IP6MAC:
- {
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- { /* PF流程 */
- if (need_lock)
- {
- write_lock_bh(&ip6mac_tbl->lock);
- }
- /* 将此组播mac地址配置到np中 */
- err = dpp_multi_mac_add_member(&pf_info, ip6mac);
- if (err != 0)
- {
- LOG_ERR("dpp_multi_mac_add_member failed, err:%d\n", err);
- }
- if (need_lock)
- {
- write_unlock_bh(&ip6mac_tbl->lock);
- }
- }
- else
- { /* VF流程*/
- err = zxdh_vf_dpp_add_ipv6_mac(en_dev, ip6mac);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_add_ipv6_mac failed, err:%d\n", err);
- }
- }
- break;
- }
- case DEL_IP6MAC:
- {
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {/* PF流程 */
- if (need_lock)
- {
- write_lock_bh(&ip6mac_tbl->lock);
- }
- err = dpp_multi_mac_del_member(&pf_info, ip6mac);
- if (err != 0)
- {
- LOG_ERR("dpp_multi_mac_del_member failed, err:%d\n", err);
- }
- if (need_lock)
- {
- write_unlock_bh(&ip6mac_tbl->lock);
- }
- }
- else
- {/* VF流程 */
- err = zxdh_vf_dpp_del_ipv6_mac(en_dev, ip6mac);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_del_ipv6_mac failed, err:%d\n", err);
- }
- }
- break;
- }
- }
- return err;
-}
-
-int32_t zxdh_ip6mac_add(struct zxdh_en_device *en_dev, const uint32_t *addr6, const uint8_t *ip6mac)
-{
- int32_t err = 0;
- unsigned int mac_hash_val;
- struct zxdh_ipv6_mac_tbl *ip6mac_tbl = en_dev->ops->get_ip6mac_tbl(en_dev->parent);
- struct zxdh_ipv6_mac_entry *ce, *cte;
-
- if (!ip6mac_tbl)
- {
- LOG_ERR("ip6mac_tbl is NULL\n");
- return -ENXIO;
- }
-
- err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, ADD_IP6MAC, TRUE);
- if (err != 0)
- {
- return err;
- }
-
- mac_hash_val = mac_hash(ip6mac_tbl, ip6mac);
- //如果没有报错,则说明MAC已经存在或成功存入NP
- read_lock_bh(&ip6mac_tbl->lock);
- list_for_each_entry(cte, &ip6mac_tbl->hash_list[mac_hash_val], list)
- {
- if (memcmp(cte->ipv6_mac, ip6mac, ETH_ALEN) == 0)
- {//MAC已经存在
- ce = cte;
- refcount_inc(&ce->refcnt);
- LOG_INFO("Increase Multicast MAC Address(%pM) refcnt:%d\n", ip6mac, refcount_read(&ce->refcnt));
- read_unlock_bh(&ip6mac_tbl->lock);
- return 0;
- }
- }
- read_unlock_bh(&ip6mac_tbl->lock);
-
- //成功新增MAC至NP,需要更新 ip6mac_tbl
- write_lock_bh(&ip6mac_tbl->lock);
- if (list_empty(&ip6mac_tbl->ip6mac_free_head))
- {
- write_unlock_bh(&ip6mac_tbl->lock);
- err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC, TRUE);
- LOG_ERR("ip6mac_tbl overflow, can't add; del mac from NP, ret:%d\n",err);
- return -ENOMEM;
- }
- ce = list_first_entry(&ip6mac_tbl->ip6mac_free_head, struct zxdh_ipv6_mac_entry, list);
- list_del(&ce->list);
- INIT_LIST_HEAD(&ce->list);
- spin_lock_init(&ce->lock);
- refcount_set(&ce->refcnt, 0);
- list_add_tail(&ce->list, &ip6mac_tbl->hash_list[mac_hash_val]);
- memcpy(ce->ipv6_mac, ip6mac, ETH_ALEN);
- refcount_set(&ce->refcnt, 1);
- write_unlock_bh(&ip6mac_tbl->lock);
- LOG_INFO("Add New Multicast MAC Address: %pM, refcnt:%d\n", ip6mac, refcount_read(&ce->refcnt));
-
- return 0;
-}
-
-int32_t zxdh_ip6mac_del(struct zxdh_en_device *en_dev, const uint32_t *addr6, const uint8_t *ip6mac)
-{
- int32_t err = 0;
- struct zxdh_ipv6_mac_tbl *ip6mac_tbl = en_dev->ops->get_ip6mac_tbl(en_dev->parent);
- struct zxdh_ipv6_mac_entry *ce, *cte;
- unsigned int mac_hash_val;
- int32_t refcnt = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- if (!ip6mac_tbl)
- {
- LOG_ERR("ip6mac_tbl is NULL");
- return -ENXIO;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- mac_hash_val = mac_hash(ip6mac_tbl, ip6mac);
-
- read_lock_bh(&ip6mac_tbl->lock);
- list_for_each_entry(cte, &ip6mac_tbl->hash_list[mac_hash_val], list)
- {
- if (memcmp(cte->ipv6_mac, ip6mac, ETH_ALEN) == 0)
- {//MAC存在
- ce = cte;
- read_unlock_bh(&ip6mac_tbl->lock);
- goto found;
- }
- }
- read_unlock_bh(&ip6mac_tbl->lock);
- LOG_INFO("Don't Found Multicast MAC Address: %pM in Hash List\n", ip6mac);
- return zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC, TRUE);
-
-found:
- write_lock_bh(&ip6mac_tbl->lock);
- spin_lock_bh(&ce->lock);
- if (!refcount_dec_and_test(&ce->refcnt))
- {
- LOG_INFO("Decrease Multicast MAC Address(%pM) refcnt:%d\n", ip6mac, refcount_read(&ce->refcnt));
- spin_unlock_bh(&ce->lock);
- write_unlock_bh(&ip6mac_tbl->lock);
- return err;
- }
- //如果引用计数减到0
- list_del(&ce->list);
- INIT_LIST_HEAD(&ce->list);
- list_add_tail(&ce->list, &ip6mac_tbl->ip6mac_free_head);
- refcnt = refcount_read(&ce->refcnt);
- spin_unlock_bh(&ce->lock);
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- { //VF设备需要先释放锁再发消息下表,因为PF侧会加锁
- write_unlock_bh(&ip6mac_tbl->lock);
- err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC, FALSE);
- }
- else
- { //PF设备需要带锁下表
- err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC, FALSE);
- write_unlock_bh(&ip6mac_tbl->lock);
- }
- LOG_INFO("Del Multicast MAC Address: %pM Completely, refcnt:%d, np ret:%d\n", ip6mac, refcnt, err);
- return err;
-}
-
-int32_t zxdh_en_set_vepa(struct zxdh_en_device *en_dev, bool setting)
-{
- struct zxdh_vf_item *vf_item = NULL;
- bool vepa = false;
- uint16_t vf_idx = 0;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- vepa = en_dev->ops->get_vepa(en_dev->parent);
- if (setting == vepa)
- {
- LOG_ERR("vport(0x%x) is now %s mode\n", en_dev->vport, vepa?"vepa":"veb");
- return 0;
- }
-
- en_dev->ops->set_vepa(en_dev->parent, setting);
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF, (uint32_t)setting);
- if (ret != 0)
- {
- LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", en_dev->vport, setting?"vepa":"veb", ret);
- return ret;
- }
-
- for (vf_idx = 0; vf_idx < ZXDH_VF_NUM_MAX; vf_idx++)
- {
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if (IS_ERR_OR_NULL(vf_item))
- {
- break;
- }
-
- if (vf_item->is_probed)
- {
- pf_info.vport = vf_item->vport;
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF, (uint32_t)setting);
- if (ret != 0)
- {
- LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", vf_item->vport, setting?"vepa":"veb", ret);
- return ret;
- }
- LOG_INFO("Configure vport(0x%x) to %s mode\n", vf_item->vport, setting?"vepa":"veb");
- }
- }
-
- LOG_INFO("Configure vport(0x%x) to %s mode\n", en_dev->vport, setting?"vepa":"veb");
-
- return ret;
-}
-
-#ifdef HAVE_FDB_OPS
-#if defined(HAVE_NDO_FDB_ADD_EXTACK)
-static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags, struct netlink_ext_ack *extack)
-#elif defined(HAVE_NDO_FDB_ADD_VID)
-static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags)
-#elif defined(HAVE_NDO_FDB_ADD_NLATTR)
-static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 flags)
-#elif defined(USE_CONST_DEV_UC_CHAR)
-static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
- const unsigned char *addr, u16 flags)
-#else
-static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
- unsigned char *addr, u16 flags)
-#endif
-{
- struct zxdh_en_priv *en_priv = netdev_priv(dev);
- struct zxdh_en_device *en_dev = &en_priv->edev; /*aux层net_device的私有结构体 */
- int32_t err = 0;
-
-#ifdef MAC_CONFIG_DEBUG
- LOG_DEBUG("vport is %#x\n", en_dev->vport);
- LOG_DEBUG("addr is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", \
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
- LOG_DEBUG("ndm_state is %u\n", ndm->ndm_state);
-#endif /* MAC_CONFIG_DEBUG */
-
- /* 检查这个设备的ndm状态是否是静态的 */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT))
- {
- LOG_ERR("FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- /* 判断mac地址是否全0 */
- if (is_zero_ether_addr(addr))
- {
- LOG_ERR("Invalid mac\n");
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- {
- err = unicast_mac_add(en_dev, dev, addr, flags);
- if (err != 0)
- {
- LOG_ERR("unicast_mac_add failed");
- return err;
- }
- }
- else if (is_multicast_ether_addr(addr))
- {
- err = multicast_mac_add(en_dev, dev, addr, flags);
- if (err != 0)
- {
- LOG_ERR("multicast_mac_add failed");
- return err;
- }
- }
- else
- {
- err = -EINVAL;
- }
-
-#ifdef MAC_CONFIG_DEBUG
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- err = zxdh_pf_dump_all_mac(en_dev);
- if (err != 0)
- {
- LOG_INFO("zxdh_pf_dump_all_mac failed\n");
- return err;
- }
- }
-#endif /* MAC_CONFIG_DEBUG */
-
- LOG_DEBUG("zxdh_en_ndo_fdb_add end\n");
- return err;
-}
-
-#ifdef HAVE_NDO_FEATURES_CHECK
-static netdev_features_t zxdh_en_features_check(struct sk_buff *skb, struct net_device *dev,
- netdev_features_t features)
-{
- return features;
-}
-#endif /* HAVE_NDO_FEATURES_CHECK */
-
-#ifdef USE_CONST_DEV_UC_CHAR
-#ifdef HAVE_NDO_FDB_ADD_VID
-static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct nlattr **nla, struct net_device *dev,
- const unsigned char *addr, u16 vid)
-#else
-static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
- const unsigned char *addr)
-#endif
-#else
-#ifdef HAVE_NDO_FDB_ADD_VID
-static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
- unsigned char *addr, u16 vid)
-#else
-static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
- unsigned char *addr)
-#endif
-#endif
-{
- struct zxdh_en_priv *en_priv = netdev_priv(dev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t err = 0;
-
-#ifdef MAC_CONFIG_DEBUG
- LOG_DEBUG("the vport is %#x",en_dev->vport);
- LOG_DEBUG("the addr is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",\
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
- LOG_DEBUG("ndm_state is %u,\n", ndm->ndm_state);
-#endif /* MAC_CONFIG_DEBUG */
-
- /* 检查这个设备的ndm状态是否是静态的 */
- if (!(ndm->ndm_state & NUD_PERMANENT))
- {
- LOG_ERR("FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- /* 地址是否全为0 */
- if (is_zero_ether_addr(addr))
- {
- LOG_ERR("Invalid mac address\n");
- return -EINVAL;
- }
-
- /* 根据mac地址类型,对相对应地址链表做删除操作 */
- if (is_unicast_ether_addr(addr))
- {
- err = unicast_mac_del(en_dev, dev, addr);
- if (err != 0)
- {
- LOG_ERR("unicast_mac_del failed\n");
- return err;
- }
- }
- else if (is_multicast_ether_addr(addr))
- {
- err = multicast_mac_del(en_dev, dev, addr);
- if (err != 0)
- {
- LOG_ERR("multicast_mac_del failed\n");
- return err;
- }
- }
- else
- {
- return -EINVAL;
- }
-
-#ifdef MAC_CONFIG_DEBUG
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- /*先dump所有mac地址*/
- err = zxdh_pf_dump_all_mac(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_pf_dump_all_mac failed\n");
- return err;
- }
- }
-#endif /* MAC_CONFIG_DEBUG */
-
- LOG_DEBUG("zxdh_en_ndo_fdb_del end\n");
- return err;
-}
-
-#ifdef HAVE_BRIDGE_ATTRIBS
-#if defined(HAVE_NDO_BRIDGE_SETLINK_EXTACK)
-static int zxdh_en_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 flags, struct netlink_ext_ack *extack)
-#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS)
-static int zxdh_en_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
-#else
-static int zxdh_en_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
-#endif
-{
- struct zxdh_en_priv *en_priv = netdev_priv(dev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct nlattr *attr = NULL;
- struct nlattr *br_spec = NULL;
- int32_t rem = 0;
- uint16_t mode = BRIDGE_MODE_UNDEF;
- bool setting = false;
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- return -EOPNOTSUPP;
- }
-
- br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
- if (br_spec == NULL)
- {
- return -EINVAL;
- }
-
- nla_for_each_nested(attr, br_spec, rem)
- {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- {
- continue;
- }
-
- if (nla_len(attr) < sizeof(mode))
- {
- return -EINVAL;
- }
-
- mode = nla_get_u16(attr);
- if (mode > BRIDGE_MODE_VEPA)
- {
- return -EINVAL;
- }
- break;
- }
-
- if (mode == BRIDGE_MODE_UNDEF)
- {
- return -EINVAL;
- }
-
- setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
-
- return zxdh_en_set_vepa(en_dev, setting);
-}
-
-#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
-static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 __always_unused filter_mask,
- int nlflags)
-#elif defined(HAVE_BRIDGE_FILTER)
-static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 __always_unused filter_mask)
-#else
-static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
-#endif /* NDO_BRIDGE_STUFF */
-{
- struct zxdh_en_priv *en_priv = netdev_priv(dev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t mode = 0;
- bool vepa = false;
-
- vepa = en_dev->ops->get_vepa(en_dev->parent);
- mode = vepa ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
-
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, filter_mask, NULL);
-}
-#endif /* HAVE_BRIDGE_ATTRIBS */
-#endif /* HAVE_FDB_OPS */
-
-static int32_t zxdh_pf_notify_vf_reset(struct zxdh_en_device *en_dev, int vf_idx)
-{
- int32_t retval = 0;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr_vf.op_code = ZXDH_SET_VF_RESET;
- msg.payload.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
-
- retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF, &msg, &msg);
- if(retval != 0)
- {
- LOG_ERR("zxdh_send_command_to_vf failed: %d\n", retval);
- }
- return retval;
-}
-
-static int32_t zxdh_pf_notify_vf_set_link_state(struct zxdh_en_device *en_dev, int vf_idx, bool link_up)
-{
- int32_t retval = 0;
- uint16_t func_no = 0;
- uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id);
- uint8_t link_info = 0;
- uint8_t link_up_val = 0;
- uint8_t phyport_val = 0;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY;
- msg.payload.hdr_to_agt.pcie_id = en_dev->pcie_id;
-
- func_no = GET_FUNC_NO(pf_no, vf_idx);
- LOG_DEBUG("vf_idx:%d, func_no=0x%x\n",vf_idx,func_no);
- msg.payload.pcie_msix_msg.func_no[msg.payload.pcie_msix_msg.num++] = func_no;
- if(en_dev->ops->is_bond(en_dev->parent))
- {
- link_up_val = link_up ? 1 : 0;
- phyport_val = en_dev->ops->get_pf_phy_port(en_dev->parent);
- link_info = (phyport_val & 0x0F) << 4 | (link_up_val & 0x0F);
- LOG_DEBUG("phyport and link_up need write to VQM, val: 0x%x\n", link_info);
- en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, link_info);
- }
- else
- {
- en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, link_up ? 1 : 0);
- }
- LOG_DEBUG("msg.pcie_msix_msg.num:%d\n", msg.payload.pcie_msix_msg.num);
- retval = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (retval != 0)
- {
- LOG_ERR("failed to update VF link info\n");
- }
- return retval;
-}
-
-static int32_t zxdh_pf_set_vf_link_state(struct zxdh_en_device *en_dev, int vf_idx, int link_status)
-{
- int32_t retval = 0;
- struct zxdh_vf_item *vf_item = NULL;
- bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
-
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- switch (link_status)
- {
- case IFLA_VF_LINK_STATE_AUTO:
- LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state auto\n", vf_idx);
- vf_item->link_forced = FALSE;
- vf_item->link_up = pf_link_up;
- break;
- case IFLA_VF_LINK_STATE_ENABLE:
- LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state enable\n", vf_idx);
- vf_item->link_forced = TRUE;
- vf_item->link_up = TRUE;
- break;
- case IFLA_VF_LINK_STATE_DISABLE:
- LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state disable\n", vf_idx);
- vf_item->link_forced = TRUE;
- vf_item->link_up = FALSE;
- break;
- default:
- LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF %d - invalid link status %d\n", vf_idx, link_status);
- return -EINVAL;
- }
- LOG_DEBUG("vf_item->is_probed: %s\n", vf_item->is_probed?"TRUE":"FALSE");
- if(vf_item->is_probed)
- {
- /* Notify the VF of its new link state */
- retval = zxdh_pf_notify_vf_set_link_state(en_dev, vf_idx, vf_item->link_up);
- if (0 != retval)
- {
- LOG_ERR("[SET_VF_LINK_STATE]--Failed to set VF %d link state %d\n", vf_idx, vf_item->link_up);
- return retval;
- }
- }
- return retval;
-}
-
-int zxdh_en_ndo_set_vf_link_state(struct net_device *netdev, int vf_idx, int link_status)
-{
- int num_vfs = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct pci_dev *pdev = NULL;
- struct dh_core_dev *dh_dev = NULL;
-
- dh_dev = en_dev->parent;
- pdev = en_dev->ops->get_pdev(dh_dev);
- num_vfs = pci_num_vf(pdev);
- if ((vf_idx < 0) || (vf_idx >= num_vfs))
- {
- LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF link - invalid VF idx: %d\n", vf_idx);
- return -EINVAL;
- }
- return zxdh_pf_set_vf_link_state(en_dev, vf_idx, link_status);
-}
-
-static int32_t zxdh_pf_set_vf_port_vlan(struct zxdh_en_device *en_dev, int vf_idx, u16 vid, u8 qos, uint16_t vlan_proto)
-{
- int32_t retval = 0;
- struct zxdh_vf_item *vf_item = NULL;
- union zxdh_msg msg = {0};
- DPP_PF_INFO_T pf_info = {0};
-
- /* 获取pf本地保存的vf变量*/
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if(!vf_item->is_probed)
- {
- LOG_DEBUG("vf %d is not probed.\n", vf_idx);
- return -EINVAL;
- }
-
- if (vf_item->vlan == vid)
- {
- return 0;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = vf_item->vport;
- if (vid)
- {
- /* vf端口Vlan strip开启*/
- retval = dpp_vport_vlan_strip_set(&pf_info, 1);
- if (retval != 0)
- {
- LOG_ERR("dpp_vport_vlan_strip_set failed, retval: %d\n", retval);
- return retval;
- }
- /* 将vlan_id add到表项中*/
- retval = dpp_vport_vlan_filter_en_set(&pf_info, 1);
- if (retval != 0)
- {
- LOG_ERR("dpp_vport_vlan_filter_en_set failed, retval: %d\n", retval);
- return retval;
- }
-
- retval = dpp_add_vlan_filter(&pf_info, vid);
- if (0 != retval)
- {
- LOG_ERR("failed to add vlan: %d\n",vid);
- return retval;
- }
- }
- else
- {
- /* vf端口vlan strip关闭*/
- retval = dpp_vport_vlan_strip_set(&pf_info, 0);
- if (retval != 0)
- {
- LOG_ERR("dpp_vport_vlan_strip_set failed, retval: %d\n", retval);
- return retval;
- }
- /* 将Vlan_id 从表项中kill*/
- retval = dpp_vport_vlan_filter_en_set(&pf_info, 0);
- if (retval != 0)
- {
- LOG_ERR("dpp_vport_vlan_filter_en_set failed, retval: %d\n", retval);
- return retval;
- }
-
- retval = dpp_vlan_filter_init(&pf_info);
- if (retval != 0)
- {
- LOG_ERR("dpp_vlan_filter_init failed: %d\n", retval);
- return retval;
- }
- }
-
- msg.payload.hdr_vf.op_code = ZXDH_PF_SET_VF_VLAN;
- msg.payload.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
-
- msg.payload.vf_vlan_msg.vlan_id = vid;
- msg.payload.vf_vlan_msg.qos = qos;
- msg.payload.vf_vlan_msg.protocl = vlan_proto;
- msg.payload.vf_vlan_msg.vf_idx = vf_idx;
-
- retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF, &msg, &msg);
- if(retval != 0)
- {
- LOG_ERR("zxdh_send_command_to_vf failed: %d\n", retval);
- return retval;
- }
-
- /* 更新pf本地的vf vlan信息,用于ip link show显示*/
- vf_item->vlan = vid;
- vf_item->qos = qos;
- return retval;
-}
-
-
-int zxdh_en_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_vf_item *vf_item = NULL;
- int32_t retval = 0;
- bool delete_flag = true;
- uint8_t *addr = NULL;
- uint8_t i = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id);
- if (IS_ERR_OR_NULL(vf_item))
- {
- LOG_ERR("Failed to get vf_item, vf_id:%d\n", vf_id);
- return PTR_ERR(vf_item);
- }
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = vf_item->vport;
-
- if (is_multicast_ether_addr(mac))
- {
- LOG_ERR("Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
- return -EINVAL;
- }
-
- if (ether_addr_equal(vf_item->mac, mac))
- {
- LOG_INFO("[SET_VF_MAC]--already using mac address %pM\n", mac);
- return retval;
- }
-
- if (is_zero_ether_addr(mac))
- {
- eth_zero_addr(vf_item->mac);
- vf_item->pf_set_mac = false;
- en_dev->ops->set_vf_mac(en_dev->parent, mac, vf_id);
- eth_zero_addr(vf_item->vf_mac_info.unicast_mac[0]);
- goto vf_reset;
- }
-
- for (i = 1; i < DEV_UNICAST_MAX_NUM; ++i)
- {
- addr = vf_item->vf_mac_info.unicast_mac[i];
- if (!memcmp(vf_item->mac, addr, netdev->addr_len))
- {
- delete_flag = false;
- }
- }
-
- if (delete_flag)
- {
- if (!is_zero_ether_addr(vf_item->mac))
- {
- retval = dpp_del_mac(&pf_info, vf_item->mac);
- if (retval != 0)
- {
- LOG_ERR("delete vf old mac in NP failed.\n");
- return retval;
- }
- }
- }
-
- vf_item->pf_set_mac = true;
- en_dev->ops->set_vf_mac(en_dev->parent, mac, vf_id);
- ether_addr_copy(vf_item->vf_mac_info.unicast_mac[0], mac);
- ether_addr_copy(vf_item->mac, mac);
- LOG_INFO("[SET_VF_MAC]--setting MAC %pM on VF %d\n", mac, vf_id);
-
-vf_reset:
- if (vf_item->is_probed)
- {
- retval = zxdh_pf_notify_vf_reset(en_dev, vf_id);
- if(retval != 0)
- {
- LOG_ERR("zxdh_pf_notify_vf_reset failed: %d\n", retval);
- }
- }
-
- return retval;
-}
-
-#ifdef IFLA_VF_VLAN_INFO_MAX
-int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
- u16 vlan_id, u8 qos, __be16 vlan_proto)
-#else
-int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos)
-#endif /* IFLA_VF_VLAN_INFO_MAX */
-{
- int num_vfs = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct pci_dev *pdev = NULL;
- struct dh_core_dev *dh_dev = NULL;
-
- /* Comparing with the mellnox network card, it only supports the configuration of cvlan*/
- if (vlan_proto != htons(ETH_P_8021Q))
- {
- return -EPROTONOSUPPORT;
- }
- dh_dev = en_dev->parent;
- pdev = en_dev->ops->get_pdev(dh_dev);
- num_vfs = pci_num_vf(pdev);
- if ((vf_id < 0) || (vf_id >= num_vfs))
- {
- LOG_ERR("[SET+VF_VLAN]--NDO set VF vlan - invalid VF idx: %d\n", vf_id);
- return -EINVAL;
- }
- return zxdh_pf_set_vf_port_vlan(en_dev, vf_id, vlan_id, qos, vlan_proto);
-}
-
-int zxdh_en_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate)
-{
- return 0;
-}
-
-int zxdh_en_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_vf_item *vf_item = NULL;
-
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if (IS_ERR_OR_NULL(vf_item))
- {
- LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
- return PTR_ERR(vf_item);
- }
-
- ivi->vf = vf_idx;
-
- ether_addr_copy(ivi->mac, vf_item->mac);
-
-#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
- ivi->max_tx_rate = vf_item->max_tx_rate;
- ivi->min_tx_rate = vf_item->min_tx_rate;
-#else
- ivi->tx_rate = vf_item->max_tx_rate;
-#endif
-
- ivi->vlan = vf_item->vlan;
- ivi->qos = vf_item->qos;
-
-#ifdef HAVE_NDO_SET_VF_LINK_STATE
- if (vf_item->link_forced == false)
- {
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- }
- else if (vf_item->link_up == true)
- {
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- }
- else
- {
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- }
-#endif
-
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- ivi->spoofchk = vf_item->spoofchk;
-#endif
-
-#ifdef HAVE_NDO_SET_VF_TRUST
- ivi->trusted = vf_item->trusted;
-#endif
-
- return 0;
-}
-
-int zxdh_en_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_idx, bool enable)
-{
- int ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_vf_item *vf_item = NULL;
- DPP_PF_INFO_T pf_info = {0};
-
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if (IS_ERR_OR_NULL(vf_item))
- {
- LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
- return PTR_ERR(vf_item);
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = vf_item->vport;
- vf_item->spoofchk = enable;
- LOG_INFO("vf %d spoof check is %s\n", vf_idx, vf_item->spoofchk? "on" : "off");
- if (vf_item->is_probed)
- {
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_SPOOFCHK_EN_OFF, enable);
- if (0 != ret)
- {
- LOG_ERR("[SET_VF_SPOOFCHK]--Failed to set vf %d spookchk %s\n", vf_idx, enable ? "on" : "off");
- return ret;
- }
- }
- return ret;
-}
-
-#ifdef HAVE_NDO_SET_VF_TRUST
-int zxdh_en_ndo_set_vf_trust(struct net_device *netdev, int vf_idx, bool setting)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_vf_item *vf_item = NULL;
- DPP_PF_INFO_T pf_info = {0};
-
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if (IS_ERR_OR_NULL(vf_item))
- {
- LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
- return PTR_ERR(vf_item);
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = vf_item->vport;
- vf_item->trusted = setting;
- LOG_INFO("VF %u is now %strusted\n", vf_idx, setting ? "" : "un");
- if (vf_item->is_probed && !vf_item->trusted)
- {
- LOG_DEBUG("vport[0x%x] promisc and allmulti off\n", vf_item->vport);
- vf_item->promisc = false;
- vf_item->mc_promisc = false;
- dpp_vport_uc_promisc_set(&pf_info, vf_item->promisc);
- dpp_vport_mc_promisc_set(&pf_info, vf_item->mc_promisc);
- }
-
- return 0;
-}
-#endif
-
-int zxdh_en_ndo_set_tx_maxrate(struct net_device *netdev, int qid, uint32_t max_rate)
-{
- int rtn = 0;
- zxdh_plcr_rate_limit_paras rate_limit_paras;
-
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct dh_core_dev *dh_dev = en_dev->parent;
- struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent);
-
- PLCR_FUNC_DBG_ENTER();
-
- /*1. 入参检测:队列号不能超过vf下实际的队列数*/
- if (qid >= en_dev->curr_queue_pairs)
- {
- LOG_ERR("zxdh_en_ndo_set_tx_maxrate : invalid parameter qid=%d\n", qid);
- return -EINVAL;
- }
-#if 0
- if (!en_dev->link_up)
- {
- LOG_ERR("[EN SET TX MAXRATE]--PF is not link up.\n");
- return -EINVAL;
- }
- link_speed = en_dev->link_speed;
-#endif
-
- rate_limit_paras.req_type = E_RATE_LIMIT_REQ_QUEUE_BYTE;
- rate_limit_paras.direction = E_RATE_LIMIT_TX;
- rate_limit_paras.mode = E_RATE_LIMIT_BYTE ;
- rate_limit_paras.max_rate = max_rate;
- rate_limit_paras.min_rate = 0;
- rate_limit_paras.queue_id = qid;
- rate_limit_paras.vf_idx = PLCR_INVALID_PARAM;
- rate_limit_paras.vfid = PLCR_INVALID_PARAM;
- rate_limit_paras.group_id = PLCR_INVALID_PARAM;
-
- rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras);
- PLCR_COMM_ASSERT(rtn);
-
- PLCR_LOG_INFO("The maxrate of tx-%d has been set to %dMbit/s\n", qid, max_rate);
-
- return rtn;
-}
-
-/**-------------------------------------------------------------------------------------------------------------------@n
- * 功能详述:
- * - zxdh_en_ndo_set_vf_rate函数属于接口函数, 其功能是:
- * - 设置vf端口发送方向,最大速率和最小保证速率
- * - 该接口会挂接到内核的钩子上,函数声明是固定的
- *
- * 基于plcr的端口限速背景:
- * - 1.一级flowid与vqm的2K个(接收和发送)队列是一一映射的
- * - 2.二级flow id与vf num的映射关系
- * 端口限速,需要将vf下的发送队列(即一级flow id)映射到二级flowid
- * 二级flow id的资源是4K,dpu限制vf数量是1K,即二级flow id数量 > vf数量
- * 所以规定固定的映射关系:二级flow id前1K <---> 与1K个vf(发送)一一对应
- * 下面的链接整理了pf下vf转换成全局vf(0-1023)的原理
- * https://i.zte.com.cn/#/space/4e62cb2b730540ff8721c1a8552b2356/wiki/page/ff8178f1304e45dc9457e92ff196cce5/view
- * - 3.vf限速的设置
- * 项目对vf提出了最小保证带宽的需求;
- * 二级CAR的限速模板使用:双速率,三色算法,色敏模式
- * - 4.创建vf的其它考虑
- * 参考mlx的做法,vf创建之后,默认关联到vf组0(注意:>>>>>>>>先交付vf端口限速的需求,这一步可以暂时不实现<<<<<<<<);
- * vf创建之后,用户设置限速才会调用到这里,用户不设置限速,vf(二级flow id)就不用关联限速模板
- *
- * 参数概述:
- * - netdev : 网络设备结构体指针
- * - vf_id :pf内vf的编号(从0开始)
- * - min_tx_rate : 最小保证速率
- * - max_tx_rate : 最大速率
- * - 返回值类型是INT32, 含义是: 错误码,正确时为S_OK
- *
- * 引用(类变量,外部变量,接口函数):
- * - 无
- *
- * 注意:该函数挂接到pf的钩子上,只在pf下执行
- *--------------------------------------------------------------------------------------------------------------------*/
-int zxdh_en_ndo_set_vf_rate(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate)
-{
- int rtn;
- zxdh_plcr_rate_limit_paras rate_limit_paras;
-
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct dh_core_dev *dh_dev = en_dev->parent;
- struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent);
-
- PLCR_FUNC_DBG_ENTER();
-
- rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE;
- rate_limit_paras.direction = E_RATE_LIMIT_TX;
- rate_limit_paras.mode = E_RATE_LIMIT_BYTE ;
- rate_limit_paras.max_rate = max_tx_rate;
- rate_limit_paras.min_rate = min_tx_rate;
- rate_limit_paras.queue_id = PLCR_INVALID_PARAM;
- rate_limit_paras.vf_idx = vf_id;
- rate_limit_paras.vfid = PLCR_INVALID_PARAM;
- rate_limit_paras.group_id = PLCR_INVALID_PARAM;
-
- rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras);
- PLCR_COMM_ASSERT(rtn);
-
- PLCR_LOG_INFO("The Rate of VF%d has been set to: Min Tx Rate: %dMbit/s, Max Tx Rate: %dMbit/s\n",
- vf_id, min_tx_rate, max_tx_rate);
-
- return rtn;
-}
-
-const struct net_device_ops zxdh_netdev_ops = {
- .ndo_open = zxdh_en_open,
- .ndo_stop = zxdh_en_close,
- .ndo_start_xmit = zxdh_en_xmit,
-
-#if defined(HAVE_NDO_GET_STATS64) || defined(HAVE_VOID_NDO_GET_STATS64)
- .ndo_get_stats64 = zxdh_en_get_netdev_stats_struct,
-#else
- .ndo_get_stats = zxdh_en_get_netdev_stats_struct,
-#endif
- .ndo_set_rx_mode = zxdh_en_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = zxdh_en_set_mac,
-
-#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
- .extended.ndo_change_mtu = zxdh_en_change_mtu,
-#else
- .ndo_change_mtu = zxdh_en_change_mtu,
-#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */
-
- .ndo_do_ioctl = zxdh_en_ioctl,
-#ifdef ZXDH_PLCR_OPEN
- .ndo_set_tx_maxrate = zxdh_en_ndo_set_tx_maxrate,
-#endif
- .ndo_tx_timeout = zxdh_en_tx_timeout,
-
-#ifdef HAVE_VLAN_RX_REGISTER
- .ndo_vlan_rx_register = zxdh_en_vlan_rx_register,
-#endif
- .ndo_vlan_rx_add_vid = zxdh_en_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = zxdh_en_vlan_rx_kill_vid,
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = zxdh_en_netpoll,
-#endif
-
-#ifdef HAVE_SETUP_TC
-#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
- .extended.ndo_setup_tc_rh = __zxdh_en_setup_tc,
-#else
-#ifdef NETIF_F_HW_TC
- .ndo_setup_tc = __zxdh_en_setup_tc,
-#else
- .ndo_setup_tc = zxdh_en_setup_tc,
-#endif /* NETIF_F_HW_TC */
-#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */
-#endif /* HAVE_SETUP_TC */
-
-#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
- .ndo_size = sizeof(const struct net_device_ops),
-#endif
-
-#ifdef IFLA_VF_MAX
- .ndo_set_vf_mac = zxdh_en_ndo_set_vf_mac,
-#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
- .extended.ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan,
-#else
- .ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan,
-#endif
-#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
-#ifdef ZXDH_PLCR_OPEN
- .ndo_set_vf_rate = zxdh_en_ndo_set_vf_rate,
-#else
- .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw,
-#endif
-#else
- .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw,
-#endif
- .ndo_get_vf_config = zxdh_en_ndo_get_vf_config,
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
- .ndo_set_vf_spoofchk = zxdh_en_ndo_set_vf_spoofchk,
-#endif
-#ifdef HAVE_NDO_SET_VF_TRUST
-#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
- .extended.ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust,
-#else
- .ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust,
-#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
-#endif /* HAVE_NDO_SET_VF_TRUST */
-#endif /* IFLA_VF_MAX */
-
-#ifdef HAVE_UDP_ENC_RX_OFFLOAD
-#ifdef HAVE_VXLAN_RX_OFFLOAD
-#if IS_ENABLED(CONFIG_VXLAN)
- .ndo_add_vxlan_port = zxdh_en_add_vxlan_port,
- .ndo_del_vxlan_port = zxdh_en_del_vxlan_port,
-#endif
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
-
-#ifdef HAVE_GENEVE_RX_OFFLOAD
-#if IS_ENABLED(CONFIG_GENEVE)
- .ndo_add_geneve_port = zxdh_en_add_geneve_port,
- .ndo_del_geneve_port = zxdh_en_del_geneve_port,
-#endif
-#endif /* HAVE_GENEVE_RX_OFFLOAD */
-#endif /* HAVE_UDP_ENC_RX_OFFLOAD */
-
-#ifdef HAVE_NDO_GET_PHYS_PORT_ID
- .ndo_get_phys_port_id = zxdh_en_get_phys_port_id,
-#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
-
- .ndo_set_features = zxdh_en_set_features,
-
-#ifdef HAVE_FDB_OPS
- .ndo_fdb_add = zxdh_en_ndo_fdb_add,
- .ndo_fdb_del = zxdh_en_ndo_fdb_del,
-#ifdef HAVE_NDO_FEATURES_CHECK
- .ndo_features_check = zxdh_en_features_check,
-#endif /* HAVE_NDO_FEATURES_CHECK */
-#ifdef HAVE_BRIDGE_ATTRIBS
- .ndo_bridge_getlink = zxdh_en_ndo_bridge_getlink,
- .ndo_bridge_setlink = zxdh_en_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
-#endif /* HAVE_FDB_OPS */
-
-#ifdef HAVE_UDP_TUNNEL_OPS
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
-#endif
-
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
-};
-
-/* RHEL6 keeps these operations in a separate structure */
-static const struct net_device_ops_ext zxdh_netdev_ops_ext =
-{
- .size = sizeof(struct net_device_ops_ext),
-#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
-
-#ifdef HAVE_NDO_SET_FEATURES
- .ndo_set_features = zxdh_en_set_features,
-#endif /* HAVE_NDO_SET_FEATURES */
-
-#ifdef HAVE_NDO_SET_VF_LINK_STATE
- .ndo_set_vf_link_state = zxdh_en_ndo_set_vf_link_state,
-#endif
-};
-
-static void priv_flags_init(struct zxdh_en_priv *priv)
-{
- priv->edev.pflags = 0;
-
- priv->edev.pflags &= BIT(ZXDH_PFLAG_ENABLE_LLDP); /* LLDP默认为开 */
-}
-
-static int32_t get_max_num_qs(struct zxdh_en_container *en_con)
-{
- return en_con->ops->is_bond(en_con->parent) ? ZXDH_BOND_ETH_MQ_PAIRS_NUM : max_pairs;
-}
-
-static int32_t fw_version_init(struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- uint8_t fw_version[ETHTOOL_FWVERS_LEN] = {0};
- uint8_t fw_version_len = 0;
-
- ret = zxdh_en_firmware_version_get(en_dev, fw_version, &fw_version_len);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_firmware_version_get err, ret %d!!!!\n", ret);
- return ret;
- }
- if (fw_version_len > ETHTOOL_FWVERS_LEN)
- {
- LOG_ERR("fw_version_len (%d) greater than 31!!!!\n", fw_version_len);
- return -1;
- }
-
- fw_version[ETHTOOL_FWVERS_LEN - 1] = '\0';
- en_dev->fw_version_len = ETHTOOL_FWVERS_LEN;
- memcpy(en_dev->fw_version, (uint8_t *)fw_version, en_dev->fw_version_len);
- LOG_INFO("fw_version:%s\n", en_dev->fw_version);
-
- return 0;
-}
-
-int32_t zxdh_priv_init(struct zxdh_en_priv *priv, struct net_device *netdev)
-{
- int32_t ret = 0;
- struct zxdh_en_device *en_dev = &priv->edev;
-
- mutex_init(&priv->lock);
- priv_flags_init(priv);
- en_dev->msglevel = NETIF_MSG_LINK;
-
- /* 优先级4,暂时写死不支持 */
- en_dev->wol_support = 0;
- en_dev->wolopts = 0;
-
- ret = fw_version_init(en_dev);
- if (ret != 0)
- {
- LOG_ERR("fw_version_init err ret: %d\n", ret);
- return ret;
- }
-
- return 0 ;
-}
-
-struct net_device *zxdh_create_netdev(struct zxdh_en_container *en_con)
-{
- struct net_device *netdev = NULL;
- struct zxdh_en_priv *en_priv = NULL;
- struct dh_core_dev *dh_dev = en_con->parent;
-
- netdev = alloc_etherdev_mqs(sizeof(struct zxdh_en_priv), get_max_num_qs(en_con), get_max_num_qs(en_con));
- if (unlikely(netdev == NULL))
- {
- LOG_ERR("alloc_etherdev_mqs() failed\n");
- return NULL;
- }
-
- en_priv = netdev_priv(netdev);
-
- en_priv->edev.parent = dh_dev;
- en_priv->edev.ops = en_con->ops;
- en_priv->edev.netdev = netdev;
-
- zxdh_priv_init(en_priv, netdev);
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- dev_net_set(netdev, dh_core_net(dh_dev));
-
- return netdev;
-}
-
-void zxdh_netdev_features_init(struct net_device *netdev)
-{
- netdev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_TSO |
- NETIF_F_SG |
- NETIF_F_GSO |
- NETIF_F_LRO |
- NETIF_F_TSO6 |
- NETIF_F_GRO |
- NETIF_F_HW_VLAN_STAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_RXHASH;
-
- netdev->hw_features |= NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_TSO |
- NETIF_F_SG |
- NETIF_F_GSO |
- NETIF_F_LRO |
- NETIF_F_TSO6 |
- NETIF_F_GRO |
- NETIF_F_HW_VLAN_STAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_RX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_RXHASH;
-
- netdev->hw_enc_features |= NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- return;
-}
-
-extern const struct xfrmdev_ops zxdh_xfrmdev_ops;
-static void zxdh_build_nic_netdev(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct dh_core_dev *dh_dev = en_priv->edev.parent;
-
- SET_NETDEV_DEV(netdev, &dh_dev->parent->pdev->dev);
-
- netdev->netdev_ops = &zxdh_netdev_ops;
-
-#ifdef ZXDH_SEC
- /*内核 sec相关*/
- netdev->features |=NETIF_F_HW_ESP;
- netdev->xfrmdev_ops = &zxdh_xfrmdev_ops;
-#endif
-
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
- zxdh_en_set_ethtool_ops_ext(netdev);
-#else
- zxdh_en_set_ethtool_ops(netdev);
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-
- zxdh_netdev_features_init(netdev);
-}
-
-int32_t zxdh_en_bond_get_mac(struct net_device *netdev, uint8_t pannel_id, uint8_t *mac)
-{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- msg.payload.hdr_to_agt.op_code = AGENT_FLASH_MAC_READ;
- msg.payload.flash_read_msg.index = pannel_id;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_FLASH, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_send_command_to_specify failed: %d\n", ret);
- return ret;
- }
-
- LOG_INFO("bond get mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", msg.reps.flash_mac_read_msg.mac[0],\
- msg.reps.flash_mac_read_msg.mac[1],msg.reps.flash_mac_read_msg.mac[2], msg.reps.flash_mac_read_msg.mac[3],\
- msg.reps.flash_mac_read_msg.mac[4], msg.reps.flash_mac_read_msg.mac[5]);
-
- ether_addr_copy(mac, msg.reps.flash_mac_read_msg.mac);
- return ret;
-}
-
-int32_t zxdh_mac_addr_init(struct net_device *netdev)
-{
- uint8_t mac[6] = {0};
- uint8_t pannel_id = 0;
- int32_t ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- pannel_id = en_dev->pannel_id;
- ret = zxdh_en_bond_get_mac(netdev, pannel_id, mac);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_bond_mac_get failed: %d\n", ret);
- }
- }
- else
- {
- en_dev->ops->get_mac(en_dev->parent, mac);
- }
-
- if (!is_valid_ether_addr(mac))
- {
- get_random_bytes(mac, 6);
- mac[0] &= 0xfe;
- LOG_INFO("set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- }
- LOG_INFO("set mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- memcpy(netdev->dev_addr, mac, 6);
-
- return ret;
-}
-
-int32_t zxdh_status_init(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->if_init(en_dev->parent))
- {
- zxdh_vp_reset(netdev);
- }
-
- /* Disable VQ/configuration callbacks. */
- zxdh_vp_disable_cbs(netdev);
-
- zxdh_add_status(netdev, ZXDH_CONFIG_S_ACKNOWLEDGE);
-
- zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER);
-
- /* fix features, not set features*/
- zxdh_pf_features_init(netdev);
-
- might_sleep();
- zxdh_add_status(netdev, ZXDH_CONFIG_S_FEATURES_OK);
- if (!zxdh_has_status(netdev, ZXDH_CONFIG_S_FEATURES_OK))
- {
- LOG_ERR("device refuses features ok\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-void zxdh_device_ready(struct net_device *netdev)
-{
- zxdh_vp_enable_cbs(netdev);
-
- zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER_OK);
-}
-
-void zxdh_link_state_notify_kernel(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if(en_dev->ops->get_pf_link_up(en_dev->parent))
- {
- netif_carrier_off(netdev);
- udelay(10);
- netif_carrier_on(netdev);
- }
- else
- {
- netif_carrier_on(netdev);
- udelay(10);
- netif_carrier_off(netdev);
- }
-}
-
-int32_t aux_get_bond_attrs(struct zxdh_en_device *en_dev, struct zxdh_lag_attrs *attr)
-{
- *attr = (struct zxdh_lag_attrs)
- {
- .pannel_id = en_dev->pannel_id,
- .vport = en_dev->vport,
- .slot_id = en_dev->slot_id,
- .qid[0] = en_dev->phy_index[0],
- .qid[1] = en_dev->phy_index[1],
- .pcie_id = en_dev->pcie_id,
- .phy_port = en_dev->phy_port,
- };
-
- LOG_INFO("bond pf: pannel %hu, vport 0x%hx, phy_qid[0] %u, phy_qid[1] %u, pcie id 0x%x\n",
- attr->pannel_id, attr->vport, attr->qid[0], attr->qid[1], attr->pcie_id);
-
- return 0;
-}
-
-void aux_set_netdev_name(struct net_device *netdev, uint16_t pannel_id)
-{
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_priv *en_priv = NULL;
-
- en_priv = netdev_priv(netdev);
- en_dev = &en_priv->edev;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- netdev->dev_port = pannel_id + 1;
- }
-}
-
-int32_t zxdh_en_mtu_init(struct net_device *netdev)
-{
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = ZXDH_MAX_MTU;
-
- return zxdh_en_config_mtu_to_np(netdev, ZXDH_DEFAULT_MTU);
-}
-
-static int32_t zxdh_en_dev_probe(struct zxdh_auxiliary_device *adev, const struct zxdh_auxiliary_device_id *id)
-{
- struct zxdh_en_container *en_container = container_of(adev, struct zxdh_en_container, adev);
- struct net_device *netdev = NULL;
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_lag_attrs lag_attrs;
- int32_t err = 0;
- int32_t vqs_channel_num = 0;
-
- LOG_INFO("aux level driver probe start\n");
-
- netdev = zxdh_create_netdev(en_container);
- if (unlikely(netdev == NULL))
- {
- LOG_ERR("zxdh_create_netdev is null\n");
- err = -ENOMEM;
- goto err_create_netdev;
- }
-
- zxdh_build_nic_netdev(netdev);
-
- dev_set_drvdata(&adev->dev, netdev_priv(netdev));
-
- en_priv = netdev_priv(netdev);
- en_dev = &en_priv->edev;
- en_dev->channels_num = en_dev->ops->get_channels_num(en_dev->parent);
- en_dev->ops->set_rdma_netdev(en_dev->parent, netdev);
- en_dev->curr_unicast_num = 0;
- en_dev->curr_multicast_num = 0;
- en_dev->init_comp_flag = AUX_INIT_INCOMPLETED;
- en_dev->delay_statistics_enable = 0;
-
- vqs_channel_num = en_dev->ops->create_vqs_channels(en_dev->parent);
- if (vqs_channel_num < 0)
- {
- LOG_ERR("create_vqs_channels failed, vqs_channel_num: %d\n", vqs_channel_num);
- err = vqs_channel_num;
- goto err_create_vqs_channels;
- }
-
- err = dh_aux_eq_table_init(en_priv);
- if (err != 0)
- {
- LOG_ERR("Failed to alloc IRQs: %d\n", err);
- goto err_eq_table_init;
- }
-
- err = dh_aux_events_init(en_priv);
- if (err != 0)
- {
- LOG_ERR("dh_aux_events_init failed: %d\n", err);
- goto err_events_init;
- }
-
- err = dh_aux_eq_table_create(en_priv);
- if (err != 0)
- {
- LOG_ERR("Failed to alloc EQs: %d\n", err);
- goto err_eq_table_create;
- }
-
- err = zxdh_status_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_status_init failed: %d\n", err);
- goto err_status_init;
- }
-
- en_dev->ep_bdf = en_dev->ops->get_epbdf(en_dev->parent);
- en_dev->vport = en_dev->ops->get_vport(en_dev->parent);
- en_dev->pcie_id = en_dev->ops->get_pcie_id(en_dev->parent);
- en_dev->slot_id = en_dev->ops->get_slot_id(en_dev->parent);
- LOG_INFO("ep_bdf: 0x%x, vport: 0x%x, pcie_id: %d, slot_id: %d\n", en_dev->ep_bdf, en_dev->vport, en_dev->pcie_id, en_dev->slot_id);
-
- err = zxdh_vqs_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_vqs_init failed: %d\n", err);
- goto err_vqs_init;
- }
-
- if (en_dev->ops->is_upf(en_dev->parent))
- {
- en_dev->hash_search_idx = 2;//FIXME
- }
- else if (!en_dev->ops->is_bond(en_dev->parent))
- {
- err = zxdh_hash_id_get(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_hash_id_get failed: %d\n", err);
- goto err_do_vqs_free;
- }
-
- err = zxdh_panel_id_get(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_panel_id_get failed: %d\n", err);
- goto err_do_vqs_free;
- }
- }
-
- en_dev->hash_func = ZXDH_FUNC_TOP;
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- err = zxdh_aux_alloc_pannel(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_aux_alloc_pannel failed: %d\n", err);
- goto err_do_vqs_free;
- }
- }
- else if (!en_dev->ops->is_upf(en_dev->parent))
- {
- err = zxdh_phyport_get(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_phyport_get failed: %d\n", err);
- goto err_do_vqs_free;
- }
- }
-
- err = zxdh_mac_addr_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_mac_addr_init failed: %d\n", err);
- goto err_do_vqs_free;
- }
-
- err = zxdh_pf_port_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_pf_port_init failed: %d\n", err);
- goto err_do_vqs_free;
- }
- }
- else
- {
- err = zxdh_vf_dpp_port_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_vf_dpp_port_init failed: %d\n", err);
- goto err_do_vqs_free;
- }
- }
-
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
- err = zxdh_num_channels_changed(en_dev, en_dev->curr_queue_pairs);
- if (err != 0)
- {
- LOG_ERR("zxdh_num_channels_changed failed: %d\n", err);
- goto err_do_vport_free;
- }
- }
-
- err = zxdh_common_tbl_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_common_tlb_init failed: %d\n", err);
- goto err_do_rxfh_free;
- }
-
- zxdh_device_ready(netdev);
-
- err = zxdh_en_mtu_init(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_mtu_init failed: %d\n", err);
- goto err_do_rxfh_free;
- }
-
- en_dev->hw_stats.q_stats = kmalloc_array(max_pairs, sizeof(struct zxdh_en_queue_stats), GFP_KERNEL);
- if (unlikely(en_dev->hw_stats.q_stats == NULL))
- {
- LOG_ERR("hw_stats.q_stats kmalloc failed\n");
- goto err_do_rxfh_free;
- }
- memset(en_dev->hw_stats.q_stats, 0, max_pairs * sizeof(struct zxdh_en_queue_stats));
- memset(&en_dev->pre_stats, 0, sizeof(struct zxdh_en_vport_stats));
-
- err = zxdh_en_vport_pre_stats_get(en_dev);
- if(err != 0)
- {
- LOG_ERR("get vport pre stats failed, %d\n", err);
- goto err_do_q_stats_free;
- }
-
- aux_set_netdev_name(netdev, en_dev->pannel_id);
- err = register_netdev(netdev);
- if (err != 0)
- {
- LOG_ERR("register_netdev failed, %d\n", err);
- goto err_do_q_stats_free;
- }
-
- zxdh_en_bar_del_mac(netdev);
- zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
- zxdh_link_state_notify_kernel(netdev);
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- aux_get_bond_attrs(en_dev, &lag_attrs);
- zxdh_ldev_add_netdev(en_container->parent, en_dev->pannel_id, netdev, &lag_attrs);
- }
-
-#ifdef ZXDH_PLCR_OPEN
- err = zxdh_plcr_init(en_priv);
- if (err != 0)
- {
- LOG_ERR("zxdh_plcr_init failed, %d\n", err);
- }
-#endif
-
- en_dev->init_comp_flag = AUX_INIT_COMPLETED;
-
- err = dh_aux_ipv6_notifier_init(en_priv);
- if (err != 0)
- {
- LOG_ERR("dh_aux_ipv6_notifier_init failed: %d\n", err);
- goto err_ipv6_notifier_init;
- }
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- en_dev->autoneg_enable = AUTONEG_ENABLE;
- err = zxdh_en_phyport_init(en_dev);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_phyport_init failed: %d\n", err);
- goto err_phyport_init;
- }
- }
-
-#ifdef ZXDH_MSGQ
- NEED_MSGQ(en_dev)
- {
- err = zxdh_msgq_init(en_dev);
- if (err)
- {
- LOG_ERR("zxdh_msgq_init failed: %d\n", err);
- goto err_phyport_init;
- }
- }
-#endif
-
- en_dev->ops->set_init_comp_flag(en_dev->parent);
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- /* clear mcode gate,successfully build the scheduling tree, and then open it again */
- zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev);
-#ifdef ZXDH_DCBNL_OPEN
- err = zxdh_dcbnl_initialize(netdev);
- if (err != 0)
- {
- LOG_ERR("zxdh_dcbnl_initialize failed: %d\n", err);
- }
-#endif
- }
-
- en_dev->ops->set_bond_num(en_dev->parent, true);
- LOG_INFO("%s: aux level driver probe completed\n", netdev->name);
-
- return 0;
-
-err_phyport_init:
- dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier));
-err_ipv6_notifier_init:
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- aux_get_bond_attrs(en_dev, &lag_attrs);
- zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs);
- }
- unregister_netdev(netdev);
-err_do_q_stats_free:
- kfree(en_dev->hw_stats.q_stats);
-err_do_rxfh_free:
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- zxdh_rxfh_del(en_dev);
- }
-err_do_vport_free:
- zxdh_vport_uninit(netdev);
-err_do_vqs_free:
- zxdh_vqs_uninit(netdev);
-err_vqs_init:
- zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED);
-err_status_init:
- dh_aux_eq_table_destroy(en_priv);
-err_eq_table_create:
- dh_aux_events_uninit(en_priv);
-err_events_init:
- dh_aux_eq_table_cleanup(en_priv);
-err_eq_table_init:
- en_dev->ops->destroy_vqs_channels(en_dev->parent);
- en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id);
-err_create_vqs_channels:
- free_netdev(netdev);
-err_create_netdev:
- return err;
-}
-
-static int32_t zxdh_en_dev_remove(struct zxdh_auxiliary_device *adev)
-{
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)dev_get_drvdata(&adev->dev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct net_device *netdev = en_dev->netdev;
- struct zxdh_lag_attrs lag_attrs;
-
- LOG_INFO("%s: aux level driver remove start\n", netdev->name);
-
- en_dev->ops->set_bond_num(en_dev->parent, false);
- dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier));
-#ifdef ZXDH_MSGQ
- NEED_MSGQ(en_dev)
- {
- zxdh_msgq_exit(en_dev);
- }
-#endif
-
-#ifdef ZXDH_PLCR_OPEN
- zxdh_plcr_uninit(en_priv);
-#endif
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- aux_get_bond_attrs(en_dev, &lag_attrs);
- zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs);
- }
-
- #ifdef ZXDH_DCBNL_OPEN
- zxdh_dcbnl_ets_uninit(netdev);
- #endif
-
- unregister_netdev(netdev);
- kfree(en_dev->hw_stats.q_stats);
-
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- zxdh_rxfh_del(en_dev);
- }
-
- zxdh_vport_uninit(netdev);
-
- zxdh_vqs_uninit(netdev);
-
- zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED);
-
- dh_aux_eq_table_destroy(en_priv);
- dh_aux_events_uninit(en_priv);
- dh_aux_eq_table_cleanup(en_priv);
- en_dev->ops->destroy_vqs_channels(en_dev->parent);
- en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id);
- free_netdev(netdev);
- LOG_INFO("aux level driver remove completed\n");
-
- return 0;
-}
-
-static void zxdh_en_dev_shutdown(struct zxdh_auxiliary_device *adev)
-{
- LOG_INFO("aux level driver shutdown start\n");
- zxdh_en_dev_remove(adev);
- LOG_INFO("aux level driver shutdown completed\n");
-};
-
-static const struct zxdh_auxiliary_device_id zxdh_en_dev_id_table[] = {
- { .name = ZXDH_PF_NAME "." ZXDH_EN_DEV_ID_NAME, },
- { },
-};
-
-MODULE_DEVICE_TABLE(zxdh_auxiliary, zxdh_en_dev_id_table);
-
-static struct zxdh_auxiliary_driver zxdh_en_driver = {
- .name = ZXDH_EN_DEV_ID_NAME,
- .probe = zxdh_en_dev_probe,
- .remove = zxdh_en_dev_remove,
- .shutdown = zxdh_en_dev_shutdown,
- .id_table = zxdh_en_dev_id_table,
-};
-
-int32_t zxdh_en_driver_register(void)
-{
- int32_t err = 0;
-
- if ((max_pairs == 0) || (max_pairs >= ZXDH_MAX_PAIRS_NUM))
- {
- LOG_INFO("max_pairs %u parameter is a invalid value, use the default value %u\n", max_pairs, ZXDH_MQ_PAIRS_NUM);
- max_pairs = ZXDH_MQ_PAIRS_NUM;
- }
-
- err = zxdh_auxiliary_driver_register(&zxdh_en_driver);
- if (err != 0)
- {
- LOG_ERR("zxdh_auxiliary_driver_register failed: %d\n", err);
- goto err_aux_register;
- }
-
- err = dh_aux_msg_recv_func_register();
- if (err != 0)
- {
- LOG_ERR("dh_aux_msg_recv_func_register failed: %d\n", err);
- goto err_msg_recv_register;
- }
-
- err = zxdh_tools_netlink_register();
- if (err != 0)
- {
- LOG_ERR("zxdh_tools_msg_family register error failed: %d\n", err);
- goto err_netlink_register;
- }
-
- LOG_INFO("all driver insmod completed\n");
-
- return 0;
-
-err_netlink_register:
- dh_aux_msg_recv_func_unregister();
-err_msg_recv_register:
- zxdh_auxiliary_driver_unregister(&zxdh_en_driver);
-err_aux_register:
- return err;
-}
-
-void zxdh_en_driver_unregister(void)
-{
- LOG_INFO("driver rmmod start\n");
- zxdh_tools_netlink_unregister();
- dh_aux_msg_recv_func_unregister();
- zxdh_auxiliary_driver_unregister(&zxdh_en_driver);
-}
-
-module_init(zxdh_en_driver_register);
-module_exit(zxdh_en_driver_unregister);
+#ifdef AUX_BUS_NO_SUPPORT
+#include
+#else
+#include
+#endif
+#include "en_aux.h"
+#include "en_aux/en_cmd.h"
+#include "en_aux/en_ioctl.h"
+#include "en_aux/eq.h"
+#include "en_aux/events.h"
+#include "en_ethtool/ethtool.h"
+#include "en_np/table/include/dpp_tbl_api.h"
+#include "en_np/table/include/dpp_tbl_comm.h"
+#include "en_pf.h"
+#include "msg_common.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef ZXDH_MSGQ
+#include "en_aux/priv_queue.h"
+#endif
+#include "en_aux/en_1588_pkt_proc.h"
+#include "en_aux/en_cmd.h"
+#include "zxdh_tools/zxdh_tools_netlink.h"
+
+#ifdef ZXDH_DCBNL_OPEN
+#include "en_aux/dcbnl/en_dcbnl.h"
+#endif
+
+uint32_t max_pairs = ZXDH_MQ_PAIRS_NUM;
+module_param(max_pairs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_pairs, "Max queue pairs");
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* WARNING Do not use netif_carrier_on/off(),
+ it may affect the ethtool function */
+int32_t zxdh_en_open(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t i = 0;
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ LOG_INFO("zxdh_en_open start\n");
+ mutex_lock(&en_priv->lock);
+
+ for (i = 0; i < en_dev->curr_queue_pairs; i++) {
+ /* Make sure we have some buffers: if oom use wq */
+ if (!try_fill_recv(netdev, &en_dev->rq[i], GFP_KERNEL)) {
+ schedule_delayed_work(&en_dev->refill, 0);
+ }
+
+ virtnet_napi_enable(en_dev->rq[i].vq, &en_dev->rq[i].napi);
+ virtnet_napi_tx_enable(netdev, en_dev->sq[i].vq, &en_dev->sq[i].napi);
+ }
+
+ mutex_unlock(&en_priv->lock);
+
+ if (!en_dev->link_up) {
+ return 0;
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP, 1,
+ 0);
+ }
+ return dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 1);
+ }
+
+ /* 给bond-pf的端口属性表配置为up */
+ err = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 1);
+ if (err != 0) {
+ LOG_ERR("dpp_egr_port_attr_set bond pf failed\n");
+ return err;
+ }
+
+ return dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 1);
+}
+
+int32_t zxdh_en_close(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t i = 0;
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ LOG_INFO("zxdh_en_close start\n");
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&en_dev->refill);
+
+ for (i = 0; i < en_dev->curr_queue_pairs; i++) {
+ napi_disable(&en_dev->rq[i].napi);
+ virtnet_napi_tx_disable(&en_dev->sq[i].napi);
+ }
+
+ if (!en_dev->link_up) {
+ return 0;
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP, 0,
+ 0);
+ }
+ return dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 0);
+ }
+
+ /* 给bond-pf的端口属性表配置为down */
+ err = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, 0);
+ if (err != 0) {
+ LOG_ERR("dpp_egr_port_attr_set bond pf failed\n");
+ return err;
+ }
+
+ return dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 0);
+}
+
+void pkt_transport_protocol_parse(int8_t next_protocol,
+ struct zxdh_net_hdr *hdr)
+{
+ if (next_protocol == IPPROTO_UDP) {
+ hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_UDP;
+ } else if (next_protocol == IPPROTO_TCP) {
+ hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_TCP;
+ } else {
+ hdr->pi_hdr.pt.type_ctx.pkt_code = PCODE_IP;
+ }
+
+ return;
+}
+
+void pkt_protocol_parse(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ int32_t flag)
+{
+ struct ethhdr *mach = NULL;
+ struct iphdr *ipv4h = NULL;
+ struct ipv6hdr *ipv6h = NULL;
+
+ if (flag == 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv4h = (struct iphdr *)skb_network_header(skb);
+ hdr->pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE;
+ pkt_transport_protocol_parse(ipv4h->protocol, hdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6h = (struct ipv6hdr *)skb_network_header(skb);
+ hdr->pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE;
+ pkt_transport_protocol_parse(ipv6h->nexthdr, hdr);
+ } else {
+ hdr->pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE;
+ }
+ } else {
+ mach = (struct ethhdr *)skb_inner_mac_header(skb);
+ if (mach->h_proto == htons(ETH_P_IP)) {
+ ipv4h = (struct iphdr *)skb_inner_network_header(skb);
+ hdr->pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE;
+ pkt_transport_protocol_parse(ipv4h->protocol, hdr);
+ } else if (mach->h_proto == htons(ETH_P_IPV6)) {
+ ipv6h = (struct ipv6hdr *)skb_inner_network_header(skb);
+ hdr->pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE;
+ pkt_transport_protocol_parse(ipv6h->nexthdr, hdr);
+ } else {
+ hdr->pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE;
+ }
+ }
+}
+
+int32_t pkt_is_vxlan(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP) {
+ return -1;
+ }
+ break;
+
+ case htons(ETH_P_IPV6):
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) {
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t zxdh_tx_checksum_offload(struct zxdh_en_device *edev,
+ struct sk_buff *skb, struct zxdh_net_hdr *hdr)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ return 0;
+ }
+
+ if ((skb->inner_protocol != 0) && (pkt_is_vxlan(skb) == 0)) {
+ skb->encapsulation = 0x1;
+ }
+
+ if ((skb->encapsulation == 0x1) &&
+ (edev->netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
+ hdr->pi_hdr.bttl_pi_len = ENABLE_PI_FLAG_32B;
+ hdr->pd_hdr.ol_flag |= htons(0x1 << OUTER_IP_CHECKSUM_OFFSET);
+ pkt_protocol_parse(skb, hdr, 1);
+ hdr->pi_hdr.hdr_l3_offset =
+ htons(edev->hdr_len + skb_inner_network_offset(skb));
+ hdr->pi_hdr.hdr_l4_offset =
+ htons(edev->hdr_len + skb_inner_transport_offset(skb));
+ }
+
+ hdr->pi_hdr.pkt_action_flag1 |= htons(0x1 << INNER_IP_CHECKSUM_OFFSET);
+ hdr->pi_hdr.pkt_action_flag2 |= 0x1 << INNER_L4_CHECKSUM_OFFSET;
+ return 0;
+}
+
+static int pd_hdr_validate_vlan(struct zxdh_en_device *edev,
+ struct sk_buff *skb, struct zxdh_net_hdr *hdr)
+{
+ /* pf set vf vlan is done*/
+ if (edev->vlan_dev.vlan_id) {
+ if (!skb_vlan_tag_present(skb)) {
+ hdr->pd_hdr.cvlan.tci = htons(edev->vlan_dev.vlan_id);
+ hdr->pd_hdr.cvlan.tpid = htons(edev->vlan_dev.protcol);
+ hdr->pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT);
+ return 0;
+ } else {
+ hdr->pd_hdr.svlan.tci = htons(edev->vlan_dev.vlan_id);
+ hdr->pd_hdr.svlan.tpid = htons(edev->vlan_dev.protcol);
+ hdr->pd_hdr.ol_flag |= htons(TXCAP_STAG_INSERT_EN_BIT);
+ }
+ }
+
+ /* insert vlan hard-accellate when skb is taged to be inserted, eg. in vlan
+ * interface case*/
+ if (skb && skb_vlan_tag_present(skb)) {
+ hdr->pd_hdr.cvlan.tci = htons(skb_vlan_tag_get(skb));
+ hdr->pd_hdr.cvlan.tpid = (skb->vlan_proto);
+ hdr->pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT);
+ }
+ return 0;
+}
+
+int32_t pi_net_hdr_from_skb(struct zxdh_en_device *edev, struct sk_buff *skb,
+ struct zxdh_net_hdr *hdr)
+{
+ uint32_t gso_type = 0;
+ uint16_t mss = 0;
+#ifdef TIME_STAMP_1588
+ int32_t ret = 0;
+#endif
+
+ memset(hdr, 0, sizeof(*hdr)); /* no info leak */
+ hdr->pd_len = edev->hdr_len / HDR_2B_UNIT;
+ hdr->pi_hdr.bttl_pi_len = DISABLE_PI_FIELD_PARSE + ENABLE_PI_FLAG_32B;
+ hdr->tx_port = TX_PORT_DTP;
+ hdr->pi_hdr.pt.type_ctx.pkt_src = PKT_SRC_CPU;
+ hdr->pi_hdr.eth_port_id = INVALID_ETH_PORT_ID;
+
+ if (edev->delay_statistics_enable) {
+ pkt_delay_statistics_proc(skb, hdr, edev);
+ }
+
+ // #ifdef TIME_STAMP_1588
+ // ret = pkt_1588_proc_xmit(skb, hdr, edev->clock_no, edev);
+ // switch (ret)
+ // {
+ // case PTP_SUCCESS:
+ // {
+ // LOG_DEBUG("pkt_1588_proc_xmit success!!!\n");
+ // return 0;
+ // }
+ // case IS_NOT_PTP_MSG:
+ // {
+ // LOG_DEBUG("not ptp msg!!\n");
+ // break;
+ // }
+ // default:
+ // {
+ // LOG_ERR("pkt_1588_proc_xmit err!!!\n");
+ // return ret;
+ // }
+ // }
+ // #endif
+
+ pd_hdr_validate_vlan(edev, skb, hdr);
+
+ gso_type = skb_shinfo(skb)->gso_type;
+ if (gso_type & SKB_GSO_TCPV4) {
+ mss = min(skb_shinfo(skb)->gso_size,
+ (uint16_t)(edev->netdev->mtu - IP_BASE_HLEN - TCP_BASE_HLEN));
+ hdr->pi_hdr.pkt_action_flag1 |=
+ htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
+ hdr->pi_hdr.pkt_action_flag2 |=
+ TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */
+ } else if (gso_type & SKB_GSO_TCPV6) {
+ mss = min(
+ skb_shinfo(skb)->gso_size,
+ (uint16_t)(edev->netdev->mtu - IPV6_BASE_HLEN - TCP_BASE_HLEN));
+ hdr->pi_hdr.pkt_action_flag1 |=
+ htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
+ hdr->pi_hdr.pkt_action_flag2 |=
+ TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */
+ } else if (gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4 | SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ hdr->pi_hdr.pkt_action_flag1 =
+ htons((edev->netdev->mtu / ETH_MTU_4B_UNIT) + IP_FRG_CSUM_FLAG);
+ } else {
+ hdr->pi_hdr.pkt_action_flag1 |= htons(
+ (edev->netdev->mtu / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG);
+ hdr->pi_hdr.pkt_action_flag2 |= NOT_TCP_FRG_CSUM_FLAG;
+ }
+
+ if (edev->netdev->features & NETIF_F_HW_CSUM) {
+ zxdh_tx_checksum_offload(edev, skb, hdr);
+ }
+
+ if ((edev->ops->is_bond(edev->parent)) &&
+ (skb->protocol == htons(ETH_P_SLOW) ||
+ skb->protocol == htons(ETH_P_PAUSE))) {
+ hdr->pd_hdr.ol_flag |= htons(PANELID_EN);
+ hdr->pd_hdr.panel_id = edev->phy_port;
+ }
+
+#ifdef ZXDH_DCBNL_OPEN
+ if (NULL != skb->sk) {
+ hdr->pd_hdr.ol_flag |=
+ htons(ZXDH_DCBNL_SET_SK_PRIO(skb->sk->sk_priority));
+ }
+#endif
+
+#ifdef TIME_STAMP_1588
+ ret = pkt_1588_proc_xmit(skb, hdr, edev->clock_no, edev);
+ switch (ret) {
+ case PTP_SUCCESS: {
+ LOG_DEBUG("pkt_1588_proc_xmit success!!!\n");
+ return 0;
+ }
+ case IS_NOT_PTP_MSG: {
+ LOG_DEBUG("not ptp msg!!\n");
+ break;
+ }
+ default: {
+ LOG_ERR("pkt_1588_proc_xmit err!!!\n");
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
+int32_t xmit_skb(struct net_device *netdev, struct send_queue *sq,
+ struct sk_buff *skb)
+{
+ struct zxdh_net_hdr *hdr = NULL;
+ // const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t num_sg = 0;
+ unsigned hdr_len = en_dev->hdr_len;
+ bool can_push = false;
+ uint8_t *hdr_buf = sq->hdr_buf;
+
+ can_push = en_dev->any_header_sg &&
+ !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
+ !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
+ /* Even if we can, don't push here yet as this would skew
+ * csum_start offset below. */
+ if (can_push) {
+ hdr = (struct zxdh_net_hdr *)(skb->data - hdr_len);
+ } else {
+ memset(hdr_buf, 0, HDR_BUFFER_LEN);
+ hdr = (struct zxdh_net_hdr *)hdr_buf;
+ }
+
+ if (pi_net_hdr_from_skb(en_dev, skb, hdr)) {
+ return -EPROTO;
+ }
+
+ sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
+ if (can_push) {
+ __skb_push(skb, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ return num_sg;
+ }
+ /* Pull header back to avoid skew in tx bytes calculations. */
+ __skb_pull(skb, hdr_len);
+ } else {
+ sg_set_buf(sq->sg, hdr, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0)) {
+ return num_sg;
+ }
+ num_sg++;
+ }
+
+ return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
+}
+
/*
 * zxdh_en_xmit - ndo_start_xmit handler for the zxdh ethernet device.
 * @skb:    packet to transmit
 * @netdev: transmitting net device
 *
 * Reclaims completed TX buffers, enqueues @skb on the virtqueue that backs
 * the skb's mapped TX queue, stops the subqueue when ring space runs low,
 * and kicks (notifies) the device unless more packets are pending
 * (xmit_more).  Always returns NETDEV_TX_OK: on enqueue failure the skb is
 * dropped and counted rather than requeued (see comment below).
 */
netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev)
{
    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    int32_t qnum = skb_get_queue_mapping(skb);
    struct send_queue *sq = &en_dev->sq[qnum];
    int32_t err = 0;
    struct netdev_queue *txq = netdev_get_tx_queue(netdev, qnum);
    bool kick = !netdev_xmit_more();
    bool use_napi = sq->napi.weight;

    /* Free up any pending old buffers before queueing new ones. */
    /* With NAPI enabled, keep callbacks disabled while reclaiming; re-run
     * the reclaim if completions raced with virtqueue_enable_cb_delayed(). */
    do {
        if (use_napi) {
            virtqueue_disable_cb(sq->vq);
        }

        free_old_xmit_skbs(netdev, sq, false);

    } while (use_napi && kick &&
             unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

    /* timestamp packet in software */
    skb_tx_timestamp(skb);

    /* Try to transmit */
    err = xmit_skb(netdev, sq, skb);

    /* This should not happen! */
    if (unlikely(err)) {
        netdev->stats.tx_fifo_errors++;
        netdev->stats.tx_errors++;
        if (net_ratelimit()) {
            LOG_WARN("unexpected TXQ (%d) queue failure: %d\n", qnum, err);
        }
        netdev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }

    /* If running out of space, stop queue to avoid getting packets that we
     * are then unable to transmit.
     * An alternative would be to force queuing layer to requeue the skb by
     * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
     * returned in a normal path of operation: it means that driver is not
     * maintaining the TX queue stop/start state properly, and causes
     * the stack to do a non-trivial amount of useless work.
     * Since most packets only take 1 or 2 ring slots, stopping the queue
     * early means 16 slots are typically wasted.
     */
    if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
        netif_stop_subqueue(netdev, qnum);
        en_dev->hw_stats.q_stats[qnum].q_tx_stopped++;
        if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
            /* More just got used, free them then recheck. */
            free_old_xmit_skbs(netdev, sq, false);
            if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
                netif_start_subqueue(netdev, qnum);
                virtqueue_disable_cb(sq->vq);
            }
        }
    }

    /* Notify the device now, or when the queue was stopped above (so the
     * device drains the ring and lets us restart the queue). */
    if (kick || netif_xmit_stopped(txq)) {
        if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) {
            u64_stats_update_begin(&sq->stats.syncp);
            sq->stats.kicks++;
            u64_stats_update_end(&sq->stats.syncp);
        }
    }

    return NETDEV_TX_OK;
}
+
#ifdef HAVE_NDO_GET_STATS64
/*
 * zxdh_en_get_netdev_stats_struct - ndo_get_stats64 handler.
 *
 * For bond ports, reads PHY/MAC hardware counters; otherwise aggregates the
 * per-queue software counters (u64_stats seqcount protected) into @stats.
 * Error/drop counters come from netdev->stats, which are updated on the
 * xmit error path.
 */
#ifdef HAVE_VOID_NDO_GET_STATS64
static void zxdh_en_get_netdev_stats_struct(struct net_device *netdev,
                                            struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *
zxdh_en_get_netdev_stats_struct(struct net_device *netdev,
                                struct rtnl_link_stats64 *stats)
#endif
{
#ifdef HAVE_VOID_NDO_GET_STATS64
    /* Fix: netdev_priv() returns struct zxdh_en_priv, not zxdh_en_device;
     * go through ->edev like every other callback in this file (the old
     * direct cast only worked if edev happened to be the first member). */
    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    struct receive_queue *rq = NULL;
    struct send_queue *sq = NULL;
    uint32_t start = 0;
    uint32_t i = 0;
    uint64_t tpackets = 0;
    uint64_t tbytes = 0;
    uint64_t rpackets = 0;
    uint64_t rbytes = 0;
    uint64_t rdrops = 0;
    uint32_t loop_cnt = en_dev->max_queue_pairs;
    int32_t ret = 0;

    /* Bond ports report the physical (MAC-level) counters. */
    if (en_dev->ops->is_bond(en_dev->parent)) {
        ret = zxdh_mac_stats_get(en_dev);
        if (ret != 0) {
            LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
            return;
        }

        stats->rx_packets = en_dev->hw_stats.phy_stats.rx_packets_phy;
        stats->rx_bytes = en_dev->hw_stats.phy_stats.rx_bytes_phy;
        stats->rx_errors = en_dev->hw_stats.phy_stats.rx_errors;
        stats->rx_dropped = en_dev->hw_stats.phy_stats.rx_discards;
        stats->tx_packets = en_dev->hw_stats.phy_stats.tx_packets_phy;
        stats->tx_bytes = en_dev->hw_stats.phy_stats.tx_bytes_phy;
        stats->tx_errors = en_dev->hw_stats.phy_stats.tx_errors;
        stats->tx_dropped = en_dev->hw_stats.phy_stats.tx_drop;
        return;
    }

#ifdef ZXDH_MSGQ
    /* The last queue pair is reserved for the message queue. */
    if (en_dev->need_msgq)
        loop_cnt--;
#endif

    for (i = 0; i < loop_cnt; ++i) {
        sq = &en_dev->sq[i];
        rq = &en_dev->rq[i];
        /* Retry loops give a consistent snapshot on 32-bit hosts. */
        do {
            start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
            tpackets = sq->stats.packets;
            tbytes = sq->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

        do {
            start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
            rpackets = rq->stats.packets;
            rbytes = rq->stats.bytes;
            rdrops = rq->stats.drops;
        } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

        stats->rx_packets += rpackets;
        stats->rx_bytes += rbytes;
        stats->rx_dropped += rdrops;
        stats->tx_packets += tpackets;
        stats->tx_bytes += tbytes;
    }

    stats->rx_errors = netdev->stats.rx_errors;
    stats->tx_errors = netdev->stats.tx_errors;
    stats->tx_dropped = netdev->stats.tx_dropped;
    stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
    return;
#else
    /* Legacy prototype: caller only needs the structure back. */
    return stats;
#endif
}
#endif /* HAVE_NDO_GET_STATS64 */
+
+static void zxdh_en_set_rx_mode(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ queue_work(en_priv->events->wq, &en_dev->rx_mode_set_work);
+}
+
/*
 * rx_mode_set_handler - workqueue handler behind zxdh_en_set_rx_mode().
 *
 * Synchronizes the netdev IFF_PROMISC / IFF_ALLMULTI flags into the NP
 * tables: a PF programs them directly via dpp_vport_*_promisc_set(), a VF
 * forwards the request to its PF.  The multicast-promisc table entry is
 * shared between the two modes, hence the cross-checks against
 * promisc_enabled/allmulti_enabled before touching it.
 */
void rx_mode_set_handler(struct work_struct *work)
{
    struct zxdh_en_device *en_dev =
            container_of(work, struct zxdh_en_device, rx_mode_set_work);
    bool promisc_changed = false;  /* requested promisc state, not a delta */
    bool allmulti_changed = false; /* requested allmulti state */
    int32_t err = 0;
    uint8_t fow = 0;
    DPP_PF_INFO_T pf_info = { 0 };

    pf_info.slot = en_dev->slot_id;
    pf_info.vport = en_dev->vport;
    /* NOTE(review): presumably returns early while aux-device init is still
     * in progress — confirm against the macro definition. */
    ZXDH_AUX_INIT_COMP_CHECK(en_dev);

    /* Bond ports do not manage rx-mode here. */
    if (en_dev->ops->is_bond(en_dev->parent)) {
        return;
    }

    promisc_changed = en_dev->netdev->flags & IFF_PROMISC;
    allmulti_changed = en_dev->netdev->flags & IFF_ALLMULTI;
    if (en_dev->promisc_enabled != promisc_changed) {
        LOG_INFO("promisc_changed: %d", promisc_changed);
        if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
            dpp_vport_uc_promisc_set(&pf_info, promisc_changed);
            /* Only toggle mc-promisc when allmulti isn't holding it on. */
            if (!en_dev->allmulti_enabled) {
                dpp_vport_mc_promisc_set(&pf_info, promisc_changed);
            }
        } else {
            if (!en_dev->allmulti_enabled) {
                fow = 1;
            }
            err = zxdh_vf_port_promisc_set(en_dev, ZXDH_PROMISC_MODE,
                                           promisc_changed, fow);
            if (err != 0) {
                LOG_ERR("zxdh_vf_port_promisc_set failed\n");
                return;
            }
        }
        en_dev->promisc_enabled = promisc_changed;
    }

    if (en_dev->allmulti_enabled != allmulti_changed) {
        LOG_INFO("allmulti_changed: %d", allmulti_changed);
        /* Promisc already implies mc-promisc; only program when it's off. */
        if (!en_dev->promisc_enabled) {
            if (en_dev->ops->get_coredev_type(en_dev->parent) ==
                DH_COREDEV_PF) {
                dpp_vport_mc_promisc_set(&pf_info, allmulti_changed);
            } else {
                err = zxdh_vf_port_promisc_set(en_dev, ZXDH_ALLMULTI_MODE,
                                               allmulti_changed, fow);
                if (err != 0) {
                    LOG_ERR("zxdh_vf_port_promisc_set failed\n");
                    return;
                }
            }
        }
        en_dev->allmulti_enabled = allmulti_changed;
    }
}
+
+static int zxdh_en_bar_cfg_mac(struct net_device *netdev, const char *mac)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ memcpy(msg.mac_cfg_msg.ifname, netdev->name, IFNAMSIZ);
+ memcpy(msg.mac_cfg_msg.mac, mac, ETH_ALEN);
+ msg.mac_cfg_msg.pannel_id = en_dev->panel_id;
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ msg.mac_cfg_msg.pannel_id = en_dev->pannel_id;
+ }
+ msg.mac_cfg_msg.ctl = 1;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_CFG_MAC, &msg, &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("config mac info failed\n");
+ return -reps.flag;
+ }
+ return 0;
+}
+
+static int zxdh_en_bar_del_mac(struct net_device *netdev)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.mac_cfg_msg.pannel_id = en_dev->panel_id;
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ msg.mac_cfg_msg.pannel_id = en_dev->pannel_id;
+ }
+ msg.mac_cfg_msg.ctl = 0;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_CFG_MAC, &msg, &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("del mac info failed.\n");
+ return -reps.flag;
+ }
+ return 0;
+}
+
/*
 * zxdh_en_set_mac - ndo_set_mac_address handler.
 * @netdev: target device
 * @p:      struct sockaddr holding the new MAC in sa_data
 *
 * Validates the new address, then removes the old address from / adds the
 * new address to the NP filter tables unless either is still/already held
 * on the netdev unicast list.  PF programs the NP directly; VF goes through
 * the PF mailbox.  Bond ports only refresh the bar MAC binding.
 *
 * NOTE(review): the uc.list walk below runs without netif_addr_lock_bh();
 * confirm the caller context (rtnl) makes that safe.
 */
static int zxdh_en_set_mac(struct net_device *netdev, void *p)
{
    struct sockaddr *addr = (struct sockaddr *)p;
    struct zxdh_en_device *en_dev = NULL;
    struct zxdh_en_priv *en_priv = NULL;
    struct netdev_hw_addr *ha = NULL;
    bool delete_flag = true; /* remove old addr from NP? */
    bool add_flag = true;    /* program new addr into NP? */
    int32_t ret = 0;
    DPP_PF_INFO_T pf_info = { 0 };

    if (!is_valid_ether_addr(addr->sa_data)) {
        LOG_INFO("invalid mac address %pM\n", addr->sa_data);
        return -EADDRNOTAVAIL;
    }

    if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
        LOG_INFO("already using mac address %pM\n", addr->sa_data);
        return 0;
    }

    /* Keep the old addr if it is still tracked on the uc list, and skip
     * re-adding the new addr if it is already tracked there. */
    list_for_each_entry(ha, &netdev->uc.list, list) {
        if (!memcmp(ha->addr, netdev->dev_addr, netdev->addr_len)) {
            delete_flag = false;
        }

        if (!memcmp(ha->addr, addr->sa_data, netdev->addr_len)) {
            add_flag = false;
        }
    }

    en_priv = netdev_priv(netdev);
    en_dev = &en_priv->edev;
    pf_info.slot = en_dev->slot_id;
    pf_info.vport = en_dev->vport;

    /* Bond ports: no NP filter entry, just refresh the bar binding. */
    if (en_dev->ops->is_bond(en_dev->parent)) {
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        zxdh_en_bar_del_mac(netdev);
        zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
        return 0;
    }

    if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
        if (delete_flag) {
            ret = dpp_del_mac(&pf_info, netdev->dev_addr);
            if (ret != 0) {
                LOG_ERR("pf del mac failed, retval: %d\n", ret);
                return ret;
            }
        }

        if (add_flag) {
            ret = dpp_add_mac(&pf_info, addr->sa_data);
            if (ret != 0) {
                LOG_ERR("pf add mac failed: %d\n", ret);
                return ret;
            }
        }

        LOG_INFO("set pf new mac address %pM\n", addr->sa_data);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        zxdh_en_bar_del_mac(netdev);
        zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
    } else {
        /* VF: delete goes through the mailbox with the keep-flag attached. */
        ret = zxdh_vf_dpp_del_mac(en_dev, netdev->dev_addr, UNFILTER_MAC,
                                  delete_flag);
        if (ret != 0) {
            LOG_ERR("zxdh vf dpp del mac failed: %d\n", ret);
            return ret;
        }

        if (add_flag) {
            ret = zxdh_vf_dpp_add_mac(en_dev, addr->sa_data, UNFILTER_MAC);
            if (ret != 0) {
                LOG_ERR("zxdh vf dpp add mac failed: %d\n", ret);
                return ret;
            }
            en_dev->ops->set_mac(en_dev->parent, addr->sa_data);
        }

        LOG_INFO("set vf new mac address %pM\n", addr->sa_data);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
    }

    return ret;
}
+
+int32_t zxdh_en_config_mtu_to_np(struct net_device *netdev, int32_t mtu_value)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ dpp_panel_attr_set(&pf_info, en_dev->phy_port,
+ PANEL_FLAG_MTU_OFFLOAD_ENABLE, 1);
+ dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_MTU,
+ mtu_value);
+ dpp_egr_port_attr_set(&pf_info, EGR_FLAG_MTU_OFFLOAD_EN_OFF, 1);
+ dpp_egr_port_attr_set(&pf_info, EGR_FLAG_MTU, mtu_value);
+ } else {
+ ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_MTU_OFFLOAD_EN_OFF, 1,
+ 0);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_egr_port_attr_set config mtu enable failed: %d\n",
+ ret);
+ return ret;
+ }
+ ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_MTU, mtu_value, 0);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_egr_port_attr_set config mut value failed: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int zxdh_en_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int32_t ret = 0;
+
+ if ((new_mtu < ETH_MIN_MTU) || (new_mtu > ZXDH_MAX_MTU)) {
+ return -EINVAL;
+ }
+ LOG_INFO("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+
+ netdev->mtu = new_mtu;
+
+ ret = zxdh_en_config_mtu_to_np(netdev, new_mtu);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_config_mtu_to_np failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/* ndo_tx_timeout stub: TX watchdog expiry is currently not acted upon
 * (no queue reset / recovery implemented). Two variants cover kernels with
 * and without the txqueue parameter. */
#ifdef HAVE_TX_TIMEOUT_TXQUEUE
static void zxdh_en_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
    return;
}
#else
static void zxdh_en_tx_timeout(struct net_device *netdev)
{
    return;
}
#endif
+
/* Legacy (pre-3.x) VLAN group registration callback: intentionally a no-op;
 * VLAN offload is handled through the feature/vid callbacks instead. */
#ifdef HAVE_VLAN_RX_REGISTER
static void zxdh_en_vlan_rx_register(struct net_device *netdev,
                                     struct vlan_group *grp)
{
    return;
}
#endif
+
+static int vf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.hdr.op_code = ZXDH_VLAN_FILTER_ADD;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.rx_vid_add_msg.vlan_id = vid;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("pcieid:0x%x send msg to pf add vlan:%d failed! ret = %d, flag = "
+ "0x%x\n",
+ en_dev->pcie_id, vid, ret, reps.flag);
+ return -1;
+ }
+
+ return 0;
+}
+
/* ndo_vlan_rx_add_vid variants for the different kernel signatures.
 * NOTE(review): all variants are no-op stubs — unlike the kill_vid handler
 * they never call vf_vlan_rx_add_vid()/dpp add-filter; confirm whether VLAN
 * filter additions are intentionally handled elsewhere. */
#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX)
static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev,
                                   __always_unused __be16 proto, u16 vid)
{
    int retval = 0;
    return retval;
}
#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX)
static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
    return 0;
}
#else
static void zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
    return;
}
#endif
+
+static int vf_vlan_rx_del_vid(struct net_device *netdev, u16 vid)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.hdr.op_code = ZXDH_VLAN_FILTER_DEL;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.rx_vid_del_msg.vlan_id = vid;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("pcieid:0x%x send msg to pf del vlan:%d failed! ret = %d, flag = "
+ "0x%x\n",
+ en_dev->pcie_id, vid, ret, reps.flag);
+ return -1;
+ }
+
+ return 0;
+}
+
+#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX)
+static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ int retval = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint16_t pcieid = en_dev->pcie_id;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (vid > MAX_VLAN_ID) {
+ LOG_ERR("vlan id:%d input is err!\n", vid);
+ return -EINVAL;
+ }
+
+ if ((pcieid & PF_AC_MASK) == 0) { /* VF */
+ retval = vf_vlan_rx_del_vid(netdev, vid);
+ goto exit;
+ }
+
+ retval = dpp_del_vlan_filter(&pf_info, vid);
+ if (0 != retval) {
+ LOG_ERR("failed to del vlan: %d\n", vid);
+ goto exit;
+ }
+ LOG_INFO("pf del vlan %d succeed.\n", vid);
+
+exit:
+ return retval;
+}
+#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX)
+static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ return 0;
+}
+#else
+static void zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ return;
+}
+#endif
+
/* Netpoll 'controller' stub (used by netconsole): nothing to poll here. */
static void zxdh_en_netpoll(struct net_device *netdev)
{
}
+
/* Traffic-class (mqprio/tc offload) entry points: currently accepted but
 * not implemented — both handlers are no-op stubs kept so the ndo table can
 * be populated across kernel versions. */
#ifdef HAVE_SETUP_TC
int zxdh_en_setup_tc(struct net_device *netdev, u8 tc)
{
    return 0;
}

#ifdef NETIF_F_HW_TC
/* Signature varies with kernel version (tc_setup_type vs handle/chain). */
#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV
static int __zxdh_en_setup_tc(struct net_device *netdev,
                              enum tc_setup_type type, void *type_data)
#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX)
static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle,
                              u32 chain_index, __be16 proto,
                              struct tc_to_netdev *tc)
#else
static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle,
                              __be16 proto, struct tc_to_netdev *tc)
#endif
{
    return 0;
}
#endif
#endif
+
/* ndo_get_phys_port_id stub: returns success without filling @ppid.
 * NOTE(review): callers will see an unchanged (zeroed) id — confirm this is
 * intended rather than returning -EOPNOTSUPP. */
#ifdef HAVE_NDO_GET_PHYS_PORT_ID
static int zxdh_en_get_phys_port_id(struct net_device *netdev,
                                    struct netdev_phys_item_id *ppid)
{
    return 0;
}
#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
+
/*
 * zxdh_set_en_device - derive the offload path from the wanted features.
 *
 * GSO-type features select the DRS offload path, TSO/HW-csum the DTP path,
 * otherwise the direct NP path.
 * NOTE(review): the three flags are only ever set to true here, never
 * cleared — once a path flag is set it sticks across subsequent feature
 * changes.  Confirm that is intentional.
 */
static void zxdh_set_en_device(struct net_device *netdev,
                               netdev_features_t features)
{
    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;

    if ((features & NETIF_F_GSO) || (features & NETIF_F_GSO_UDP) ||
        (features & NETIF_F_GSO_UDP_L4)) {
        en_dev->drs_offload = true;
    } else if ((features & NETIF_F_TSO) || (features & NETIF_F_HW_CSUM)) {
        en_dev->dtp_offload = true;
    } else {
        en_dev->np_direction = true;
    }

    return;
}
+
+static int zxdh_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ zxdh_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ netdev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature) || feature_handler == NULL) {
+ return 0;
+ }
+
+ err = feature_handler(netdev, enable);
+ if (err) {
+ LOG_ERR("%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ ZXDH_SET_FEATURE(features, feature, enable);
+ return 0;
+}
+
+static int32_t zxdh_dtp_offload_set(struct zxdh_en_device *en_dev,
+ DPP_PF_INFO_T *pf_info)
+{
+ ZXDH_VPORT_T port_attr_entry = { 0 };
+ int32_t ret = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_egr_port_attr_get(pf_info, &port_attr_entry);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_get failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!port_attr_entry.lro_offload &&
+ !port_attr_entry.ip_fragment_offload &&
+ !port_attr_entry.ip_checksum_offload &&
+ !port_attr_entry.tcp_udp_checksum_offload) {
+ ret = dpp_egr_port_attr_set(pf_info,
+ EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 0);
+ } else {
+ ret = dpp_egr_port_attr_set(pf_info,
+ EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1);
+ }
+
+ return ret;
+ }
+
+ ret = zxdh_vf_egr_port_attr_get(en_dev, &port_attr_entry);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_get failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!port_attr_entry.lro_offload && !port_attr_entry.ip_fragment_offload &&
+ !port_attr_entry.ip_checksum_offload &&
+ !port_attr_entry.tcp_udp_checksum_offload) {
+ ret = zxdh_vf_egr_port_attr_set(
+ en_dev, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 0, 0);
+ } else {
+ ret = zxdh_vf_egr_port_attr_set(
+ en_dev, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1, 0);
+ }
+
+ return ret;
+}
+
+static int32_t set_feature_rx_checksum(struct net_device *netdev, bool enable)
+{
+ int en_value = enable ? 1 : 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+ int32_t ret = 0;
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IP_CHKSUM, enable);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IP_CHKSUM set failed: %d\n", ret);
+ return ret;
+ }
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_TCP_UDP_CHKSUM, enable);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_TCP_UDP_CHKSUM set failed: %d\n", ret);
+ return ret;
+ }
+ } else if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IP_CHKSUM, en_value,
+ 0);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IP_CHKSUM set failed: %d\n", ret);
+ return ret;
+ }
+ ret = zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_TCP_UDP_CHKSUM,
+ en_value, 0);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_TCP_UDP_CHKSUM set failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return zxdh_dtp_offload_set(en_dev, &pf_info);
+}
+
+static int set_feature_tx_checksum(struct net_device *netdev, bool enable)
+{
+ if (enable) {
+ netdev->features |= NETIF_F_HW_CSUM;
+ } else {
+ netdev->features &= ~NETIF_F_HW_CSUM;
+ }
+ return 0;
+}
+
+static int set_feature_vxlan_checksum(struct net_device *netdev, bool enable)
+{
+ int ret = 0;
+ int en_value = enable ? 1 : 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (enable) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ } else {
+ netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_egr_port_attr_set(&pf_info,
+ EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD, enable);
+ if (ret != 0) {
+ LOG_ERR("zxdh set vxlan rx checksum failed!\n");
+ return ret;
+ }
+ } else if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ ret = zxdh_vf_egr_port_attr_set(
+ en_dev, EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD, en_value, 0);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_egr_port_attr_set vxlan rx checksum failed!\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int32_t set_feature_rxhash(struct net_device *netdev, bool enable)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ return dpp_vport_rss_en_set(&pf_info, enable);
+ }
+
+ return zxdh_vf_rss_en_set(en_dev, enable);
+}
+
+static int32_t set_vf_cvlan_filter(struct net_device *netdev, bool enable)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t ret = 0;
+
+ msg.hdr.op_code = ZXDH_VLAN_FILTER_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.vlan_filter_set_msg.enable = enable;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("pcieid:0x%x send msg to pf set vlan filter enable:%s failed! ret "
+ "= %d, flag = 0x%x\n",
+ en_dev->pcie_id, enable ? "enable" : "disable", ret, reps.flag);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint16_t pcieid = en_dev->pcie_id;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if ((pcieid & PF_AC_MASK) == 0) { /* VF */
+ ret = set_vf_cvlan_filter(netdev, enable);
+ goto exit;
+ }
+
+ ret = dpp_vport_vlan_filter_en_set(&pf_info, enable);
+
+exit:
+ return ret;
+}
+
+static int set_feature_svlan_filter(struct net_device *netdev, bool enable)
+{
+ int ret = 0;
+ return ret;
+}
+
/*
 * set_vf_qinq_tpid - VF helper: ask the PF (via the bar mailbox) to set the
 * outer-tag (QinQ) TPID for this vport.
 *
 * Returns 0 on success, -EINVAL on mailbox/reply failure.
 * NOTE(review): unlike the other VF mailbox messages in this file,
 * msg.hdr.pcie_id is not filled in here — confirm the PF side does not need
 * it for ZXDH_SET_TPID.
 */
int set_vf_qinq_tpid(struct net_device *netdev, uint16_t tpid)
{
    int ret = 0;
    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    zxdh_msg_info msg = { 0 };
    zxdh_reps_info reps = { 0 };

    msg.hdr.op_code = ZXDH_SET_TPID;
    msg.hdr.vport = en_dev->vport;
    msg.tpid_cfg_msg.tpid = tpid;

    ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
                                       &reps);
    if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
        LOG_ERR("pcieid:0x%x send msg to vfs set tpid: 0x%x failed! ret = %d.\n",
                en_dev->pcie_id, tpid, ret);
        return -EINVAL;
    }

    return 0;
}
+
+static int set_vf_vlan_strip(struct net_device *netdev, bool enable,
+ uint8_t flag)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.hdr.op_code = ZXDH_VLAN_OFFLOAD_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.vlan_strip_msg.enable = enable;
+ msg.vlan_strip_msg.flag = flag;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &reps);
+ if (ret != 0 || reps.flag != ZXDH_REPS_SUCC) {
+ LOG_ERR("pcieid:0x%x send msg to vfs set vlan strip enable:%s failed! ret "
+ "= %d, flag = 0x%x\n",
+ en_dev->pcie_id, enable ? "enable" : "disable", ret, reps.flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_feature_vlan_strip(struct net_device *netdev, bool enable)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_vport_vlan_strip_set(&pf_info, enable);
+ } else {
+ ret = set_vf_vlan_strip(netdev, enable, VLAN_STRIP_MSG_TYPE);
+ }
+
+ return ret;
+}
+
+static int set_feature_qinq_strip(struct net_device *netdev, bool enable)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_vport_vlan_qinq_en_set(&pf_info, enable);
+ } else {
+ ret = set_vf_vlan_strip(netdev, enable, QINQ_STRIP_MSG_TYPE);
+ }
+
+ return ret;
+}
+
+static int32_t set_feature_lro(struct net_device *netdev, bool enable)
+{
+ uint32_t en_value = enable ? 1 : 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+ int32_t ret = 0;
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV4_TCP_ASSEMBLE, en_value);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IPV4_TCP_ASSEMBLE set failed: %d\n", ret);
+ return ret;
+ }
+ dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV6_TCP_ASSEMBLE, en_value);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IPV6_TCP_ASSEMBLE set failed: %d\n", ret);
+ return ret;
+ }
+ } else {
+ zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IPV4_TCP_ASSEMBLE, en_value,
+ 0);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IPV4_TCP_ASSEMBLE set failed: %d\n", ret);
+ return ret;
+ }
+ zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_IPV6_TCP_ASSEMBLE, en_value,
+ 0);
+ if (ret != 0) {
+ LOG_ERR("EGR_FLAG_IPV6_TCP_ASSEMBLE set failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return zxdh_dtp_offload_set(en_dev, &pf_info);
+}
+
/*
 * zxdh_en_set_features - ndo_set_features handler.
 *
 * Routes each changed feature bit to its hardware handler via
 * zxdh_handle_feature().  Successfully applied bits are accumulated in
 * oper_features; if any handler fails, netdev->features is reset to the
 * bits that did apply and -EINVAL is returned so the stack does not commit
 * the full wanted set.
 */
int32_t zxdh_en_set_features(struct net_device *netdev,
                             netdev_features_t wanted_features)
{
    int32_t ret = 0;
    netdev_features_t oper_features = netdev->features;

    zxdh_set_en_device(netdev, wanted_features);

/* Shorthand: apply one feature bit through its handler, recording the
 * result in oper_features. */
#define ZXDH_HANDLE_FEATURE(set_feature, handler)                              \
    zxdh_handle_feature(netdev, &oper_features, wanted_features, set_feature,  \
                        handler)

    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_RXCSUM, set_feature_rx_checksum);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_CSUM, set_feature_tx_checksum);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_GSO_UDP_TUNNEL_CSUM,
                               set_feature_vxlan_checksum);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_RXHASH, set_feature_rxhash);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_vlan_strip);
    ret |= ZXDH_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_RX, set_feature_qinq_strip);

    if (ret) {
        netdev->features = oper_features;
        return -EINVAL;
    }

    return 0;
}
+
/*
 * list_hw_addr_create - allocate and append a hw-address entry to @list.
 *
 * Returns 0 on success, 1 on allocation failure (callers only test for
 * non-zero).  The caller is expected to hold the netdev address lock.
 */
static uint32_t list_hw_addr_create(struct netdev_hw_addr_list *list,
                                    const uint8_t *addr, int32_t addr_len,
                                    uint8_t addr_type, bool global, bool sync)
{
    struct netdev_hw_addr *ha = NULL;

    ha = kzalloc(sizeof(struct netdev_hw_addr), GFP_KERNEL);
    if (ha == NULL) {
        LOG_ERR("Kzalloc struct netdev_hw_addr failed \n");
        return 1;
    }

    /* Fill in the new entry. */
    memcpy(ha->addr, addr, addr_len);
    ha->type = addr_type;
    ha->refcount = 1; /* initial reference count */
    ha->global_use = global;
    ha->synced = sync ? 1 : 0;
    ha->sync_cnt = 0;
    list_add_tail_rcu(&ha->list, &list->list);
    list->count++; /* one more node on the list */

    return 0;
}
+
+static uint32_t list_hw_addr_del(struct netdev_hw_addr_list *list,
+ struct netdev_hw_addr *ha)
+{
+ int32_t refcount = ha->refcount;
+
+ /* 引用的计数大于1,则不能删除此mac地址 */
+ if (--refcount) {
+ return 1;
+ }
+
+ /* 从链表中删除此条目 */
+ list_del_rcu(&ha->list);
+
+ /* 释放ha结构体占用内存,rcu_head可以安全地释放ha占用的内存*/
+ kfree_rcu(ha, rcu_head);
+ list->count--;
+
+ return 0;
+}
+
+bool is_this_mac_exist(struct net_device *netdev, const uint8_t *addr)
+{
+ struct netdev_hw_addr *ha = NULL;
+ bool isexist = false;
+
+ /* 给net_device结构体上锁 */
+ netif_addr_lock_bh(netdev);
+
+ /* 判断此mac地址类型 */
+ if (is_unicast_ether_addr(addr)) {
+ /* 遍历单播mac地址链表 */
+ list_for_each_entry(ha, &netdev->uc.list, list) {
+ /* 检查该单播地址链表中是否存在此mac,且此mac地址标志为单播 */
+ if ((!memcmp(ha->addr, addr, netdev->addr_len)) &&
+ (ha->type == NETDEV_HW_ADDR_T_UNICAST)) {
+ isexist = true;
+ goto out;
+ }
+ }
+ } else {
+ /* 遍历组播mac地址链表 */
+ list_for_each_entry(ha, &netdev->mc.list, list) {
+ /* 检查该组播地址链表中是否存在此mac,且此mac地址类型为组播 */
+ if ((!memcmp(ha->addr, addr, netdev->addr_len)) &&
+ (ha->type == NETDEV_HW_ADDR_T_MULTICAST)) {
+ isexist = true;
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* 给net_device结构体释放锁 */
+ netif_addr_unlock_bh(netdev);
+
+ return isexist;
+}
+
+/**
+ * zxdh_dev_list_addr_add - 在地址链表中添加此mac地址
+ * @netdev: 网络设备结构体
+ * @addr: 要添加的mac地址
+ * @addr_type: mac地址类型
+ */
+int32_t zxdh_dev_list_addr_add(struct net_device *netdev, const uint8_t *addr)
+{
+ int32_t err = 0;
+
+ /* 给net_device结构体上锁 */
+ netif_addr_lock_bh(netdev);
+
+ /* 判断此mac地址类型 */
+ if (is_unicast_ether_addr(addr)) {
+ /* 将此mac地址添加到地址链表中 */
+ err = list_hw_addr_create(&netdev->uc, addr, netdev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST, false, false);
+ if (err != 0) {
+ LOG_ERR("list_hw_addr_create failed\n");
+ }
+ } else {
+ err = list_hw_addr_create(&netdev->mc, addr, netdev->addr_len,
+ NETDEV_HW_ADDR_T_MULTICAST, false, false);
+ if (err != 0) {
+ LOG_ERR("list_hw_addr_create failed\n");
+ }
+ }
+
+ /* 给net_device结构体释放锁 */
+ netif_addr_unlock_bh(netdev);
+
+ return err;
+}
+
+/**
+ * zxdh_dev_list_addr_del - 在地址链表中删除此mac地址
+ * @netdev: 网络设备结构体
+ * @addr: 要删除的mac地址
+ * @addr_type: mac地址类型
+ */
+int32_t zxdh_dev_list_addr_del(struct net_device *netdev, const uint8_t *addr)
+{
+ struct netdev_hw_addr *ha = NULL;
+ int32_t err = 0;
+
+ /* 给net_device上锁 */
+ netif_addr_lock_bh(netdev);
+
+ if (is_unicast_ether_addr(addr)) {
+ /* 遍历单播mac地址链表 */
+ list_for_each_entry(ha, &netdev->uc.list, list) {
+ /* 检查该单播地址链表中是否存在此mac,且此mac地址标志为单播 */
+ if ((!memcmp(ha->addr, addr, netdev->addr_len)) &&
+ (ha->type == NETDEV_HW_ADDR_T_UNICAST)) {
+ /* 从单播地址链表中删除此mac */
+ err = list_hw_addr_del(&netdev->uc, ha);
+ if (err != 0) {
+ LOG_ERR("list_hw_addr_del failed\n");
+ }
+ goto out;
+ }
+ }
+ } else {
+ /* 遍历组播mac地址链表 */
+ list_for_each_entry(ha, &netdev->mc.list, list) {
+ /* 检查该组播地址链表中是否存在此mac,且此mac地址标志为组播 */
+ if ((!memcmp(ha->addr, addr, netdev->addr_len)) &&
+ (ha->type == NETDEV_HW_ADDR_T_MULTICAST)) {
+ /* 从组播地址链表中删除此mac */
+ err = list_hw_addr_del(&netdev->mc, ha);
+ if (err != 0) {
+ LOG_ERR("list_hw_addr_del failed\n");
+ }
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* 给net_device结构体释放锁 */
+ netif_addr_unlock_bh(netdev);
+
+ return err;
+}
+
#ifdef MAC_CONFIG_DEBUG
/*
 * zxdh_pf_dump_all_mac - debug helper: dump every unicast and multicast MAC
 * entry the NP holds for this vport to the log.
 *
 * Fix: the count arguments had been corrupted by HTML-entity mangling
 * ("&current_…" -> "¤t_…"), which does not compile; restored to
 * &current_unicast_num / &current_multicast_num.  Also dropped the malloc
 * cast and the redundant NULL guards before kfree.
 *
 * Returns 0 on success, non-zero on allocation or dump failure.
 */
int32_t zxdh_pf_dump_all_mac(struct zxdh_en_device *en_dev)
{
    MAC_VPORT_INFO *unicast_mac_arry = NULL;
    MAC_VPORT_INFO *multicast_mac_arry = NULL;
    uint32_t current_unicast_num = 0;
    uint32_t current_multicast_num = 0;
    int32_t err = 1;
    int32_t i = 0;
    DPP_PF_INFO_T pf_info = { 0 };

    pf_info.slot = en_dev->slot_id;
    pf_info.vport = en_dev->vport;

    /* Allocate the unicast and multicast dump arrays. */
    unicast_mac_arry = kzalloc(sizeof(MAC_VPORT_INFO) * UNICAST_MAX_NUM,
                               GFP_KERNEL);
    if (unicast_mac_arry == NULL) {
        LOG_ERR("kzalloc unicast_mac_arry failed \n");
        return err;
    }

    multicast_mac_arry = kzalloc(sizeof(MAC_VPORT_INFO) * MULTICAST_MAX_NUM,
                                 GFP_KERNEL);
    if (multicast_mac_arry == NULL) {
        LOG_ERR("kzalloc multicast_mac_arry failed \n");
        goto out1;
    }

    /* Dump all unicast MAC entries from the NP. */
    err = dpp_unicast_mac_dump(&pf_info, unicast_mac_arry,
                               &current_unicast_num);
    if (err != 0) {
        LOG_ERR("dpp_unicast_mac_dump failed\n");
        goto out2;
    }

    /* Dump all multicast MAC entries from the NP. */
    err = dpp_multicast_mac_dump(&pf_info, multicast_mac_arry,
                                 &current_multicast_num);
    if (err != 0) {
        LOG_ERR("dpp_multicast_mac_dump failed\n");
        goto out2;
    }

    for (i = 0; i < current_unicast_num; ++i) {
        LOG_INFO("unicast_mac_arry[%d].vport is %#x\n", i,
                 unicast_mac_arry[i].vport);
        LOG_INFO("unicast_mac_arry[%d].mac is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
                 i, unicast_mac_arry[i].addr[0], unicast_mac_arry[i].addr[1],
                 unicast_mac_arry[i].addr[2], unicast_mac_arry[i].addr[3],
                 unicast_mac_arry[i].addr[4], unicast_mac_arry[i].addr[5]);
    }
    for (i = 0; i < current_multicast_num; ++i) {
        LOG_INFO("multicast_mac_arry[%d].vport is %#x\n", i,
                 multicast_mac_arry[i].vport);
        LOG_INFO(
                "multicast_mac_arry[%d].mac is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
                i, multicast_mac_arry[i].addr[0], multicast_mac_arry[i].addr[1],
                multicast_mac_arry[i].addr[2], multicast_mac_arry[i].addr[3],
                multicast_mac_arry[i].addr[4], multicast_mac_arry[i].addr[5]);
    }

out2:
    kfree(multicast_mac_arry);

out1:
    kfree(unicast_mac_arry);

    return err;
}
#endif /* MAC_CONFIG_DEBUG */
+
+int32_t unicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev,
+ const uint8_t *addr, uint16_t flags)
+{
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 判断目前所配置mac地址数量是否超过上限 */
+ if (en_dev->curr_unicast_num >= DEV_UNICAST_MAX_NUM - 1) {
+ LOG_ERR("curr_unicast_num is beyond maximum\n");
+ return -ENOSPC;
+ }
+
+ /* 遍历单播地址链表,判断是否存在此单播mac */
+ if (is_this_mac_exist(dev, addr)) {
+ LOG_DEBUG("Mac already exists\n");
+ if (!(flags & NLM_F_EXCL)) {
+ return 0;
+ }
+ return -EEXIST;
+ }
+
+ /* 如果待配置mac和本机mac相同,则不配置到NP中, 只将此mac添加到单播地址链表中
+ */
+ if (!memcmp(addr, dev->dev_addr, dev->addr_len)) {
+ goto out;
+ }
+
+ /* 将此mac地址配置到np中 */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ err = dpp_add_mac(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_add_mac failed\n");
+ return err;
+ }
+ } else {
+ err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_add_mac failed\n");
+ return err;
+ }
+ }
+
+out:
+ /* 将此单播mac地址添加到地址链表中 */
+ err = zxdh_dev_list_addr_add(dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_dev_list_addr_add failed\n");
+ return err;
+ }
+ en_dev->curr_unicast_num++;
+ LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num);
+ return err;
+}
+
+int32_t multicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev,
+ const uint8_t *addr, uint16_t flags)
+{
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 判断目前所配置mac地址数量是否超过上限*/
+ if (en_dev->curr_multicast_num >= DEV_MULTICAST_MAX_NUM) {
+ LOG_ERR("curr_multicast_num is beyond maximum\n");
+ return -ENOSPC;
+ }
+
+ /* 遍历组播地址链表,判断是否存在此mac */
+ if (is_this_mac_exist(dev, addr)) {
+ LOG_DEBUG("Mac already exists\n");
+ if (!(flags & NLM_F_EXCL)) {
+ return 0;
+ }
+ return -EEXIST;
+ }
+
+ /* 将此组播mac地址配置到np中 */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ err = dpp_multi_mac_add_member(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_multi_mac_add_member failed\n");
+ return err;
+ }
+ } else {
+ err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_add_mac failed\n");
+ return err;
+ }
+ }
+
+ /* 将此组播mac地址添加到地址链表中 */
+ err = zxdh_dev_list_addr_add(dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_dev_list_addr_add failed\n");
+ return err;
+ }
+ en_dev->curr_multicast_num++;
+ LOG_DEBUG("curr_multicast_num is %d\n", en_dev->curr_multicast_num);
+
+ return err;
+}
+
+int32_t ipv6_multicast_mac_add(struct zxdh_en_device *en_dev,
+ struct net_device *dev, const uint8_t *addr)
+{
+ int32_t err = 1;
+ int i = 0;
+ MAC_VPORT_INFO *multicast_mac_arry = NULL;
+ uint32_t current_multicast_num = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ /* VF流程 */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ err = zxdh_vf_dpp_add_ipv6_mac(en_dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_add_ipv6_mac failed\n");
+ return err;
+ }
+ en_dev->curr_multicast_num++;
+ return 0;
+ }
+
+ /* PF流程 */
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 开辟组播数组*/
+ multicast_mac_arry = (MAC_VPORT_INFO *)kzalloc(
+ sizeof(MAC_VPORT_INFO) * MULTICAST_MAX_NUM, GFP_KERNEL);
+ if (multicast_mac_arry == NULL) {
+ LOG_ERR("kzalloc multicast_mac_arry failed \n");
+ return err;
+ }
+
+ /* 从NP中dump所有组播mac地址、总数量*/
+ err = dpp_multicast_mac_dump(&pf_info, multicast_mac_arry,
+ ¤t_multicast_num);
+ if (err != 0) {
+ LOG_ERR("dpp_multicast_mac_dump failed\n");
+ goto out;
+ }
+
+ /* 判断NP中是否已经存储此多播MAC */
+ for (i = 0; i < current_multicast_num; ++i) {
+ if (memcmp(multicast_mac_arry[i].addr, addr, ETH_ALEN) == 0) {
+ LOG_ERR("Multicast MAC Address: %pM Exist, can't add!!",
+ multicast_mac_arry[i].addr);
+ err = 0;
+ goto out;
+ }
+ }
+
+ /* 判断目前NP中所配置mac地址数量是否超过上限*/
+ if (current_multicast_num >= DEV_MULTICAST_MAX_NUM) {
+ LOG_ERR("curr_multicast_num is beyond maximum, can't add!!\n");
+ err = 1;
+ goto out;
+ }
+
+ /* 将此组播mac地址配置到np中 */
+ err = dpp_multi_mac_add_member(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_multi_mac_add_member failed\n");
+ goto out;
+ }
+
+ en_dev->curr_multicast_num++;
+ LOG_DEBUG("%s: curr_multicast_num is %d\n", en_dev->netdev->name,
+ en_dev->curr_multicast_num);
+
+out:
+ if (multicast_mac_arry != NULL) {
+ kfree(multicast_mac_arry);
+ }
+
+ return err;
+}
+
+int32_t unicast_mac_del(struct zxdh_en_device *en_dev, struct net_device *dev,
+ const uint8_t *addr)
+{
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 判断目前所配置mac地址数量是否小于0 */
+ if (en_dev->curr_unicast_num <= 0) {
+ LOG_ERR("curr_unicast_num is less than 0\n");
+ return -ENOENT;
+ }
+
+ /* 遍历单播地址链表,判断是否存在此mac */
+ if (!is_this_mac_exist(dev, addr)) {
+ LOG_DEBUG("Mac is not exists\n");
+ return -ENOENT;
+ }
+
+ /* 如果待删除mac和本机mac相同,则不从NP中删除,只从链表中删除 */
+ if (!memcmp(addr, dev->dev_addr, dev->addr_len)) {
+ goto out;
+ }
+
+ /* 从np中删除此单播mac */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ /* 此设备为PF */
+ err = dpp_del_mac(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_del_mac failed\n");
+ return err;
+ }
+ LOG_DEBUG("dpp_del_mac succeed\n");
+ } else {
+ /* 此设备为VF */
+ err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_del_mac failed\n");
+ return err;
+ }
+ LOG_DEBUG("zxdh_vf_dpp_del_mac succeed\n");
+ }
+
+out:
+ /* 从链表中删除单播mac */
+ err = zxdh_dev_list_addr_del(dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_dev_list_addr_del failed\n");
+ return err;
+ }
+ en_dev->curr_unicast_num--;
+ LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num);
+ return err;
+}
+
+int32_t multicast_mac_del(struct zxdh_en_device *en_dev, struct net_device *dev,
+ const uint8_t *addr)
+{
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 判断目前所配置mac地址数量是否小于0 */
+ if (en_dev->curr_multicast_num <= 0) {
+ LOG_ERR("curr_multicast_num is less than 0\n");
+ return -ENOENT;
+ }
+
+ /* 遍历组播地址链表,判断是否存在此组播mac,如果不存在,则返回报错 */
+ if (!is_this_mac_exist(dev, addr)) {
+ LOG_DEBUG("Mac is not exists\n");
+ return -ENOENT;
+ }
+
+ /* 从np中删除此组播mac */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ /* 此设备为PF */
+ err = dpp_multi_mac_del_member(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_multi_mac_del_member failed\n");
+ return err;
+ }
+ LOG_DEBUG("dpp_multi_mac_del_member succeed\n");
+ } else {
+ /* 此设备为VF */
+ err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_del_mac failed\n");
+ return err;
+ }
+ }
+
+ /* 从链表中删除组播mac */
+ err = zxdh_dev_list_addr_del(dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_dev_list_addr_del failed\n");
+ return err;
+ }
+ en_dev->curr_multicast_num--;
+ LOG_DEBUG("curr_multicast_num is %d\n", en_dev->curr_multicast_num);
+
+ return err;
+}
+
+int32_t ipv6_multicast_mac_del(struct zxdh_en_device *en_dev,
+ struct net_device *dev, const uint8_t *addr)
+{
+ int32_t err = 1;
+ int i = 0;
+ MAC_VPORT_INFO *multicast_mac_arry = NULL;
+ uint32_t current_multicast_num = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ /* VF流程 */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ err = zxdh_vf_dpp_del_ipv6_mac(en_dev, addr);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_del_ipv6_mac failed\n");
+ return err;
+ }
+ en_dev->curr_multicast_num--;
+ return 0;
+ }
+
+ /* PF流程 */
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 开辟组播数组*/
+ multicast_mac_arry = (MAC_VPORT_INFO *)kzalloc(
+ sizeof(MAC_VPORT_INFO) * MULTICAST_MAX_NUM, GFP_KERNEL);
+ if (multicast_mac_arry == NULL) {
+ LOG_ERR("kzalloc multicast_mac_arry failed \n");
+ return err;
+ }
+
+ /* 从NP中dump所有组播mac地址、总数量*/
+ err = dpp_multicast_mac_dump(&pf_info, multicast_mac_arry,
+ ¤t_multicast_num);
+ if (err != 0) {
+ LOG_ERR("dpp_multicast_mac_dump failed\n");
+ goto out;
+ }
+
+ /* 判断NP中是否已经删除此多播MAC */
+ for (i = 0; i < current_multicast_num; ++i) {
+ if (memcmp(multicast_mac_arry[i].addr, addr, ETH_ALEN) != 0) {
+ LOG_ERR("Multicast MAC Address: %pM don't Exist, can't delete!!",
+ multicast_mac_arry[i].addr);
+ err = 0;
+ goto out;
+ }
+ }
+
+ /* 判断目前NP中所配置mac地址数量是否小于0 */
+ if (current_multicast_num <= 0) {
+ LOG_ERR("curr_multicast_num is less than 0\n");
+ err = 1;
+ goto out;
+ }
+
+ /* 从np中删除此组播mac */
+ err = dpp_multi_mac_del_member(&pf_info, addr);
+ if (err != 0) {
+ LOG_ERR("dpp_multi_mac_del_member failed\n");
+ goto out;
+ }
+ LOG_DEBUG("dpp_multi_mac_del_member succeed\n");
+
+ en_dev->curr_multicast_num--;
+ LOG_DEBUG("%s: curr_multicast_num is %d\n", en_dev->netdev->name,
+ en_dev->curr_multicast_num);
+
+out:
+ if (multicast_mac_arry != NULL) {
+ kfree(multicast_mac_arry);
+ }
+
+ return err;
+}
+
/* Switch the PF — and every probed VF under it — between VEB and VEPA
 * bridging mode by programming the EGR_FLAG_VEPA_EN_OFF egress attribute.
 * @setting: true selects VEPA, false selects VEB.
 * Returns 0 on success (including when the mode is already active) or the
 * first non-zero dpp_egr_port_attr_set() error. */
int32_t zxdh_en_set_vepa(struct zxdh_en_device *en_dev, bool setting)
{
    struct zxdh_vf_item *vf_item = NULL;
    bool vepa = false;
    uint16_t vf_idx = 0;
    int32_t ret = 0;
    DPP_PF_INFO_T pf_info = { 0 };

    pf_info.slot = en_dev->slot_id;
    pf_info.vport = en_dev->vport;

    /* No-op when the requested mode is already in effect.
     * NOTE(review): logged at error level although it is informational. */
    vepa = en_dev->ops->get_vepa(en_dev->parent);
    if (setting == vepa) {
        LOG_ERR("vport(0x%x) is now %s mode\n", en_dev->vport,
                vepa ? "vepa" : "veb");
        return 0;
    }

    /* Persist the new mode, then program the PF's own egress attribute. */
    en_dev->ops->set_vepa(en_dev->parent, setting);
    ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF,
                                (uint32_t)setting);
    if (ret != 0) {
        LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", en_dev->vport,
                setting ? "vepa" : "veb", ret);
        return ret;
    }

    /* Propagate the mode to each probed VF; iteration stops at the first
     * index for which no vf_item can be fetched. */
    for (vf_idx = 0; vf_idx < ZXDH_VF_NUM_MAX; vf_idx++) {
        vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
        if (IS_ERR_OR_NULL(vf_item)) {
            break;
        }

        if (vf_item->is_probed) {
            /* Reuse pf_info with the VF's vport for the attribute write. */
            pf_info.vport = vf_item->vport;
            ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF,
                                        (uint32_t)setting);
            if (ret != 0) {
                LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n",
                        vf_item->vport, setting ? "vepa" : "veb", ret);
                return ret;
            }
            LOG_INFO("Configure vport(0x%x) to %s mode\n", vf_item->vport,
                     setting ? "vepa" : "veb");
        }
    }

    LOG_INFO("Configure vport(0x%x) to %s mode\n", en_dev->vport,
             setting ? "vepa" : "veb");

    return ret;
}
+
+#ifdef HAVE_FDB_OPS
/* ndo_fdb_add hook: add a unicast or multicast MAC to the device via the
 * FDB netlink interface (bridge fdb add). Only static (NUD_PERMANENT)
 * entries are accepted. The signature varies with the kernel version and
 * is selected by the compat macros below. */
#if defined(HAVE_NDO_FDB_ADD_EXTACK)
static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                               struct net_device *dev,
                               const unsigned char *addr, u16 vid, u16 flags,
                               struct netlink_ext_ack *extack)
#elif defined(HAVE_NDO_FDB_ADD_VID)
static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                               struct net_device *dev,
                               const unsigned char *addr, u16 vid, u16 flags)
#elif defined(HAVE_NDO_FDB_ADD_NLATTR)
static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                               struct net_device *dev,
                               const unsigned char *addr, u16 flags)
#elif defined(USE_CONST_DEV_UC_CHAR)
static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
                               const unsigned char *addr, u16 flags)
#else
static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
                               unsigned char *addr, u16 flags)
#endif
{
    struct zxdh_en_priv *en_priv = netdev_priv(dev);
    struct zxdh_en_device *en_dev =
        &en_priv->edev; /* private data of the aux-layer net_device */
    int32_t err = 0;

#ifdef MAC_CONFIG_DEBUG
    LOG_DEBUG("vport is %#x\n", en_dev->vport);
    LOG_DEBUG("addr is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", addr[0], addr[1],
              addr[2], addr[3], addr[4], addr[5]);
    LOG_DEBUG("ndm_state is %u\n", ndm->ndm_state);
#endif /* MAC_CONFIG_DEBUG */

    /* Only static (NUD_PERMANENT) FDB entries are supported. */
    if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
        LOG_ERR("FDB only supports static addresses\n");
        return -EINVAL;
    }

    /* Reject the all-zero MAC address. */
    if (is_zero_ether_addr(addr)) {
        LOG_ERR("Invalid mac\n");
        return -EINVAL;
    }

    /* Dispatch by address class: unicast / link-local go to the unicast
     * table, multicast to the multicast table. */
    if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
        err = unicast_mac_add(en_dev, dev, addr, flags);
        if (err != 0) {
            LOG_ERR("unicast_mac_add failed");
            return err;
        }
    } else if (is_multicast_ether_addr(addr)) {
        err = multicast_mac_add(en_dev, dev, addr, flags);
        if (err != 0) {
            LOG_ERR("multicast_mac_add failed");
            return err;
        }
    } else {
        err = -EINVAL;
    }

#ifdef MAC_CONFIG_DEBUG
    /* Debug build: dump the whole NP MAC table after the change (PF only). */
    if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
        err = zxdh_pf_dump_all_mac(en_dev);
        if (err != 0) {
            LOG_INFO("zxdh_pf_dump_all_mac failed\n");
            return err;
        }
    }
#endif /* MAC_CONFIG_DEBUG */

    LOG_DEBUG("zxdh_en_ndo_fdb_add end\n");
    return err;
}
+
+#ifdef HAVE_NDO_FEATURES_CHECK
/* ndo_features_check hook: no per-skb feature restrictions are applied;
 * the offered feature set is returned unchanged. */
static netdev_features_t zxdh_en_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
{
    return features;
}
+#endif /* HAVE_NDO_FEATURES_CHECK */
+
/* ndo_fdb_del hook: remove a unicast or multicast MAC from the device
 * via the FDB netlink interface (bridge fdb del). Only static
 * (NUD_PERMANENT) entries are accepted. The signature varies with the
 * kernel version and is selected by the compat macros below. */
#ifdef USE_CONST_DEV_UC_CHAR
#ifdef HAVE_NDO_FDB_ADD_VID
static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct nlattr **nla,
                               struct net_device *dev,
                               const unsigned char *addr, u16 vid)
#else
static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
                               const unsigned char *addr)
#endif
#else
#ifdef HAVE_NDO_FDB_ADD_VID
static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
                               unsigned char *addr, u16 vid)
#else
static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
                               unsigned char *addr)
#endif
#endif
{
    struct zxdh_en_priv *en_priv = netdev_priv(dev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    int32_t err = 0;

#ifdef MAC_CONFIG_DEBUG
    LOG_DEBUG("the vport is %#x", en_dev->vport);
    LOG_DEBUG("the addr is %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", addr[0], addr[1],
              addr[2], addr[3], addr[4], addr[5]);
    LOG_DEBUG("ndm_state is %u,\n", ndm->ndm_state);
#endif /* MAC_CONFIG_DEBUG */

    /* Only static (NUD_PERMANENT) FDB entries are supported. */
    if (!(ndm->ndm_state & NUD_PERMANENT)) {
        LOG_ERR("FDB only supports static addresses\n");
        return -EINVAL;
    }

    /* Reject the all-zero MAC address. */
    if (is_zero_ether_addr(addr)) {
        LOG_ERR("Invalid mac address\n");
        return -EINVAL;
    }

    /* Dispatch by address class to the matching delete path. */
    if (is_unicast_ether_addr(addr)) {
        err = unicast_mac_del(en_dev, dev, addr);
        if (err != 0) {
            LOG_ERR("unicast_mac_del failed\n");
            return err;
        }
    } else if (is_multicast_ether_addr(addr)) {
        err = multicast_mac_del(en_dev, dev, addr);
        if (err != 0) {
            LOG_ERR("multicast_mac_del failed\n");
            return err;
        }
    } else {
        return -EINVAL;
    }

#ifdef MAC_CONFIG_DEBUG
    /* Debug build: dump the whole NP MAC table after the change (PF only). */
    if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
        err = zxdh_pf_dump_all_mac(en_dev);
        if (err != 0) {
            LOG_ERR("zxdh_pf_dump_all_mac failed\n");
            return err;
        }
    }
#endif /* MAC_CONFIG_DEBUG */

    LOG_DEBUG("zxdh_en_ndo_fdb_del end\n");
    return err;
}
+
+#ifdef HAVE_BRIDGE_ATTRIBS
/* ndo_bridge_setlink hook (PF only): parse IFLA_BRIDGE_MODE out of the
 * IFLA_AF_SPEC netlink attribute and switch the device between VEB and
 * VEPA mode. The signature varies with the kernel version. */
#if defined(HAVE_NDO_BRIDGE_SETLINK_EXTACK)
static int zxdh_en_ndo_bridge_setlink(struct net_device *dev,
                                      struct nlmsghdr *nlh, u16 flags,
                                      struct netlink_ext_ack *extack)
#elif defined(HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS)
static int zxdh_en_ndo_bridge_setlink(struct net_device *dev,
                                      struct nlmsghdr *nlh, u16 flags)
#else
static int zxdh_en_ndo_bridge_setlink(struct net_device *dev,
                                      struct nlmsghdr *nlh)
#endif
{
    struct zxdh_en_priv *en_priv = netdev_priv(dev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    struct nlattr *attr = NULL;
    struct nlattr *br_spec = NULL;
    int32_t rem = 0;
    uint16_t mode = BRIDGE_MODE_UNDEF;
    bool setting = false;

    /* Bridge mode is only configurable through the PF. */
    if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
        return -EOPNOTSUPP;
    }

    br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
    if (br_spec == NULL) {
        return -EINVAL;
    }

    /* Scan the nested attributes for the first IFLA_BRIDGE_MODE entry. */
    nla_for_each_nested(attr, br_spec, rem) {
        if (nla_type(attr) != IFLA_BRIDGE_MODE) {
            continue;
        }

        if (nla_len(attr) < sizeof(mode)) {
            return -EINVAL;
        }

        /* Only VEB and VEPA are valid modes for this device. */
        mode = nla_get_u16(attr);
        if (mode > BRIDGE_MODE_VEPA) {
            return -EINVAL;
        }
        break;
    }

    /* No IFLA_BRIDGE_MODE attribute present at all. */
    if (mode == BRIDGE_MODE_UNDEF) {
        return -EINVAL;
    }

    setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;

    return zxdh_en_set_vepa(en_dev, setting);
}
+
/* ndo_bridge_getlink hook: report the current bridge mode (VEB or VEPA)
 * through the default rtnetlink helper. */
#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                      struct net_device *dev,
                                      u32 __always_unused filter_mask,
                                      int nlflags)
#elif defined(HAVE_BRIDGE_FILTER)
static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                      struct net_device *dev,
                                      u32 __always_unused filter_mask)
#else
static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                      struct net_device *dev)
#endif /* NDO_BRIDGE_STUFF */
{
    struct zxdh_en_priv *en_priv = netdev_priv(dev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    uint8_t mode = 0;
    bool vepa = false;

    vepa = en_dev->ops->get_vepa(en_dev->parent);
    mode = vepa ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;

    /* NOTE(review): nlflags and filter_mask are referenced unconditionally
     * below, but only the HAVE_NDO_BRIDGE_GETLINK_NLFLAGS signature
     * declares both; the other two variants would not compile. Confirm
     * the intended kernel support range. */
    return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
                                   filter_mask, NULL);
}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+
+static int32_t zxdh_pf_notify_vf_reset(struct zxdh_en_device *en_dev,
+ int vf_idx)
+{
+ int32_t retval = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack;
+
+ msg.hdr_vf.op_code = ZXDH_SET_VF_RESET;
+ msg.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
+
+ retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF, &msg,
+ &ack);
+ if (retval != 0) {
+ LOG_ERR("zxdh_send_command_to_vf failed: %d\n", retval);
+ }
+ return retval;
+}
+
/* Push the current link state for VF @vf_idx down to the hardware/agent:
 * record the link (and, in bond mode, the PF phy port) via
 * set_vf_link_info, then send an AGENT_DEV_STATUS_NOTIFY message so the
 * VF is interrupted and re-reads its status. Returns the send result. */
static int32_t zxdh_pf_notify_vf_set_link_state(struct zxdh_en_device *en_dev,
                                                int vf_idx, bool link_up)
{
    int32_t retval = 0;
    uint16_t func_no = 0;
    uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id);
    uint8_t link_info = 0;
    uint8_t link_up_val = 0;
    uint8_t phyport_val = 0;
    zxdh_msg_info msg = { 0 };
    zxdh_reps_info ack = { 0 };

    msg.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY;
    msg.hdr_to_agt.pcie_id = en_dev->pcie_id;

    /* Target the VF's global function number in the MSI-X notify list. */
    func_no = GET_FUNC_NO(pf_no, vf_idx);
    LOG_DEBUG("vf_idx:%d, func_no=0x%x\n", vf_idx, func_no);
    msg.pcie_msix_msg.func_no[msg.pcie_msix_msg.num++] = func_no;
    if (en_dev->ops->is_bond(en_dev->parent)) {
        /* Bond mode packs phy port (high nibble) and link state (low
         * nibble) into one byte. */
        link_up_val = link_up ? 1 : 0;
        phyport_val = en_dev->ops->get_pf_phy_port(en_dev->parent);
        link_info = (phyport_val & 0x0F) << 4 | (link_up_val & 0x0F);
        LOG_DEBUG("phyport and link_up need write to VQM, val: 0x%x\n",
                  link_info);
        en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, link_info);
    } else {
        en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, link_up ? 1 : 0);
    }
    LOG_DEBUG("msg.pcie_msix_msg.num:%d\n", msg.pcie_msix_msg.num);
    retval = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &ack);
    if (retval != 0) {
        LOG_ERR("failed to update VF link info\n");
    }
    return retval;
}
+
+static int32_t zxdh_pf_set_vf_link_state(struct zxdh_en_device *en_dev,
+ int vf_idx, int link_status)
+{
+ int32_t retval = 0;
+ struct zxdh_vf_item *vf_item = NULL;
+ bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
+
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+ switch (link_status) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state auto\n",
+ vf_idx);
+ vf_item->link_forced = FALSE;
+ vf_item->link_up = pf_link_up;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state enable\n",
+ vf_idx);
+ vf_item->link_forced = TRUE;
+ vf_item->link_up = TRUE;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ LOG_DEBUG("[SET_VF_LINK_STATE]--NDO set VF %d link state disable\n",
+ vf_idx);
+ vf_item->link_forced = TRUE;
+ vf_item->link_up = FALSE;
+ break;
+ default:
+ LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF %d - invalid link status %d\n",
+ vf_idx, link_status);
+ return -EINVAL;
+ }
+ LOG_DEBUG("vf_item->is_probed: %s\n",
+ vf_item->is_probed ? "TRUE" : "FALSE");
+ if (vf_item->is_probed) {
+ /* Notify the VF of its new link state */
+ retval = zxdh_pf_notify_vf_set_link_state(en_dev, vf_idx,
+ vf_item->link_up);
+ if (0 != retval) {
+ LOG_ERR("[SET_VF_LINK_STATE]--Failed to set VF %d link state %d\n",
+ vf_idx, vf_item->link_up);
+ return retval;
+ }
+ }
+ return retval;
+}
+
+int zxdh_en_ndo_set_vf_link_state(struct net_device *netdev, int vf_idx,
+ int link_status)
+{
+ int num_vfs = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct pci_dev *pdev = NULL;
+ struct dh_core_dev *dh_dev = NULL;
+
+ dh_dev = en_dev->parent;
+ pdev = en_dev->ops->get_pdev(dh_dev);
+ num_vfs = pci_num_vf(pdev);
+ if ((vf_idx < 0) || (vf_idx >= num_vfs)) {
+ LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF link - invalid VF idx: %d\n",
+ vf_idx);
+ return -EINVAL;
+ }
+ return zxdh_pf_set_vf_link_state(en_dev, vf_idx, link_status);
+}
+
+static int32_t zxdh_pf_set_vf_port_vlan(struct zxdh_en_device *en_dev,
+ int vf_idx, u16 vid, u8 qos,
+ uint16_t vlan_proto)
+{
+ int32_t retval = 0;
+ struct zxdh_vf_item *vf_item = NULL;
+ zxdh_msg_info msg;
+ zxdh_reps_info ack;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ /* 获取pf本地保存的vf变量*/
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+ if (!vf_item->is_probed) {
+ LOG_DEBUG("vf %d is not probed.\n", vf_idx);
+ return -EINVAL;
+ }
+
+ if (vf_item->vlan == vid) {
+ return 0;
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = vf_item->vport;
+ if (vid) {
+ /* vf端口Vlan strip开启*/
+ retval = dpp_vport_vlan_strip_set(&pf_info, 1);
+ if (retval != 0) {
+ LOG_ERR("dpp_vport_vlan_strip_set failed, retval: %d\n", retval);
+ return retval;
+ }
+ /* 将vlan_id add到表项中*/
+ retval = dpp_vport_vlan_filter_en_set(&pf_info, 1);
+ if (retval != 0) {
+ LOG_ERR("dpp_vport_vlan_filter_en_set failed, retval: %d\n",
+ retval);
+ return retval;
+ }
+
+ retval = dpp_add_vlan_filter(&pf_info, vid);
+ if (0 != retval) {
+ LOG_ERR("failed to add vlan: %d\n", vid);
+ return retval;
+ }
+ } else {
+ /* vf端口vlan strip关闭*/
+ retval = dpp_vport_vlan_strip_set(&pf_info, 0);
+ if (retval != 0) {
+ LOG_ERR("dpp_vport_vlan_strip_set failed, retval: %d\n", retval);
+ return retval;
+ }
+ /* 将Vlan_id 从表项中kill*/
+ retval = dpp_vport_vlan_filter_en_set(&pf_info, 0);
+ if (retval != 0) {
+ LOG_ERR("dpp_vport_vlan_filter_en_set failed, retval: %d\n",
+ retval);
+ return retval;
+ }
+
+ retval = dpp_vlan_filter_init(&pf_info);
+ if (retval != 0) {
+ LOG_ERR("dpp_vlan_filter_init failed: %d\n", retval);
+ return retval;
+ }
+ }
+
+ msg.hdr_vf.op_code = ZXDH_PF_SET_VF_VLAN;
+ msg.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
+
+ msg.vf_vlan_msg.vlan_id = vid;
+ msg.vf_vlan_msg.qos = qos;
+ msg.vf_vlan_msg.protocl = vlan_proto;
+ msg.vf_vlan_msg.vf_idx = vf_idx;
+
+ retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF, &msg,
+ &ack);
+ if (retval != 0) {
+ LOG_ERR("zxdh_send_command_to_vf failed: %d\n", retval);
+ return retval;
+ }
+
+ /* 更新pf本地的vf vlan信息,用于ip link show显示*/
+ vf_item->vlan = vid;
+ vf_item->qos = qos;
+ return retval;
+}
+
+int zxdh_en_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_vf_item *vf_item = NULL;
+ int32_t retval = 0;
+ bool delete_flag = true;
+ uint8_t *addr = NULL;
+ uint8_t i = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id);
+ if (IS_ERR_OR_NULL(vf_item)) {
+ LOG_ERR("Failed to get vf_item, vf_id:%d\n", vf_id);
+ return PTR_ERR(vf_item);
+ }
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = vf_item->vport;
+
+ if (is_multicast_ether_addr(mac)) {
+ LOG_ERR("Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
+ return -EINVAL;
+ }
+
+ if (ether_addr_equal(vf_item->mac, mac)) {
+ LOG_INFO("[SET_VF_MAC]--already using mac address %pM\n", mac);
+ return retval;
+ }
+
+ if (is_zero_ether_addr(mac)) {
+ eth_zero_addr(vf_item->mac);
+ vf_item->pf_set_mac = false;
+ en_dev->ops->set_vf_mac(en_dev->parent, mac, vf_id);
+ eth_zero_addr(vf_item->vf_mac_info.unicast_mac[0]);
+ goto vf_reset;
+ }
+
+ for (i = 1; i < DEV_UNICAST_MAX_NUM; ++i) {
+ addr = vf_item->vf_mac_info.unicast_mac[i];
+ if (!memcmp(vf_item->mac, addr, netdev->addr_len)) {
+ delete_flag = false;
+ }
+ }
+
+ if (delete_flag) {
+ if (!is_zero_ether_addr(vf_item->mac)) {
+ retval = dpp_del_mac(&pf_info, vf_item->mac);
+ if (retval != 0) {
+ LOG_ERR("delete vf old mac in NP failed.\n");
+ return retval;
+ }
+ }
+ }
+
+ vf_item->pf_set_mac = true;
+ en_dev->ops->set_vf_mac(en_dev->parent, mac, vf_id);
+ ether_addr_copy(vf_item->vf_mac_info.unicast_mac[0], mac);
+ ether_addr_copy(vf_item->mac, mac);
+ LOG_INFO("[SET_VF_MAC]--setting MAC %pM on VF %d\n", mac, vf_id);
+
+vf_reset:
+ if (vf_item->is_probed) {
+ retval = zxdh_pf_notify_vf_reset(en_dev, vf_id);
+ if (retval != 0) {
+ LOG_ERR("zxdh_pf_notify_vf_reset failed: %d\n", retval);
+ }
+ }
+
+ return retval;
+}
+
+#ifdef IFLA_VF_VLAN_INFO_MAX
+int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
+#else
+int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos)
+#endif /* IFLA_VF_VLAN_INFO_MAX */
+{
+ int num_vfs = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct pci_dev *pdev = NULL;
+ struct dh_core_dev *dh_dev = NULL;
+
+ /* Comparing with the mellnox network card, it only supports the configuration
+ * of cvlan*/
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ return -EPROTONOSUPPORT;
+ }
+ dh_dev = en_dev->parent;
+ pdev = en_dev->ops->get_pdev(dh_dev);
+ num_vfs = pci_num_vf(pdev);
+ if ((vf_id < 0) || (vf_id >= num_vfs)) {
+ LOG_ERR("[SET+VF_VLAN]--NDO set VF vlan - invalid VF idx: %d\n", vf_id);
+ return -EINVAL;
+ }
+ return zxdh_pf_set_vf_port_vlan(en_dev, vf_id, vlan_id, qos, vlan_proto);
+}
+
/* ndo hook for per-VF tx bandwidth: intentionally a no-op that reports
 * success. NOTE(review): per-VF rate limiting appears to be handled by
 * zxdh_en_ndo_set_vf_rate instead — confirm this stub is deliberate. */
int zxdh_en_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
                          int max_tx_rate)
{
    return 0;
}
+
+int zxdh_en_ndo_get_vf_config(struct net_device *netdev, int vf_idx,
+ struct ifla_vf_info *ivi)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_vf_item *vf_item = NULL;
+
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+ if (IS_ERR_OR_NULL(vf_item)) {
+ LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
+ return PTR_ERR(vf_item);
+ }
+
+ ivi->vf = vf_idx;
+
+ ether_addr_copy(ivi->mac, vf_item->mac);
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ ivi->max_tx_rate = vf_item->max_tx_rate;
+ ivi->min_tx_rate = vf_item->min_tx_rate;
+#else
+ ivi->tx_rate = vf_item->max_tx_rate;
+#endif
+
+ ivi->vlan = vf_item->vlan;
+ ivi->qos = vf_item->qos;
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+ if (vf_item->link_forced == false) {
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ } else if (vf_item->link_up == true) {
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ } else {
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ }
+#endif
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ ivi->spoofchk = vf_item->spoofchk;
+#endif
+
+#ifdef HAVE_NDO_SET_VF_TRUST
+ ivi->trusted = vf_item->trusted;
+#endif
+
+ return 0;
+}
+
+int zxdh_en_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_idx,
+ bool enable)
+{
+ int ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_vf_item *vf_item = NULL;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+ if (IS_ERR_OR_NULL(vf_item)) {
+ LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
+ return PTR_ERR(vf_item);
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = vf_item->vport;
+ vf_item->spoofchk = enable;
+ LOG_INFO("vf %d spoof check is %s\n", vf_idx,
+ vf_item->spoofchk ? "on" : "off");
+ if (vf_item->is_probed) {
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_SPOOFCHK_EN_OFF, enable);
+ if (0 != ret) {
+ LOG_ERR("[SET_VF_SPOOFCHK]--Failed to set vf %d spookchk %s\n",
+ vf_idx, enable ? "on" : "off");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+#ifdef HAVE_NDO_SET_VF_TRUST
+int zxdh_en_ndo_set_vf_trust(struct net_device *netdev, int vf_idx,
+ bool setting)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_vf_item *vf_item = NULL;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+ if (IS_ERR_OR_NULL(vf_item)) {
+ LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx);
+ return PTR_ERR(vf_item);
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = vf_item->vport;
+ vf_item->trusted = setting;
+ LOG_INFO("VF %u is now %strusted\n", vf_idx, setting ? "" : "un");
+ if (vf_item->is_probed && !vf_item->trusted) {
+ LOG_DEBUG("vport[0x%x] promisc and allmulti off\n", vf_item->vport);
+ vf_item->promisc = false;
+ vf_item->mc_promisc = false;
+ dpp_vport_uc_promisc_set(&pf_info, vf_item->promisc);
+ dpp_vport_mc_promisc_set(&pf_info, vf_item->mc_promisc);
+ }
+
+ return 0;
+}
+#endif
+
/* ndo_set_tx_maxrate hook: cap the egress rate of tx queue @qid. The
 * queue id is mapped to a global CAR-A flow id (VQM tx queues map
 * one-to-one onto CAR-A flow ids); a PF programs the policer directly
 * while a VF forwards the request to the PF over the mailbox.
 * Returns 0 on success, -EINVAL on any failure. */
int zxdh_en_ndo_set_tx_maxrate(struct net_device *netdev, int qid,
                               uint32_t max_rate)
{
    int rtn = 0;
    int link_speed = 0;
    uint32_t flowid = 0;
    zxdh_msg_info msg = { 0 };
    zxdh_reps_info reps = { 0 };

    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    struct dh_core_dev *dh_dev = en_dev->parent;
    struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent);

    LOG_INFO("zxdh_en_set_tx_maxrate start\n");

    /* 1. Validate input: qid must not exceed the device's actual queue
     * pair count. */
    if (qid >= en_dev->curr_queue_pairs) {
        LOG_ERR("zxdh_en_ndo_set_tx_maxrate : invalid parameter qid=%d\n", qid);
        return -EINVAL;
    }

    // if (!en_dev->link_up)
    // {
    //     LOG_ERR("[EN SET TX MAXRATE]--PF is not link up.\n");
    //     return -EINVAL;
    // }

    /* NOTE(review): link_speed is read but never used below. */
    link_speed = en_dev->link_speed;

    /* 2. Convert the per-device queue id into a global flow id; VQM tx
     * queues and CAR-A flow ids map one-to-one. */
    flowid = zxdh_plcr_vf_qid_2_flowid(en_priv, qid);
    if (flowid >= PLCR_CAR_A_FLOWID_RES_NUM) {
        LOG_ERR("zxdh_en_ndo_set_tx_maxrate : invalid parameter flowid=%d\n",
                flowid);
        return -EINVAL;
    }

    if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
        /* PF: program the CAR-A policer directly. */
        rtn = zxdh_plcr_set_rate_limit(pf_dev, E_PLCR_CAR_A, en_dev->vport,
                                       flowid, max_rate, 0);
        if (rtn) {
            LOG_ERR("zxdh_plcr_set_rate_limit failed: %d\n", rtn);
            return -EINVAL;
        }
    } else if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
        /* VF: ask the PF to program the policer via the mailbox. */
        msg.hdr.op_code = ZXDH_TX_MAXRATE_SET;
        msg.hdr.vport = en_dev->vport;
        msg.hdr.pcie_id = en_dev->pcie_id;
        msg.hdr.vf_id = en_dev->pcie_id & (0xff);

        msg.tx_maxrate_set_msg.flowid = flowid;
        msg.tx_maxrate_set_msg.max_rate = max_rate;

        rtn = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF,
                                           &msg, &reps);
        if (rtn) {
            LOG_ERR("zxdh_send_command_to_specify failed: %d\n", rtn);
            return -EINVAL;
        }
    }
    LOG_INFO("The maxrate of tx-%u has been set to %u\n", qid, max_rate);
    return rtn;
}
+
+/**-------------------------------------------------------------------------------------------------------------------@n
+ * 功能详述:
+ * - zxdh_en_ndo_set_vf_rate函数属于接口函数, 其功能是:
+ * - 设置vf端口发送方向,最大速率和最小保证速率
+ * - 该接口会挂接到内核的钩子上,函数声明是固定的
+ *
+ * 基于plcr的端口限速背景:
+ * - 1.一级flowid与vqm的2K个(接收和发送)队列是一一映射的
+ * - 2.二级flow id与vf num的映射关系
+ * 端口限速,需要将vf下的发送队列(即一级flow id)映射到二级flowid
+ * 二级flow id的资源是4K,dpu限制vf数量是1K,即二级flow id数量 > vf数量
+ * 所以规定固定的映射关系:二级flow id前1K <--->
+ *与1K个vf(发送)一一对应 下面的链接整理了pf下vf转换成全局vf(0-1023)的原理
+ * https://i.zte.com.cn/#/space/4e62cb2b730540ff8721c1a8552b2356/wiki/page/ff8178f1304e45dc9457e92ff196cce5/view
+ * - 3.vf限速的设置
+ * 项目对vf提出了最小保证带宽的需求;
+ * 二级CAR的限速模板使用:双速率,三色算法,色敏模式
+ * - 4.创建vf的其它考虑
+ * 参考mlx的做法,vf创建之后,默认关联到vf组0(注意:>>>>>>>>先交付vf端口限速的需求,这一步可以暂时不实现<<<<<<<<);
+ * vf创建之后,用户设置限速才会调用到这里,用户不设置限速,vf(二级flow
+ *id)就不用关联限速模板
+ *
+ * 参数概述:
+ * - netdev : 网络设备结构体指针
+ * - vf_id :pf内vf的编号(从0开始)
+ * - min_tx_rate : 最小保证速率
+ * - max_tx_rate : 最大速率
+ * - 返回值类型是INT32, 含义是: 错误码,正确时为S_OK
+ *
+ * 引用(类变量,外部变量,接口函数):
+ * - 无
+ *
+ * 注意:该函数挂接到pf的钩子上,只在pf下执行
+ *--------------------------------------------------------------------------------------------------------------------*/
+int zxdh_en_ndo_set_vf_rate(struct net_device *netdev, int vf_id,
+ int min_tx_rate, int max_tx_rate)
+{
+ int rtn = 0;
+ int ep_id = 0;
+ int pf_func_num = 0;
+ uint32_t flowid = 0;
+
+ struct zxdh_vf_item *vf_item;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct dh_core_dev *dh_dev = en_dev->parent;
+ struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent);
+
+ LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__);
+ /*1. 计算vf是全局编号(0-1023),即为vf tx的flow id*/
+ ep_id = ((en_dev->vport & 0x7000) >> 12);
+ pf_func_num = ((en_dev->vport & 0x0700) >> 8);
+ flowid = 256 * ep_id + 32 * pf_func_num + vf_id; //每个pf下32个vf
+ vf_item = en_dev->ops->get_vf_item(en_dev->parent, (uint16_t)vf_id);
+
+ LOG_INFO("%s-%d:vf_item->vport = 0x%x, flowid = %d\n", __FUNCTION__,
+ __LINE__, vf_item->vport, flowid);
+
+ rtn = zxdh_plcr_set_rate_limit(pf_dev, E_PLCR_CAR_B, vf_item->vport, flowid,
+ max_tx_rate, min_tx_rate);
+ if (rtn) {
+ LOG_ERR("zxdh_plcr_set_rate_limit failed: %d\n", rtn);
+ return -EINVAL;
+ }
+ LOG_INFO(
+ "The rate of VF-%u has been set to: min_tx_rate = %u, max_tx_rate = %u\n",
+ vf_id, min_tx_rate, max_tx_rate);
+
+ return rtn;
+}
+
/*
 * Netdev callbacks shared by PF and VF ports.  The heavy #ifdef'ing lets the
 * same source build against mainline and RHEL6/7 backport kernels; the
 * HAVE_* compat macros come from the generated autoconf.h.
 */
const struct net_device_ops zxdh_netdev_ops = {
    .ndo_open = zxdh_en_open,
    .ndo_stop = zxdh_en_close,
    .ndo_start_xmit = zxdh_en_xmit,

#if defined(HAVE_NDO_GET_STATS64) || defined(HAVE_VOID_NDO_GET_STATS64)
    .ndo_get_stats64 = zxdh_en_get_netdev_stats_struct,
#else
    .ndo_get_stats = zxdh_en_get_netdev_stats_struct,
#endif
    .ndo_set_rx_mode = zxdh_en_set_rx_mode,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = zxdh_en_set_mac,

#ifdef HAVE_RHEL7_EXTENDED_MIN_MAX_MTU
    /* RHEL7 moved ndo_change_mtu into the "extended" sub-struct. */
    .extended.ndo_change_mtu = zxdh_en_change_mtu,
#else
    .ndo_change_mtu = zxdh_en_change_mtu,
#endif /* HAVE_RHEL7_EXTENDED_MIN_MAX_MTU */

    .ndo_do_ioctl = zxdh_en_ioctl,
#ifdef ZXDH_PLCR_OPEN
    /* Per-queue TX rate limiting backed by the PLCR CAR-A policer. */
    .ndo_set_tx_maxrate = zxdh_en_ndo_set_tx_maxrate,
#endif
    .ndo_tx_timeout = zxdh_en_tx_timeout,

#ifdef HAVE_VLAN_RX_REGISTER
    .ndo_vlan_rx_register = zxdh_en_vlan_rx_register,
#endif
    .ndo_vlan_rx_add_vid = zxdh_en_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = zxdh_en_vlan_rx_kill_vid,

#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = zxdh_en_netpoll,
#endif

#ifdef HAVE_SETUP_TC
#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC
    .extended.ndo_setup_tc_rh = __zxdh_en_setup_tc,
#else
#ifdef NETIF_F_HW_TC
    .ndo_setup_tc = __zxdh_en_setup_tc,
#else
    .ndo_setup_tc = zxdh_en_setup_tc,
#endif /* NETIF_F_HW_TC */
#endif /* HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC */
#endif /* HAVE_SETUP_TC */

#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
    /* RHEL7 extended-ops ABI: the struct carries its own size. */
    .ndo_size = sizeof(const struct net_device_ops),
#endif

#ifdef IFLA_VF_MAX
    /* SR-IOV VF management hooks (PF only). */
    .ndo_set_vf_mac = zxdh_en_ndo_set_vf_mac,
#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
    .extended.ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan,
#else
    .ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan,
#endif
#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
#ifdef ZXDH_PLCR_OPEN
    .ndo_set_vf_rate = zxdh_en_ndo_set_vf_rate,
#else
    .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw,
#endif
#else
    .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw,
#endif
    .ndo_get_vf_config = zxdh_en_ndo_get_vf_config,
#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
    .ndo_set_vf_spoofchk = zxdh_en_ndo_set_vf_spoofchk,
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
    .extended.ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust,
#else
    .ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust,
#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
#endif /* HAVE_NDO_SET_VF_TRUST */
#endif /* IFLA_VF_MAX */

#ifdef HAVE_UDP_ENC_RX_OFFLOAD
#ifdef HAVE_VXLAN_RX_OFFLOAD
#if IS_ENABLED(CONFIG_VXLAN)
    .ndo_add_vxlan_port = zxdh_en_add_vxlan_port,
    .ndo_del_vxlan_port = zxdh_en_del_vxlan_port,
#endif
#endif /* HAVE_VXLAN_RX_OFFLOAD */

#ifdef HAVE_GENEVE_RX_OFFLOAD
#if IS_ENABLED(CONFIG_GENEVE)
    .ndo_add_geneve_port = zxdh_en_add_geneve_port,
    .ndo_del_geneve_port = zxdh_en_del_geneve_port,
#endif
#endif /* HAVE_GENEVE_RX_OFFLOAD */
#endif /* HAVE_UDP_ENC_RX_OFFLOAD */

#ifdef HAVE_NDO_GET_PHYS_PORT_ID
    .ndo_get_phys_port_id = zxdh_en_get_phys_port_id,
#endif /* HAVE_NDO_GET_PHYS_PORT_ID */

    /* NOTE(review): when HAVE_RHEL6_NET_DEVICE_OPS_EXT is NOT defined and
     * HAVE_NDO_SET_FEATURES IS, .ndo_set_features is initialized twice in
     * this struct (here and below).  Same handler, so last-wins is
     * harmless, but it triggers -Woverride-init -- worth deduplicating. */
    .ndo_set_features = zxdh_en_set_features,

#ifdef HAVE_FDB_OPS
    .ndo_fdb_add = zxdh_en_ndo_fdb_add,
    .ndo_fdb_del = zxdh_en_ndo_fdb_del,
#ifdef HAVE_NDO_FEATURES_CHECK
    .ndo_features_check = zxdh_en_features_check,
#endif /* HAVE_NDO_FEATURES_CHECK */
#ifdef HAVE_BRIDGE_ATTRIBS
    .ndo_bridge_getlink = zxdh_en_ndo_bridge_getlink,
    .ndo_bridge_setlink = zxdh_en_ndo_bridge_setlink,
#endif /* HAVE_BRIDGE_ATTRIBS */
#endif /* HAVE_FDB_OPS */
    .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
    .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,

#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
};

/* RHEL6 keeps these operations in a separate structure */
static const struct net_device_ops_ext zxdh_netdev_ops_ext = {
    .size = sizeof(struct net_device_ops_ext),
#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */

#ifdef HAVE_NDO_SET_FEATURES
    .ndo_set_features = zxdh_en_set_features,
#endif /* HAVE_NDO_SET_FEATURES */

#ifdef HAVE_NDO_SET_VF_LINK_STATE
    .ndo_set_vf_link_state = zxdh_en_ndo_set_vf_link_state,
#endif
};
+
+static void priv_flags_init(struct zxdh_en_priv *priv)
+{
+ priv->edev.pflags = 0;
+
+ priv->edev.pflags &= BIT(ZXDH_PFLAG_ENABLE_LLDP); /* LLDP默认为开 */
+}
+
+static int32_t get_max_num_qs(struct zxdh_en_container *en_con)
+{
+ return en_con->ops->is_bond(en_con->parent) ? ZXDH_BOND_ETH_MQ_PAIRS_NUM :
+ max_pairs;
+}
+
+static int32_t fw_version_init(struct zxdh_en_device *en_dev)
+{
+ int32_t ret = 0;
+ uint8_t fw_version[ETHTOOL_FWVERS_LEN] = { 0 };
+ uint8_t fw_version_len = 0;
+
+ ret = zxdh_en_firmware_version_get(en_dev, fw_version, &fw_version_len);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_firmware_version_get err, ret %d!!!!\n", ret);
+ return ret;
+ }
+ if (fw_version_len > ETHTOOL_FWVERS_LEN) {
+ LOG_ERR("fw_version_len (%d) greater than 31!!!!\n", fw_version_len);
+ return -1;
+ }
+
+ fw_version[ETHTOOL_FWVERS_LEN - 1] = '\0';
+ en_dev->fw_version_len = ETHTOOL_FWVERS_LEN;
+ memcpy(en_dev->fw_version, (uint8_t *)fw_version, en_dev->fw_version_len);
+ LOG_INFO("fw_version:%s\n", en_dev->fw_version);
+
+ return 0;
+}
+
+int32_t zxdh_priv_init(struct zxdh_en_priv *priv, struct net_device *netdev)
+{
+ int32_t ret = 0;
+ struct zxdh_en_device *en_dev = &priv->edev;
+
+ mutex_init(&priv->lock);
+ priv_flags_init(priv);
+ en_dev->msglevel = NETIF_MSG_LINK;
+
+ /* 优先级4,暂时写死不支持 */
+ en_dev->wol_support = 0;
+ en_dev->wolopts = 0;
+
+ ret = fw_version_init(en_dev);
+ if (ret != 0) {
+ LOG_ERR("fw_version_init err ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct net_device *zxdh_create_netdev(struct zxdh_en_container *en_con)
+{
+ struct net_device *netdev = NULL;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct dh_core_dev *dh_dev = en_con->parent;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct zxdh_en_priv),
+ get_max_num_qs(en_con), get_max_num_qs(en_con));
+ if (unlikely(netdev == NULL)) {
+ LOG_ERR("alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ en_priv = netdev_priv(netdev);
+
+ en_priv->edev.parent = dh_dev;
+ en_priv->edev.ops = en_con->ops;
+ en_priv->edev.netdev = netdev;
+
+ zxdh_priv_init(en_priv, netdev);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ dev_net_set(netdev, dh_core_net(dh_dev));
+
+ return netdev;
+}
+
+void zxdh_netdev_features_init(struct net_device *netdev)
+{
+ netdev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_TSO |
+ NETIF_F_SG | NETIF_F_GSO | NETIF_F_LRO | NETIF_F_TSO6 |
+ NETIF_F_GRO | NETIF_F_HW_VLAN_STAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_RXHASH;
+
+ netdev->hw_features |=
+ NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_SG |
+ NETIF_F_GSO | NETIF_F_LRO | NETIF_F_TSO6 | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_RXHASH;
+
+ netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ return;
+}
+
+extern const struct xfrmdev_ops zxdh_xfrmdev_ops;
+static void zxdh_build_nic_netdev(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct dh_core_dev *dh_dev = en_priv->edev.parent;
+
+ SET_NETDEV_DEV(netdev, &dh_dev->parent->pdev->dev);
+
+ netdev->netdev_ops = &zxdh_netdev_ops;
+
+#ifdef ZXDH_SEC
+ /*内核 sec相关*/
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->xfrmdev_ops = &zxdh_xfrmdev_ops;
+#endif
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+ zxdh_en_set_ethtool_ops_ext(netdev);
+#else
+ zxdh_en_set_ethtool_ops(netdev);
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+
+ zxdh_netdev_features_init(netdev);
+}
+
+int32_t zxdh_en_bond_get_mac(struct net_device *netdev, uint8_t pannel_id,
+ uint8_t *mac)
+{
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ msg.hdr_to_agt.op_code = AGENT_FLASH_MAC_READ;
+ msg.flash_read_msg.index = pannel_id;
+
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_FLASH, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_send_command_to_specify failed: %d\n", ret);
+ return ret;
+ }
+
+ LOG_INFO("bond get mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ reps.flash_mac_read_msg.mac[0], reps.flash_mac_read_msg.mac[1],
+ reps.flash_mac_read_msg.mac[2], reps.flash_mac_read_msg.mac[3],
+ reps.flash_mac_read_msg.mac[4], reps.flash_mac_read_msg.mac[5]);
+
+ ether_addr_copy(mac, reps.flash_mac_read_msg.mac);
+ return ret;
+}
+
+int32_t zxdh_mac_addr_init(struct net_device *netdev)
+{
+ uint8_t mac[6] = { 0 };
+ uint8_t pannel_id = 0;
+ int32_t ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ pannel_id = en_dev->pannel_id;
+ LOG_INFO("zxdh_mac_addr_init pannel_id:%d", pannel_id);
+ ret = zxdh_en_bond_get_mac(netdev, pannel_id, mac);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_bond_mac_get failed: %d\n", ret);
+ }
+ } else {
+ en_dev->ops->get_mac(en_dev->parent, mac);
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ get_random_bytes(mac, 6);
+ mac[0] &= 0xfe;
+ LOG_INFO("set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0],
+ mac[1], mac[2], mac[3], mac[4], mac[5]);
+ }
+ LOG_INFO("set mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+ memcpy(netdev->dev_addr, mac, 6);
+
+ return ret;
+}
+
/*
 * zxdh_status_init() - run the virtio-style status handshake with the
 * device: reset if needed, then ACKNOWLEDGE -> DRIVER -> FEATURES_OK,
 * verifying the device accepted the negotiated features.
 *
 * Return: 0 on success, -ENODEV if the device refuses FEATURES_OK.
 * NOTE: the sequence/order of these steps matters; do not reorder.
 */
int32_t zxdh_status_init(struct net_device *netdev)
{
    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
    struct zxdh_en_device *en_dev = &en_priv->edev;

    /* Device still initialized from a previous life -- reset it first. */
    if (en_dev->ops->if_init(en_dev->parent)) {
        LOG_INFO("vp reset \n");
        zxdh_vp_reset(netdev);
    }

    /* Disable VQ/configuration callbacks. */
    zxdh_vp_disable_cbs(netdev);

    zxdh_add_status(netdev, ZXDH_CONFIG_S_ACKNOWLEDGE);

    zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER);

    /* fix features, not set features*/
    zxdh_pf_features_init(netdev);

    /* Status write may block; make sure we are in sleepable context. */
    might_sleep();
    zxdh_add_status(netdev, ZXDH_CONFIG_S_FEATURES_OK);
    /* Re-read: the device may have rejected the FEATURES_OK bit. */
    if (!zxdh_has_status(netdev, ZXDH_CONFIG_S_FEATURES_OK)) {
        LOG_ERR("device refuses features ok\n");
        return -ENODEV;
    }

    return 0;
}
+
/*
 * zxdh_device_ready() - final step of bring-up: re-enable VQ/config
 * callbacks, then tell the device the driver is fully operational.
 */
void zxdh_device_ready(struct net_device *netdev)
{
    zxdh_vp_enable_cbs(netdev);

    zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER_OK);
}
+
+void zxdh_link_state_notify_kernel(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_pf_link_up(en_dev->parent)) {
+ netif_carrier_off(netdev);
+ udelay(10);
+ netif_carrier_on(netdev);
+ } else {
+ netif_carrier_on(netdev);
+ udelay(10);
+ netif_carrier_off(netdev);
+ }
+}
+
+int32_t aux_get_bond_attrs(struct zxdh_en_device *en_dev,
+ struct zxdh_lag_attrs *attr)
+{
+ *attr = (struct zxdh_lag_attrs){
+ .pannel_id = en_dev->pannel_id,
+ .vport = en_dev->vport,
+ .slot_id = en_dev->slot_id,
+ .qid[0] = en_dev->phy_index[0],
+ .qid[1] = en_dev->phy_index[1],
+ .pcie_id = en_dev->pcie_id,
+ .phy_port = en_dev->phy_port,
+ };
+
+ LOG_INFO("pannel %hu, vport 0x%hx, qid[0] %u, qid[1] %u, pcie id 0x%x\n",
+ attr->pannel_id, attr->vport, attr->qid[0], attr->qid[1],
+ attr->pcie_id);
+
+ return 0;
+}
+
+void aux_set_netdev_name(struct net_device *netdev, uint16_t pannel_id)
+{
+ struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_en_priv *en_priv = NULL;
+
+ en_priv = netdev_priv(netdev);
+ en_dev = &en_priv->edev;
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ netdev->dev_port = pannel_id + 1;
+ }
+}
+
+int32_t zxdh_en_mtu_init(struct net_device *netdev)
+{
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = ZXDH_MAX_MTU;
+
+ return zxdh_en_config_mtu_to_np(netdev, ZXDH_DEFAULT_MTU);
+}
+
+static int32_t zxdh_en_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct zxdh_en_container *en_container =
+ container_of(adev, struct zxdh_en_container, adev);
+ struct net_device *netdev = NULL;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_lag_attrs lag_attrs;
+ int32_t err = 0;
+ int32_t vqs_channel_num = 0;
+
+ LOG_INFO("aux driver start to probe");
+
+ netdev = zxdh_create_netdev(en_container);
+ if (unlikely(netdev == NULL)) {
+ LOG_ERR("zxdh_create_netdev is null\n");
+ err = -ENOMEM;
+ goto err_create_netdev;
+ }
+
+ zxdh_build_nic_netdev(netdev);
+
+ dev_set_drvdata(&adev->dev, netdev_priv(netdev));
+
+ en_priv = netdev_priv(netdev);
+ en_dev = &en_priv->edev;
+ en_dev->channels_num = en_dev->ops->get_channels_num(en_dev->parent);
+ en_dev->ops->set_rdma_netdev(en_dev->parent, netdev);
+ en_dev->curr_unicast_num = 0;
+ en_dev->curr_multicast_num = 0;
+ en_dev->init_comp_flag = AUX_INIT_INCOMPLETED;
+ en_dev->delay_statistics_enable = 0;
+
+ vqs_channel_num = en_dev->ops->create_vqs_channels(en_dev->parent);
+ if (vqs_channel_num < 0) {
+ LOG_ERR("create_vqs_channels failed, vqs_channel_num: %d\n",
+ vqs_channel_num);
+ err = vqs_channel_num;
+ goto err_create_vqs_channels;
+ }
+
+ err = dh_aux_eq_table_init(en_priv);
+ if (err != 0) {
+ LOG_ERR("Failed to alloc IRQs: %d\n", err);
+ goto err_eq_table_init;
+ }
+
+ err = dh_aux_events_init(en_priv);
+ if (err != 0) {
+ LOG_ERR("dh_aux_events_init failed: %d\n", err);
+ goto err_events_init;
+ }
+
+ err = dh_aux_eq_table_create(en_priv);
+ if (err != 0) {
+ LOG_ERR("Failed to alloc EQs: %d\n", err);
+ goto err_eq_table_create;
+ }
+
+ err = zxdh_status_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_status_init failed: %d\n", err);
+ goto err_status_init;
+ }
+
+ en_dev->ep_bdf = en_dev->ops->get_epbdf(en_dev->parent);
+ en_dev->vport = en_dev->ops->get_vport(en_dev->parent);
+ en_dev->pcie_id = en_dev->ops->get_pcie_id(en_dev->parent);
+ en_dev->slot_id = en_dev->ops->get_slot_id(en_dev->parent);
+ LOG_INFO("en_dev->ep_bdf: 0x%x, en_dev->vport: 0x%x, en_dev->pcie_id: %d\n",
+ en_dev->ep_bdf, en_dev->vport, en_dev->pcie_id);
+
+ err = zxdh_vqs_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_vqs_init failed: %d\n", err);
+ goto err_vqs_init;
+ }
+
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ en_dev->hash_func = ZXDH_FUNC_TOP;
+ err = zxdh_hash_id_get(en_dev);
+ if (err != 0) {
+ LOG_ERR("zxdh_hash_id_get failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+
+ err = zxdh_panel_id_get(en_dev);
+ if (err != 0) {
+ LOG_ERR("zxdh_panel_id_get failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ LOG_INFO(" en_dev->pcie_id: %x is a pf dev\n", en_dev->pcie_id);
+
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ err = zxdh_phyport_get(en_dev);
+ if (err != 0) {
+ LOG_ERR("zxdh_phyport_get failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+ } else {
+ err = zxdh_aux_alloc_pannel(en_dev);
+ if (err != 0) {
+ LOG_ERR("zxdh_aux_alloc_pannel failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+ }
+
+ err = zxdh_mac_addr_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_mac_addr_init failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+
+ err = zxdh_pf_port_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_pf_port_init failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+ } else {
+ LOG_INFO(" en_dev->pcie_id: %x is a vf dev\n", en_dev->pcie_id);
+ err = zxdh_vf_dpp_port_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_vf_dpp_port_init failed: %d\n", err);
+ goto err_do_vqs_free;
+ }
+ }
+
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ err = zxdh_num_channels_changed(en_dev, en_dev->curr_queue_pairs);
+ if (err != 0) {
+ LOG_ERR("zxdh_num_channels_changed failed: %d\n", err);
+ goto err_do_vport_free;
+ }
+ }
+
+ err = zxdh_common_tbl_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_common_tlb_init failed: %d\n", err);
+ goto err_do_rxfh_free;
+ }
+
+ zxdh_device_ready(netdev);
+
+ err = zxdh_en_mtu_init(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_mtu_init failed: %d\n", err);
+ goto err_do_rxfh_free;
+ }
+
+ en_dev->hw_stats.q_stats = kmalloc_array(
+ max_pairs, sizeof(struct zxdh_en_queue_stats), GFP_KERNEL);
+ if (unlikely(en_dev->hw_stats.q_stats == NULL)) {
+ LOG_ERR("hw_stats.q_stats kmalloc failed\n");
+ goto err_do_rxfh_free;
+ }
+ memset(en_dev->hw_stats.q_stats, 0,
+ max_pairs * sizeof(struct zxdh_en_queue_stats));
+ memset(&en_dev->pre_stats, 0, sizeof(struct zxdh_en_vport_stats));
+
+ err = zxdh_en_vport_pre_stats_get(en_dev);
+ if (err != 0) {
+ LOG_ERR("get vport pre stats failed, %d\n", err);
+ goto err_do_q_stats_free;
+ }
+
+ aux_set_netdev_name(netdev, en_dev->pannel_id);
+ err = register_netdev(netdev);
+ if (err != 0) {
+ LOG_ERR("register_netdev failed, %d\n", err);
+ goto err_do_q_stats_free;
+ }
+
+ zxdh_en_bar_del_mac(netdev);
+ zxdh_en_bar_cfg_mac(netdev, netdev->dev_addr);
+ zxdh_link_state_notify_kernel(netdev);
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ aux_get_bond_attrs(en_dev, &lag_attrs);
+ zxdh_ldev_add_netdev(en_container->parent, en_dev->pannel_id, netdev,
+ &lag_attrs);
+ }
+
+#ifdef ZXDH_PLCR_OPEN
+ err = zxdh_plcr_init(en_priv);
+ if (err != 0) {
+ LOG_ERR("zxdh_plcr_init failed, %d\n", err);
+ }
+#endif
+
+ en_dev->init_comp_flag = AUX_INIT_COMPLETED;
+
+ err = dh_aux_ipv6_notifier_init(en_priv);
+ if (err != 0) {
+ LOG_ERR("dh_aux_ipv6_notifier_init failed: %d\n", err);
+ goto err_ipv6_notifier_init;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ en_dev->autoneg_enable = AUTONEG_ENABLE;
+ err = zxdh_en_phyport_init(en_dev);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_phyport_init failed: %d\n", err);
+ goto err_phyport_init;
+ }
+ }
+
+#ifdef ZXDH_MSGQ
+ if (en_dev->need_msgq) {
+ err = zxdh_msgq_init(en_dev);
+ if (err) {
+ LOG_ERR("zxdh_msgq_init failed: %d\n", err);
+ goto err_phyport_init;
+ }
+ }
+#endif
+
+ en_dev->ops->set_init_comp_flag(en_dev->parent);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ /* clear mcode gate,successfully build the scheduling tree, and then open it
+ * again */
+ zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev);
+#ifdef ZXDH_DCBNL_OPEN
+ err = zxdh_dcbnl_initialize(netdev);
+ if (err != 0) {
+ LOG_ERR("zxdh_dcbnl_initialize failed: %d\n", err);
+ }
+#endif
+ }
+
+ en_dev->ops->set_bond_num(en_dev->parent, true);
+ LOG_INFO("%s aux probe completed\n", netdev->name);
+
+ return 0;
+
+err_phyport_init:
+ dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier));
+err_ipv6_notifier_init:
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ aux_get_bond_attrs(en_dev, &lag_attrs);
+ zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs);
+ }
+ unregister_netdev(netdev);
+err_do_q_stats_free:
+ kfree(en_dev->hw_stats.q_stats);
+err_do_rxfh_free:
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ zxdh_rxfh_del(en_dev);
+ }
+err_do_vport_free:
+ zxdh_vport_uninit(netdev);
+err_do_vqs_free:
+ zxdh_vqs_uninit(netdev);
+err_vqs_init:
+ zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED);
+err_status_init:
+ dh_aux_eq_table_destroy(en_priv);
+err_eq_table_create:
+ dh_aux_events_uninit(en_priv);
+err_events_init:
+ dh_aux_eq_table_cleanup(en_priv);
+err_eq_table_init:
+ en_dev->ops->destroy_vqs_channels(en_dev->parent);
+ en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id);
+err_create_vqs_channels:
+ free_netdev(netdev);
+err_create_netdev:
+ return err;
+}
+
/*
 * zxdh_en_dev_remove() - auxiliary-bus remove: tear down everything probe
 * set up, in reverse order of probe.  Do not reorder the steps below.
 *
 * Return: always 0.
 */
static int32_t zxdh_en_dev_remove(struct auxiliary_device *adev)
{
    /* drvdata was set to netdev_priv(netdev) in probe. */
    struct zxdh_en_priv *en_priv =
        (struct zxdh_en_priv *)dev_get_drvdata(&adev->dev);
    struct zxdh_en_device *en_dev = &en_priv->edev;
    struct net_device *netdev = en_dev->netdev;
    struct zxdh_lag_attrs lag_attrs;
    int32_t ret = 0;

    LOG_INFO("zxdh_en_dev_remove start\n");

    en_dev->ops->set_bond_num(en_dev->parent, false);
#ifdef ZXDH_MSGQ
    if (en_dev->need_msgq)
        zxdh_msgq_exit(en_dev);
#endif

#ifdef ZXDH_PLCR_OPEN
    zxdh_plcr_uninit(en_priv);
#endif

    /* Detach bond members from the LAG layer before unregistering. */
    if (en_dev->ops->is_bond(en_dev->parent)) {
        aux_get_bond_attrs(en_dev, &lag_attrs);
        zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs);
    }

#ifdef ZXDH_DCBNL_OPEN
    zxdh_dcbnl_ets_uninit(netdev);
#endif

    unregister_netdev(netdev);
    kfree(en_dev->hw_stats.q_stats);

    if (!en_dev->ops->is_bond(en_dev->parent)) {
        zxdh_rxfh_del(en_dev);
    }

    zxdh_vport_uninit(netdev);

    zxdh_vqs_uninit(netdev);

    /* Tell the device the driver is going away. */
    zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED);

    dh_aux_eq_table_destroy(en_priv);
    dh_aux_events_uninit(en_priv);
    dh_aux_eq_table_cleanup(en_priv);
    en_dev->ops->destroy_vqs_channels(en_dev->parent);
    en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id);
    ret = dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier));
    LOG_INFO("dh_inet6_addr_change_notifier_unregister, ret:%d\n", ret);
    free_netdev(netdev);
    LOG_INFO("zxdh_en_dev_remove stop\n");

    return 0;
}
+
/*
 * zxdh_en_dev_shutdown() - auxiliary-bus shutdown hook; performs the same
 * full teardown as remove.
 * (Fix: dropped the stray ';' that followed the closing brace.)
 */
static void zxdh_en_dev_shutdown(struct auxiliary_device *adev)
{
    LOG_INFO("zxdh_en_dev_shutdown start\n");
    zxdh_en_dev_remove(adev);
    LOG_INFO("zxdh_en_dev_shutdown stop\n");
}
+
/* Auxiliary device ids this driver binds to ("<pf module>.<en device>"). */
static const struct auxiliary_device_id zxdh_en_dev_id_table[] = {
    {
        .name = ZXDH_PF_NAME "." ZXDH_EN_DEV_ID_NAME,
    },
    {}, /* sentinel */
};

MODULE_DEVICE_TABLE(auxiliary, zxdh_en_dev_id_table);
+
/* Auxiliary driver for the Ethernet function exposed by the PF core. */
static struct auxiliary_driver zxdh_en_driver = {
    .name = ZXDH_EN_DEV_ID_NAME,
    .probe = zxdh_en_dev_probe,
    .remove = zxdh_en_dev_remove,
    .shutdown = zxdh_en_dev_shutdown,
    .id_table = zxdh_en_dev_id_table,
};
+
+int32_t zxdh_en_driver_register(void)
+{
+ int32_t err = 0;
+
+ if ((max_pairs == 0) || (max_pairs >= ZXDH_MAX_PAIRS_NUM)) {
+ LOG_INFO(
+ "max_pairs %u parameter is a invalid value, use the default value %u\n",
+ max_pairs, ZXDH_MQ_PAIRS_NUM);
+ max_pairs = ZXDH_MQ_PAIRS_NUM;
+ }
+
+ err = auxiliary_driver_register(&zxdh_en_driver);
+ if (err != 0) {
+ LOG_ERR("auxiliary_driver_register failed: %d\n", err);
+ goto err_aux_register;
+ }
+
+ err = dh_aux_msg_recv_func_register();
+ if (err != 0) {
+ LOG_ERR("dh_aux_msg_recv_func_register failed: %d\n", err);
+ goto err_msg_recv_register;
+ }
+
+ err = zxdh_tools_netlink_register();
+ if (err != 0) {
+ LOG_ERR("zxdh_tools_msg_family register error failed: %d\n", err);
+ goto err_netlink_register;
+ }
+
+ return 0;
+
+err_netlink_register:
+ dh_aux_msg_recv_func_unregister();
+err_msg_recv_register:
+ auxiliary_driver_unregister(&zxdh_en_driver);
+err_aux_register:
+ return err;
+}
+
/*
 * zxdh_en_driver_unregister() - module exit: undo
 * zxdh_en_driver_register() in exact reverse order.
 */
void zxdh_en_driver_unregister(void)
{
    zxdh_tools_netlink_unregister();
    dh_aux_msg_recv_func_unregister();
    auxiliary_driver_unregister(&zxdh_en_driver);
}
+
+module_init(zxdh_en_driver_register);
+module_exit(zxdh_en_driver_unregister);
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux.h b/src/net/drivers/net/ethernet/dinghai/en_aux.h
old mode 100755
new mode 100644
index 6c7755a718af9dbe735d05c252e276823b2c0e8c..969c95923e630aa5140ca9593819c3737afb842c
--- a/src/net/drivers/net/ethernet/dinghai/en_aux.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux.h
@@ -1,373 +1,374 @@
-#ifndef __ZXDH_EN_AUX_H__
-#define __ZXDH_EN_AUX_H__
-
-#include "msg_common.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include "./en_aux/queue.h"
-#include "./en_aux/en_cmd.h"
-#include "./en_pf.h"
-#include "./en_aux/dcbnl/en_dcbnl.h"
-
-#define MAX_VLAN_ID 4095
-
-#define PF_AC_MASK 0x800
-#define FILTER_MAC 0xAA
-#define UNFILTER_MAC 0xFF
-
-#define AUX_INIT_INCOMPLETED 0
-#define AUX_INIT_COMPLETED 1
-
-#define IS_DELAY_STATISTICS_PKT 0
-#define IS_NOT_DELAY_STATICTICS_PKT 1
-
-#define ADD_IP6MAC 1
-#define DEL_IP6MAC 2
-
-#define ZXDH_SET_FEATURE(features, feature, enable) \
- do { \
- if (enable) \
- { \
- *features |= feature; \
- } \
- else \
- { \
- *features &= ~feature; \
- } \
- } while (0)
-
-#define ZXDH_AUX_INIT_COMP_CHECK(en_dev) \
- do { \
- if (en_dev->init_comp_flag != AUX_INIT_COMPLETED) \
- { \
- return; \
- } \
- } while (0)
-
-typedef int (*zxdh_feature_handler)(struct net_device *netdev, bool enable);
-
-extern uint32_t max_pairs;
-
-struct zxdh_rdma_if;
-struct zxdh_en_if;
-
-struct zxdh_en_container {
- struct zxdh_auxiliary_device adev;
- struct zxdh_rdma_dev_info *rdma_infos;
- struct zxdh_rdma_if *rdma_ops;
- struct zxdh_en_if *ops;
- struct dh_core_dev *parent;
- int32_t aux_id;
-};
-
-struct zxdh_en_queue_stats
-{
- uint64_t q_rx_pkts;
- uint64_t q_tx_pkts;
- uint64_t q_rx_bytes;
- uint64_t q_tx_bytes;
- uint64_t q_tx_stopped;
- uint64_t q_tx_wake;
- uint64_t q_tx_dropped;
-};
-
-struct zxdh_en_netdev_stats
-{
- uint64_t rx_packets;
- uint64_t tx_packets;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
- uint64_t tx_queue_wake;
- uint64_t tx_queue_stopped;
- uint64_t tx_queue_dropped;
-};
-
-struct zxdh_en_vport_vqm_stats
-{
- uint64_t rx_vport_packets;
- uint64_t tx_vport_packets;
- uint64_t rx_vport_bytes;
- uint64_t tx_vport_bytes;
- uint64_t rx_vport_dropped;
-};
-
-struct zxdh_en_vport_np_stats
-{
- uint64_t rx_vport_broadcast_packets;
- uint64_t tx_vport_broadcast_packets;
- uint64_t rx_vport_mtu_drop_packets;
- uint64_t tx_vport_mtu_drop_packets;
- uint64_t rx_vport_mtu_drop_bytes;
- uint64_t tx_vport_mtu_drop_bytes;
- uint64_t rx_vport_plcr_drop_packets;
- uint64_t tx_vport_plcr_drop_packets;
- uint64_t rx_vport_plcr_drop_bytes;
- uint64_t tx_vport_plcr_drop_bytes;
-};
-
-
-struct zxdh_en_vport_stats
-{
- struct zxdh_en_vport_vqm_stats vqm_stats;
- struct zxdh_en_vport_np_stats np_stats;
-};
-
-struct zxdh_en_phy_stats
-{
- uint64_t rx_packets_phy;
- uint64_t tx_packets_phy;
- uint64_t rx_bytes_phy;
- uint64_t tx_bytes_phy;
- uint64_t rx_errors;
- uint64_t tx_errors;
- uint64_t rx_discards;
- uint64_t tx_drop;
- uint64_t rx_multicast_phy;
- uint64_t tx_multicast_phy;
- uint64_t rx_broadcast_phy;
- uint64_t tx_broadcast_phy;
- uint64_t rx_size_64_phy;
- uint64_t rx_size_65_127;
- uint64_t rx_size_128_255;
- uint64_t rx_size_256_511;
- uint64_t rx_size_512_1023;
- uint64_t rx_size_1024_1518;
- uint64_t rx_size_1519_mru;
- uint64_t rx_pause;
- uint64_t tx_pause;
-}__attribute__((packed));
-
-struct zxdh_en_hw_stats
-{
- struct zxdh_en_netdev_stats netdev_stats;
- struct zxdh_en_vport_stats vport_stats;
- struct zxdh_en_phy_stats phy_stats;
- struct zxdh_en_queue_stats *q_stats;
-};
-
-struct zxdh_vlan_dev
-{
- uint8_t qos;
- uint8_t rsv;
- uint16_t protcol;
- uint16_t vlan_id;
-};
-
-/* drs sec */
-typedef struct
-{
- uint64_t SecVAddr; /*每个设备的sec私有内存的虚拟基地址*/
- uint64_t SecPAddr; /*每个设备的sec私有内存的物理基地址*/
- uint32_t SecMemSize; /*每个设备的sec私有内存的大小*/
-}zxdh_sec_pri;
-
-
-struct zxdh_en_device {
- struct dh_core_dev *parent;
- struct net_device *netdev;
- void *msgq_dev;
- struct zxdh_en_if *ops;
- struct zxdh_en_hw_stats hw_stats;
- struct zxdh_en_vport_stats pre_stats;
- struct zxdh_vlan_dev vlan_dev;
-
- uint32_t device_id;
- uint32_t vendor_id;
-
- uint64_t driver_feature;
- uint64_t device_feature;
- uint64_t guest_feature;
-
- struct list_head vqs_list;
- spinlock_t vqs_list_lock;
- uint32_t indir_rqt[ZXDH_INDIR_RQT_SIZE];
-
- int32_t channels_num;
-
- /* a list of queues so we can dispatch IRQs */
- spinlock_t lock;
- struct list_head virtqueues;
- /* array of all queues for house-keeping */
- struct zxdh_pci_vq_info **vqs;
-
- struct send_queue *sq;
- struct receive_queue *rq;
- uint32_t status;
-
- /* Max # of queue pairs supported by the device */
- uint16_t curr_queue_pairs;
- uint16_t max_queue_pairs;
-
- bool need_msgq;
- /* Host can handle any s/g split between our header and packet data */
- bool any_header_sg;
- /* Packet custom queue header size */
- uint8_t hdr_len;
- /* Work struct for refilling if we run low on memory. */
- struct delayed_work refill;
-
- /* CPU hotplug instances for online & dead */
- struct hlist_node node;
- struct hlist_node node_dead;
-
- bool np_direction;
- bool drs_offload;
- bool dtp_offload;
-
- uint32_t phy_index[ZXDH_MAX_QUEUES_NUM];
-
- uint8_t link_check_bit;
- uint8_t pannel_id;
- uint8_t rsv[2];
-
- uint16_t ep_bdf;
- uint16_t pcie_id;
- /* vfunc_active */
- uint16_t slot_id;
- uint16_t vport;
- uint8_t phy_port;
- uint8_t panel_id;
- uint8_t hash_search_idx;
- uint8_t hash_func;
-
- uint32_t link_speed;
- bool link_up;
- uint8_t duplex;
-
- uint32_t speed;
- uint32_t autoneg_enable;
- uint32_t supported_speed_modes;
- uint32_t advertising_speed_modes;
-
- bool promisc_enabled;
- bool allmulti_enabled;
- uint32_t pflags;
- uint8_t clock_no;
- uint32_t msglevel;
- uint32_t wol_support;
- uint32_t wolopts;
- uint8_t fw_version[ETHTOOL_FWVERS_LEN];
- uint8_t fw_version_len;
- uint32_t vf_1588_call_np_num;
- uint32_t ptp_tc_enable_opt;
- uint32_t delay_statistics_enable;
-
- struct work_struct vf_link_info_update_work;
- struct work_struct link_info_irq_update_vf_work;
- struct work_struct link_info_irq_process_work;
- struct work_struct link_info_irq_update_np_work;
- struct work_struct rx_mode_set_work;
-
- uint8_t curr_unicast_num;
- uint8_t curr_multicast_num;
- struct work_struct pf_notify_vf_link_state_work;
- struct work_struct pf2vf_msg_proc_work;
- struct work_struct pf_notify_vf_reset_work;
- struct work_struct service_task;
- struct work_struct service_riscv_task;
- struct timer_list service_timer;
- struct timer_list service_riscv_timer;
- struct work_struct riscv2aux_msg_proc_work;
- /* QoS DCB */
- struct zxdh_dcbnl_para dcb_para;
- /* SEC */
- zxdh_sec_pri drs_sec_pri;
-
- /* initialization completion flag */
- uint8_t init_comp_flag;
-
- struct notifier_block ipv6_notifier;
-};
-
-struct zxdh_en_priv {
- struct zxdh_en_device edev;
- struct mutex lock;
- struct dh_eq_table eq_table;
- struct dh_events *events;
-};
-
-#define DEV_UNICAST_MAX_NUM 32 /* 每个PF/VF存储的单播mac转发表上限 */
-#define DEV_MULTICAST_MAX_NUM 32 /* 每个PF/VF存储的组播mac转发表上限 */
-#define UNICAST_MAX_NUM (DEV_UNICAST_MAX_NUM * 257)
-#define MULTICAST_MAX_NUM (DEV_MULTICAST_MAX_NUM * 257)
-
-int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv);
-void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv);
-int32_t zxdh_ip6mac_add(struct zxdh_en_device *en_dev, const uint32_t *addr6, const uint8_t *ip6mac);
-int32_t zxdh_ip6mac_del(struct zxdh_en_device *en_dev, const uint32_t *addr6, const uint8_t *ip6mac);
-struct zxdh_rdma_if {
- void *(*get_rdma_netdev)(struct dh_core_dev *dh_dev);
-};
-
-struct zxdh_en_if {
- uint16_t (*get_channels_num)(struct dh_core_dev *dh_dev);
- int32_t (*create_vqs_channels)(struct dh_core_dev *dh_dev);
- void (*destroy_vqs_channels)(struct dh_core_dev *dh_dev);
- void (*switch_vqs_channel)(struct dh_core_dev *dh_dev, int32_t channel, int32_t op);
- int32_t (*vqs_channel_bind_handler)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num, struct dh_vq_handler *handler);
- void (*vqs_channel_unbind_handler)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num);
- int32_t (*vq_bind_channel)(struct dh_core_dev *dh_dev, int32_t channel_num, int32_t queue_index);
- void (*vq_unbind_channel)(struct dh_core_dev *dh_dev, int32_t queue_index);
- int32_t (*vqs_bind_eqs)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num, struct list_head *vq_node);
- void (*vqs_unbind_eqs)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num);
- void __iomem * (*vp_modern_map_vq_notify)(struct dh_core_dev *dh_dev, uint32_t index, resource_size_t *pa);
- void (*vp_modern_unmap_vq_notify)(struct dh_core_dev *dh_dev, void *priv);
- int32_t (*get_phy_vq)(struct dh_core_dev *dh_dev, uint16_t index);
- void (*activate_phy_vq)(struct dh_core_dev *dh_dev, uint32_t phy_index, int32_t queue_size, uint64_t desc_addr, uint64_t driver_addr, uint64_t device_addr);
- void (*de_activate_phy_vq)(struct dh_core_dev *dh_dev, uint32_t phy_index);
- int32_t (*release_phy_vq)(struct dh_core_dev *dh_dev, uint32_t *phy_index, uint16_t total_qnum);
- void (*set_status)(struct dh_core_dev *dh_dev, uint8_t status);
- uint8_t (*get_status)(struct dh_core_dev *dh_dev);
- void (*set_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id);
- void (*get_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id);
- void (*set_mac)(struct dh_core_dev *dh_dev, uint8_t *mac);
- void (*get_mac)(struct dh_core_dev *dh_dev, uint8_t *mac);
- uint64_t (*get_features)(struct dh_core_dev *dh_dev);
- void (*set_features)(struct dh_core_dev *dh_dev, uint64_t features);
- uint16_t (*get_queue_num)(struct dh_core_dev *dh_dev);
- uint16_t (*get_queue_size)(struct dh_core_dev *dh_dev, uint32_t index);
- void (*set_queue_enable)(struct dh_core_dev *dh_dev, uint16_t index, bool enable);
- uint32_t (*get_epbdf)(struct dh_core_dev *dh_dev);
- uint16_t (*get_vport)(struct dh_core_dev *dh_dev);
- uint16_t (*get_pcie_id)(struct dh_core_dev *dh_dev);
- uint16_t (*get_slot_id)(struct dh_core_dev *dh_dev);
- bool (*is_bond)(struct dh_core_dev *dh_dev);
- bool (*is_upf)(struct dh_core_dev *dh_dev);
- enum dh_coredev_type (*get_coredev_type)(struct dh_core_dev *dh_dev);
- struct pci_dev * (*get_pdev)(struct dh_core_dev *dh_dev);
- uint64_t (*get_bar_virt_addr)(struct dh_core_dev *dh_dev, uint8_t bar_num);
- int32_t (*msg_send_cmd)(struct dh_core_dev *dh_dev, uint16_t module_id, void *msg, void *ack, bool is_sync);
- int32_t (*async_eq_enable)(struct dh_core_dev *dh_dev, struct dh_eq_async *eq, const char *name, bool attach);
- struct zxdh_vf_item *(*get_vf_item)(struct dh_core_dev *dh_dev, uint16_t vf_idx);
- void (*set_pf_link_up) (struct dh_core_dev *dh_dev, bool link_up);
- bool (*get_pf_link_up) (struct dh_core_dev *dh_dev);
- void (*update_pf_link_info)(struct dh_core_dev *dh_dev, struct link_info_struct *link_info_val);
- int32_t (*get_pf_drv_msg)(struct dh_core_dev *dh_dev, uint8_t *drv_version, uint8_t *drv_version_len);
- void (*set_vepa) (struct dh_core_dev *dh_dev, bool setting);
- bool (*get_vepa) (struct dh_core_dev *dh_dev);
- void (*set_bond_num)(struct dh_core_dev *dh_dev, bool add);
- bool (*if_init)(struct dh_core_dev *dh_dev);
- int32_t (*request_port)(struct dh_core_dev *dh_dev, void *data);
- int32_t (*release_port)(struct dh_core_dev *dh_dev, uint32_t port_id);
- void (*get_link_info_from_vqm)(struct dh_core_dev *dh_dev, uint8_t *link_up);
- void (*set_vf_link_info)(struct dh_core_dev *dh_dev, uint16_t vf_idx, uint8_t link_up);
- void (*set_pf_phy_port)(struct dh_core_dev *dh_dev, uint8_t phy_port);
- void (*set_rdma_netdev)(struct dh_core_dev *dh_dev, void *data);
- uint8_t (*get_pf_phy_port)(struct dh_core_dev *dh_dev);
- void (*set_init_comp_flag)(struct dh_core_dev *dh_dev);
- struct zxdh_ipv6_mac_tbl * (*get_ip6mac_tbl)(struct dh_core_dev *dh_dev);
-};
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+#ifndef __ZXDH_EN_AUX_H__
+#define __ZXDH_EN_AUX_H__
+
+#include "msg_common.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/netdevice.h> /* NOTE(review): the six header names were lost (angle-bracket contents stripped); reconstructed from types used below -- verify against the original patch */
+#include <linux/ethtool.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include "./en_aux/queue.h"
+#include "./en_aux/en_cmd.h"
+#include "./en_pf.h"
+#include "./en_aux/dcbnl/en_dcbnl.h"
+
+#define MAX_VLAN_ID 4095
+
+#define PF_AC_MASK 0x800
+#define FILTER_MAC 0xAA
+#define UNFILTER_MAC 0xFF
+
+#define AUX_INIT_INCOMPLETED 0
+#define AUX_INIT_COMPLETED 1
+
+#define IS_DELAY_STATISTICS_PKT 0
+#define IS_NOT_DELAY_STATICTICS_PKT 1 /* NOTE(review): "STATICTICS" is likely a typo for "STATISTICS"; rename together with all call sites */
+
+#define ZXDH_SET_FEATURE(features, feature, enable) \
+ do { \
+ if (enable) { \
+ *features |= feature; \
+ } else { \
+ *features &= ~feature; \
+ } \
+ } while (0)
+
+#define ZXDH_AUX_INIT_COMP_CHECK(en_dev) \
+ do { \
+ if (en_dev->init_comp_flag != AUX_INIT_COMPLETED) { \
+ return; \
+ } \
+ } while (0)
+
+typedef int (*zxdh_feature_handler)(struct net_device *netdev, bool enable);
+
+extern uint32_t max_pairs;
+
+struct zxdh_rdma_if;
+struct zxdh_en_if;
+
+struct zxdh_en_container {
+ struct auxiliary_device adev;
+ struct zxdh_rdma_dev_info *rdma_infos;
+ struct zxdh_rdma_if *rdma_ops;
+ struct zxdh_en_if *ops;
+ struct dh_core_dev *parent;
+ int32_t aux_id;
+};
+
+struct zxdh_en_queue_stats {
+ uint64_t q_rx_pkts;
+ uint64_t q_tx_pkts;
+ uint64_t q_rx_bytes;
+ uint64_t q_tx_bytes;
+ uint64_t q_tx_stopped;
+ uint64_t q_tx_wake;
+ uint64_t q_tx_dropped;
+};
+
+struct zxdh_en_netdev_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+ uint64_t tx_queue_wake;
+ uint64_t tx_queue_stopped;
+ uint64_t tx_queue_dropped;
+};
+
+struct zxdh_en_vport_vqm_stats {
+ uint64_t rx_vport_packets;
+ uint64_t tx_vport_packets;
+ uint64_t rx_vport_bytes;
+ uint64_t tx_vport_bytes;
+ uint64_t rx_vport_dropped;
+};
+
+struct zxdh_en_vport_np_stats {
+ uint64_t rx_vport_broadcast_packets;
+ uint64_t tx_vport_broadcast_packets;
+ uint64_t rx_vport_mtu_drop_packets;
+ uint64_t tx_vport_mtu_drop_packets;
+ uint64_t rx_vport_mtu_drop_bytes;
+ uint64_t tx_vport_mtu_drop_bytes;
+ uint64_t rx_vport_plcr_drop_packets;
+ uint64_t tx_vport_plcr_drop_packets;
+ uint64_t rx_vport_plcr_drop_bytes;
+ uint64_t tx_vport_plcr_drop_bytes;
+};
+
+struct zxdh_en_vport_stats {
+ struct zxdh_en_vport_vqm_stats vqm_stats;
+ struct zxdh_en_vport_np_stats np_stats;
+};
+
+struct zxdh_en_phy_stats {
+ uint64_t rx_packets_phy;
+ uint64_t tx_packets_phy;
+ uint64_t rx_bytes_phy;
+ uint64_t tx_bytes_phy;
+ uint64_t rx_errors;
+ uint64_t tx_errors;
+ uint64_t rx_discards;
+ uint64_t tx_drop;
+ uint64_t rx_multicast_phy;
+ uint64_t tx_multicast_phy;
+ uint64_t rx_broadcast_phy;
+ uint64_t tx_broadcast_phy;
+ uint64_t rx_size_64_phy;
+ uint64_t rx_size_65_127;
+ uint64_t rx_size_128_255;
+ uint64_t rx_size_256_511;
+ uint64_t rx_size_512_1023;
+ uint64_t rx_size_1024_1518;
+ uint64_t rx_size_1519_mru;
+} __attribute__((packed));
+
+struct zxdh_en_hw_stats {
+ struct zxdh_en_netdev_stats netdev_stats;
+ struct zxdh_en_vport_stats vport_stats;
+ struct zxdh_en_phy_stats phy_stats;
+ struct zxdh_en_queue_stats *q_stats;
+};
+
+struct zxdh_vlan_dev {
+ uint8_t qos;
+ uint8_t rsv;
+ uint16_t protcol;
+ uint16_t vlan_id;
+};
+
+/* drs sec */
+typedef struct {
+    uint64_t SecVAddr; /* virtual base address of each device's private SEC memory */
+    uint64_t SecPAddr; /* physical base address of each device's private SEC memory */
+    uint32_t SecMemSize; /* size of each device's private SEC memory */
+} zxdh_sec_pri;
+
+struct zxdh_en_device {
+ struct dh_core_dev *parent;
+ struct net_device *netdev;
+ void *msgq_dev;
+ struct zxdh_en_if *ops;
+ struct zxdh_en_hw_stats hw_stats;
+ struct zxdh_en_vport_stats pre_stats;
+ struct zxdh_vlan_dev vlan_dev;
+
+ uint32_t device_id;
+ uint32_t vendor_id;
+
+ uint64_t driver_feature;
+ uint64_t device_feature;
+ uint64_t guest_feature;
+
+ struct list_head vqs_list;
+ spinlock_t vqs_list_lock;
+ uint32_t indir_rqt[ZXDH_INDIR_RQT_SIZE];
+
+ int32_t channels_num;
+
+ /* a list of queues so we can dispatch IRQs */
+ spinlock_t lock;
+ struct list_head virtqueues;
+ /* array of all queues for house-keeping */
+ struct zxdh_pci_vq_info **vqs;
+
+ struct send_queue *sq;
+ struct receive_queue *rq;
+ uint32_t status;
+
+ /* Max # of queue pairs supported by the device */
+ uint16_t curr_queue_pairs;
+ uint16_t max_queue_pairs;
+
+ bool need_msgq;
+ /* Host can handle any s/g split between our header and packet data */
+ bool any_header_sg;
+ /* Packet custom queue header size */
+ uint8_t hdr_len;
+ /* Work struct for refilling if we run low on memory. */
+ struct delayed_work refill;
+
+ /* CPU hotplug instances for online & dead */
+ struct hlist_node node;
+ struct hlist_node node_dead;
+
+ bool np_direction;
+ bool drs_offload;
+ bool dtp_offload;
+
+ uint32_t phy_index[ZXDH_MAX_QUEUES_NUM];
+
+ uint8_t link_check_bit;
+ uint8_t pannel_id;
+ uint8_t rsv[2];
+
+ uint16_t ep_bdf;
+ uint16_t pcie_id;
+ /* vfunc_active */
+ uint16_t slot_id;
+ uint16_t vport;
+ uint8_t phy_port;
+ uint8_t panel_id;
+ uint8_t hash_search_idx;
+ uint8_t hash_func;
+
+ uint32_t link_speed;
+ bool link_up;
+ uint8_t duplex;
+
+ uint32_t speed;
+ uint32_t autoneg_enable;
+ uint32_t supported_speed_modes;
+ uint32_t advertising_speed_modes;
+
+ bool promisc_enabled;
+ bool allmulti_enabled;
+ uint32_t pflags;
+ uint8_t clock_no;
+ uint32_t msglevel;
+ uint32_t wol_support;
+ uint32_t wolopts;
+ uint8_t fw_version[ETHTOOL_FWVERS_LEN];
+ uint8_t fw_version_len;
+ uint32_t vf_1588_call_np_num;
+ uint32_t ptp_tc_enable_opt;
+ uint32_t delay_statistics_enable;
+
+ struct work_struct vf_link_info_update_work;
+ struct work_struct link_info_irq_update_vf_work;
+ struct work_struct link_info_irq_process_work;
+ struct work_struct link_info_irq_update_np_work;
+ struct work_struct rx_mode_set_work;
+
+ uint8_t curr_unicast_num;
+ uint8_t curr_multicast_num;
+ struct work_struct pf_notify_vf_link_state_work;
+ struct work_struct pf2vf_msg_proc_work;
+ struct work_struct pf_notify_vf_reset_work;
+ struct work_struct service_task;
+ struct work_struct service_riscv_task;
+ struct timer_list service_timer;
+ struct timer_list service_riscv_timer;
+ struct work_struct riscv2aux_msg_proc_work;
+ /* QoS DCB */
+ struct zxdh_dcbnl_para dcb_para;
+ /* SEC */
+ zxdh_sec_pri drs_sec_pri;
+
+ /* initialization completion flag */
+ uint8_t init_comp_flag;
+
+ struct notifier_block ipv6_notifier;
+};
+
+struct zxdh_en_priv {
+ struct zxdh_en_device edev;
+ struct mutex lock;
+ struct dh_eq_table eq_table;
+ struct dh_events *events;
+};
+
+#define DEV_UNICAST_MAX_NUM 32 /* max unicast MAC forwarding-table entries stored per PF/VF */
+#define DEV_MULTICAST_MAX_NUM 32 /* max multicast MAC forwarding-table entries stored per PF/VF */
+#define UNICAST_MAX_NUM (DEV_UNICAST_MAX_NUM * 257)
+#define MULTICAST_MAX_NUM (DEV_MULTICAST_MAX_NUM * 257)
+
+int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv);
+void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv);
+int32_t ipv6_multicast_mac_add(struct zxdh_en_device *en_dev,
+ struct net_device *dev, const uint8_t *addr);
+int32_t ipv6_multicast_mac_del(struct zxdh_en_device *en_dev,
+ struct net_device *dev, const uint8_t *addr);
+
+struct zxdh_rdma_if {
+ void *(*get_rdma_netdev)(struct dh_core_dev *dh_dev);
+};
+
+struct zxdh_en_if {
+ uint16_t (*get_channels_num)(struct dh_core_dev *dh_dev);
+ int32_t (*create_vqs_channels)(struct dh_core_dev *dh_dev);
+ void (*destroy_vqs_channels)(struct dh_core_dev *dh_dev);
+ void (*switch_vqs_channel)(struct dh_core_dev *dh_dev, int32_t channel,
+ int32_t op);
+ int32_t (*vqs_channel_bind_handler)(struct dh_core_dev *dh_dev,
+ int32_t vqs_channel_num,
+ struct dh_vq_handler *handler);
+ void (*vqs_channel_unbind_handler)(struct dh_core_dev *dh_dev,
+ int32_t vqs_channel_num);
+ int32_t (*vq_bind_channel)(struct dh_core_dev *dh_dev, int32_t channel_num,
+ int32_t queue_index);
+ void (*vq_unbind_channel)(struct dh_core_dev *dh_dev, int32_t queue_index);
+ int32_t (*vqs_bind_eqs)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num,
+ struct list_head *vq_node);
+ void (*vqs_unbind_eqs)(struct dh_core_dev *dh_dev, int32_t vqs_channel_num);
+ void __iomem *(*vp_modern_map_vq_notify)(struct dh_core_dev *dh_dev,
+ uint32_t index,
+ resource_size_t *pa);
+ void (*vp_modern_unmap_vq_notify)(struct dh_core_dev *dh_dev, void *priv);
+ int32_t (*get_phy_vq)(struct dh_core_dev *dh_dev, uint16_t index);
+ void (*activate_phy_vq)(struct dh_core_dev *dh_dev, uint32_t phy_index,
+ int32_t queue_size, uint64_t desc_addr,
+ uint64_t driver_addr, uint64_t device_addr);
+ void (*de_activate_phy_vq)(struct dh_core_dev *dh_dev, uint32_t phy_index);
+ int32_t (*release_phy_vq)(struct dh_core_dev *dh_dev, uint32_t *phy_index,
+ uint16_t total_qnum);
+ void (*set_status)(struct dh_core_dev *dh_dev, uint8_t status);
+ uint8_t (*get_status)(struct dh_core_dev *dh_dev);
+ void (*set_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id);
+ void (*get_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id);
+ void (*set_mac)(struct dh_core_dev *dh_dev, uint8_t *mac);
+ void (*get_mac)(struct dh_core_dev *dh_dev, uint8_t *mac);
+ uint64_t (*get_features)(struct dh_core_dev *dh_dev);
+ void (*set_features)(struct dh_core_dev *dh_dev, uint64_t features);
+ uint16_t (*get_queue_num)(struct dh_core_dev *dh_dev);
+ uint16_t (*get_queue_size)(struct dh_core_dev *dh_dev, uint32_t index);
+ void (*set_queue_enable)(struct dh_core_dev *dh_dev, uint16_t index,
+ bool enable);
+ uint32_t (*get_epbdf)(struct dh_core_dev *dh_dev);
+ uint16_t (*get_vport)(struct dh_core_dev *dh_dev);
+ uint16_t (*get_pcie_id)(struct dh_core_dev *dh_dev);
+ uint16_t (*get_slot_id)(struct dh_core_dev *dh_dev);
+ bool (*is_bond)(struct dh_core_dev *dh_dev);
+ enum dh_coredev_type (*get_coredev_type)(struct dh_core_dev *dh_dev);
+ struct pci_dev *(*get_pdev)(struct dh_core_dev *dh_dev);
+ uint64_t (*get_bar_virt_addr)(struct dh_core_dev *dh_dev, uint8_t bar_num);
+ int32_t (*msg_send_cmd)(struct dh_core_dev *dh_dev, uint16_t module_id,
+ void *msg, void *ack, bool is_sync);
+ int32_t (*async_eq_enable)(struct dh_core_dev *dh_dev,
+ struct dh_eq_async *eq, const char *name,
+ bool attach);
+ struct zxdh_vf_item *(*get_vf_item)(struct dh_core_dev *dh_dev,
+ uint16_t vf_idx);
+ void (*set_pf_link_up)(struct dh_core_dev *dh_dev, bool link_up);
+ bool (*get_pf_link_up)(struct dh_core_dev *dh_dev);
+ void (*update_pf_link_info)(struct dh_core_dev *dh_dev,
+ struct link_info_struct *link_info_val);
+ int32_t (*get_pf_drv_msg)(struct dh_core_dev *dh_dev, uint8_t *drv_version,
+ uint8_t *drv_version_len);
+ void (*set_vepa)(struct dh_core_dev *dh_dev, bool setting);
+ bool (*get_vepa)(struct dh_core_dev *dh_dev);
+ void (*set_bond_num)(struct dh_core_dev *dh_dev, bool add);
+ bool (*if_init)(struct dh_core_dev *dh_dev);
+ int32_t (*request_port)(struct dh_core_dev *dh_dev, void *data);
+ int32_t (*release_port)(struct dh_core_dev *dh_dev, uint32_t port_id);
+ void (*get_link_info_from_vqm)(struct dh_core_dev *dh_dev,
+ uint8_t *link_up);
+ void (*set_vf_link_info)(struct dh_core_dev *dh_dev, uint16_t vf_idx,
+ uint8_t link_up);
+ void (*set_pf_phy_port)(struct dh_core_dev *dh_dev, uint8_t phy_port);
+ void (*set_rdma_netdev)(struct dh_core_dev *dh_dev, void *data);
+ uint8_t (*get_pf_phy_port)(struct dh_core_dev *dh_dev);
+ void (*set_init_comp_flag)(struct dh_core_dev *dh_dev);
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c
index 34bfadd3b95f1ef9f0db394192bc658a4dd200f3..5ca72daaff49c087d31793e8d8caaa91a0c6e2f5 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c
@@ -1,877 +1,805 @@
-//#include
-#include "../../en_aux.h"
-#include "en_dcbnl.h"
-#include "en_np/qos/include/dpp_drv_qos.h"
-#include "en_aux/en_cmd.h"
-#include "en_dcbnl_api.h"
-
-static int zxdh_dcbnl_ieee_getets(struct net_device *netdev, struct ieee_ets *ets)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t tc = 0;
- uint32_t j = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_ieee_getets: coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- ets->willing = 0;
-
- ets->ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
-
- memcpy(ets->tc_tsa, en_dev->dcb_para.ets_cfg.tc_tsa, sizeof(ets->tc_tsa));
- memcpy(ets->tc_tx_bw, en_dev->dcb_para.ets_cfg.tc_tx_bw, sizeof(ets->tc_tx_bw));
- memcpy(ets->prio_tc, en_dev->dcb_para.ets_cfg.prio_tc, sizeof(ets->prio_tc));
-
- for (tc = 0; tc < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; tc++)
- {
- if (ets->tc_tsa[tc] != IEEE_8021QAZ_TSA_ETS)
- {
- ets->tc_tx_bw[tc] = 0;
- }
- }
-
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n", j,
- ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]);
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_check_ets_maxtc(struct ieee_ets *ets)
-{
- uint32_t i;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++)
- {
- if (ets->prio_tc[i] >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS)
- {
- LOG_ERR("dcbnl_check_ets: Failed! TC value greater than max(%d)\n", ZXDH_DCBNL_MAX_TRAFFIC_CLASS);
- return 1;
- }
- }
- return 0;
-}
-
-static int zxdh_dcbnl_check_ets_tcbw(struct ieee_ets *ets)
-{
- bool have_ets_tc = false;
- uint32_t bw_sum = 0;
- uint32_t i;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
- {
- have_ets_tc = true;
- bw_sum += ets->tc_tx_bw[i];
- }
- }
-
- if (have_ets_tc && ((bw_sum != 100) && (bw_sum != 0)))
- {
- LOG_ERR("dcbnl_check_ets_tcbw: Failed! ETS BW sum is illegal\n");
- return 1;
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_check_ets_para(struct ieee_ets *ets)
-{
- uint32_t err = 0;
-
- err = zxdh_dcbnl_check_ets_maxtc(ets);
- if (err)
- {
- return -EINVAL;
- }
-
- err = zxdh_dcbnl_check_ets_tcbw(ets);
- if (err)
- {
- return -EINVAL;
- }
- LOG_INFO(" end \n");
- return 0;
-}
-
-
-static int zxdh_dcbnl_ieee_divide_tc_type(struct ieee_ets *ets, uint8_t *tc_type)
-{
- uint32_t i;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- switch (ets->tc_tsa[i])
- {
- case IEEE_8021QAZ_TSA_ETS:
- tc_type[i] = ets->tc_tx_bw[i] ? ZXDH_DCBNL_ETS_TC : ZXDH_DCBNL_ZEROBW_ETS_TC;
- break;
- case IEEE_8021QAZ_TSA_STRICT:
- tc_type[i] = ZXDH_DCBNL_STRICT_TC;
- break;
- case IEEE_8021QAZ_TSA_VENDOR:
- tc_type[i] = ZXDH_DCBNL_VENDOR_TC;
- break;
- default:
- tc_type[i] = ZXDH_DCBNL_STRICT_TC;
- LOG_ERR("dcbnl: %d tsa error, change to strict \n", ets->tc_tsa[i]);
- break;
- }
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_ieee_convert_tc_bw(struct ieee_ets *ets, uint8_t *tc_type, uint8_t *tc_tx_bw)
-{
- uint32_t i;
- uint8_t zero_ets_bw = 0;
- uint8_t zero_ets_num = 0;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- if (tc_type[i] == ZXDH_DCBNL_ZEROBW_ETS_TC)
- {
- zero_ets_num++;
- }
- }
-
- if (zero_ets_num)
- {
- zero_ets_bw = (uint8_t)ZXDH_DCBNL_MAX_BW_ALLOC / zero_ets_num;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- switch (tc_type[i])
- {
- case ZXDH_DCBNL_ZEROBW_ETS_TC:
- tc_tx_bw[i] = zero_ets_bw;
- break;
- case ZXDH_DCBNL_ETS_TC:
- tc_tx_bw[i] = ets->tc_tx_bw[i];
- break;
- case ZXDH_DCBNL_STRICT_TC:
- case ZXDH_DCBNL_VENDOR_TC:
- tc_tx_bw[i] = ZXDH_DCBNL_MAX_BW_ALLOC;
- break;
- default:
- break;
- }
- }
- /* debug */
- LOG_INFO(" zero_ets_num:%d, zero_ets_bw:%d \n", zero_ets_num, zero_ets_bw);
-
- return 0;
-}
-
-static uint32_t zxdh_dcbnl_ieee_set_ets_para(struct zxdh_en_priv *en_priv, struct ieee_ets *ets)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t tc_type[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint32_t err = 0;
- uint32_t j = 0;
-
- zxdh_dcbnl_ieee_divide_tc_type(ets, tc_type);
-
- zxdh_dcbnl_ieee_convert_tc_bw(ets, tc_type, tc_tx_bw);
-
- err = zxdh_dcbnl_set_tc_scheduling(en_priv, tc_type, tc_tx_bw);
- if (err)
- {
- LOG_ERR("set_tc_scheduling failed \n");
- return err;
- }
-
- err = zxdh_dcbnl_set_ets_up_tc_map(en_priv, ets->prio_tc);
- if (err)
- {
- LOG_ERR("set_prio_tc_map failed \n");
- return err;
- }
-
- memcpy(en_dev->dcb_para.ets_cfg.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
- memcpy(en_dev->dcb_para.ets_cfg.tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
- memcpy(en_dev->dcb_para.ets_cfg.prio_tc, ets->prio_tc, sizeof(ets->prio_tc));
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j,
- en_dev->dcb_para.ets_cfg.tc_tsa[j], en_dev->dcb_para.ets_cfg.tc_tx_bw[j], en_dev->dcb_para.ets_cfg.prio_tc[j]);
-
- LOG_INFO(" idx:%d, tc_type:%d, tc_tx_bw:%d \n", j, tc_type[j], tc_tx_bw[j]);
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_ieee_setets(struct net_device *netdev, struct ieee_ets *ets)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t err;
- uint32_t j = 0;
-
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n", j,
- ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]);
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR(" coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- err = zxdh_dcbnl_check_ets_para(ets);
- if (err)
- {
- return err;
- }
-
- err = zxdh_dcbnl_ieee_set_ets_para(en_priv, ets);
- if (err)
- {
- return err;
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_ieee_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t pfc_cur_tm_en = 0;
- uint32_t pfc_cur_mac_en = 0;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- LOG_INFO("zxdh_dcbnl_ieee_getpfc start\n");
-
- /*获取端口pfc使能函数*/
- ret = zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
- LOG_INFO("zxdh_en_fc_mode_get:%d", pfc_cur_mac_en);
-
- if(0 != ret)
- {
- LOG_ERR("zxdh_port_pfc_enable_get failed");
- return ret;
- }
- ret = dpp_qmu_port_pfc_get(&pf_info, en_dev->phy_port, &pfc_cur_tm_en);
- LOG_INFO("dpp_qmu_port_pfc_get:%d", pfc_cur_tm_en);
-
- if(ret != 0)
- {
- LOG_ERR("dpp_qmu_port_pfc_get failed");
- return ret;
- }
- if((pfc_cur_tm_en == 1)&&(pfc_cur_mac_en == BIT(SPM_FC_PFC_FULL)))
- {
- pfc->pfc_en = 255;
- }
- else if((pfc_cur_tm_en == 0)&&(pfc_cur_mac_en == BIT(SPM_FC_NONE)))
- {
- pfc->pfc_en = 0;
- }
- else
- {
- //ret = -1;
- LOG_INFO("pfc_cur_mac_en != pfc_cur_tm_en");
- }
-
- LOG_INFO("zxdh_dcbnl_ieee_getpfc end\n");
-
- return ret;
-}
-
-static int zxdh_dcbnl_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t port_mac_en = 0;
- uint32_t port_tm_en = 0;
- uint32_t ret = 0;
- uint32_t test_pfc_mac_en = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- LOG_INFO("zxdh_dcbnl_ieee_setpfc start\n");
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- //兼容现有工具考虑,后续开发工具后可以注释掉
- if(pfc->pfc_en != 0 && pfc->pfc_en != 0xff )
- {
- LOG_INFO("pfc->pfc_en input invalid: %d", pfc->pfc_en);
- return EINVAL;
- }
-
- if(pfc->pfc_en != 0)
- {
- port_mac_en = BIT(SPM_FC_PFC_FULL);
- port_tm_en = 1;
- }
- else
- {
- port_mac_en = BIT(SPM_FC_NONE);
- //不使能后重新设置为初始状态阈值
- //ret = zxdh_port_th_update_to_default(en_dev);
-
- if(ret)
- {
- LOG_INFO("zxdh_port_th_update_to_last failed");
- }
- }
- /*tm端口pfc使能*/
- //ret |= dpp_qmu_port_pfc_set(&pf_info, en_dev->phy_port, port_tm_en);
- //dpp_qmu_port_pfc_get(&pf_info, en_dev->phy_port, &test_pfc_tm_en);
- //LOG_INFO("dpp_qmu_port_pfc_get: %d", test_pfc_tm_en);
-
- /*mac部分端口pfc使能*/
- ret |= zxdh_en_fc_mode_set(en_dev, port_mac_en);
- zxdh_en_fc_mode_get(en_dev, &test_pfc_mac_en);
- LOG_INFO("zxdh_port_pfc_enable_get: %d", test_pfc_mac_en);
-
- if(pfc->pfc_en != 0)
- {
- //ret = zxdh_port_th_update(en_dev);
- }
-
- /*错误判断及打印*/
- if(0 != ret)
- {
- LOG_ERR("zxdh_dcbnl_ieee_setpfc pfc_en:%c failed, %d", pfc->pfc_en, ret);
- }
-
- LOG_INFO("zxdh_dcbnl_ieee_setpfc end\n");
-
- return ret;
-}
-
-static int zxdh_dcbnl_ieee_getmaxrate(struct net_device *netdev, struct ieee_maxrate *maxrate)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t i = 0;
- uint32_t j = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- if (ZXDH_DCBNL_MAXRATE_KBITPS <= en_dev->dcb_para.tc_maxrate[i])
- {
- maxrate->tc_maxrate[i] = 0; //0 indicates unlimited
- }
- else
- {
- maxrate->tc_maxrate[i] = en_dev->dcb_para.tc_maxrate[i];
- }
- }
-
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" tc:%d,tc_maxrate:%lld \n", j, maxrate->tc_maxrate[j]);
- }
-
- return 0;
-}
-
-static int zxdh_dcbnl_ieee_setmaxrate(struct net_device *netdev, struct ieee_maxrate *maxrate)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t maxrate_kbps[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = {0};
- uint32_t err,i;
- uint32_t j = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- /* Values are 64 bits and specified in Kbps */
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- if ((maxrate->tc_maxrate[i] == 0) || (maxrate->tc_maxrate[i] >= ZXDH_DCBNL_MAXRATE_KBITPS))
- {
- maxrate_kbps[i] = ZXDH_DCBNL_MAXRATE_KBITPS;
- }
- else if (maxrate->tc_maxrate[i] <= ZXDH_DCBNL_MINRATE_KBITPS)
- {
- maxrate_kbps[i] = ZXDH_DCBNL_MINRATE_KBITPS;
- }
- else
- {
- maxrate_kbps[i] = (uint32_t)maxrate->tc_maxrate[i];
- }
- }
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" tc:%d,maxrate->tc_maxrate:%lld,maxrate_kbps:%d \n",
- j, maxrate->tc_maxrate[j], maxrate_kbps[j]);
- }
-
- err = zxdh_dcbnl_set_tc_maxrate(en_priv, maxrate_kbps);
- if (err)
- {
- return err;
- }
-
- return 0;
-}
-
-
-static int zxdh_dcbnl_ieee_setapp(struct net_device *netdev, struct dcb_app *app)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct dcb_app app_old;
- bool is_new = false;
- int err = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR(" coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
- (app->protocol >= ZXDH_DCBNL_MAX_DSCP) ||
- (app->priority >= ZXDH_DCBNL_MAX_PRIORITY))
- {
- return -EINVAL;
- }
- /* Save the old entry info */
- app_old.selector = IEEE_8021QAZ_APP_SEL_DSCP;
- app_old.protocol = app->protocol;
- app_old.priority = en_dev->dcb_para.dscp2prio[app->protocol];
-
- LOG_INFO(" protocol:%d, priority:%d \n", app->protocol, app->priority);
-
- if (!en_dev->dcb_para.dscp_app_num)
- {
- err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_DSCP);
- if (err)
- {
- return err;
- }
- }
-
- if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol])
- {
- err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, app->priority);
- if (err)
- {
- zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
- return err;
- }
- }
-
- /* Delete the old entry if exists */
- err = dcb_ieee_delapp(netdev, &app_old);
- if (err)
- {
- is_new = true;
- }
- /* Add new entry and update counter */
- err = dcb_ieee_setapp(netdev, app);
- if (err)
- {
- return err;
- }
- if (is_new)
- {
- en_dev->dcb_para.dscp_app_num++;
- }
- LOG_INFO(" dscp_app_num:%d \n", en_dev->dcb_para.dscp_app_num);
-
- return err;
-}
-
-static int zxdh_dcbnl_ieee_delapp(struct net_device *netdev, struct dcb_app *app)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int err = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_ieee_delapp coredev type is not a PF");
- return -EOPNOTSUPP;
- }
-
- if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
- (app->protocol >= ZXDH_DCBNL_MAX_DSCP))
- {
- return -EINVAL;
- }
-
- if (!en_dev->dcb_para.dscp_app_num)
- {
- return -ENOENT;
- }
-
- if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol])
- {
- return -ENOENT;
- }
-
- /* Delete the app entry */
- err = dcb_ieee_delapp(netdev, app);
- if (err)
- {
- return err;
- }
-
- /* Restore to default */
- err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, app->protocol>>3);
- if (err)
- {
- zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
- return err;
- }
- en_dev->dcb_para.dscp_app_num--;
- LOG_INFO(" protocol:%d, dscp_app_num:%d \n", app->protocol, en_dev->dcb_para.dscp_app_num);
-
- if (!en_dev->dcb_para.dscp_app_num)
- {
- err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
- }
-
- return err;
-
-}
-#ifdef ZXDH_DCBNL_CEE_SUPPORT
-static void zxdh_dcbnl_setpgtccfgtx(struct net_device *netdev, int tc,
- uint8_t prio_type, uint8_t pgid,
- uint8_t bw_pct, uint8_t up_map)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_cee_ets *cee_ets_cfg;
- uint32_t i;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_setpgtccfgtx coredev type is not a PF");
- return;
- }
-
- if ((tc < 0) || (tc >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS))
- {
- return;
- }
-
- cee_ets_cfg = &en_dev->dcb_para.cee_ets_cfg;
- for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++)
- {
- if (up_map & BIT(i))
- {
- cee_ets_cfg->prio_tc[i] = tc;
- }
- }
- cee_ets_cfg->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
-
-}
-static void zxdh_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid, uint8_t bw_pct)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_setpgbwgcfgtx coredev type is not a PF");
- return;
- }
-
- if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS))
- {
- en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[pgid] = bw_pct;
- }
- LOG_INFO(" tc_tx_bw[%d]:%d \n", pgid, bw_pct);
-
-}
-
-static void zxdh_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
- uint8_t *prio_type, uint8_t *pgid,
- uint8_t *bw_pct, uint8_t *up_map)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- /* pf检查 */
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_getpgtccfgtx coredev type is not a PF");
- return;
- }
-
- if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY))
- {
- *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio];
- }
-
-}
-
-static void zxdh_dcbnl_getpgbwgcfgtx(struct net_device *netdev, int pgid, uint8_t *bw_pct)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_getpgbwgcfgtx coredev type is not a PF");
- return;
- }
-
- if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS))
- {
- *bw_pct = en_dev->dcb_para.ets_cfg.tc_tx_bw[pgid];
- }
-
-}
-
-
-static void zxdh_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
- uint8_t prio_type, uint8_t pgid,
- uint8_t bw_pct, uint8_t up_map)
-{
- LOG_ERR("Rx PG TC Config Not Supported.\n");
-}
-
-static void zxdh_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid, uint8_t bw_pct)
-{
- LOG_ERR("Rx PG BWG Config Not Supported.\n");
-}
-
-
-static void zxdh_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
- uint8_t *prio_type, uint8_t *pgid,
- uint8_t *bw_pct, uint8_t *up_map)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_getpgtccfgrx coredev type is not a PF");
- return;
- }
-
- if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY))
- {
- *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio];
- }
-
-}
-
-static void zxdh_dcbnl_getpgbwgcfgrx(struct net_device *netdev, int pgid, uint8_t *bw_pct)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_getpgbwgcfgrx coredev type is not a PF");
- return;
- }
-
- if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS))
- {
- *bw_pct = 0;
- }
-
-}
-
-static uint8_t zxdh_dcbnl_setall(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct ieee_ets ets = {0};
- uint32_t i = 0;
- uint32_t err = 0;
- uint32_t j = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- LOG_ERR("zxdh_dcbnl_setall coredev type is not a PF");
- return 1;
- }
-
- ets.ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++)
- {
- ets.tc_tx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i];
- ets.tc_rx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i];
- ets.tc_tsa[i] = en_dev->dcb_para.cee_ets_cfg.tc_tsa[i];
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++)
- {
- ets.prio_tc[i] = en_dev->dcb_para.cee_ets_cfg.prio_tc[i];
- }
- /* debug */
- for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++)
- {
- LOG_INFO(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j, ets.tc_tx_bw[j], ets.tc_tsa[j], ets.prio_tc[j]);
- }
-
- err = zxdh_dcbnl_check_ets_para(&ets);
- if (err)
- {
- return err;
- }
-
- err = zxdh_dcbnl_ieee_set_ets_para(en_priv, &ets);
- if (err)
- {
- return err;
- }
-
- return 0;
-}
-
-static uint8_t zxdh_dcbnl_getstate(struct net_device *netdev)
-{
- return ZXDH_DCBNL_CEE_STATE_UP;
-}
-
-static uint8_t zxdh_dcbnl_setstate(struct net_device *netdev, u8 state)
-{
-
- return 0;
-}
-#endif
-
-static const struct dcbnl_rtnl_ops zxdh_dcbnl_ops ={
- .ieee_getets = zxdh_dcbnl_ieee_getets,
- .ieee_setets = zxdh_dcbnl_ieee_setets,
- .ieee_getpfc = zxdh_dcbnl_ieee_getpfc,
- .ieee_setpfc = zxdh_dcbnl_ieee_setpfc,
-
- .ieee_getmaxrate = zxdh_dcbnl_ieee_getmaxrate,
- .ieee_setmaxrate = zxdh_dcbnl_ieee_setmaxrate,
-
- .ieee_setapp = zxdh_dcbnl_ieee_setapp,
- .ieee_delapp = zxdh_dcbnl_ieee_delapp,
-
-#ifdef ZXDH_DCBNL_CEE_SUPPORT
- /* CEE not support */
- .setall = zxdh_dcbnl_setall,
-
- .getstate = zxdh_dcbnl_getstate,
- .setstate = zxdh_dcbnl_setstate,
-
- .setpgtccfgtx = zxdh_dcbnl_setpgtccfgtx,
- .setpgbwgcfgtx = zxdh_dcbnl_setpgbwgcfgtx,
- .getpgtccfgtx = zxdh_dcbnl_getpgtccfgtx,
- .getpgbwgcfgtx = zxdh_dcbnl_getpgbwgcfgtx,
-
- .setpgtccfgrx = zxdh_dcbnl_setpgtccfgrx,
- .setpgbwgcfgrx = zxdh_dcbnl_setpgbwgcfgrx,
- .getpgtccfgrx = zxdh_dcbnl_getpgtccfgrx,
- .getpgbwgcfgrx = zxdh_dcbnl_getpgbwgcfgrx,
-#endif
-};
-
-uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- uint32_t err = 0;
- err = zxdh_dcbnl_set_tm_gate(en_priv, 1);
- if (err)
- {
- LOG_ERR(" set_tm_gate close failed \n");
- }
- LOG_INFO(" tm mcode gate open ");
- return err;
-}
-
-uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- uint32_t err = 0;
- err = zxdh_dcbnl_set_tm_gate(en_priv, 0);
- if (err)
- {
- LOG_ERR(" set_tm_gate close failed \n");
- }
- LOG_INFO(" tm mcode gate close ");
- return err;
-}
-
-uint32_t zxdh_dcbnl_initialize(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t err = 0;
-
- LOG_INFO("%s dcbnl init begin\n", netdev->name);
-
- err = zxdh_dcbnl_init_port_speed(en_priv);
- if (err)
- {
- LOG_INFO("dcbnl_init_ets: init_port_speed failed \n");
- //return err;
- }
-
- err = zxdh_dcbnl_init_ets_scheduling_tree(en_priv);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: init_ets_scheduling_tree failed \n");
- return err;
- }
-
- zxdh_dcbnl_printk_ets_tree(en_priv);
-
- zxdh_dcbnl_pfc_init(en_priv);
-
- en_dev->dcb_para.init_flag = ZXDH_DCBNL_INIT_FLAG;
- netdev->dcbnl_ops = &zxdh_dcbnl_ops;
- zxdh_dcbnl_set_tm_pport_mcode_gate_open(netdev);
- LOG_INFO("%s dcbnl init ok ", netdev->name);
- return 0;
-}
-
-uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF)
- {
- return 0;
- }
- LOG_INFO("%s dcbnl uninit begin\n", netdev->name);
-
- en_dev->dcb_para.init_flag = 0;
- netdev->dcbnl_ops = NULL;
- zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev);
-
- zxdh_dcbnl_free_flow_resources(en_priv);
-
- zxdh_dcbnl_free_se_resources(en_priv);
-
- LOG_INFO("%s dcbnl uninit ok ", netdev->name);
- return 0;
-}
+//#include
+#include "../../en_aux.h"
+#include "en_dcbnl.h"
+#include "en_np/qos/include/dpp_drv_qos.h"
+#include "en_aux/en_cmd.h"
+#include "en_dcbnl_api.h"
+
+static int zxdh_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t tc = 0;
+ uint32_t j = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_ieee_getets: coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ ets->willing = 0;
+
+ ets->ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
+
+ memcpy(ets->tc_tsa, en_dev->dcb_para.ets_cfg.tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->tc_tx_bw, en_dev->dcb_para.ets_cfg.tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->prio_tc, en_dev->dcb_para.ets_cfg.prio_tc,
+ sizeof(ets->prio_tc));
+
+ for (tc = 0; tc < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; tc++) {
+ if (ets->tc_tsa[tc] != IEEE_8021QAZ_TSA_ETS) {
+ ets->tc_tx_bw[tc] = 0;
+ }
+ }
+
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(
+ " idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n",
+ j, ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]);
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_check_ets_maxtc(struct ieee_ets *ets)
+{
+ uint32_t i;
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) {
+ if (ets->prio_tc[i] >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS) {
+ LOG_ERR("dcbnl_check_ets: Failed! TC value greater than max(%d)\n",
+ ZXDH_DCBNL_MAX_TRAFFIC_CLASS);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int zxdh_dcbnl_check_ets_tcbw(struct ieee_ets *ets)
+{
+ bool have_ets_tc = false;
+ uint32_t bw_sum = 0;
+ uint32_t i;
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ have_ets_tc = true;
+ bw_sum += ets->tc_tx_bw[i];
+ }
+ }
+
+ if (have_ets_tc && ((bw_sum != 100) && (bw_sum != 0))) {
+ LOG_ERR("dcbnl_check_ets_tcbw: Failed! ETS BW sum is illegal\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_check_ets_para(struct ieee_ets *ets)
+{
+ uint32_t err = 0;
+
+ err = zxdh_dcbnl_check_ets_maxtc(ets);
+ if (err) {
+ return -EINVAL;
+ }
+
+ err = zxdh_dcbnl_check_ets_tcbw(ets);
+ if (err) {
+ return -EINVAL;
+ }
+ LOG_INFO(" end \n");
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_divide_tc_type(struct ieee_ets *ets,
+ uint8_t *tc_type)
+{
+ uint32_t i;
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_type[i] = ets->tc_tx_bw[i] ? ZXDH_DCBNL_ETS_TC :
+ ZXDH_DCBNL_ZEROBW_ETS_TC;
+ break;
+ case IEEE_8021QAZ_TSA_STRICT:
+ tc_type[i] = ZXDH_DCBNL_STRICT_TC;
+ break;
+ case IEEE_8021QAZ_TSA_VENDOR:
+ tc_type[i] = ZXDH_DCBNL_VENDOR_TC;
+ break;
+ default:
+ tc_type[i] = ZXDH_DCBNL_STRICT_TC;
+ LOG_ERR("dcbnl: %d tsa error, change to strict \n", ets->tc_tsa[i]);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_convert_tc_bw(struct ieee_ets *ets, uint8_t *tc_type,
+ uint8_t *tc_tx_bw)
+{
+ uint32_t i;
+ uint8_t zero_ets_bw = 0;
+ uint8_t zero_ets_num = 0;
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_type[i] == ZXDH_DCBNL_ZEROBW_ETS_TC) {
+ zero_ets_num++;
+ }
+ }
+
+ if (zero_ets_num) {
+ zero_ets_bw = (uint8_t)ZXDH_DCBNL_MAX_BW_ALLOC / zero_ets_num;
+ }
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ switch (tc_type[i]) {
+ case ZXDH_DCBNL_ZEROBW_ETS_TC:
+ tc_tx_bw[i] = zero_ets_bw;
+ break;
+ case ZXDH_DCBNL_ETS_TC:
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
+ break;
+ case ZXDH_DCBNL_STRICT_TC:
+ case ZXDH_DCBNL_VENDOR_TC:
+ tc_tx_bw[i] = ZXDH_DCBNL_MAX_BW_ALLOC;
+ break;
+ default:
+ break;
+ }
+ }
+ /* debug */
+ LOG_INFO(" zero_ets_num:%d, zero_ets_bw:%d \n", zero_ets_num, zero_ets_bw);
+
+ return 0;
+}
+
+static uint32_t zxdh_dcbnl_ieee_set_ets_para(struct zxdh_en_priv *en_priv,
+ struct ieee_ets *ets)
+{
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint8_t tc_type[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint32_t err = 0;
+ uint32_t j = 0;
+
+ zxdh_dcbnl_ieee_divide_tc_type(ets, tc_type);
+
+ zxdh_dcbnl_ieee_convert_tc_bw(ets, tc_type, tc_tx_bw);
+
+ err = zxdh_dcbnl_set_tc_scheduling(en_priv, tc_type, tc_tx_bw);
+ if (err) {
+ LOG_ERR("set_tc_scheduling failed \n");
+ return err;
+ }
+
+ err = zxdh_dcbnl_set_ets_up_tc_map(en_priv, ets->prio_tc);
+ if (err) {
+ LOG_ERR("set_prio_tc_map failed \n");
+ return err;
+ }
+
+ memcpy(en_dev->dcb_para.ets_cfg.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(en_dev->dcb_para.ets_cfg.tc_tx_bw, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(en_dev->dcb_para.ets_cfg.prio_tc, ets->prio_tc,
+ sizeof(ets->prio_tc));
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j,
+ en_dev->dcb_para.ets_cfg.tc_tsa[j],
+ en_dev->dcb_para.ets_cfg.tc_tx_bw[j],
+ en_dev->dcb_para.ets_cfg.prio_tc[j]);
+
+ LOG_INFO(" idx:%d, tc_type:%d, tc_tx_bw:%d \n", j, tc_type[j],
+ tc_tx_bw[j]);
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t err;
+ uint32_t j = 0;
+
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(
+ " idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n",
+ j, ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]);
+ }
+
+ /* 检查设备是否支持ets,cap,待补充 */
+ /*if (!DH_CAP_GEN(en_priv->parent, ets))
+ {
+ return -EOPNOTSUPP;
+ }*/
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR(" coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ err = zxdh_dcbnl_check_ets_para(ets);
+ if (err) {
+ return err;
+ }
+
+ err = zxdh_dcbnl_ieee_set_ets_para(en_priv, ets);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t pfc_cur_tm_en = 0;
+ uint32_t pfc_cur_mac_en = 0;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ LOG_INFO("zxdh_dcbnl_ieee_getpfc start\n");
+
+ /*获取端口pfc使能函数*/
+ ret = zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
+ LOG_INFO("zxdh_en_fc_mode_get:%d", pfc_cur_mac_en);
+
+ if (0 != ret) {
+ LOG_ERR("zxdh_port_pfc_enable_get failed");
+ return ret;
+ }
+ ret = dpp_qmu_port_pfc_get(&pf_info, en_dev->phy_port, &pfc_cur_tm_en);
+ LOG_INFO("dpp_qmu_port_pfc_get:%d", pfc_cur_tm_en);
+
+ if (ret != 0) {
+ LOG_ERR("dpp_qmu_port_pfc_get failed");
+ return ret;
+ }
+ if ((pfc_cur_tm_en == 1) && (pfc_cur_mac_en == BIT(SPM_FC_PFC_FULL))) {
+ pfc->pfc_en = 255;
+ } else if ((pfc_cur_tm_en == 0) && (pfc_cur_mac_en == BIT(SPM_FC_NONE))) {
+ pfc->pfc_en = 0;
+ } else {
+ // ret = -1;
+ LOG_INFO("pfc_cur_mac_en != pfc_cur_tm_en");
+ }
+
+ LOG_INFO("zxdh_dcbnl_ieee_getpfc end\n");
+
+ return ret;
+}
+
+static int zxdh_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t port_mac_en = 0;
+ uint32_t port_tm_en = 0;
+ uint32_t ret = 0;
+ uint32_t test_pfc_mac_en = 0;
+ uint32_t test_pfc_tm_en = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ LOG_INFO("zxdh_dcbnl_ieee_setpfc start\n");
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ //兼容现有工具考虑,后续开发工具后可以注释掉
+ if (pfc->pfc_en != 0 && pfc->pfc_en != 0xff) {
+ LOG_INFO("pfc->pfc_en input invalid: %d", pfc->pfc_en);
+ return EINVAL;
+ }
+
+ if (pfc->pfc_en != 0) {
+ port_mac_en = BIT(SPM_FC_PFC_FULL);
+ port_tm_en = 1;
+ } else {
+ port_mac_en = BIT(SPM_FC_NONE);
+ //不使能后重新设置为初始状态阈值
+ ret = zxdh_port_th_update_to_default(en_dev);
+
+ if (ret) {
+ LOG_INFO("zxdh_port_th_update_to_last failed");
+ }
+ }
+ /*tm端口pfc使能*/
+ ret |= dpp_qmu_port_pfc_set(&pf_info, en_dev->phy_port, port_tm_en);
+ dpp_qmu_port_pfc_get(&pf_info, en_dev->phy_port, &test_pfc_tm_en);
+ LOG_INFO("dpp_qmu_port_pfc_get: %d", test_pfc_tm_en);
+
+ /*mac部分端口pfc使能*/
+ ret |= zxdh_en_fc_mode_set(en_dev, port_mac_en);
+ zxdh_en_fc_mode_get(en_dev, &test_pfc_mac_en);
+ LOG_INFO("zxdh_port_pfc_enable_get: %d", test_pfc_mac_en);
+
+ if (pfc->pfc_en != 0) {
+ ret = zxdh_port_th_update(en_dev);
+ }
+ if (ret) {
+ LOG_INFO("zxdh_port_th_update failed");
+ }
+
+ /*错误判断及打印*/
+ if (0 != ret) {
+ LOG_ERR("zxdh_dcbnl_ieee_setpfc pfc_en:%c failed, %d", pfc->pfc_en,
+ ret);
+ }
+
+ LOG_INFO("zxdh_dcbnl_ieee_setpfc end\n");
+
+ return ret;
+}
+
+static int zxdh_dcbnl_ieee_getmaxrate(struct net_device *netdev,
+ struct ieee_maxrate *maxrate)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ if (ZXDH_DCBNL_MAXRATE_KBITPS <= en_dev->dcb_para.tc_maxrate[i]) {
+ maxrate->tc_maxrate[i] = 0; // 0 indicates unlimited
+ } else {
+ maxrate->tc_maxrate[i] = en_dev->dcb_para.tc_maxrate[i];
+ }
+ }
+
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(" tc:%d,tc_maxrate:%lld \n", j, maxrate->tc_maxrate[j]);
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_setmaxrate(struct net_device *netdev,
+ struct ieee_maxrate *maxrate)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t maxrate_kbps[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 };
+ uint32_t err, i;
+ uint32_t j = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ /* Values are 64 bits and specified in Kbps */
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ if ((maxrate->tc_maxrate[i] == 0) ||
+ (maxrate->tc_maxrate[i] >= ZXDH_DCBNL_MAXRATE_KBITPS)) {
+ maxrate_kbps[i] = ZXDH_DCBNL_MAXRATE_KBITPS;
+ } else if (maxrate->tc_maxrate[i] <= ZXDH_DCBNL_MINRATE_KBITPS) {
+ maxrate_kbps[i] = ZXDH_DCBNL_MINRATE_KBITPS;
+ } else {
+ maxrate_kbps[i] = (uint32_t)maxrate->tc_maxrate[i];
+ }
+ }
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(" tc:%d,maxrate->tc_maxrate:%lld,maxrate_kbps:%d \n", j,
+ maxrate->tc_maxrate[j], maxrate_kbps[j]);
+ }
+
+ err = zxdh_dcbnl_set_tc_maxrate(en_priv, maxrate_kbps);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int zxdh_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct dcb_app app_old;
+ bool is_new = false;
+ int err = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR(" coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= ZXDH_DCBNL_MAX_DSCP) ||
+ (app->priority >= ZXDH_DCBNL_MAX_PRIORITY)) {
+ return -EINVAL;
+ }
+ /* Save the old entry info */
+ app_old.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_old.protocol = app->protocol;
+ app_old.priority = en_dev->dcb_para.dscp2prio[app->protocol];
+
+ LOG_INFO(" protocol:%d, priority:%d \n", app->protocol, app->priority);
+
+ if (!en_dev->dcb_para.dscp_app_num) {
+ err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_DSCP);
+ if (err) {
+ return err;
+ }
+ }
+
+ if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol]) {
+ err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, app->priority);
+ if (err) {
+ zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
+ return err;
+ }
+ }
+
+ /* Delete the old entry if exists */
+ err = dcb_ieee_delapp(netdev, &app_old);
+ if (err) {
+ is_new = true;
+ }
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(netdev, app);
+ if (err) {
+ return err;
+ }
+ if (is_new) {
+ en_dev->dcb_para.dscp_app_num++;
+ }
+ LOG_INFO(" dscp_app_num:%d \n", en_dev->dcb_para.dscp_app_num);
+
+ return err;
+}
+
+static int zxdh_dcbnl_ieee_delapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int err = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_ieee_delapp coredev type is not a PF");
+ return -EOPNOTSUPP;
+ }
+
+ if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+ (app->protocol >= ZXDH_DCBNL_MAX_DSCP)) {
+ return -EINVAL;
+ }
+
+ if (!en_dev->dcb_para.dscp_app_num) {
+ return -ENOENT;
+ }
+
+ if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol]) {
+ return -ENOENT;
+ }
+
+ /* Delete the app entry */
+ err = dcb_ieee_delapp(netdev, app);
+ if (err) {
+ return err;
+ }
+
+ /* Restore to default */
+ err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, app->protocol >> 3);
+ if (err) {
+ zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
+ return err;
+ }
+ en_dev->dcb_para.dscp_app_num--;
+ LOG_INFO(" protocol:%d, dscp_app_num:%d \n", app->protocol,
+ en_dev->dcb_para.dscp_app_num);
+
+ if (!en_dev->dcb_para.dscp_app_num) {
+ err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
+ }
+
+ return err;
+}
+
+static void zxdh_dcbnl_setpgtccfgtx(struct net_device *netdev, int tc,
+ uint8_t prio_type, uint8_t pgid,
+ uint8_t bw_pct, uint8_t up_map)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_dcbnl_cee_ets *cee_ets_cfg;
+ uint32_t i;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_setpgtccfgtx coredev type is not a PF");
+ return;
+ }
+
+ if ((tc < 0) || (tc >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) {
+ return;
+ }
+
+ cee_ets_cfg = &en_dev->dcb_para.cee_ets_cfg;
+ for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) {
+ if (up_map & BIT(i)) {
+ cee_ets_cfg->prio_tc[i] = tc;
+ }
+ }
+ cee_ets_cfg->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
+}
+static void zxdh_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ uint8_t bw_pct)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_setpgbwgcfgtx coredev type is not a PF");
+ return;
+ }
+
+ if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) {
+ en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[pgid] = bw_pct;
+ }
+ LOG_INFO(" tc_tx_bw[%d]:%d \n", pgid, bw_pct);
+}
+
+static void zxdh_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ uint8_t *prio_type, uint8_t *pgid,
+ uint8_t *bw_pct, uint8_t *up_map)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ /* pf检查 */
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_getpgtccfgtx coredev type is not a PF");
+ return;
+ }
+
+ if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY)) {
+ *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio];
+ }
+}
+
+static void zxdh_dcbnl_getpgbwgcfgtx(struct net_device *netdev, int pgid,
+ uint8_t *bw_pct)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_getpgbwgcfgtx coredev type is not a PF");
+ return;
+ }
+
+ if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) {
+ *bw_pct = en_dev->dcb_para.ets_cfg.tc_tx_bw[pgid];
+ }
+}
+
+static void zxdh_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ uint8_t prio_type, uint8_t pgid,
+ uint8_t bw_pct, uint8_t up_map)
+{
+ LOG_ERR("Rx PG TC Config Not Supported.\n");
+}
+
+static void zxdh_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ uint8_t bw_pct)
+{
+ LOG_ERR("Rx PG BWG Config Not Supported.\n");
+}
+
+static void zxdh_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ uint8_t *prio_type, uint8_t *pgid,
+ uint8_t *bw_pct, uint8_t *up_map)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_getpgtccfgrx coredev type is not a PF");
+ return;
+ }
+
+ if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY)) {
+ *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio];
+ }
+}
+
+static void zxdh_dcbnl_getpgbwgcfgrx(struct net_device *netdev, int pgid,
+ uint8_t *bw_pct)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_getpgbwgcfgrx coredev type is not a PF");
+ return;
+ }
+
+ if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) {
+ *bw_pct = 0;
+ }
+}
+
+static uint8_t zxdh_dcbnl_setall(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct ieee_ets ets = { 0 };
+ uint32_t i = 0;
+ uint32_t err = 0;
+ uint32_t j = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ LOG_ERR("zxdh_dcbnl_setall coredev type is not a PF");
+ return 1;
+ }
+
+ ets.ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
+ for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) {
+ ets.tc_tx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i];
+ ets.tc_rx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i];
+ ets.tc_tsa[i] = en_dev->dcb_para.cee_ets_cfg.tc_tsa[i];
+ }
+
+ for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) {
+ ets.prio_tc[i] = en_dev->dcb_para.cee_ets_cfg.prio_tc[i];
+ }
+ /* debug */
+ for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) {
+ LOG_INFO(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j,
+ ets.tc_tx_bw[j], ets.tc_tsa[j], ets.prio_tc[j]);
+ }
+
+ err = zxdh_dcbnl_check_ets_para(&ets);
+ if (err) {
+ return err;
+ }
+
+ err = zxdh_dcbnl_ieee_set_ets_para(en_priv, &ets);
+ if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static uint8_t zxdh_dcbnl_getstate(struct net_device *netdev)
+{
+ return ZXDH_DCBNL_CEE_STATE_UP;
+}
+
+static uint8_t zxdh_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops zxdh_dcbnl_ops = {
+ .ieee_getets = zxdh_dcbnl_ieee_getets,
+ .ieee_setets = zxdh_dcbnl_ieee_setets,
+ .ieee_getpfc = zxdh_dcbnl_ieee_getpfc,
+ .ieee_setpfc = zxdh_dcbnl_ieee_setpfc,
+
+ .ieee_getmaxrate = zxdh_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = zxdh_dcbnl_ieee_setmaxrate,
+
+ .ieee_setapp = zxdh_dcbnl_ieee_setapp,
+ .ieee_delapp = zxdh_dcbnl_ieee_delapp,
+
+ /* CEE */
+ .setall = zxdh_dcbnl_setall,
+
+ .getstate = zxdh_dcbnl_getstate,
+ .setstate = zxdh_dcbnl_setstate,
+
+ .setpgtccfgtx = zxdh_dcbnl_setpgtccfgtx,
+ .setpgbwgcfgtx = zxdh_dcbnl_setpgbwgcfgtx,
+ .getpgtccfgtx = zxdh_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = zxdh_dcbnl_getpgbwgcfgtx,
+
+ /*不支持RX的配置,这里只是为了通过__dcbnl_pg_setcfg中函数指针非空检查*/
+ .setpgtccfgrx = zxdh_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgrx = zxdh_dcbnl_setpgbwgcfgrx,
+ .getpgtccfgrx = zxdh_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = zxdh_dcbnl_getpgbwgcfgrx,
+
+};
+
+uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ uint32_t err = 0;
+ err = zxdh_dcbnl_set_tm_gate(en_priv, 1);
+ if (err) {
+ LOG_ERR(" set_tm_gate close failed \n");
+ }
+ LOG_INFO(" tm mcode gate open ");
+ return err;
+}
+
+uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ uint32_t err = 0;
+ err = zxdh_dcbnl_set_tm_gate(en_priv, 0);
+ if (err) {
+ LOG_ERR(" set_tm_gate close failed \n");
+ }
+ LOG_INFO(" tm mcode gate close ");
+ return err;
+}
+
+uint32_t zxdh_dcbnl_initialize(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t err = 0;
+
+ err = zxdh_dcbnl_init_port_speed(en_priv);
+ if (err) {
+ LOG_INFO("dcbnl_init_ets: init_port_speed failed \n");
+ // return err;
+ }
+
+ err = zxdh_dcbnl_init_ets_scheduling_tree(en_priv);
+ if (err) {
+ LOG_ERR("dcbnl_init_ets: init_ets_scheduling_tree failed \n");
+ return err;
+ }
+
+ zxdh_dcbnl_printk_ets_tree(en_priv);
+
+ zxdh_dcbnl_pfc_init(en_priv);
+
+ en_dev->dcb_para.init_flag = ZXDH_DCBNL_INIT_FLAG;
+ netdev->dcbnl_ops = &zxdh_dcbnl_ops;
+ zxdh_dcbnl_set_tm_pport_mcode_gate_open(netdev);
+ LOG_INFO(" dcbnl init ok ");
+ return 0;
+}
+
+uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+ return 0;
+ }
+
+ en_dev->dcb_para.init_flag = 0;
+ netdev->dcbnl_ops = NULL;
+ zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev);
+
+ zxdh_dcbnl_free_flow_resources(en_priv);
+
+ zxdh_dcbnl_free_se_resources(en_priv);
+ LOG_INFO(" dcbnl uninit ok ");
+ return 0;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h
index 2ade85abf18217f2b8d711aae559c0689ff0da47..ed463ddc183ba8c4503e466908e6b8aac70fa763 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h
@@ -1,213 +1,202 @@
-#ifndef __ZXDH_EN_DCBNL_H__
-#define __ZXDH_EN_DCBNL_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-/* 启用dcb会大幅度增加初始化时间,暂时先注释 */
-//#define ZXDH_DCBNL_OPEN
-
-/* CEE not support */
-//#define ZXDH_DCBNL_CEE_SUPPORT
-
-#define ZXDH_DCBNL_INIT_FLAG (0x5a5a5a5a)
-#define ZXDH_DCBNL_NULL_ID (0xffffffff)
-
-#define ZXDH_DCBNL_MAX_PRIORITY (8)
-#define ZXDH_DCBNL_MAX_TRAFFIC_CLASS (8)
-
-#define ZXDH_DCBNL_MAX_DSCP (64)
-
-#define ZXDH_DCBNL_MAX_BW_ALLOC (100)
-#define ZXDH_DCBNL_MAX_WEIGHT (512)
-
-#define ZXDH_DCBNL_RATEUNIT_K (1000)
-#define ZXDH_DCBNL_RATEUNIT_M (1000000)
-#define ZXDH_DCBNL_RATEUNIT_G (1000000000)
-#define ZXDH_DCBNL_MAXRATE_KBITPS (400*1000000)
-#define ZXDH_DCBNL_MINRATE_KBITPS (64)
-
-#define ZXDH_DCBNL_INITRATE_KBITPS (400*1000000)
-
-#define ZXDH_DCBNL_FLOW_RATE_CIR (0)
-
-#define ZXDH_DCBNL_FLOW_RATE_CBS (2000)
-#define ZXDH_DCBNL_FLOW_RATE_EBS (4000)
-#define ZXDH_DCBNL_PORT_RATE_CBS (4000)
-
-#define ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH (0)
-#define ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH (0)
-
-#define ZXDH_DCBNL_FLOW_TDTH (150)
-
-#define ZXDH_DCBNL_CEE_STATE_UP (1)
-
-#define ZXDH_DCBNL_MAX_SE_NODE_NUM (12)
-#define ZXDH_DCBNL_MAX_TREE_LEVEL (7)
-#define ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL (4)
-#define ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL (0)
-
-#define ZXDH_DCBNL_GSCHID_ID_MASK (0xFFFF)
-#define ZXDH_DCBNL_GSCHID_ID_SHIFT (0)
-
-#define ZXDH_DCBNL_GET_GSCHID_MSG(val,mask,shift) ((val >> shift)&mask)
-
-enum zxdh_dcbnl_ets_trust {
- ZXDH_DCBNL_ETS_TRUST_PCP = 0,
- ZXDH_DCBNL_ETS_TRUST_DSCP = 1,
-};
-
-enum zxdh_dcbnl_ets_tc_tsa {
- ZXDH_DCBNL_VENDOR_TC = 0,
- ZXDH_DCBNL_STRICT_TC = 1,
- ZXDH_DCBNL_ETS_TC = 2,
- ZXDH_DCBNL_ZEROBW_ETS_TC = 3,
-};
-
-enum zxdh_dcbnl_ets_node_link_point {
- ZXDH_DCBNL_ETS_NODE_NULL = 0,
- ZXDH_DCBNL_ETS_NODE_VENDOR_C = 1,
- ZXDH_DCBNL_ETS_NODE_STRICT_C = 2,
- ZXDH_DCBNL_ETS_NODE_ETS_C = 3,
- ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C = 4,
- ZXDH_DCBNL_ETS_NODE_VENDOR_E = 5,
- ZXDH_DCBNL_ETS_NODE_STRICT_E = 6,
- ZXDH_DCBNL_ETS_NODE_ETS_E = 7,
- ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E = 8,
-};
-
-enum zxdh_dcbnl_se_flow_node_type {
- ZXDH_DCBNL_ETS_NODE_FQ = 0,
- ZXDH_DCBNL_ETS_NODE_FQ2 = 1,
- ZXDH_DCBNL_ETS_NODE_FQ4 = 2,
- ZXDH_DCBNL_ETS_NODE_FQ8 = 3,
- ZXDH_DCBNL_ETS_NODE_SP = 4,
- ZXDH_DCBNL_ETS_NODE_WFQ = 5,
- ZXDH_DCBNL_ETS_NODE_WFQ2 = 6,
- ZXDH_DCBNL_ETS_NODE_WFQ4 = 7,
- ZXDH_DCBNL_ETS_NODE_WFQ8 = 8,
- ZXDH_DCBNL_ETS_NODE_FLOW = 9,
-};
-
-enum zxdh_tm_trpg_speed{
- ZXDH_TRPG_SPEED_50G = 0,
- ZXDH_TRPG_SPEED_100G = 1,
- ZXDH_TRPG_SPEED_200G = 2,
- ZXDH_TRPG_SPEED_RDMA_400G = 3,
- ZXDH_TRPG_DEFAULT = 4,
-
- ZXDH_TRPG_SPEED_NUM,
-
-};
-struct zxdh_dcbnl_ets_se_node{
- struct zxdh_dcbnl_ets_se_node *se_next;
- uint64_t gsch_id;
- uint32_t node_idx;
- uint32_t node_type;
- uint32_t se_id;
- uint32_t se_link_id;
- uint32_t se_link_weight;
- uint32_t se_link_sp;
- uint32_t link_point;
-};
-
-struct zxdh_dcbnl_ets_flow_node{
- struct zxdh_dcbnl_ets_flow_node *flow_next;
- uint64_t gsch_id;
- uint32_t flow_id;
- uint32_t tc_id;
- uint32_t tc_type;
- uint32_t tc_tx_bw;
- uint32_t td_th;
- uint32_t c_linkid;
- uint32_t c_weight;
- uint32_t c_sp;
- uint32_t c_rate;
- uint32_t mode;
- uint32_t e_linkid;
- uint32_t e_weight;
- uint32_t e_sp;
- uint32_t e_rate;
-};
-
-struct zxdh_dcbnl_ets_node_list_head{
- struct zxdh_dcbnl_ets_se_node *se_next;
- struct zxdh_dcbnl_ets_flow_node *flow_next;
- uint32_t node_num;
-};
-
-struct zxdh_dcbnl_ets_se_flow_resource{
- uint32_t numq;
- uint32_t level;
- uint32_t flags;
- uint32_t resource_id;
- uint64_t gsch_id;
-};
-
-struct zxdh_dcbnl_se_tree_config{
- uint32_t level;
- uint32_t idx;
- uint32_t type;
- uint32_t link_level;
- uint32_t link_idx;
- uint32_t link_weight;
- uint32_t link_sp;
- uint32_t link_point;
-};
-
-struct zxdh_dcbnl_tc_flow_config{
- uint32_t link_level;
- uint32_t tc_type;
- uint32_t tc_tx_bw;
- uint32_t c_rate;
- uint32_t e_rate;
- uint32_t td_th;
-};
-
-struct zxdh_dcbnl_tc_flow_shape_para{
- uint32_t cir;
- uint32_t cbs;
- uint32_t db_en;
- uint32_t eir;
- uint32_t ebs;
-};
-
-struct zxdh_dcbnl_ieee_ets {
- uint8_t willing;
- uint8_t ets_cap;
- uint8_t cbs;
- uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY];
-};
-
-struct zxdh_dcbnl_cee_ets {
- uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY];
-};
-
-struct zxdh_dcbnl_para {
- uint32_t init_flag;
- uint32_t trust;
- uint32_t dscp_app_num;
- uint8_t dscp2prio[ZXDH_DCBNL_MAX_DSCP];
- uint64_t tc_maxrate[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
- struct zxdh_dcbnl_ieee_ets ets_cfg;
- struct zxdh_dcbnl_cee_ets cee_ets_cfg;
- struct zxdh_dcbnl_ets_node_list_head ets_node_list_head[ZXDH_DCBNL_MAX_TREE_LEVEL];
-};
-
-uint32_t zxdh_dcbnl_initialize(struct net_device *netdev);
-uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev);
-uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev);
-uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev);
-
-#ifdef __cplusplus
-}
-#endif
-
+#ifndef __ZXDH_EN_DCBNL_H__
+#define __ZXDH_EN_DCBNL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/types.h>
+
+/* 启用dcb会大幅度增加初始化时间,暂时先注释 */
+//#define ZXDH_DCBNL_OPEN
+
+#define ZXDH_DCBNL_INIT_FLAG (0x5a5a5a5a)
+#define ZXDH_DCBNL_NULL_ID (0xffffffff)
+
+#define ZXDH_DCBNL_MAX_PRIORITY (8)
+#define ZXDH_DCBNL_MAX_TRAFFIC_CLASS (8)
+
+#define ZXDH_DCBNL_MAX_DSCP (64)
+
+#define ZXDH_DCBNL_MAX_BW_ALLOC (100)
+#define ZXDH_DCBNL_MAX_WEIGHT (512)
+
+#define ZXDH_DCBNL_RATEUNIT_K (1000)
+#define ZXDH_DCBNL_RATEUNIT_M (1000000)
+#define ZXDH_DCBNL_RATEUNIT_G (1000000000)
+#define ZXDH_DCBNL_MAXRATE_KBITPS (400 * 1000000)
+#define ZXDH_DCBNL_MINRATE_KBITPS (64)
+
+#define ZXDH_DCBNL_FLOW_RATE_CBS (1000)
+#define ZXDH_DCBNL_FLOW_RATE_EBS (2000)
+#define ZXDH_DCBNL_PORT_RATE_CBS (1000)
+
+#define ZXDH_DCBNL_CEE_STATE_UP (1)
+
+#define ZXDH_DCBNL_MAX_SE_NODE_NUM (12)
+#define ZXDH_DCBNL_MAX_TREE_LEVEL (7)
+#define ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL (4)
+#define ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL (0)
+
+#define ZXDH_DCBNL_GSCHID_ID_MASK (0xFFFF)
+#define ZXDH_DCBNL_GSCHID_ID_SHIFT (0)
+
+#define ZXDH_DCBNL_GET_GSCHID_MSG(val, mask, shift) ((val >> shift) & mask)
+
+enum zxdh_dcbnl_ets_trust {
+ ZXDH_DCBNL_ETS_TRUST_PCP = 0,
+ ZXDH_DCBNL_ETS_TRUST_DSCP = 1,
+};
+
+enum zxdh_dcbnl_ets_tc_tsa {
+ ZXDH_DCBNL_VENDOR_TC = 0,
+ ZXDH_DCBNL_STRICT_TC = 1,
+ ZXDH_DCBNL_ETS_TC = 2,
+ ZXDH_DCBNL_ZEROBW_ETS_TC = 3,
+};
+
+enum zxdh_dcbnl_ets_node_link_point {
+ ZXDH_DCBNL_ETS_NODE_NULL = 0,
+ ZXDH_DCBNL_ETS_NODE_VENDOR_C = 1,
+ ZXDH_DCBNL_ETS_NODE_STRICT_C = 2,
+ ZXDH_DCBNL_ETS_NODE_ETS_C = 3,
+ ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C = 4,
+ ZXDH_DCBNL_ETS_NODE_VENDOR_E = 5,
+ ZXDH_DCBNL_ETS_NODE_STRICT_E = 6,
+ ZXDH_DCBNL_ETS_NODE_ETS_E = 7,
+ ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E = 8,
+};
+
+enum zxdh_dcbnl_se_flow_node_type {
+ ZXDH_DCBNL_ETS_NODE_FQ = 0,
+ ZXDH_DCBNL_ETS_NODE_FQ2 = 1,
+ ZXDH_DCBNL_ETS_NODE_FQ4 = 2,
+ ZXDH_DCBNL_ETS_NODE_FQ8 = 3,
+ ZXDH_DCBNL_ETS_NODE_SP = 4,
+ ZXDH_DCBNL_ETS_NODE_WFQ = 5,
+ ZXDH_DCBNL_ETS_NODE_WFQ2 = 6,
+ ZXDH_DCBNL_ETS_NODE_WFQ4 = 7,
+ ZXDH_DCBNL_ETS_NODE_WFQ8 = 8,
+ ZXDH_DCBNL_ETS_NODE_FLOW = 9,
+};
+
+enum zxdh_tm_trpg_speed {
+ ZXDH_TRPG_SPEED_50G = 0,
+ ZXDH_TRPG_SPEED_100G = 1,
+ ZXDH_TRPG_SPEED_200G = 2,
+ ZXDH_TRPG_SPEED_RDMA_400G = 3,
+ ZXDH_TRPG_DEFAULT = 4,
+
+ ZXDH_TRPG_SPEED_NUM,
+
+};
+struct zxdh_dcbnl_ets_se_node {
+ struct zxdh_dcbnl_ets_se_node *se_next;
+ uint64_t gsch_id;
+ uint32_t node_idx;
+ uint32_t node_type;
+ uint32_t se_id;
+ uint32_t se_link_id;
+ uint32_t se_link_weight;
+ uint32_t se_link_sp;
+ uint32_t link_point;
+};
+
+struct zxdh_dcbnl_ets_flow_node {
+ struct zxdh_dcbnl_ets_flow_node *flow_next;
+ uint64_t gsch_id;
+ uint32_t flow_id;
+ uint32_t tc_id;
+ uint32_t tc_type;
+ uint32_t tc_tx_bw;
+ uint32_t td_th;
+ uint32_t c_linkid;
+ uint32_t c_weight;
+ uint32_t c_sp;
+ uint32_t c_rate;
+ uint32_t mode;
+ uint32_t e_linkid;
+ uint32_t e_weight;
+ uint32_t e_sp;
+ uint32_t e_rate;
+};
+
+struct zxdh_dcbnl_ets_node_list_head {
+ struct zxdh_dcbnl_ets_se_node *se_next;
+ struct zxdh_dcbnl_ets_flow_node *flow_next;
+ uint32_t node_num;
+};
+
+struct zxdh_dcbnl_ets_se_flow_resource {
+ uint32_t numq;
+ uint32_t level;
+ uint32_t flags;
+ uint32_t resource_id;
+ uint64_t gsch_id;
+};
+
+struct zxdh_dcbnl_se_tree_config {
+ uint32_t level;
+ uint32_t idx;
+ uint32_t type;
+ uint32_t link_level;
+ uint32_t link_idx;
+ uint32_t link_weight;
+ uint32_t link_sp;
+ uint32_t link_point;
+};
+
+struct zxdh_dcbnl_tc_flow_config {
+ uint32_t link_level;
+ uint32_t tc_type;
+ uint32_t tc_tx_bw;
+ uint32_t c_rate;
+ uint32_t e_rate;
+ uint32_t td_th;
+};
+
+struct zxdh_dcbnl_tc_flow_shape_para {
+ uint32_t cir;
+ uint32_t cbs;
+ uint32_t db_en;
+ uint32_t eir;
+ uint32_t ebs;
+};
+
+struct zxdh_dcbnl_ieee_ets {
+ uint8_t willing;
+ uint8_t ets_cap;
+ uint8_t cbs;
+ uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY];
+};
+
+struct zxdh_dcbnl_cee_ets {
+ uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY];
+};
+
+struct zxdh_dcbnl_para {
+ uint32_t init_flag;
+ uint32_t trust;
+ uint32_t dscp_app_num;
+ uint8_t dscp2prio[ZXDH_DCBNL_MAX_DSCP];
+ uint64_t tc_maxrate[ZXDH_DCBNL_MAX_TRAFFIC_CLASS];
+ struct zxdh_dcbnl_ieee_ets ets_cfg;
+ struct zxdh_dcbnl_cee_ets cee_ets_cfg;
+ struct zxdh_dcbnl_ets_node_list_head
+ ets_node_list_head[ZXDH_DCBNL_MAX_TREE_LEVEL];
+};
+
+uint32_t zxdh_dcbnl_initialize(struct net_device *netdev);
+uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev);
+uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev);
+uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c
index c65759c83f6331942ca9a3884a5299f8f3875806..061be5f8b7c706ca9bdd5f5fbc744006561ada34 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c
@@ -1,1463 +1,1426 @@
-//#include
-#include "../../en_aux.h"
-#include "en_dcbnl.h"
-#include "en_dcbnl_api.h"
-#include "en_np/qos/include/dpp_drv_qos.h"
-#include "en_np/table/include/dpp_tbl_tm.h"
-#include "en_np/fc/include/dpp_drv_fc.h"
-#include "en_np/sdk/include/api/dpp_pbu_api.h"
-
-uint32_t zxdh_dcbnl_get_se_flow_resources(struct zxdh_en_device *en_dev,
- struct zxdh_dcbnl_ets_se_flow_resource *tree_resource)
-{
- uint64_t gsch_id = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (tree_resource->level == ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL)
- {
- err = dpp_sch_base_node_get(&pf_info, en_dev->phy_port, &gsch_id);
- }
- else
- {
- err = dpp_cosq_gsch_id_add(&pf_info, en_dev->phy_port, tree_resource->numq,
- tree_resource->level, tree_resource->flags, &gsch_id);
- }
-
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: get se/flow resources failed, level: %d, type: %d, err:%d \n",
- tree_resource->level, tree_resource->flags, err);
- return err;
- }
- tree_resource->gsch_id = gsch_id;
- tree_resource->resource_id = ZXDH_DCBNL_GET_GSCHID_MSG(gsch_id, ZXDH_DCBNL_GSCHID_ID_MASK, ZXDH_DCBNL_GSCHID_ID_SHIFT);
- /* debug */
- LOG_INFO(" gsch_id:0x%llx,resource_id:0x%x level:%d, flags:%d\n",
- gsch_id, tree_resource->resource_id,tree_resource->level, tree_resource->flags);
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_find_se_link_id(struct zxdh_en_priv *en_priv,
- uint32_t level,
- uint32_t link_level,
- uint32_t link_idx,
- uint32_t link_sp,
- uint32_t *link_id)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_se_node *se_link_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[link_level];
-
- *link_id = ZXDH_DCBNL_NULL_ID;
-
- if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL)
- {
- if (ets_node_list_head->se_next == NULL)
- {
- LOG_ERR("dcbnl: no nodes in the link_level: %d \n", link_level);
- return 1;
- }
-
- se_link_node = ets_node_list_head->se_next;
-
- while ((NULL != se_link_node) && (se_link_node->node_idx != link_idx))
- {
- se_link_node = se_link_node->se_next;
- }
-
- if (se_link_node != NULL)
- {
- *link_id = se_link_node->se_id + link_sp;
- }
- else
- {
- LOG_ERR("dcbnl: find se link_id failed, link_level: %d, link_idx: %d\n", link_level, link_idx);
- return 1;
- }
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_save_se_resources(struct zxdh_en_priv *en_priv, struct zxdh_dcbnl_se_tree_config *tree_node_cfg)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_se_flow_resource tree_resource = {0};
- struct zxdh_dcbnl_ets_se_node *new_se_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
- uint32_t level = 0;
- uint32_t link_level = 0;
- uint32_t link_idx = 0;
- uint32_t link_id = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- level = tree_node_cfg->level;
- link_level = tree_node_cfg->link_level;
- link_idx = tree_node_cfg->link_idx;
-
- if ((level == 0) || (level > 4) || (link_level > 5) || (level >= link_level))
- {
- LOG_ERR("dcbnl_init_ets: configuration level error, level: %d, link_level: %d\n", level, link_level);
- return 1; //todo:考虑使用标准的错误定义
- }
-
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
-
- tree_resource.numq = 1;
- tree_resource.level = level;
- tree_resource.flags = tree_node_cfg->type;
- err = zxdh_dcbnl_get_se_flow_resources(en_dev, &tree_resource);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: get se resources failed, level: %d, idx: %d\n",
- tree_resource.level, tree_node_cfg->idx);
- return err;
- }
-
- err = zxdh_dcbnl_find_se_link_id(en_priv, level, link_level, link_idx, tree_node_cfg->link_sp, &link_id);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: find se link_id failed, link_level: %d, link_idx: %d\n", link_level, link_idx);
- return err;
- }
-
- new_se_node = kmalloc(sizeof(struct zxdh_dcbnl_ets_se_node), GFP_KERNEL);
- if (NULL == new_se_node)
- {
- LOG_ERR("dcbnl_init_ets: kmalloc se node failed\n");
- return 1;
- }
-
- new_se_node->se_next = NULL;
- new_se_node->gsch_id = tree_resource.gsch_id;
- new_se_node->node_idx = tree_node_cfg->idx;
- new_se_node->node_type = tree_node_cfg->type;
- new_se_node->se_id = tree_resource.resource_id;
- new_se_node->se_link_id = link_id;
- new_se_node->se_link_weight = tree_node_cfg->link_weight;
- new_se_node->se_link_sp = tree_node_cfg->link_sp;
- new_se_node->link_point = tree_node_cfg->link_point;
-
- if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL)
- {
- err = dpp_crdt_se_link_set(&pf_info, new_se_node->se_id, new_se_node->se_link_id,
- new_se_node->se_link_weight, new_se_node->se_link_sp);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_crdt_se_link_set failed, level: %d, idx: %d, err:%d\n",
- level, tree_node_cfg->idx, err);
- kfree(new_se_node);
- return err;
- }
- }
-
- new_se_node->se_next = ets_node_list_head->se_next;
- ets_node_list_head->se_next = new_se_node;
-
- ets_node_list_head->node_num += 1;
-
- LOG_INFO(" level:%d, node_idx:%d, node_num:%d \n",
- level, new_se_node->node_idx, ets_node_list_head->node_num);
- return 0;
-}
-
-uint32_t zxdh_dcbnl_build_ets_scheduling_tree(struct zxdh_en_priv *en_priv)
-{
- uint32_t i = 0;
- uint32_t err = 0;
-
- struct zxdh_dcbnl_se_tree_config ets_se_config_table[ZXDH_DCBNL_MAX_SE_NODE_NUM + 1] =
- {
- /*level idx type link_level link_idx link_weight link_sp link_point*/
- {4, 0, ZXDH_DCBNL_ETS_NODE_WFQ, 5, 0, 1, 0, ZXDH_DCBNL_ETS_NODE_NULL},
- {3, 0, ZXDH_DCBNL_ETS_NODE_FQ2, 4, 0, 1, 0, ZXDH_DCBNL_ETS_NODE_NULL},
- {2, 0, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 0, ZXDH_DCBNL_ETS_NODE_NULL},
- {2, 1, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 1, ZXDH_DCBNL_ETS_NODE_NULL},
- {1, 0, ZXDH_DCBNL_ETS_NODE_FQ, 2, 0, 1, 0, ZXDH_DCBNL_ETS_NODE_VENDOR_C},
- {1, 1, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 0, 1, 1, ZXDH_DCBNL_ETS_NODE_STRICT_C},
- {1, 2, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 2, ZXDH_DCBNL_ETS_NODE_ETS_C},
- {1, 3, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 3, ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C},
- {1, 4, ZXDH_DCBNL_ETS_NODE_FQ, 2, 1, 1, 0, ZXDH_DCBNL_ETS_NODE_VENDOR_E},
- {1, 5, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 1, 1, 1, ZXDH_DCBNL_ETS_NODE_STRICT_E},
- {1, 6, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 2, ZXDH_DCBNL_ETS_NODE_ETS_E},
- {1, 7, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 3, ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E},
- {0xff}
- };
-
- for (i = 0; i < ZXDH_DCBNL_MAX_SE_NODE_NUM && ets_se_config_table[i].level != 0xff; i++)
- {
- err = zxdh_dcbnl_save_se_resources(en_priv, &ets_se_config_table[i]);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed, entry: %d\n", i);
- return err;
- }
- }
-
- return 0;
-}
-
-void zxdh_dcbnl_tc_map_to_link_point(uint32_t tc_type, uint32_t *c_type, uint32_t *e_type)
-{
- switch (tc_type)
- {
- case ZXDH_DCBNL_VENDOR_TC:
- *c_type = ZXDH_DCBNL_ETS_NODE_VENDOR_C;
- *e_type = ZXDH_DCBNL_ETS_NODE_VENDOR_E;
- break;
-
- case ZXDH_DCBNL_STRICT_TC:
- *c_type = ZXDH_DCBNL_ETS_NODE_STRICT_C;
- *e_type = ZXDH_DCBNL_ETS_NODE_STRICT_E;
- break;
-
- case ZXDH_DCBNL_ETS_TC:
- *c_type = ZXDH_DCBNL_ETS_NODE_ETS_C;
- *e_type = ZXDH_DCBNL_ETS_NODE_ETS_E;
- break;
-
- case ZXDH_DCBNL_ZEROBW_ETS_TC:
- *c_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C;
- *e_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E;
- break;
- default:
- break;
- }
-}
-
-void zxdh_dcbnl_get_tc_weight_sp(uint32_t tc_type, uint32_t tc_tx_bw, uint32_t tc_id,
- uint32_t *c_weight, uint32_t *e_weight,
- uint32_t *c_sp, uint32_t *e_sp)
-{
- if (tc_tx_bw == ZXDH_DCBNL_MAX_BW_ALLOC)
- {
- *c_weight = 1;
- *e_weight = 1;
- }
- else
- {
- *c_weight = ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / ZXDH_DCBNL_MAX_BW_ALLOC;
- *e_weight = ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / ZXDH_DCBNL_MAX_BW_ALLOC;
- }
-
- if ((tc_type == ZXDH_DCBNL_STRICT_TC) && (tc_id < ZXDH_DCBNL_MAX_TRAFFIC_CLASS))
- {
- *c_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id;
- *e_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id;
- }
- else
- {
- *c_sp = 0;
- *e_sp = 0;
- }
-}
-
-uint32_t zxdh_dcbnl_find_flow_link_se_id(struct zxdh_en_priv *en_priv,
- uint32_t tc_type, uint32_t link_level,
- uint32_t *c_linkid, uint32_t *e_linkid,
- uint32_t c_sp, uint32_t e_sp)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_se_node *se_node = en_dev->dcb_para.ets_node_list_head[link_level].se_next;
- uint32_t c_type = 0;
- uint32_t e_type = 0;
-
- if (NULL == se_node)
- {
- LOG_ERR("dcbnl: find_flow_link_se_id no nodes \n");
- return 1;
- }
-
- zxdh_dcbnl_tc_map_to_link_point(tc_type, &c_type, &e_type);
-
- *c_linkid = ZXDH_DCBNL_NULL_ID;
- *e_linkid = ZXDH_DCBNL_NULL_ID;
-
- while ((NULL != se_node) && ((ZXDH_DCBNL_NULL_ID == *c_linkid) || (ZXDH_DCBNL_NULL_ID == *e_linkid)))
- {
- if (se_node->link_point == c_type)
- {
- *c_linkid = se_node->se_id + c_sp;
- }
- else if (se_node->link_point == e_type)
- {
- *e_linkid = se_node->se_id + e_sp;
- }
-
- se_node = se_node->se_next;
- }
-
- if ((ZXDH_DCBNL_NULL_ID == *c_linkid) || (ZXDH_DCBNL_NULL_ID == *e_linkid))
- {
- LOG_ERR("dcbnl: find_flow_link_se_id failed, c_linkid: 0x%x, e_linkid: 0x%x\n", *c_linkid, *e_linkid);
- return 1;
- }
- return 0;
-}
-
-uint32_t zxdh_dcbnl_get_ieee_tsa(uint32_t tc_type)
-{
- uint32_t tsa = 0;
- switch (tc_type)
- {
- case ZXDH_DCBNL_ETS_TC:
- case ZXDH_DCBNL_ZEROBW_ETS_TC:
- tsa = IEEE_8021QAZ_TSA_ETS;
- break;
- case ZXDH_DCBNL_STRICT_TC:
- tsa = IEEE_8021QAZ_TSA_STRICT;
- break;
- case ZXDH_DCBNL_VENDOR_TC:
- tsa = IEEE_8021QAZ_TSA_VENDOR;
- break;
- default:
- tsa = IEEE_8021QAZ_TSA_STRICT;
- LOG_ERR("dcbnl:tsa error, change to strict \n");
- break;
- }
- return tsa;
-}
-
-uint32_t zxdh_dcbnl_save_flow_resources(struct zxdh_en_priv *en_priv,
- struct zxdh_dcbnl_tc_flow_config *tc_flow_config,
- struct zxdh_dcbnl_ets_se_flow_resource *tree_resource,
- uint32_t tc_id)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *new_flow_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
- struct zxdh_dcbnl_tc_flow_shape_para p_para = {0};
- uint32_t c_linkid = 0;
- uint32_t e_linkid = 0;
- uint32_t c_weight = 0;
- uint32_t e_weight = 0;
- uint32_t c_sp = 0;
- uint32_t e_sp = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (tc_flow_config->link_level != 1)
- {
- LOG_ERR("dcbnl_init_ets: zxdh_dcbnl_save_flow_resources link_level err\n");
- return 1;
- }
-
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
-
- if (tc_id == 0)
- {
- tree_resource->numq = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
- tree_resource->level = ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL;
- tree_resource->flags = ZXDH_DCBNL_ETS_NODE_FLOW;
- err = zxdh_dcbnl_get_se_flow_resources(en_dev, tree_resource);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: get flow resources err\n");
- return err;
- }
-
- err = dpp_tm_flowid_pport_table_set(&pf_info, en_dev->phy_port, tree_resource->resource_id);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: flowid_pport_table_set failed, port: %d, flowid:%d, err:%d\n",
- en_dev->phy_port, tree_resource->resource_id, err);
- return err;
- }
- }
-
- zxdh_dcbnl_get_tc_weight_sp(tc_flow_config->tc_type, tc_flow_config->tc_tx_bw, tc_id, &c_weight, &e_weight, &c_sp, &e_sp);
-
- err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_flow_config->tc_type, tc_flow_config->link_level, &c_linkid, &e_linkid, c_sp, e_sp);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets init ets: find_flow_link_se_id failed, tc_id: %d, tc_type: %d\n",
- tc_id, tc_flow_config->tc_type);
- return err;
- }
-
- new_flow_node = kmalloc(sizeof(struct zxdh_dcbnl_ets_flow_node), GFP_KERNEL);
- if (new_flow_node == NULL)
- {
- LOG_ERR("dcbnl_init_ets: kmalloc new flow node failed\n");
- return 1;
- }
-
- new_flow_node->flow_next = NULL;
- new_flow_node->gsch_id = tree_resource->gsch_id + tc_id;
- new_flow_node->flow_id = tree_resource->resource_id + tc_id;
- new_flow_node->tc_id = tc_id;
- new_flow_node->tc_type = tc_flow_config->tc_type;
- new_flow_node->tc_tx_bw = tc_flow_config->tc_tx_bw;
- new_flow_node->td_th = tc_flow_config->td_th;
- new_flow_node->c_linkid = c_linkid;
- new_flow_node->c_weight = c_weight;
- new_flow_node->c_sp = c_sp;
- new_flow_node->c_rate = tc_flow_config->c_rate;
- new_flow_node->mode = 1;
- new_flow_node->e_linkid = e_linkid;
- new_flow_node->e_weight = e_weight;
- new_flow_node->e_sp = e_sp;
- new_flow_node->e_rate = tc_flow_config->e_rate;
-
- err = dpp_flow_map_port_set(&pf_info, new_flow_node->flow_id, en_dev->phy_port);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_flow_map_port_set failed, flow_id: %d, phy_port: %d, err:%d\n",
- new_flow_node->flow_id, en_dev->phy_port, err);
- kfree(new_flow_node);
- return err;
- }
-
- err = dpp_crdt_flow_link_set(&pf_info, new_flow_node->flow_id, c_linkid, c_weight, c_sp,
- new_flow_node->mode, e_linkid, e_weight, e_sp);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_crdt_flow_link_set failed, flow_id: %d, c_linkid: %d, e_linkid: %d, err:%d\n",
- new_flow_node->flow_id, c_linkid, e_linkid, err);
- kfree(new_flow_node);
- return err;
- }
-
- err = dpp_flow_td_th_set(&pf_info, new_flow_node->flow_id, new_flow_node->td_th);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: %d, err:%d\n",
- en_dev->vport, new_flow_node->flow_id, new_flow_node->td_th, err);
- //kfree(new_flow_node); //The default is 150
- //return err;
- }
-
- p_para.cir = new_flow_node->c_rate;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS;
- p_para.db_en = 1;
- p_para.eir = new_flow_node->e_rate;
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS;
-
- err = dpp_flow_shape_set(&pf_info, new_flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, e_rate: %d, err:%d\n",
- en_dev->vport, new_flow_node->flow_id, new_flow_node->tc_id, new_flow_node->e_rate, err);
- }
- LOG_INFO("dcbnl_init_ets dpp_flow_shape_set end vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d,err:%d \n",
- en_dev->vport,en_dev->phy_port, new_flow_node->flow_id,new_flow_node->tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs,err);
-
- new_flow_node->flow_next = ets_node_list_head->flow_next;
- ets_node_list_head->flow_next = new_flow_node;
- ets_node_list_head->node_num += 1;
-
- en_dev->dcb_para.ets_cfg.tc_tsa[tc_id] = zxdh_dcbnl_get_ieee_tsa(new_flow_node->tc_type);
- en_dev->dcb_para.ets_cfg.tc_tx_bw[tc_id] = tc_flow_config->tc_tx_bw;
- en_dev->dcb_para.tc_maxrate[tc_id] = new_flow_node->e_rate;
-
- LOG_INFO(" level:%d, tc_id:%d, flow_id:%d, node_num:%d \n",
- tree_resource->level, new_flow_node->tc_id, new_flow_node->flow_id, ets_node_list_head->node_num);
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_scheduling_tree_link_tc(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_dcbnl_ets_se_flow_resource tree_resource;
- uint32_t i = 0;
- uint32_t err = 0;
-
- struct zxdh_dcbnl_tc_flow_config ets_tc_config_table[ZXDH_DCBNL_MAX_TRAFFIC_CLASS+1] =
- {
- /*link_level tc_type tc_tx_bw c_rate e_rate td_th */
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {1, ZXDH_DCBNL_STRICT_TC, 100, ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, ZXDH_DCBNL_FLOW_TDTH},
- {0xff}
- };
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && ets_tc_config_table[i].link_level != 0xff; i++)
- {
-
- err = zxdh_dcbnl_save_flow_resources(en_priv, &ets_tc_config_table[i], &tree_resource, i);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: save_flow_resources failed, entry: %d\n", i);
- return err;
- }
-
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust)
-{
-
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- err = dpp_tm_pport_trust_mode_table_set(&pf_info, en_dev->phy_port, trust);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: set_ets_trust failed, vport: %d, trust: %d, err:%d\n", en_dev->vport, trust, err);
- return err;
- }
- en_dev->dcb_para.trust = trust;
- LOG_INFO(" trust:%d \n", trust);
- return 0;
-}
-
-uint32_t zxdh_dcbnl_init_trust_and_table(struct zxdh_en_priv *en_priv)
-{
-
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t i = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++)
- {
- err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, i, i); //初始时,配置一一对应
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dpp_tm_pport_up_map_table_set failed, vport: %d, phy_port: %d, err:%d\n",
- en_dev->vport, en_dev->phy_port, err);
- return err;
- }
- en_dev->dcb_para.ets_cfg.prio_tc[i] = i;
- }
- LOG_INFO(" vport:%d,phy_port:%d prio2tc ok \n", en_dev->vport, en_dev->phy_port);
-
- for (i = 0; i < ZXDH_DCBNL_MAX_DSCP; i++)
- {
- err = dpp_tm_pport_dscp_map_table_set(&pf_info, en_dev->phy_port, i, i>>3);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: dscp_map_table_set failed, vport: %d, phy_port: %d, err:%d\n",
- en_dev->vport, en_dev->phy_port, err);
- return err;
- }
-
- en_dev->dcb_para.dscp2prio[i] = i>>3;
- }
- LOG_INFO("vport:%d,phy_port:%d,dscp2prio ok \n", en_dev->vport, en_dev->phy_port);
-
- err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
- if (err)
- {
- LOG_INFO("set_ets_trust failed \n");
- return err;
- }
- en_dev->dcb_para.trust = ZXDH_DCBNL_ETS_TRUST_PCP;
- LOG_INFO(" vport:%d,phy_port:%d,trust:%d \n", en_dev->vport, en_dev->phy_port, en_dev->dcb_para.trust);
- return 0;
-}
-
-uint32_t zxdh_dcbnl_init_ets_list(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t level = 0;
-
- for (level = 0; level < ZXDH_DCBNL_MAX_TREE_LEVEL; level++)
- {
- en_dev->dcb_para.ets_node_list_head[level].se_next = NULL;
- en_dev->dcb_para.ets_node_list_head[level].flow_next = NULL;
- en_dev->dcb_para.ets_node_list_head[level].node_num = 0;
- }
- return 0;
-}
-
-/* Normal release se*/
-uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_se_node *se_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
- uint32_t err = 0;
- uint32_t level = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO(" vport:%d, phy_port:%d \n",en_dev->vport, en_dev->phy_port);
- for (level = 1; level <= ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level++)
- {
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
- while (NULL != ets_node_list_head->se_next)
- {
- se_node = ets_node_list_head->se_next;
- ets_node_list_head->se_next = se_node->se_next;
- if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL)
- {
- err = dpp_crdt_del_se_link_set(&pf_info, se_node->se_id, se_node->se_id);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_crdt_del_se_link_set failed, se_id: %d, err:%d \n", se_node->se_id, err);
- }
- LOG_INFO(" dpp_crdt_del_se_link_set");
-
- err = dpp_cosq_gsch_id_delete(&pf_info, en_dev->phy_port, se_node->gsch_id);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, se_id: %lld, err:%d \n", se_node->gsch_id, err);
- }
- LOG_INFO("del se id dpp_cosq_gsch_id_delete");
- }
- LOG_INFO(" free level:%d se_id:%x \n", level, se_node->se_id);
- kfree(se_node);
- ets_node_list_head->node_num -= 1;
- LOG_INFO("current node_num:%d \n", ets_node_list_head->node_num);
- }
- }
-
- return 0;
-}
-/* Normal release flow*/
-uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
- uint32_t err = 0;
- bool have_flow = false;
- DPP_PF_INFO_T pf_info = {0};
- struct zxdh_dcbnl_tc_flow_shape_para p_para = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO(" vport:%d, phy_port:%d \n",en_dev->vport, en_dev->phy_port);
- while (NULL != ets_node_list_head->flow_next)
- {
- have_flow = true;
- flow_node = ets_node_list_head->flow_next;
- ets_node_list_head->flow_next = flow_node->flow_next;
-
- p_para.cir = 0;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH;
- p_para.db_en = 0;
- p_para.eir = 0;
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH;
-
- err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, flow_node->tc_id, p_para.eir , err);
- return err;
- }
- LOG_INFO("clean maxrate");
- err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, 0);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: 0, err:%d\n",
- en_dev->vport, flow_node->flow_id, err);
- }
- LOG_INFO(" clean TD ");
-
- err = dpp_crdt_del_flow_link_set(&pf_info, flow_node->flow_id, flow_node->flow_id);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_crdt_del_flow_link_set failed, flow_id: %d, err:%d \n", flow_node->flow_id, err);
- }
- LOG_INFO(" dpp_crdt_del_flow_link_set");
-
- err = dpp_cosq_gsch_id_delete(&pf_info, en_dev->phy_port, flow_node->gsch_id);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, gsch_id: %lld ,err:%d\n", flow_node->gsch_id, err);
- }
- LOG_INFO("del id dpp_cosq_gsch_id_delete");
-
- LOG_INFO(" free level:0, flow_id:%d, tc:%d\n", flow_node->flow_id, flow_node->tc_id);
-
- kfree(flow_node);
- ets_node_list_head->node_num -= 1;
- LOG_INFO("current node_num:%d \n", ets_node_list_head->node_num);
- }
-
- if (have_flow)
- {
- err = dpp_tm_flowid_pport_table_del(&pf_info, en_dev->phy_port);
- if (err)
- {
- LOG_ERR("dcbnl_free_ets: dpp_tm_flowid_pport_table_del failed,vport:%d, phy_port: %d \n",
- en_dev->vport, en_dev->phy_port);
- }
- LOG_INFO("del table dpp_tm_flowid_pport_table_del");
- }
-
- return 0;
-}
-
-/* host no reset,risc reset? */
-uint32_t zxdh_dcbnl_check_and_free_node_memory(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
- struct zxdh_dcbnl_ets_se_node *se_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
- uint32_t level = 0;
-
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
- while (NULL != ets_node_list_head->flow_next)
- {
- flow_node = ets_node_list_head->flow_next;
- ets_node_list_head->flow_next = flow_node->flow_next;
- kfree(flow_node);
- ets_node_list_head->node_num -= 1;
- }
-
- for (level = 1; level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL + 1; level++)
- {
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
- while (NULL != ets_node_list_head->se_next)
- {
- se_node = ets_node_list_head->se_next;
- ets_node_list_head->se_next = se_node->se_next;
- kfree(se_node);
- ets_node_list_head->node_num -= 1;
- }
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv, uint8_t *tc_type, uint8_t *tc_tx_bw)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL].flow_next;
- uint32_t tc_id = 0;
- uint32_t c_linkid = 0;
- uint32_t e_linkid = 0;
- uint32_t c_weight = 0;
- uint32_t e_weight = 0;
- uint32_t c_sp = 0;
- uint32_t e_sp = 0;
- uint32_t i = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
- struct zxdh_dcbnl_tc_flow_shape_para p_para = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (NULL == flow_node)
- {
- LOG_ERR("dcbnl_set_ets: set_tc_scheduling no flow in the tree\n");
- return 1;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++)
- {
- tc_id = flow_node->tc_id;
- if ((flow_node->tc_type == tc_type[tc_id]) && (flow_node->tc_tx_bw == tc_tx_bw[tc_id]))
- {
- LOG_INFO("Same configuration,tc_id:%d, tc_type:%d, tc_tx_bw:%d\n",tc_id, tc_type[tc_id], tc_tx_bw[tc_id]);
- flow_node = flow_node->flow_next;
- continue;
- }
-
- zxdh_dcbnl_get_tc_weight_sp(tc_type[tc_id], tc_tx_bw[tc_id], tc_id, &c_weight, &e_weight, &c_sp, &e_sp);
-
- err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_type[tc_id], 1, &c_linkid, &e_linkid, c_sp, e_sp);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: find_flow_link_se_id failed, tc_id: %d, tc_type: %d\n", tc_id, tc_type[tc_id]);
- return err;
- }
-
- /* 1、清限速,刷新桶深,断流*/
- p_para.cir = 0;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH;
- p_para.db_en = 0;
- p_para.eir = 0;
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH;
-
- LOG_INFO("clean maxrate vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n",
- en_dev->vport,en_dev->phy_port, flow_node->flow_id, tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
-
- err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, p_para.eir , err);
- return err;
- }
-
- /* 2、清TD,断流*/
- err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, 0);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: 0, err:%d\n",
- en_dev->vport, flow_node->flow_id, err);
- }
-
- /* 3、删除挂接*/
- err = dpp_crdt_del_flow_link_set(&pf_info, flow_node->flow_id, flow_node->flow_id);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_crdt_del_flow_link_set failed, vport: %d, flow_id: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, err);
- return err;
- }
-
- /* 4、重新挂接*/
- err = dpp_crdt_flow_link_set(&pf_info, flow_node->flow_id, c_linkid, c_weight, c_sp, 1,
- e_linkid, e_weight, e_sp);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_crdt_flow_link_set failed, flow_id: %d, flow_id: %d, flow_id: %d, err:%d\n",
- flow_node->flow_id, c_linkid, e_linkid, err);
- return err;
- }
-
- /* 5、恢复TD*/
- err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, flow_node->td_th);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th:%d, err:%d\n",
- en_dev->vport, flow_node->flow_id, flow_node->td_th, err);
- }
-
- /* 6、恢复限速*/
- p_para.cir = ZXDH_DCBNL_FLOW_RATE_CIR;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS;
- p_para.db_en = 1;
- p_para.eir = flow_node->e_rate;
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS;
-
- LOG_INFO("dpp_flow_shape_set begin vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n",
- en_dev->vport,en_dev->phy_port, flow_node->flow_id, tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
-
- err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, p_para.eir , err);
- return err;
- }
- LOG_INFO(" dpp_flow_shape_set end");
- flow_node->tc_type = tc_type[tc_id];
- flow_node->tc_tx_bw = tc_tx_bw[tc_id];
-
- flow_node->c_linkid = c_linkid;
- flow_node->c_weight = c_weight;
- flow_node->c_sp = c_sp;
-
- flow_node->e_linkid = e_linkid;
- flow_node->e_weight = e_weight;
- flow_node->e_sp = e_sp;
-
- LOG_INFO(" tc_id:%d, tc_type:%d, c_linkid:%x, e_weight:%d, e_sp:%d ,e_linkid:%x, e_weight:%d, e_sp:%d \n",
- tc_id, flow_node->tc_type, flow_node->c_linkid,flow_node->c_weight, flow_node->c_sp, flow_node->e_linkid, flow_node->e_weight, flow_node->e_sp);
-
- flow_node = flow_node->flow_next;
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv, uint8_t *prio_tc)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t i = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- LOG_INFO(" begin \n");
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++)
- {
- err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, i, prio_tc[i]);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: failed, vport: %d, prio: %d, tc: %d, err:%d\n",
- en_dev->vport, i, prio_tc[i], err);
- return err;
- }
- LOG_INFO(" vport:%d, phy_port:%d, prio:%d, tc:%d \n",en_dev->vport, en_dev->phy_port, i, prio_tc[i]);
- }
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv, uint32_t *maxrate)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL].flow_next;
- struct zxdh_dcbnl_tc_flow_shape_para p_para = {0};
- uint32_t tc_id = 0;
- uint32_t i = 0;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (NULL == flow_node)
- {
- LOG_ERR("dcbnl_set_ets: set_tc_maxrate no flow in the tree\n");
- return 1;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++)
- {
- tc_id = flow_node->tc_id;
- if (flow_node->e_rate == maxrate[tc_id])
- {
- LOG_INFO("Same configuration, tc_id:%d, maxrate:%d\n", tc_id, maxrate[tc_id]);
- flow_node = flow_node->flow_next;
- continue;
- }
- /* clean CBS、EBS*/
- p_para.cir = 0;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH;
- p_para.db_en = 0;
- p_para.eir = 0;
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH;
- LOG_INFO(" refresh maxrate ");
- err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, p_para.eir, err);
- return err;
- }
- /* 2、set maxrate*/
- p_para.cir = ZXDH_DCBNL_FLOW_RATE_CIR;
- p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS;
- p_para.db_en = 1;
- p_para.eir = maxrate[tc_id];
- p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS;
-
- LOG_INFO(" vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n",
- en_dev->vport,en_dev->phy_port, flow_node->flow_id, tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
-
- err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, p_para.eir, err);
- return err;
- }
-
- flow_node->e_rate = maxrate[tc_id];
- en_dev->dcb_para.tc_maxrate[tc_id] = maxrate[tc_id];
-
- flow_node = flow_node->flow_next;
-
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp, uint8_t prio)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- err = dpp_tm_pport_dscp_map_table_set(&pf_info, en_dev->phy_port, dscp, prio);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: set_dscp2prio failed, vport: %d, dscp: %d, prio: %d, err:%d\n", en_dev->vport, dscp, prio, err);
- return err;
- }
- en_dev->dcb_para.dscp2prio[dscp] = prio;
- LOG_INFO(" vport:%d, ephy_port:%d,dscp:%d, up:%d \n",en_dev->vport, en_dev->phy_port,dscp, prio);
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_set_flow_td_th(struct zxdh_en_priv *en_priv, uint32_t* tc_td_th)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL].flow_next;
- uint32_t err = 0;
- uint32_t i = 0;
- uint32_t tc_id = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (flow_node == NULL)
- {
- LOG_ERR("dcbnl_set_ets: set_flow_td_th no flow in the tree\n");
- return 1;
- }
-
- if (tc_td_th == NULL)
- {
- LOG_ERR("dcbnl_set_ets: tc_td_th is null \n");
- return 1;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++)
- {
- tc_id = flow_node->tc_id;
- err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, tc_td_th[tc_id]);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: set_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, td_th: %d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, tc_td_th[tc_id], err);
- return err;
- }
- flow_node->td_th = tc_td_th[tc_id];
- flow_node = flow_node->flow_next;
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_get_flow_td_th(struct zxdh_en_priv *en_priv, uint32_t* tc_td_th)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL].flow_next;
- uint32_t err = 0;
- uint32_t i = 0;
- uint32_t tc_id = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (flow_node == NULL)
- {
- LOG_ERR("get_flow_td_th no flow in the tree\n");
- return 1;
- }
-
- if (tc_td_th == NULL)
- {
- LOG_ERR(" tc_td_th is null \n");
- return 1;
- }
-
- for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++)
- {
- tc_id = flow_node->tc_id;
- err = dpp_flow_td_th_get(&pf_info, flow_node->flow_id, &tc_td_th[tc_id]);
- if (err)
- {
- LOG_ERR("get_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, err:%d\n",
- en_dev->vport, flow_node->flow_id, tc_id, err);
- return err;
- }
- flow_node = flow_node->flow_next;
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_tc_flow_shape_para p_para = {0};
- uint32_t err = 0;
- uint32_t speed = 0;
- uint32_t max_speed = ZXDH_DCBNL_MAXRATE_KBITPS / ZXDH_DCBNL_RATEUNIT_K;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- speed = en_dev->speed;
- if ((0 == speed) || (speed > max_speed))
- {
- LOG_INFO("get port speed is : %u ,set to max:%u\n",speed, max_speed);
- speed = max_speed;
- }
-
- p_para.cir = speed * ZXDH_DCBNL_RATEUNIT_K; //Mbps->Kbps
- p_para.cbs = ZXDH_DCBNL_PORT_RATE_CBS;
- p_para.db_en = 0;
- p_para.eir = 0;
- p_para.ebs = 0;
-
- LOG_INFO(" vport:%d,phy_port:%d, p_para.cir:%d, speed:%d \n",
- en_dev->vport, en_dev->phy_port, p_para.cir, speed);
-
- err = dpp_port_shape_set(&pf_info, en_dev->phy_port, p_para.cir, p_para.cbs, 1);
- if (err)
- {
- LOG_ERR("dcbnl_set_ets: dpp_port_shape_set failed, port:%d, speed:%d, speed:%d,err:%d \n",
- en_dev->phy_port, speed, p_para.cir, err);
- return err;
- }
-
- return 0;
-}
-
-uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
- struct zxdh_dcbnl_ets_se_node *se_node = NULL;
- struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
- uint32_t level = 0;
-
- LOG_INFO(" ***vport:%d port:%d \n", en_dev->vport,en_dev->phy_port);
-
- for (level = ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level > 0; level--)
- {
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
- se_node = ets_node_list_head->se_next;
- while (NULL != se_node)
- {
- LOG_INFO(" se_node *** level:%d, node_idx:%d, se_id:0x%x *** \n",
- level, se_node->node_idx, se_node->se_id);
- LOG_INFO(" se_node gsch_id:0x%llx, node_type:%d, se_id:0x%x \n",
- se_node->gsch_id, se_node->node_type, se_node->se_id);
- LOG_INFO(" se_node se_link_id:0x%x, se_link_weight:%d, se_link_sp:%d, link_point:%d \n",
- se_node->se_link_id, se_node->se_link_weight, se_node->se_link_sp, se_node->link_point);
- se_node = se_node->se_next;
- }
- }
-
- ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
- flow_node = ets_node_list_head->flow_next;
- while (NULL != flow_node)
- {
- LOG_INFO(" flow_node *** tc_id:%d, flow_id:%d *** \n",
- flow_node->tc_id, flow_node->flow_id);
- LOG_INFO(" flow_node gsch_id:0x%llx, tc_type:%d, td_th:%d \n",
- flow_node->gsch_id, flow_node->tc_type, flow_node->td_th);
- LOG_INFO(" flow_node c_linkid:0x%x, c_weight:%d, c_sp:%d, c_rate:%d \n",
- flow_node->c_linkid, flow_node->c_weight,flow_node->c_sp,flow_node->c_rate);
- LOG_INFO(" flow_node e_linkid:0x%x, e_weight:%d, e_sp:%d, e_rate:%d \n",
- flow_node->e_linkid, flow_node->e_weight, flow_node->e_sp, flow_node->e_rate);
- flow_node = flow_node->flow_next;
- }
-
- return 0;
-}
-
-uint32_t zxdh_link_speed_to_index(uint32_t link_speed)
-{
- //50G以下统一按50G处理
- //暂未使用RDMA端口
- uint32_t index = ZXDH_TRPG_DEFAULT; //riscv上默认初值
- if(link_speed == 200000)
- {
- index = ZXDH_TRPG_SPEED_200G;
- }
- else if(link_speed == 100000)
- {
- index = ZXDH_TRPG_SPEED_100G;
- }
- else
- {
- index = ZXDH_TRPG_SPEED_50G;
- }
-
- return index;
-}
-
-DPP_PBU_PORT_TH_PARA_T port_th_para_tbl[ZXDH_TRPG_SPEED_NUM] =
-{
- /*brief lif阈值 lif私有阈值 idma私有阈值 idma_th0 idma_th1 idma_th2 idma_th3 idma_th4 idma_th5 idma_th6 idma_th7*/
- /*单位512byte,512代表芯片处理包粒度*/
- {100, 140, 140, 110, 130, 150, 170, 190, 210, 230, 250}, //50G
- {210, 280, 280, 180, 230, 280, 330, 380, 430, 480, 530}, //100G
- {480, 560, 560, 370, 450, 550, 670, 770, 870, 970, 1070}, //200G
- {1400, 0, 0, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500}, //400G RDMA
- {2036, 2000, 2000, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100} //riscv上默认初始值
-};
-
-DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para_tbl[ZXDH_TRPG_SPEED_NUM] =
-{
- /*单位512byte*/
- {{100, 120, 140, 160, 180, 200, 220, 240}}, //50G
- {{160, 210, 260, 310, 360, 410, 460, 510}}, //100G
- {{320, 420, 480, 620, 720, 820, 920, 1020}}, //200G
- {{1200, 1200, 2000, 2000, 2800, 2800, 2800, 2800}}, //400G RDMA
- {{1650, 1700, 1750, 1800, 1850, 1900, 1950, 2000}} //riscv上默认初始值
-};
-
-uint32_t flow_td_th_tbl[ZXDH_TRPG_SPEED_NUM][ZXDH_DCBNL_MAX_TRAFFIC_CLASS] =
-{
- /*单位KB*/
- {72, 119, 166, 213, 260, 306, 353, 400}, //50G 梯度47
- {144, 207, 269, 332, 394, 457, 519, 582}, //100G 梯度63
- {287, 365, 443, 521, 600, 678, 756, 834}, //200G 梯度78 最小560*512/1000
- {375, 570, 766, 961, 1156, 1352, 1547, 1742}, //400G RDMA 梯度195
- {150, 150, 150, 150, 150, 150, 150, 150} //riscv上默认初始值
-};
-
-uint32_t zxdh_config_param_compare_test(uint32_t tbl_index,
- DPP_PBU_PORT_TH_PARA_T port_th_para,
- DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para,
- uint32_t *flow_td_th_para)
-{
- uint32_t index = 0;
- uint32_t *port_th_para_p1 = (uint32_t *)&port_th_para;
- uint32_t *port_th_para_p2 = (uint32_t *)&port_cos_th_para_tbl[tbl_index];
- for(index = 0; index<11; index++)
- {
- if(*(port_th_para_p1+index) != *(port_th_para_p2+index))
- {
- return false;
- }
- }
- for(index = 0; indexnetdev;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- //en_dev里面其实是包含link_speed的,填的是具体的速率值
- uint32_t tbl_index;
- uint32_t index;
- uint32_t pfc_cur_mac_en = 0;
- uint32_t ret = 0;
- uint32_t speed = 0;
- uint32_t max_speed = ZXDH_DCBNL_MAXRATE_KBITPS;
- uint32_t params_check = 0;
- DPP_PBU_PORT_TH_PARA_T port_th_para_test = {0};
- DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para_test = {0};
- DPP_TM_SHAPE_PP_PARA_T port_shape_para_test = {0};
- uint32_t flow_td_th_para_test[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = {0};
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- LOG_INFO("port speed en_dev->speed is abnormal: %d \n",en_dev->speed)
-
- if(en_dev->speed)
- {
- speed = en_dev->speed *ZXDH_DCBNL_RATEUNIT_K;
- }
-
- zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
- if(pfc_cur_mac_en == BIT(SPM_FC_NONE))
- {
- LOG_INFO("port pfc & fc disable");
- return 0;
- }
-
- //speed过滤
- if ((0 == speed) || (speed > max_speed))
- {
- LOG_INFO("port speed is abnormal: %u \n",speed);
- speed = max_speed;
- }
- ret = dpp_port_shape_set(&pf_info, en_dev->phy_port, speed, ZXDH_DCBNL_PORT_RATE_CBS, 1);
-
- if(en_dev->phy_port > 9)
- {
- LOG_INFO("en_dev->phy_port not supported");
- return 0;
- }
-
- tbl_index = zxdh_link_speed_to_index(en_dev->link_speed);
-
- LOG_INFO("link_speed to tbl_index: %d", tbl_index);
-
- if(pfc_cur_mac_en != BIT(SPM_FC_PAUSE_RX))
- {
- ret = dpp_port_th_set(&pf_info, en_dev->phy_port, port_th_para_tbl+tbl_index);
-
- ret |= dpp_port_cos_th_set(&pf_info, en_dev->phy_port, port_cos_th_para_tbl+tbl_index);
- }
-
- //暂时规避fc下的td阈值改配
- //if(pfc_cur_mac_en != BIT(SPM_FC_PAUSE_TX))
- if(pfc_cur_mac_en == SPM_FC_PFC_FULL)
- {
- ret |= zxdh_dcbnl_set_flow_td_th(en_priv, flow_td_th_tbl[tbl_index]);
- }
-
- /*维测需要*/
- dpp_port_th_get(&pf_info, en_dev->phy_port, &port_th_para_test);
- LOG_INFO("dpp_port_th_get lif_th:%d, lif_prv:%d, idma_prv:%d \n",
- port_th_para_test.lif_th, port_th_para_test.lif_prv, port_th_para_test.idma_prv);
- LOG_INFO("idma_th0:%d, idma_th1:%d, idma_th2:%d, idma_th3:%d, idma_th4:%d, idma_th5:%d, idma_th6:%d, idma_th7:%d \n",
- port_th_para_test.idma_th_cos0, port_th_para_test.idma_th_cos1, port_th_para_test.idma_th_cos2, port_th_para_test.idma_th_cos3,
- port_th_para_test.idma_th_cos4, port_th_para_test.idma_th_cos5, port_th_para_test.idma_th_cos6, port_th_para_test.idma_th_cos7);
-
- dpp_port_cos_th_get(&pf_info, en_dev->phy_port, &port_cos_th_para_test);
- for(index = 0; indexcos_th[0], port_cos_th_para_test->cos_th[1], port_cos_th_para_test->cos_th[2], port_cos_th_para_test->cos_th[3],\
- port_cos_th_para_test->cos_th[4], port_cos_th_para_test->cos_th[6], port_cos_th_para_test->cos_th[0], port_cos_th_para_test->cos_th[0], )
- */
- dpp_port_shape_get(&pf_info, en_dev->phy_port, &port_shape_para_test);
- LOG_INFO("dpp_port_shape_get cir:%d, cbs:%d, c_en:%d\n",port_shape_para_test.cir, port_shape_para_test.cbs, port_shape_para_test.c_en);
-
- zxdh_dcbnl_get_flow_td_th(en_priv, flow_td_th_para_test);
- for(index = 0; indexnetdev;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- uint32_t ret = 0;
- uint32_t pfc_cur_mac_en = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
-
- if(pfc_cur_mac_en != BIT(SPM_FC_PAUSE_TX))
- {
- ret = dpp_port_th_set(&pf_info, en_dev->phy_port, port_th_para_tbl+ZXDH_TRPG_DEFAULT);
-
- ret |= dpp_port_cos_th_set(&pf_info, en_dev->phy_port, port_cos_th_para_tbl+ZXDH_TRPG_DEFAULT);
- }
-
- if(pfc_cur_mac_en != BIT(SPM_FC_PAUSE_RX))
- {
- ret |= zxdh_dcbnl_set_flow_td_th(en_priv, flow_td_th_tbl[ZXDH_TRPG_DEFAULT]);
- }
-
- return ret;
-}
-
-uint32_t zxdh_dcbnl_pfc_init(struct zxdh_en_priv *en_priv)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t ret = 0;
- uint32_t test_en = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("zxdh_dcbnl_pfc_init start\n");
-
- ret = dpp_qmu_pfc_en_set(&pf_info, 1);
-
- if(ret)
- {
- LOG_ERR("dpp_qmu_pfc_en_set failed");
- }
-
- dpp_qmu_pfc_en_get(&pf_info, &test_en);
- LOG_INFO("dpp_qmu_pfc_en_get:%d", test_en);
- LOG_INFO("zxdh_dcbnl_pfc_init end\n");
-
- return ret;
-}
-
-uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct zxdh_en_priv *en_priv)
-{
- uint32_t err = 0;
-
- zxdh_dcbnl_init_ets_list(en_priv);
-
- err = zxdh_dcbnl_build_ets_scheduling_tree(en_priv);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed \n");
- goto init_ets_se_error;
- }
-
- err = zxdh_dcbnl_scheduling_tree_link_tc(en_priv);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: scheduling_tree_link_tc failed \n");
- goto init_ets_error;
- }
-
- err = zxdh_dcbnl_init_trust_and_table(en_priv);
- if (err)
- {
- LOG_ERR("dcbnl_init_ets: init_trust_and_table failed \n");
- goto init_ets_error;
- }
-
- return 0;
-
-init_ets_error:
- zxdh_dcbnl_free_flow_resources(en_priv);
-init_ets_se_error:
- zxdh_dcbnl_free_se_resources(en_priv);
- LOG_INFO("dcbnl_init_ets failed \n");
- return err;
-}
-
-uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode)
-{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (mode == 1)
- {
- err = dpp_tm_pport_mcode_switch_set(&pf_info, en_dev->phy_port, 1);
- if (err)
- {
- LOG_ERR(" set_tm_gate open failed \n");
- }
- }
- else if (mode == 0)
- {
- err = dpp_tm_pport_mcode_switch_del(&pf_info, en_dev->phy_port);
- if (err)
- {
- LOG_ERR(" set_tm_gate close failed \n");
- }
- }
- else
- {
- LOG_ERR(" error \n");
- }
-
- return err;
-}
+//#include
+#include "../../en_aux.h"
+#include "en_dcbnl.h"
+#include "en_dcbnl_api.h"
+#include "en_np/qos/include/dpp_drv_qos.h"
+#include "en_np/table/include/dpp_tbl_tm.h"
+#include "en_np/fc/include/dpp_drv_fc.h"
+#include "en_np/sdk/include/api/dpp_pbu_api.h"
+
+/*
+ * Obtain the hardware scheduler resource that backs one node of the ETS
+ * scheduling tree.
+ *
+ * For the root level the port's pre-existing base scheduler node is looked
+ * up (dpp_sch_base_node_get); for every other level a new gsch id is
+ * allocated (dpp_cosq_gsch_id_add) from the numq/level/flags fields of
+ * @tree_resource.
+ *
+ * On success the raw 64-bit gsch_id and the node id extracted from it via
+ * ZXDH_DCBNL_GET_GSCHID_MSG are written back into @tree_resource.
+ *
+ * Return: 0 on success, the dpp layer's error code otherwise.
+ */
+uint32_t zxdh_dcbnl_get_se_flow_resources(
+    struct zxdh_en_device *en_dev,
+    struct zxdh_dcbnl_ets_se_flow_resource *tree_resource)
+{
+    uint64_t gsch_id = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+    /* Root level: query the fixed base node; other levels: allocate. */
+    if (tree_resource->level == ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) {
+        err = dpp_sch_base_node_get(&pf_info, en_dev->phy_port, &gsch_id);
+    } else {
+        err = dpp_cosq_gsch_id_add(&pf_info, en_dev->phy_port,
+                                   tree_resource->numq, tree_resource->level,
+                                   tree_resource->flags, &gsch_id);
+    }
+
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: get se/flow resources failed, level: %d, type: "
+                "%d, err:%d \n",
+                tree_resource->level, tree_resource->flags, err);
+        return err;
+    }
+    tree_resource->gsch_id = gsch_id;
+    /* Extract the short node id embedded in the 64-bit gsch_id. */
+    tree_resource->resource_id = ZXDH_DCBNL_GET_GSCHID_MSG(
+        gsch_id, ZXDH_DCBNL_GSCHID_ID_MASK, ZXDH_DCBNL_GSCHID_ID_SHIFT);
+    /* debug: dump the allocated resource */
+    LOG_INFO(" gsch_id:0x%llx,resource_id:0x%x level:%d, flags:%d\n", gsch_id,
+             tree_resource->resource_id, tree_resource->level,
+             tree_resource->flags);
+
+    return 0;
+}
+
+/*
+ * Resolve the parent link id for an SE node that is about to be attached.
+ *
+ * Searches the already-built node list of @link_level for the node whose
+ * node_idx equals @link_idx and returns that node's se_id plus @link_sp
+ * (the strict-priority input offset on the parent) through @link_id.
+ *
+ * Root-level nodes have no parent: for level >= ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL
+ * the function leaves *link_id as ZXDH_DCBNL_NULL_ID and reports success.
+ *
+ * Return: 0 on success, 1 if the parent list is empty or the index is not
+ * found (a generic error; no standard errno is used here).
+ */
+uint32_t zxdh_dcbnl_find_se_link_id(struct zxdh_en_priv *en_priv,
+                                    uint32_t level, uint32_t link_level,
+                                    uint32_t link_idx, uint32_t link_sp,
+                                    uint32_t *link_id)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_se_node *se_link_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head =
+        &en_dev->dcb_para.ets_node_list_head[link_level];
+
+    *link_id = ZXDH_DCBNL_NULL_ID;
+
+    if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) {
+        if (ets_node_list_head->se_next == NULL) {
+            LOG_ERR("dcbnl: no nodes in the link_level: %d \n", link_level);
+            return 1;
+        }
+
+        se_link_node = ets_node_list_head->se_next;
+
+        /* Linear scan of the singly-linked level list by node index. */
+        while ((NULL != se_link_node) && (se_link_node->node_idx != link_idx)) {
+            se_link_node = se_link_node->se_next;
+        }
+
+        if (se_link_node != NULL) {
+            /* link id = parent's se_id offset by the SP input slot. */
+            *link_id = se_link_node->se_id + link_sp;
+        } else {
+            LOG_ERR("dcbnl: find se link_id failed, link_level: %d, link_idx: %d\n",
+                    link_level, link_idx);
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Create one SE (scheduler element) node of the ETS tree from a config
+ * entry: allocate the hardware resource, resolve the parent link id, link
+ * the node into hardware (for non-root levels) and record it in the
+ * per-level software list.
+ *
+ * Valid configurations require 1 <= level <= 4, link_level <= 5 and
+ * level < link_level (children always attach upward).
+ *
+ * Return: 0 on success, 1 or a dpp error code on failure. On a hardware
+ * link failure the freshly allocated list node is freed before returning;
+ * NOTE(review): the gsch id obtained earlier does not appear to be
+ * released on that path — confirm whether dpp_cosq_gsch_id_delete is
+ * needed here.
+ */
+uint32_t
+zxdh_dcbnl_save_se_resources(struct zxdh_en_priv *en_priv,
+                             struct zxdh_dcbnl_se_tree_config *tree_node_cfg)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_se_flow_resource tree_resource = { 0 };
+    struct zxdh_dcbnl_ets_se_node *new_se_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
+    uint32_t level = 0;
+    uint32_t link_level = 0;
+    uint32_t link_idx = 0;
+    uint32_t link_id = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    level = tree_node_cfg->level;
+    link_level = tree_node_cfg->link_level;
+    link_idx = tree_node_cfg->link_idx;
+
+    /* Sanity-check the tree geometry before touching hardware. */
+    if ((level == 0) || (level > 4) || (link_level > 5) ||
+        (level >= link_level)) {
+        LOG_ERR("dcbnl_init_ets: configuration level error, level: %d, link_level: "
+            "%d\n",
+            level, link_level);
+        return 1; // TODO: consider using a standard error definition
+    }
+
+    ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
+
+    /* Step 1: allocate the SE resource for this node. */
+    tree_resource.numq = 1;
+    tree_resource.level = level;
+    tree_resource.flags = tree_node_cfg->type;
+    err = zxdh_dcbnl_get_se_flow_resources(en_dev, &tree_resource);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: get se resources failed, level: %d, idx: %d\n",
+                tree_resource.level, tree_node_cfg->idx);
+        return err;
+    }
+
+    /* Step 2: find the parent node's link id at link_level. */
+    err = zxdh_dcbnl_find_se_link_id(en_priv, level, link_level, link_idx,
+                                     tree_node_cfg->link_sp, &link_id);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: find se link_id failed, link_level: %d, link_idx: "
+                "%d\n",
+                link_level, link_idx);
+        return err;
+    }
+
+    /* Step 3: record the node in the software list. */
+    new_se_node = kmalloc(sizeof(struct zxdh_dcbnl_ets_se_node), GFP_KERNEL);
+    if (NULL == new_se_node) {
+        LOG_ERR("dcbnl_init_ets: kmalloc se node failed\n");
+        return 1;
+    }
+
+    new_se_node->se_next = NULL;
+    new_se_node->gsch_id = tree_resource.gsch_id;
+    new_se_node->node_idx = tree_node_cfg->idx;
+    new_se_node->node_type = tree_node_cfg->type;
+    new_se_node->se_id = tree_resource.resource_id;
+    new_se_node->se_link_id = link_id;
+    new_se_node->se_link_weight = tree_node_cfg->link_weight;
+    new_se_node->se_link_sp = tree_node_cfg->link_sp;
+    new_se_node->link_point = tree_node_cfg->link_point;
+
+    /* Step 4: program the hardware link (root has no parent to link to). */
+    if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) {
+        err = dpp_crdt_se_link_set(&pf_info, new_se_node->se_id,
+                                   new_se_node->se_link_id,
+                                   new_se_node->se_link_weight,
+                                   new_se_node->se_link_sp);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: dpp_crdt_se_link_set failed, level: %d, idx: "
+                    "%d, err:%d\n",
+                    level, tree_node_cfg->idx, err);
+            kfree(new_se_node);
+            return err;
+        }
+    }
+
+    /* Push onto the head of the per-level singly-linked list. */
+    new_se_node->se_next = ets_node_list_head->se_next;
+    ets_node_list_head->se_next = new_se_node;
+
+    ets_node_list_head->node_num += 1;
+
+    LOG_INFO(" level:%d, node_idx:%d, node_num:%d \n", level,
+             new_se_node->node_idx, ets_node_list_head->node_num);
+    return 0;
+}
+
+/*
+ * Build the static ETS scheduling tree from a fixed configuration table.
+ *
+ * The table describes each SE node as
+ *   {level, idx, type, link_level, link_idx, link_weight, link_sp,
+ *    link_point}
+ * and is terminated by a sentinel entry whose level is 0xff. Level 1
+ * carries the link points that flows attach to later
+ * (VENDOR/STRICT/ETS/ZEROBW, each with a committed "C" and excess "E"
+ * variant — presumably the two halves of a dual-rate hierarchy; confirm
+ * against the NP scheduler documentation).
+ *
+ * Return: 0 on success; the first failing entry's error code otherwise
+ * (nodes created before the failure are not rolled back here — the caller
+ * frees them, see the init_ets_se_error path in
+ * zxdh_dcbnl_init_ets_scheduling_tree).
+ */
+uint32_t zxdh_dcbnl_build_ets_scheduling_tree(struct zxdh_en_priv *en_priv)
+{
+    uint32_t i = 0;
+    uint32_t err = 0;
+
+    struct zxdh_dcbnl_se_tree_config
+        ets_se_config_table[ZXDH_DCBNL_MAX_SE_NODE_NUM + 1] = {
+            /*level idx type link_level link_idx link_weight
+               link_sp link_point*/
+            { 4, 0, ZXDH_DCBNL_ETS_NODE_WFQ, 5, 0, 1, 0,
+              ZXDH_DCBNL_ETS_NODE_NULL },
+            { 3, 0, ZXDH_DCBNL_ETS_NODE_FQ2, 4, 0, 1, 0,
+              ZXDH_DCBNL_ETS_NODE_NULL },
+            { 2, 0, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 0,
+              ZXDH_DCBNL_ETS_NODE_NULL },
+            { 2, 1, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 1,
+              ZXDH_DCBNL_ETS_NODE_NULL },
+            { 1, 0, ZXDH_DCBNL_ETS_NODE_FQ, 2, 0, 1, 0,
+              ZXDH_DCBNL_ETS_NODE_VENDOR_C },
+            { 1, 1, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 0, 1, 1,
+              ZXDH_DCBNL_ETS_NODE_STRICT_C },
+            { 1, 2, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 2,
+              ZXDH_DCBNL_ETS_NODE_ETS_C },
+            { 1, 3, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 3,
+              ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C },
+            { 1, 4, ZXDH_DCBNL_ETS_NODE_FQ, 2, 1, 1, 0,
+              ZXDH_DCBNL_ETS_NODE_VENDOR_E },
+            { 1, 5, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 1, 1, 1,
+              ZXDH_DCBNL_ETS_NODE_STRICT_E },
+            { 1, 6, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 2,
+              ZXDH_DCBNL_ETS_NODE_ETS_E },
+            { 1, 7, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 3,
+              ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E },
+            { 0xff } /* sentinel: terminates the table walk below */
+        };
+
+    for (i = 0;
+         i < ZXDH_DCBNL_MAX_SE_NODE_NUM && ets_se_config_table[i].level != 0xff;
+         i++) {
+        err = zxdh_dcbnl_save_se_resources(en_priv, &ets_se_config_table[i]);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed, entry: %d\n",
+                    i);
+            return err;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Map a traffic-class scheduling type to its pair of level-1 link points.
+ *
+ * Each TC type owns two attach points in the ETS tree: a "_C" and an "_E"
+ * node (presumably committed/excess branches — confirm with the scheduler
+ * documentation). An unknown tc_type leaves *c_type and *e_type untouched,
+ * so callers should pre-initialize them.
+ */
+void zxdh_dcbnl_tc_map_to_link_point(uint32_t tc_type, uint32_t *c_type,
+                                     uint32_t *e_type)
+{
+    switch (tc_type) {
+    case ZXDH_DCBNL_VENDOR_TC:
+        *c_type = ZXDH_DCBNL_ETS_NODE_VENDOR_C;
+        *e_type = ZXDH_DCBNL_ETS_NODE_VENDOR_E;
+        break;
+
+    case ZXDH_DCBNL_STRICT_TC:
+        *c_type = ZXDH_DCBNL_ETS_NODE_STRICT_C;
+        *e_type = ZXDH_DCBNL_ETS_NODE_STRICT_E;
+        break;
+
+    case ZXDH_DCBNL_ETS_TC:
+        *c_type = ZXDH_DCBNL_ETS_NODE_ETS_C;
+        *e_type = ZXDH_DCBNL_ETS_NODE_ETS_E;
+        break;
+
+    case ZXDH_DCBNL_ZEROBW_ETS_TC:
+        *c_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C;
+        *e_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E;
+        break;
+    default:
+        /* Unknown type: outputs deliberately left unchanged. */
+        break;
+    }
+}
+
+/*
+ * Derive scheduler weights and strict-priority levels for one TC.
+ *
+ * Weights: a bandwidth share of ZXDH_DCBNL_MAX_BW_ALLOC is treated as the
+ * "no ETS share" marker and yields weight 1; otherwise the share is scaled
+ * to ZXDH_DCBNL_MAX_WEIGHT (integer division — shares below one scale unit
+ * round down to 0; NOTE(review): confirm a zero weight is legal for WFQ).
+ *
+ * Strict priority: strict TCs get an inverted priority so that the highest
+ * tc_id maps to SP 0 and tc 0 to the highest SP value; all other types use
+ * SP 0. The committed (c_*) and excess (e_*) outputs always receive the
+ * same values here.
+ */
+void zxdh_dcbnl_get_tc_weight_sp(uint32_t tc_type, uint32_t tc_tx_bw,
+                                 uint32_t tc_id, uint32_t *c_weight,
+                                 uint32_t *e_weight, uint32_t *c_sp,
+                                 uint32_t *e_sp)
+{
+    if (tc_tx_bw == ZXDH_DCBNL_MAX_BW_ALLOC) {
+        *c_weight = 1;
+        *e_weight = 1;
+    } else {
+        *c_weight = ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / ZXDH_DCBNL_MAX_BW_ALLOC;
+        *e_weight = ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / ZXDH_DCBNL_MAX_BW_ALLOC;
+    }
+
+    if ((tc_type == ZXDH_DCBNL_STRICT_TC) &&
+        (tc_id < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) {
+        *c_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id;
+        *e_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id;
+    } else {
+        *c_sp = 0;
+        *e_sp = 0;
+    }
+}
+
+/*
+ * Find the pair of SE link ids (committed and excess) that a flow of the
+ * given tc_type must attach to.
+ *
+ * Maps tc_type to its C/E link-point markers, then walks the SE node list
+ * at @link_level once, matching nodes by link_point and forming each link
+ * id as se_id + sp offset. The loop exits early once both ids are found.
+ *
+ * Return: 0 with *c_linkid/*e_linkid filled in; 1 if the level list is
+ * empty or either link point is missing (outputs then hold
+ * ZXDH_DCBNL_NULL_ID).
+ */
+uint32_t zxdh_dcbnl_find_flow_link_se_id(struct zxdh_en_priv *en_priv,
+                                         uint32_t tc_type, uint32_t link_level,
+                                         uint32_t *c_linkid, uint32_t *e_linkid,
+                                         uint32_t c_sp, uint32_t e_sp)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_se_node *se_node =
+        en_dev->dcb_para.ets_node_list_head[link_level].se_next;
+    uint32_t c_type = 0;
+    uint32_t e_type = 0;
+
+    if (NULL == se_node) {
+        LOG_ERR("dcbnl: find_flow_link_se_id no nodes \n");
+        return 1;
+    }
+
+    zxdh_dcbnl_tc_map_to_link_point(tc_type, &c_type, &e_type);
+
+    *c_linkid = ZXDH_DCBNL_NULL_ID;
+    *e_linkid = ZXDH_DCBNL_NULL_ID;
+
+    /* Single pass; stop as soon as both link ids are resolved. */
+    while ((NULL != se_node) && ((ZXDH_DCBNL_NULL_ID == *c_linkid) ||
+                                 (ZXDH_DCBNL_NULL_ID == *e_linkid))) {
+        if (se_node->link_point == c_type) {
+            *c_linkid = se_node->se_id + c_sp;
+        } else if (se_node->link_point == e_type) {
+            *e_linkid = se_node->se_id + e_sp;
+        }
+
+        se_node = se_node->se_next;
+    }
+
+    if ((ZXDH_DCBNL_NULL_ID == *c_linkid) ||
+        (ZXDH_DCBNL_NULL_ID == *e_linkid)) {
+        LOG_ERR("dcbnl: find_flow_link_se_id failed, c_linkid: 0x%x, e_linkid: 0x%x\n",
+                *c_linkid, *e_linkid);
+        return 1;
+    }
+    return 0;
+}
+
+/*
+ * Translate an internal TC scheduler type to the IEEE 802.1Qaz TSA value
+ * reported through dcbnl. Unknown types fall back to strict priority
+ * (logged as an error).
+ */
+uint32_t zxdh_dcbnl_get_ieee_tsa(uint32_t tc_type)
+{
+    uint32_t tsa = 0;
+    switch (tc_type) {
+    case ZXDH_DCBNL_ETS_TC:
+    case ZXDH_DCBNL_ZEROBW_ETS_TC:
+        tsa = IEEE_8021QAZ_TSA_ETS;
+        break;
+    case ZXDH_DCBNL_STRICT_TC:
+        tsa = IEEE_8021QAZ_TSA_STRICT;
+        break;
+    case ZXDH_DCBNL_VENDOR_TC:
+        tsa = IEEE_8021QAZ_TSA_VENDOR;
+        break;
+    default:
+        tsa = IEEE_8021QAZ_TSA_STRICT;
+        LOG_ERR("dcbnl:tsa error, change to strict \n");
+        break;
+    }
+    return tsa;
+}
+
+/*
+ * Allocate, configure and record one flow (queue) node for @tc_id and link
+ * it under the level-1 SEs of the ETS tree.
+ *
+ * On tc_id == 0 this also allocates the per-port block of flow resources
+ * (filling @tree_resource) and binds the base flow id to the physical port.
+ * NOTE(review): for tc_id > 0 the function reads tree_resource->gsch_id /
+ * resource_id without initialising them -- callers MUST pass the same
+ * tree_resource across all tc_ids starting at 0; confirm no other caller
+ * exists.
+ *
+ * Returns 0 on success, non-zero on failure. Failures of the TD-threshold
+ * and shape programming are tolerated (logged only).
+ */
+uint32_t zxdh_dcbnl_save_flow_resources(
+    struct zxdh_en_priv *en_priv,
+    struct zxdh_dcbnl_tc_flow_config *tc_flow_config,
+    struct zxdh_dcbnl_ets_se_flow_resource *tree_resource, uint32_t tc_id)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *new_flow_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
+    struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 };
+    uint32_t c_linkid = 0;
+    uint32_t e_linkid = 0;
+    uint32_t c_weight = 0;
+    uint32_t e_weight = 0;
+    uint32_t c_sp = 0;
+    uint32_t e_sp = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    /* Flows may only be attached at tree level 1. */
+    if (tc_flow_config->link_level != 1) {
+        LOG_ERR("dcbnl_init_ets: zxdh_dcbnl_save_flow_resources link_level err\n");
+        return 1;
+    }
+
+    ets_node_list_head =
+        &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
+
+    /* First TC: reserve the whole flow-id block for this port. */
+    if (tc_id == 0) {
+        tree_resource->numq = ZXDH_DCBNL_MAX_TRAFFIC_CLASS;
+        tree_resource->level = ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL;
+        tree_resource->flags = ZXDH_DCBNL_ETS_NODE_FLOW;
+        err = zxdh_dcbnl_get_se_flow_resources(en_dev, tree_resource);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: get flow resources err\n");
+            return err;
+        }
+
+        err = dpp_tm_flowid_pport_table_set(&pf_info, en_dev->phy_port,
+                                            tree_resource->resource_id);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: flowid_pport_table_set failed, port: %d, "
+                    "flowid:%d, err:%d\n",
+                    en_dev->phy_port, tree_resource->resource_id, err);
+            return err;
+        }
+    }
+
+    zxdh_dcbnl_get_tc_weight_sp(tc_flow_config->tc_type,
+                                tc_flow_config->tc_tx_bw, tc_id, &c_weight,
+                                &e_weight, &c_sp, &e_sp);
+
+    err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_flow_config->tc_type,
+                                          tc_flow_config->link_level, &c_linkid,
+                                          &e_linkid, c_sp, e_sp);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets init ets: find_flow_link_se_id failed, tc_id: %d, "
+                "tc_type: %d\n",
+                tc_id, tc_flow_config->tc_type);
+        return err;
+    }
+
+    new_flow_node =
+        kmalloc(sizeof(struct zxdh_dcbnl_ets_flow_node), GFP_KERNEL);
+    if (new_flow_node == NULL) {
+        LOG_ERR("dcbnl_init_ets: kmalloc new flow node failed\n");
+        return 1;
+    }
+
+    /* flow/gsch ids are consecutive within the block reserved above. */
+    new_flow_node->flow_next = NULL;
+    new_flow_node->gsch_id = tree_resource->gsch_id + tc_id;
+    new_flow_node->flow_id = tree_resource->resource_id + tc_id;
+    new_flow_node->tc_id = tc_id;
+    new_flow_node->tc_type = tc_flow_config->tc_type;
+    new_flow_node->tc_tx_bw = tc_flow_config->tc_tx_bw;
+    new_flow_node->td_th = tc_flow_config->td_th;
+    new_flow_node->c_linkid = c_linkid;
+    new_flow_node->c_weight = c_weight;
+    new_flow_node->c_sp = c_sp;
+    new_flow_node->c_rate = tc_flow_config->c_rate;
+    new_flow_node->mode = 1;
+    new_flow_node->e_linkid = e_linkid;
+    new_flow_node->e_weight = e_weight;
+    new_flow_node->e_sp = e_sp;
+    new_flow_node->e_rate = tc_flow_config->e_rate;
+
+    err = dpp_flow_map_port_set(&pf_info, new_flow_node->flow_id,
+                                en_dev->phy_port);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: dpp_flow_map_port_set failed, flow_id: %d, "
+                "phy_port: %d, err:%d\n",
+                new_flow_node->flow_id, en_dev->phy_port, err);
+        kfree(new_flow_node);
+        return err;
+    }
+
+    err = dpp_crdt_flow_link_set(&pf_info, new_flow_node->flow_id, c_linkid,
+                                 c_weight, c_sp, new_flow_node->mode, e_linkid,
+                                 e_weight, e_sp);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: dpp_crdt_flow_link_set failed, flow_id: %d, "
+                "c_linkid: %d, e_linkid: %d, err:%d\n",
+                new_flow_node->flow_id, c_linkid, e_linkid, err);
+        kfree(new_flow_node);
+        return err;
+    }
+
+    /* TD-threshold failure is non-fatal: hardware default (150) applies. */
+    err = dpp_flow_td_th_set(&pf_info, new_flow_node->flow_id,
+                             new_flow_node->td_th);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, "
+                "td_th: %d, err:%d\n",
+                en_dev->vport, new_flow_node->flow_id, new_flow_node->td_th,
+                err);
+        // kfree(new_flow_node); //The default is 150
+        // return err;
+    }
+
+    p_para.cir = new_flow_node->c_rate;
+    p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS;
+    p_para.db_en = 1;
+    p_para.eir = new_flow_node->e_rate;
+    p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS;
+
+    /* Shape failure is also tolerated (logged only). */
+    err = dpp_flow_shape_set(&pf_info, new_flow_node->flow_id, p_para.cir,
+                             p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: dpp_flow_shape_set failed, vport: %d, flow_id: "
+                "%d, tc_id: %d, e_rate: %d, err:%d\n",
+                en_dev->vport, new_flow_node->flow_id, new_flow_node->tc_id,
+                new_flow_node->e_rate, err);
+    }
+
+    /* Push onto the flow-level list and mirror config into dcb_para. */
+    new_flow_node->flow_next = ets_node_list_head->flow_next;
+    ets_node_list_head->flow_next = new_flow_node;
+    ets_node_list_head->node_num += 1;
+
+    en_dev->dcb_para.ets_cfg.tc_tsa[tc_id] =
+        zxdh_dcbnl_get_ieee_tsa(new_flow_node->tc_type);
+    en_dev->dcb_para.ets_cfg.tc_tx_bw[tc_id] = tc_flow_config->tc_tx_bw;
+    en_dev->dcb_para.tc_maxrate[tc_id] = new_flow_node->e_rate;
+
+    LOG_INFO(" level:%d, tc_id:%d, flow_id:%d, node_num:%d \n",
+             tree_resource->level, new_flow_node->tc_id, new_flow_node->flow_id,
+             ets_node_list_head->node_num);
+
+    return 0;
+}
+
+/*
+ * Attach all traffic classes to the ETS tree using the default per-TC
+ * configuration: every TC starts as strict priority, 100% bandwidth,
+ * max e_rate and TD threshold 150. The 0xff link_level entry is a table
+ * sentinel. Returns 0 on success, first failing error code otherwise.
+ */
+uint32_t zxdh_dcbnl_scheduling_tree_link_tc(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_dcbnl_ets_se_flow_resource tree_resource;
+    uint32_t i = 0;
+    uint32_t err = 0;
+
+    struct zxdh_dcbnl_tc_flow_config
+        ets_tc_config_table[ZXDH_DCBNL_MAX_TRAFFIC_CLASS + 1] = {
+            /*link_level tc_type tc_tx_bw c_rate e_rate td_th */
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 1, ZXDH_DCBNL_STRICT_TC, 100, 0, ZXDH_DCBNL_MAXRATE_KBITPS,
+              150 },
+            { 0xff }
+        };
+
+    /* tree_resource is filled by the i == 0 call and reused afterwards. */
+    for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS &&
+                ets_tc_config_table[i].link_level != 0xff;
+         i++) {
+        err = zxdh_dcbnl_save_flow_resources(en_priv, &ets_tc_config_table[i],
+                                             &tree_resource, i);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: save_flow_resources failed, entry: %d\n",
+                    i);
+            return err;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Program the port's trust mode (PCP vs DSCP classification) into the TM
+ * and cache it in dcb_para.trust. Returns 0 on success.
+ */
+uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    err = dpp_tm_pport_trust_mode_table_set(&pf_info, en_dev->phy_port, trust);
+    if (err) {
+        LOG_ERR("dcbnl_set_ets: set_ets_trust failed, vport: %d, trust: %d, err:%d\n",
+                en_dev->vport, trust, err);
+        return err;
+    }
+    en_dev->dcb_para.trust = trust;
+    LOG_INFO(" trust:%d \n", trust);
+    return 0;
+}
+
+/*
+ * Initialise the port's classification tables to defaults:
+ * - prio -> TC: identity mapping,
+ * - dscp -> prio: dscp >> 3 (8 DSCP values per priority),
+ * - trust mode: PCP.
+ * Returns 0 on success, first failing error otherwise.
+ */
+uint32_t zxdh_dcbnl_init_trust_and_table(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t i = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) {
+        err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, i,
+                                            i); // initial 1:1 prio->tc mapping
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: dpp_tm_pport_up_map_table_set failed, vport: "
+                    "%d, phy_port: %d, err:%d\n",
+                    en_dev->vport, en_dev->phy_port, err);
+            return err;
+        }
+        en_dev->dcb_para.ets_cfg.prio_tc[i] = i;
+        LOG_INFO(" vport:%d,phy_port:%d prio:%d, tc:%d \n", en_dev->vport,
+                 en_dev->phy_port, i, en_dev->dcb_para.ets_cfg.prio_tc[i]);
+    }
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_DSCP; i++) {
+        err = dpp_tm_pport_dscp_map_table_set(&pf_info, en_dev->phy_port, i,
+                                              i >> 3);
+        if (err) {
+            LOG_ERR("dcbnl_init_ets: dscp_map_table_set failed, vport: %d, phy_port: "
+                    "%d, err:%d\n",
+                    en_dev->vport, en_dev->phy_port, err);
+            return err;
+        }
+
+        en_dev->dcb_para.dscp2prio[i] = i >> 3;
+        LOG_INFO("vport:%d,phy_port:%d,dscp:%d, prio:%d \n", en_dev->vport,
+                 en_dev->phy_port, i, en_dev->dcb_para.dscp2prio[i]);
+    }
+
+    err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP);
+    if (err) {
+        LOG_INFO("set_ets_trust failed \n");
+        return err;
+    }
+    /* Redundant: set_ets_trust already cached this; harmless. */
+    en_dev->dcb_para.trust = ZXDH_DCBNL_ETS_TRUST_PCP;
+    LOG_INFO(" vport:%d,phy_port:%d,trust:%d \n", en_dev->vport,
+             en_dev->phy_port, en_dev->dcb_para.trust);
+    return 0;
+}
+
+/* Reset every per-level ETS node list head to empty. Always returns 0. */
+uint32_t zxdh_dcbnl_init_ets_list(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t level = 0;
+
+    for (level = 0; level < ZXDH_DCBNL_MAX_TREE_LEVEL; level++) {
+        en_dev->dcb_para.ets_node_list_head[level].se_next = NULL;
+        en_dev->dcb_para.ets_node_list_head[level].flow_next = NULL;
+        en_dev->dcb_para.ets_node_list_head[level].node_num = 0;
+    }
+    return 0;
+}
+
+/* Normal release of SE nodes: tear down hardware links/gsch ids for every
+ * level below the root (the root level is freed in software only), then
+ * free the node memory. Hardware errors are logged but do not abort the
+ * walk. Always returns 0.
+ * NOTE(review): the second LOG_ERR prints gsch_id under the label "se_id"
+ * -- confirm intended. */
+uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_se_node *se_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
+    uint32_t err = 0;
+    uint32_t level = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    LOG_INFO(" vport:%d, phy_port:%d \n", en_dev->vport, en_dev->phy_port);
+    for (level = 1; level <= ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level++) {
+        ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
+        while (NULL != ets_node_list_head->se_next) {
+            se_node = ets_node_list_head->se_next;
+            ets_node_list_head->se_next = se_node->se_next;
+            if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) {
+                err = dpp_crdt_del_se_link_set(&pf_info, se_node->se_id,
+                                               se_node->se_id);
+                if (err) {
+                    LOG_ERR("dcbnl_free_ets: dpp_crdt_del_se_link_set failed, se_id: %d, "
+                            "err:%d \n",
+                            se_node->se_id, err);
+                }
+
+                err = dpp_cosq_gsch_id_delete(&pf_info, en_dev->phy_port,
+                                              se_node->gsch_id);
+                if (err) {
+                    LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, se_id: "
+                            "%lld, err:%d \n",
+                            se_node->gsch_id, err);
+                }
+            }
+            LOG_INFO(" free level:%d se_id:%x \n", level, se_node->se_id);
+            kfree(se_node);
+            ets_node_list_head->node_num -= 1;
+            LOG_INFO("current node_num:%d \n", ets_node_list_head->node_num);
+        }
+    }
+
+    return 0;
+}
+/* Normal release of flow nodes: zero each flow's TD threshold, unlink it
+ * from the credit scheduler, delete its gsch id, then free the node.
+ * Hardware errors are logged but do not abort the walk. If at least one
+ * flow existed, the port's flow-id binding is removed afterwards.
+ * Always returns 0. */
+uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head =
+        &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
+    uint32_t err = 0;
+    bool have_flow = false;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    LOG_INFO(" vport:%d, phy_port:%d \n", en_dev->vport, en_dev->phy_port);
+    while (NULL != ets_node_list_head->flow_next) {
+        have_flow = true;
+        flow_node = ets_node_list_head->flow_next;
+        ets_node_list_head->flow_next = flow_node->flow_next;
+
+        err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, 0);
+        if (err) {
+            LOG_ERR("dcbnl_free_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, "
+                    "td_th: 0, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, err);
+        }
+
+        err = dpp_crdt_del_flow_link_set(&pf_info, flow_node->flow_id,
+                                         flow_node->flow_id);
+        if (err) {
+            LOG_ERR("dcbnl_free_ets: dpp_crdt_del_flow_link_set failed, flow_id: %d, "
+                    "err:%d \n",
+                    flow_node->flow_id, err);
+        }
+
+        err = dpp_cosq_gsch_id_delete(&pf_info, en_dev->phy_port,
+                                      flow_node->gsch_id);
+        if (err) {
+            LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, gsch_id: %lld "
+                    ",err:%d\n",
+                    flow_node->gsch_id, err);
+        }
+
+        LOG_INFO(" free level:0, flow_id:%d, tc:%d\n", flow_node->flow_id,
+                 flow_node->tc_id);
+
+        kfree(flow_node);
+        ets_node_list_head->node_num -= 1;
+        LOG_INFO("current node_num:%d \n", ets_node_list_head->node_num);
+    }
+
+    if (have_flow) {
+        err = dpp_tm_flowid_pport_table_del(&pf_info, en_dev->phy_port);
+        if (err) {
+            LOG_ERR("dcbnl_free_ets: dpp_tm_flowid_pport_table_del failed,vport:%d, "
+                    "phy_port: %d \n",
+                    en_dev->vport, en_dev->phy_port);
+        }
+    }
+
+    return 0;
+}
+
+/* host no reset,risc reset? */
+/* Software-only cleanup: free all flow and SE node memory WITHOUT touching
+ * hardware -- for the case where the RISC side was reset and the hardware
+ * state is already gone. Always returns 0. */
+uint32_t zxdh_dcbnl_check_and_free_node_memory(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
+    struct zxdh_dcbnl_ets_se_node *se_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
+    uint32_t level = 0;
+
+    ets_node_list_head =
+        &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
+    while (NULL != ets_node_list_head->flow_next) {
+        flow_node = ets_node_list_head->flow_next;
+        ets_node_list_head->flow_next = flow_node->flow_next;
+        kfree(flow_node);
+        ets_node_list_head->node_num -= 1;
+    }
+
+    for (level = 1; level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL + 1; level++) {
+        ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
+        while (NULL != ets_node_list_head->se_next) {
+            se_node = ets_node_list_head->se_next;
+            ets_node_list_head->se_next = se_node->se_next;
+            kfree(se_node);
+            ets_node_list_head->node_num -= 1;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Re-home every flow whose (tc_type, tc_tx_bw) differs from the requested
+ * configuration: detach it from the credit scheduler, re-attach with the
+ * new link ids/weights/priorities and restore its TD threshold.
+ * Unchanged TCs are skipped.
+ * Returns 0 on success, first fatal error otherwise.
+ * NOTE(review): a mid-loop failure leaves earlier TCs already rehomed --
+ * confirm callers re-drive the full configuration on error.
+ */
+uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv,
+                                      uint8_t *tc_type, uint8_t *tc_tx_bw)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node =
+        en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]
+            .flow_next;
+    uint32_t tc_id = 0;
+    uint32_t c_linkid = 0;
+    uint32_t e_linkid = 0;
+    uint32_t c_weight = 0;
+    uint32_t e_weight = 0;
+    uint32_t c_sp = 0;
+    uint32_t e_sp = 0;
+    uint32_t i = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    if (NULL == flow_node) {
+        LOG_ERR("dcbnl_set_ets: set_tc_scheduling no flow in the tree\n");
+        return 1;
+    }
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++) {
+        tc_id = flow_node->tc_id;
+        if ((flow_node->tc_type == tc_type[tc_id]) &&
+            (flow_node->tc_tx_bw == tc_tx_bw[tc_id])) {
+            LOG_INFO("Same configuration,tc_id:%d, tc_type:%d, tc_tx_bw:%d\n",
+                     tc_id, tc_type[tc_id], tc_tx_bw[tc_id]);
+            flow_node = flow_node->flow_next;
+            continue;
+        }
+
+        zxdh_dcbnl_get_tc_weight_sp(tc_type[tc_id], tc_tx_bw[tc_id], tc_id,
+                                    &c_weight, &e_weight, &c_sp, &e_sp);
+
+        err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_type[tc_id], 1,
+                                              &c_linkid, &e_linkid, c_sp, e_sp);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: find_flow_link_se_id failed, tc_id: %d, tc_type: "
+                    "%d\n",
+                    tc_id, tc_type[tc_id]);
+            return err;
+        }
+
+        /* Detach first. TODO: should we rate-limit and clear TD before
+         * detaching? */
+        err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, 0);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, "
+                    "td_th: 0, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, err);
+        }
+
+        err = dpp_crdt_del_flow_link_set(&pf_info, flow_node->flow_id,
+                                         flow_node->flow_id);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: dpp_crdt_del_flow_link_set failed, vport: %d, "
+                    "flow_id: %d, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, err);
+            return err;
+        }
+
+        /* Re-attach. TODO: attach, set TD, then re-apply the rate limit? */
+        err = dpp_crdt_flow_link_set(&pf_info, flow_node->flow_id, c_linkid,
+                                     c_weight, c_sp, 1, e_linkid, e_weight,
+                                     e_sp);
+        if (err) {
+            /* Fixed log labels: the 2nd/3rd values are the link ids, not
+             * flow ids. */
+            LOG_ERR("dcbnl_set_ets: dpp_crdt_flow_link_set failed, flow_id: %d, "
+                    "c_linkid: %d, e_linkid: %d, err:%d\n",
+                    flow_node->flow_id, c_linkid, e_linkid, err);
+            return err;
+        }
+
+        err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id,
+                                 flow_node->td_th);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, "
+                    "td_th:%d, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, flow_node->td_th, err);
+        }
+
+        flow_node->tc_type = tc_type[tc_id];
+        flow_node->tc_tx_bw = tc_tx_bw[tc_id];
+
+        flow_node->c_linkid = c_linkid;
+        flow_node->c_weight = c_weight;
+        flow_node->c_sp = c_sp;
+
+        flow_node->e_linkid = e_linkid;
+        flow_node->e_weight = e_weight;
+        flow_node->e_sp = e_sp;
+
+        /* Fixed log labels: first triple is the C side (was mislabelled as
+         * e_weight/e_sp). */
+        LOG_INFO(" tc_id:%d, tc_type:%d, c_linkid:%x, c_weight:%d, c_sp:%d "
+                 ",e_linkid:%x, e_weight:%d, e_sp:%d \n",
+                 tc_id, flow_node->tc_type, flow_node->c_linkid,
+                 flow_node->c_weight, flow_node->c_sp, flow_node->e_linkid,
+                 flow_node->e_weight, flow_node->e_sp);
+
+        flow_node = flow_node->flow_next;
+    }
+
+    return 0;
+}
+
+/*
+ * Program the priority -> TC mapping table for the port from @prio_tc
+ * (one entry per user priority). Returns 0 on success.
+ * NOTE(review): unlike init, dcb_para.ets_cfg.prio_tc is NOT updated here
+ * -- confirm the caller mirrors the new map into the cached config.
+ */
+uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv,
+                                      uint8_t *prio_tc)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t i = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    LOG_INFO(" begin \n");
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) {
+        err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, i,
+                                            prio_tc[i]);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: failed, vport: %d, prio: %d, tc: %d, err:%d\n",
+                    en_dev->vport, i, prio_tc[i], err);
+            return err;
+        }
+        LOG_INFO(" vport:%d, phy_port:%d, prio:%d, tc:%d \n", en_dev->vport,
+                 en_dev->phy_port, i, prio_tc[i]);
+    }
+    return 0;
+}
+
+/*
+ * Apply a per-TC maximum rate (EIR, in the units used by dpp_flow_shape_set;
+ * presumably Kbps -- TODO confirm) to each flow whose e_rate changed.
+ * Unchanged TCs are skipped. Updates the cached e_rate and tc_maxrate on
+ * success. Returns 0 on success, first failing error otherwise.
+ */
+uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv,
+                                   uint32_t *maxrate)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node =
+        en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]
+            .flow_next;
+    struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 };
+    uint32_t tc_id = 0;
+    uint32_t i = 0;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    if (NULL == flow_node) {
+        LOG_ERR("dcbnl_set_ets: set_tc_maxrate no flow in the tree\n");
+        return 1;
+    }
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++) {
+        tc_id = flow_node->tc_id;
+        if (flow_node->e_rate == maxrate[tc_id]) {
+            LOG_INFO("Same configuration, tc_id:%d, maxrate:%d\n", tc_id,
+                     maxrate[tc_id]);
+            flow_node = flow_node->flow_next;
+            continue;
+        }
+        /* CIR stays 0: only the excess rate (EIR) enforces the cap. */
+        p_para.cir = 0;
+        p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS;
+        p_para.db_en = 1;
+        p_para.eir = maxrate[tc_id];
+        p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS;
+
+        LOG_INFO(" vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, "
+                 "db_en:%d, eir:%d,ebs:%d \n",
+                 en_dev->vport, en_dev->phy_port, flow_node->flow_id, tc_id,
+                 p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, p_para.ebs);
+
+        err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, p_para.cir,
+                                 p_para.cbs, p_para.db_en, p_para.eir,
+                                 p_para.ebs);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: "
+                    "%d, tc_id: %d, eir: %d, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, tc_id, maxrate[tc_id],
+                    err);
+            return err;
+        }
+
+        flow_node->e_rate = maxrate[tc_id];
+        en_dev->dcb_para.tc_maxrate[tc_id] = maxrate[tc_id];
+
+        flow_node = flow_node->flow_next;
+    }
+
+    return 0;
+}
+
+/*
+ * Program a single DSCP -> priority mapping for the port and mirror it into
+ * the cached dscp2prio table. Returns 0 on success.
+ */
+uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp,
+                                  uint8_t prio)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    err = dpp_tm_pport_dscp_map_table_set(&pf_info, en_dev->phy_port, dscp,
+                                          prio);
+    if (err) {
+        LOG_ERR("dcbnl_set_ets: set_dscp2prio failed, vport: %d, dscp: %d, prio: "
+                "%d, err:%d\n",
+                en_dev->vport, dscp, prio, err);
+        return err;
+    }
+    en_dev->dcb_para.dscp2prio[dscp] = prio;
+    LOG_INFO(" vport:%d, ephy_port:%d,dscp:%d, up:%d \n", en_dev->vport,
+             en_dev->phy_port, dscp, prio);
+
+    return 0;
+}
+
+/*
+ * Write a per-TC tail-drop threshold (indexed by tc_id in @tc_td_th) to
+ * every flow in the tree and cache it in the node.
+ * Returns 0 on success; 1 if the tree is empty or @tc_td_th is NULL;
+ * the dpp error code on the first hardware failure.
+ */
+uint32_t zxdh_dcbnl_set_flow_td_th(struct zxdh_en_priv *en_priv,
+                                   uint32_t *tc_td_th)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node =
+        en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]
+            .flow_next;
+    uint32_t err = 0;
+    uint32_t i = 0;
+    uint32_t tc_id = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    if (flow_node == NULL) {
+        LOG_ERR("dcbnl_set_ets: set_flow_td_th no flow in the tree\n");
+        return 1;
+    }
+
+    if (tc_td_th == NULL) {
+        LOG_ERR("dcbnl_set_ets: tc_td_th is null \n");
+        return 1;
+    }
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++) {
+        tc_id = flow_node->tc_id;
+        err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, tc_td_th[tc_id]);
+        if (err) {
+            LOG_ERR("dcbnl_set_ets: set_flow_td_th failed, vport: %d, flow_id:%d, "
+                    "tc_id:%d, td_th: %d, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, tc_id, tc_td_th[tc_id],
+                    err);
+            return err;
+        }
+        flow_node->td_th = tc_td_th[tc_id];
+        flow_node = flow_node->flow_next;
+    }
+
+    return 0;
+}
+
+/*
+ * Read back the tail-drop threshold of every flow in the tree into
+ * @tc_td_th, indexed by tc_id. Returns 0 on success; 1 for an empty tree
+ * or NULL output array; the dpp error code on the first read failure.
+ */
+uint32_t zxdh_dcbnl_get_flow_td_th(struct zxdh_en_priv *en_priv,
+                                   uint32_t *tc_td_th)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node =
+        en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]
+            .flow_next;
+    uint32_t err = 0;
+    uint32_t i = 0;
+    uint32_t tc_id = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    if (flow_node == NULL) {
+        LOG_ERR("get_flow_td_th no flow in the tree\n");
+        return 1;
+    }
+
+    if (tc_td_th == NULL) {
+        LOG_ERR(" tc_td_th is null \n");
+        return 1;
+    }
+
+    for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; i++) {
+        tc_id = flow_node->tc_id;
+        err = dpp_flow_td_th_get(&pf_info, flow_node->flow_id,
+                                 &tc_td_th[tc_id]);
+        if (err) {
+            LOG_ERR("get_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, err:%d\n",
+                    en_dev->vport, flow_node->flow_id, tc_id, err);
+            return err;
+        }
+        flow_node = flow_node->flow_next;
+    }
+
+    return 0;
+}
+
+/*
+ * Program the port shaper (CIR) from the negotiated link speed.
+ * en_dev->speed is in Mbps; it is clamped to the maximum supported rate
+ * when zero or out of range, then converted to Kbps for the shaper.
+ * Returns 0 on success.
+ */
+uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 };
+    uint32_t err = 0;
+    uint32_t speed = 0;
+    uint32_t max_speed = ZXDH_DCBNL_MAXRATE_KBITPS / ZXDH_DCBNL_RATEUNIT_K;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    speed = en_dev->speed;
+    if ((0 == speed) || (speed > max_speed)) {
+        LOG_INFO("get port speed is : %u ,set to max:%u\n", speed, max_speed);
+        speed = max_speed;
+    }
+
+    p_para.cir = speed * ZXDH_DCBNL_RATEUNIT_K; // Mbps->Kbps
+    p_para.cbs = ZXDH_DCBNL_PORT_RATE_CBS;
+    p_para.db_en = 0;
+    p_para.eir = 0;
+    p_para.ebs = 0;
+
+    LOG_INFO(" vport:%d,phy_port:%d, p_para.cir:%d, speed:%d \n", en_dev->vport,
+             en_dev->phy_port, p_para.cir, speed);
+
+    /* Last argument enables the shaper (c_en = 1). */
+    err = dpp_port_shape_set(&pf_info, en_dev->phy_port, p_para.cir, p_para.cbs,
+                             1);
+    if (err) {
+        LOG_ERR("dcbnl_set_ets: dpp_port_shape_set failed, port:%d, speed:%d, "
+                "speed:%d,err:%d \n",
+                en_dev->phy_port, speed, p_para.cir, err);
+        return err;
+    }
+
+    return 0;
+}
+
+/*
+ * Debug dump: log every SE node (root level downwards) and every flow node
+ * of the port's ETS tree. Read-only; always returns 0.
+ */
+uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_dcbnl_ets_flow_node *flow_node = NULL;
+    struct zxdh_dcbnl_ets_se_node *se_node = NULL;
+    struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL;
+    uint32_t level = 0;
+
+    LOG_INFO(" ***vport:%d port:%d \n", en_dev->vport, en_dev->phy_port);
+
+    for (level = ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level > 0; level--) {
+        ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level];
+        se_node = ets_node_list_head->se_next;
+        while (NULL != se_node) {
+            LOG_INFO(" se_node *** level:%d, node_idx:%d, se_id:0x%x *** \n",
+                     level, se_node->node_idx, se_node->se_id);
+            LOG_INFO(" se_node gsch_id:0x%llx, node_type:%d, se_id:0x%x \n",
+                     se_node->gsch_id, se_node->node_type, se_node->se_id);
+            LOG_INFO(
+                " se_node se_link_id:0x%x, se_link_weight:%d, se_link_sp:%d, "
+                "link_point:%d \n",
+                se_node->se_link_id, se_node->se_link_weight,
+                se_node->se_link_sp, se_node->link_point);
+            se_node = se_node->se_next;
+        }
+    }
+
+    ets_node_list_head =
+        &en_dev->dcb_para.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL];
+    flow_node = ets_node_list_head->flow_next;
+    while (NULL != flow_node) {
+        LOG_INFO(" flow_node *** tc_id:%d, flow_id:%d *** \n", flow_node->tc_id,
+                 flow_node->flow_id);
+        LOG_INFO(" flow_node gsch_id:0x%llx, tc_type:%d, td_th:%d \n",
+                 flow_node->gsch_id, flow_node->tc_type, flow_node->td_th);
+        LOG_INFO(" flow_node c_linkid:0x%x, c_weight:%d, c_sp:%d, c_rate:%d \n",
+                 flow_node->c_linkid, flow_node->c_weight, flow_node->c_sp,
+                 flow_node->c_rate);
+        LOG_INFO(" flow_node e_linkid:0x%x, e_weight:%d, e_sp:%d, e_rate:%d \n",
+                 flow_node->e_linkid, flow_node->e_weight, flow_node->e_sp,
+                 flow_node->e_rate);
+        flow_node = flow_node->flow_next;
+    }
+
+    return 0;
+}
+
+/*
+ * Map a link speed in Mbps to a threshold-table row index.
+ * Speeds below 50G are all treated as 50G; the RDMA (400G) row is unused
+ * for now. ZXDH_TRPG_DEFAULT is the riscv-side default initial value.
+ */
+uint32_t zxdh_link_speed_to_index(uint32_t link_speed)
+{
+    uint32_t index = ZXDH_TRPG_DEFAULT; // default initial value on riscv
+    if (link_speed == 200000) {
+        index = ZXDH_TRPG_SPEED_200G;
+    } else if (link_speed == 100000) {
+        index = ZXDH_TRPG_SPEED_100G;
+    } else {
+        index = ZXDH_TRPG_SPEED_50G;
+    }
+
+    return index;
+}
+
+/* Per-speed PBU port thresholds, indexed by ZXDH_TRPG_SPEED_*. */
+DPP_PBU_PORT_TH_PARA_T port_th_para_tbl[ZXDH_TRPG_SPEED_NUM] = {
+    /* Columns: lif threshold, lif private threshold, idma private threshold,
+       idma_th0 idma_th1 idma_th2 idma_th3 idma_th4 idma_th5 idma_th6
+       idma_th7 */
+    /* Unit: 512 bytes -- the chip's packet-processing granularity. */
+    { 100, 140, 140, 110, 130, 150, 170, 190, 210, 230, 250 }, // 50G
+    { 210, 280, 280, 180, 230, 280, 330, 380, 430, 480, 530 }, // 100G
+    { 480, 560, 560, 370, 450, 550, 670, 770, 870, 970, 1070 }, // 200G
+    { 1400, 0, 0, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500 }, // 400G RDMA
+    { 2036, 2000, 2000, 2100, 2100, 2100, 2100, 2100, 2100, 2100,
+      2100 } // default initial value on riscv
+};
+
+/* Per-speed, per-CoS PBU thresholds, indexed by ZXDH_TRPG_SPEED_*. */
+DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para_tbl[ZXDH_TRPG_SPEED_NUM] = {
+    /* Unit: 512 bytes. */
+    { { 100, 120, 140, 160, 180, 200, 220, 240 } }, // 50G
+    { { 160, 210, 260, 310, 360, 410, 460, 510 } }, // 100G
+    { { 320, 420, 480, 620, 720, 820, 920, 1020 } }, // 200G
+    { { 1200, 1200, 2000, 2000, 2800, 2800, 2800, 2800 } }, // 400G RDMA
+    { { 1650, 1700, 1750, 1800, 1850, 1900, 1950, 2000 } } // default initial value on riscv
+};
+
+/* Per-speed, per-TC flow tail-drop thresholds, indexed by ZXDH_TRPG_SPEED_*. */
+uint32_t flow_td_th_tbl[ZXDH_TRPG_SPEED_NUM][ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = {
+    /* Unit: KB */
+    { 72, 119, 166, 213, 260, 306, 353, 400 }, // 50G, step 47
+    { 144, 207, 269, 332, 394, 457, 519, 582 }, // 100G, step 63
+    { 287, 365, 443, 521, 600, 678, 756, 834 }, // 200G, step 78, min 560*512/1000
+    { 375, 570, 766, 961, 1156, 1352, 1547, 1742 }, // 400G RDMA, step 195
+    { 150, 150, 150, 150, 150, 150, 150, 150 } // default initial value on riscv
+};
+
+/*
+ * Diagnostic helper: compare the thresholds read back from hardware against
+ * the expected table row @tbl_index. Returns true (1) when everything
+ * matches, false (0) otherwise.
+ *
+ * Fix: the field-wise comparison of @port_th_para previously pointed at
+ * port_cos_th_para_tbl (the wrong table and wrong struct type); it must
+ * compare against port_th_para_tbl. The magic count 11 is now derived from
+ * the struct size (NOTE(review): assumes DPP_PBU_PORT_TH_PARA_T is a packed
+ * sequence of uint32_t fields -- confirm, the original code relied on the
+ * same layout).
+ */
+uint32_t zxdh_config_param_compare_test(
+    uint32_t tbl_index, DPP_PBU_PORT_TH_PARA_T port_th_para,
+    DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para, uint32_t *flow_td_th_para)
+{
+    uint32_t index = 0;
+    uint32_t *port_th_para_p1 = (uint32_t *)&port_th_para;
+    uint32_t *port_th_para_p2 = (uint32_t *)&port_th_para_tbl[tbl_index];
+    for (index = 0; index < sizeof(DPP_PBU_PORT_TH_PARA_T) / sizeof(uint32_t);
+         index++) {
+        if (*(port_th_para_p1 + index) != *(port_th_para_p2 + index)) {
+            return false;
+        }
+    }
+    for (index = 0; index < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; index++) {
+        if ((port_cos_th_para.cos_th[index] !=
+             port_cos_th_para_tbl[tbl_index].cos_th[index]) ||
+            (flow_td_th_para[index] != flow_td_th_tbl[tbl_index][index])) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/*
+ * Re-program the port/CoS/flow thresholds after a link-speed or flow-control
+ * change, then read everything back and log it for diagnostics.
+ * Skips all work when flow control is fully disabled, and skips per-port
+ * threshold tables for phy_port > 9 (unsupported).
+ * Returns 0 on success or an accumulated non-zero error mask.
+ *
+ * Fixes: the first LOG_INFO was missing its terminating semicolon and
+ * misleadingly claimed the speed was "abnormal"; the result of
+ * dpp_port_shape_set was silently discarded when the later threshold
+ * writes overwrote ret (now OR-accumulated).
+ */
+uint32_t zxdh_port_th_update(struct zxdh_en_device *en_dev)
+{
+    struct net_device *netdev = en_dev->netdev;
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    // en_dev also carries link_speed, which holds the concrete rate value
+    uint32_t tbl_index;
+    uint32_t index;
+    uint32_t pfc_cur_mac_en = 0;
+    uint32_t ret = 0;
+    uint32_t speed = 0;
+    uint32_t max_speed = ZXDH_DCBNL_MAXRATE_KBITPS;
+    uint32_t params_check = 0;
+    DPP_PBU_PORT_TH_PARA_T port_th_para_test = { 0 };
+    DPP_PBU_PORT_COS_TH_PARA_T port_cos_th_para_test = { 0 };
+    DPP_TM_SHAPE_PP_PARA_T port_shape_para_test = { 0 };
+    uint32_t flow_td_th_para_test[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 };
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+    LOG_INFO("port speed en_dev->speed: %d \n", en_dev->speed);
+
+    if (en_dev->speed) {
+        speed = en_dev->speed * ZXDH_DCBNL_RATEUNIT_K;
+    }
+
+    zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
+    if (pfc_cur_mac_en == BIT(SPM_FC_NONE)) {
+        LOG_INFO("port pfc & fc disable");
+        return 0;
+    }
+
+    // clamp abnormal speed values
+    if ((0 == speed) || (speed > max_speed)) {
+        LOG_INFO("port speed is abnormal: %u \n", speed);
+        speed = max_speed;
+    }
+    ret = dpp_port_shape_set(&pf_info, en_dev->phy_port, speed,
+                             ZXDH_DCBNL_PORT_RATE_CBS, 1);
+
+    if (en_dev->phy_port > 9) {
+        LOG_INFO("en_dev->phy_port not supported");
+        return 0;
+    }
+
+    tbl_index = zxdh_link_speed_to_index(en_dev->link_speed);
+
+    LOG_INFO("link_speed to tbl_index: %d", tbl_index);
+
+    if (pfc_cur_mac_en != BIT(SPM_FC_PAUSE_RX)) {
+        /* |= keeps any earlier dpp_port_shape_set failure visible. */
+        ret |= dpp_port_th_set(&pf_info, en_dev->phy_port,
+                               port_th_para_tbl + tbl_index);
+
+        ret |= dpp_port_cos_th_set(&pf_info, en_dev->phy_port,
+                                   port_cos_th_para_tbl + tbl_index);
+    }
+
+    // temporarily avoid reconfiguring the TD thresholds under FC
+    // if(pfc_cur_mac_en != BIT(SPM_FC_PAUSE_TX))
+    if (pfc_cur_mac_en == SPM_FC_PFC_FULL) {
+        ret |= zxdh_dcbnl_set_flow_td_th(en_priv, flow_td_th_tbl[tbl_index]);
+    }
+
+    /* Read-back below is for maintenance/diagnostics only. */
+    dpp_port_th_get(&pf_info, en_dev->phy_port, &port_th_para_test);
+    LOG_INFO("dpp_port_th_get lif_th:%d, lif_prv:%d, idma_prv:%d \n",
+             port_th_para_test.lif_th, port_th_para_test.lif_prv,
+             port_th_para_test.idma_prv);
+    LOG_INFO("idma_th0:%d, idma_th1:%d, idma_th2:%d, idma_th3:%d, idma_th4:%d, "
+             "idma_th5:%d, idma_th6:%d, idma_th7:%d \n",
+             port_th_para_test.idma_th_cos0, port_th_para_test.idma_th_cos1,
+             port_th_para_test.idma_th_cos2, port_th_para_test.idma_th_cos3,
+             port_th_para_test.idma_th_cos4, port_th_para_test.idma_th_cos5,
+             port_th_para_test.idma_th_cos6, port_th_para_test.idma_th_cos7);
+
+    dpp_port_cos_th_get(&pf_info, en_dev->phy_port, &port_cos_th_para_test);
+    for (index = 0; index < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; index++) {
+        LOG_INFO("dpp_port_cos_th_get cos%d:%d\n", index,
+                 port_cos_th_para_test.cos_th[index]);
+    }
+
+    dpp_port_shape_get(&pf_info, en_dev->phy_port, &port_shape_para_test);
+    LOG_INFO("dpp_port_shape_get cir:%d, cbs:%d, c_en:%d\n",
+             port_shape_para_test.cir, port_shape_para_test.cbs,
+             port_shape_para_test.c_en);
+
+    zxdh_dcbnl_get_flow_td_th(en_priv, flow_td_th_para_test);
+    for (index = 0; index < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; index++) {
+        LOG_INFO("zxdh_dcbnl_get_flow_td_th:tc_td_th[%d]:%d \n", index,
+                 flow_td_th_para_test[index]);
+    }
+
+    params_check = zxdh_config_param_compare_test(tbl_index, port_th_para_test,
+                                                  port_cos_th_para_test,
+                                                  flow_td_th_para_test);
+    if (params_check) {
+        LOG_INFO("th_params_check OK");
+    }
+
+    return ret;
+}
+
+/*
+ * Restore the port/CoS/flow thresholds to the riscv default table row.
+ * Returns the OR of the individual dpp call results (0 on full success).
+ * NOTE(review): the FC-mode guards here (PAUSE_TX for port thresholds,
+ * PAUSE_RX for flow TD) differ from zxdh_port_th_update (PAUSE_RX /
+ * PFC_FULL) -- confirm the asymmetry is intentional.
+ */
+uint32_t zxdh_port_th_update_to_default(struct zxdh_en_device *en_dev)
+{
+    struct net_device *netdev = en_dev->netdev;
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    uint32_t ret = 0;
+    uint32_t pfc_cur_mac_en = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en);
+
+    if (pfc_cur_mac_en != BIT(SPM_FC_PAUSE_TX)) {
+        ret = dpp_port_th_set(&pf_info, en_dev->phy_port,
+                              port_th_para_tbl + ZXDH_TRPG_DEFAULT);
+
+        ret |= dpp_port_cos_th_set(&pf_info, en_dev->phy_port,
+                                   port_cos_th_para_tbl + ZXDH_TRPG_DEFAULT);
+    }
+
+    if (pfc_cur_mac_en != BIT(SPM_FC_PAUSE_RX)) {
+        ret |= zxdh_dcbnl_set_flow_td_th(en_priv,
+                                         flow_td_th_tbl[ZXDH_TRPG_DEFAULT]);
+    }
+
+    return ret;
+}
+
+/*
+ * Enable PFC in the queue-management unit and read the setting back for
+ * verification. Returns the dpp_qmu_pfc_en_set result (0 on success).
+ */
+uint32_t zxdh_dcbnl_pfc_init(struct zxdh_en_priv *en_priv)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t ret = 0;
+    uint32_t test_en = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    LOG_INFO("zxdh_dcbnl_pfc_init start\n");
+
+    ret = dpp_qmu_pfc_en_set(&pf_info, 1);
+
+    if (ret) {
+        LOG_ERR("dpp_qmu_pfc_en_set failed");
+    }
+
+    /* Read-back is diagnostic only; its result is not checked. */
+    dpp_qmu_pfc_en_get(&pf_info, &test_en);
+    LOG_INFO("dpp_qmu_pfc_en_get:%d", test_en);
+    LOG_INFO("zxdh_dcbnl_pfc_init end\n");
+
+    return ret;
+}
+
+/*
+ * Build the complete ETS scheduling tree for a port: reset the node lists,
+ * allocate/link the SE hierarchy, attach the per-TC flows, then program
+ * the default classification tables and trust mode.
+ * On failure the already-created flow and SE resources are torn down in
+ * reverse order (goto-cleanup). Returns 0 on success.
+ */
+uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct zxdh_en_priv *en_priv)
+{
+    uint32_t err = 0;
+
+    zxdh_dcbnl_init_ets_list(en_priv);
+
+    err = zxdh_dcbnl_build_ets_scheduling_tree(en_priv);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed \n");
+        goto init_ets_se_error;
+    }
+
+    err = zxdh_dcbnl_scheduling_tree_link_tc(en_priv);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: scheduling_tree_link_tc failed \n");
+        goto init_ets_error;
+    }
+
+    err = zxdh_dcbnl_init_trust_and_table(en_priv);
+    if (err) {
+        LOG_ERR("dcbnl_init_ets: init_trust_and_table failed \n");
+        goto init_ets_error;
+    }
+
+    return 0;
+
+init_ets_error:
+    zxdh_dcbnl_free_flow_resources(en_priv);
+init_ets_se_error:
+    zxdh_dcbnl_free_se_resources(en_priv);
+    LOG_INFO("dcbnl_init_ets failed \n");
+    return err;
+}
+
+/*
+ * Open (mode == 1) or close (mode == 0) the TM microcode switch for the
+ * port. Any other mode is logged as an error and err stays 0 --
+ * NOTE(review): confirm an invalid mode should not return an error code.
+ */
+uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode)
+{
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t err = 0;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    if (mode == 1) {
+        err = dpp_tm_pport_mcode_switch_set(&pf_info, en_dev->phy_port, 1);
+        if (err) {
+            LOG_ERR(" set_tm_gate open failed \n");
+        }
+    } else if (mode == 0) {
+        err = dpp_tm_pport_mcode_switch_del(&pf_info, en_dev->phy_port);
+        if (err) {
+            LOG_ERR(" set_tm_gate close failed \n");
+        }
+    } else {
+        LOG_ERR(" error \n");
+    }
+
+    return err;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h
index 485a377fbb734c7d0740385d38dc83b86a3d6e21..e7a649ecb99bab6e322806a344e4121967047668 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h
@@ -1,35 +1,39 @@
-#ifndef __ZXDH_EN_DCBNL_API_H__
-#define __ZXDH_EN_DCBNL_API_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-struct zxdh_en_priv;
-
-uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv);
-uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct zxdh_en_priv *en_priv);
-uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv);
-uint32_t zxdh_dcbnl_pfc_init(struct zxdh_en_priv *en_priv);
-
-uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv);
-uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv);
-
-uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv, uint8_t *tc_type, uint8_t *tc_tx_bw);
-uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv, uint8_t *prio_tc);
-uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv, uint32_t *maxrate);
-uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust);
-uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp, uint8_t prio);
-
-uint32_t zxdh_port_th_update(struct zxdh_en_device *en_dev);
-uint32_t zxdh_port_th_update_to_default(struct zxdh_en_device *en_dev);
-
-uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+#ifndef __ZXDH_EN_DCBNL_API_H__
+#define __ZXDH_EN_DCBNL_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+struct zxdh_en_priv;
+
+uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv);
+uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct zxdh_en_priv *en_priv);
+uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv);
+uint32_t zxdh_dcbnl_pfc_init(struct zxdh_en_priv *en_priv);
+
+uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv);
+uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv);
+
+uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv,
+ uint8_t *tc_type, uint8_t *tc_tx_bw);
+uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv,
+ uint8_t *prio_tc);
+uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv,
+ uint32_t *maxrate);
+uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust);
+uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp,
+ uint8_t prio);
+
+uint32_t zxdh_port_th_update(struct zxdh_en_device *en_dev);
+uint32_t zxdh_port_th_update_to_default(struct zxdh_en_device *en_dev);
+
+uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c b/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c
old mode 100755
new mode 100644
index 150b79f0de0c11143c993a4abfb018bd28a8317f..f48dacd8e6a64dc6a456fd042adc6c05cec6873c
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c
@@ -20,7 +20,6 @@
#include
#include
#include
-
#include
#include
#include
@@ -28,1033 +27,1012 @@
#include "../en_aux.h"
#include "drs_sec_dtb.h"
-
-
-UINT32 g_udDownloadSaNum = 1; //sa表的数量
-UINT32 gudTunnelID = 0;
+UINT32 g_udDownloadSaNum = 1; // sa表的数量
+UINT32 gudTunnelID;
UINT32 gudDtbSaNum = 1;
-E_INLINE_TYPE e_gInlineType = 0; //0是inline入境 1是inline出境
-UINT64 guddAntiWindow = 2047; //得配成2047,否则覆盖不到sn为0的情况
+E_INLINE_TYPE e_gInlineType; // 0是inline入境 1是inline出境
+UINT64 guddAntiWindow = 2047; //得配成2047,否则覆盖不到sn为0的情况
-UINT64 guddSecTestSaDtbPdVirAddr = 0;
+UINT64 guddSecTestSaDtbPdVirAddr;
UINT32 gudSecTestSwanSrcIp = 0x0A04B007;
UINT32 gudSecTestSwanDstIp = 0x0AE3656D;
-UINT8 gudIpType = 1;
+UINT8 gudIpType = 1;
// 出入境分开下表需要将此字段置0,会影响入境下表
-UINT16 gusOutSaOffset=0;
-UINT32 gudOutSaId=0;
-
+UINT16 gusOutSaOffset;
+UINT32 gudOutSaId;
UINT64 HalBttlSecRegBaseGet(struct zxdh_en_device *en_dev)
{
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
- return en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + 0x7000; //0x7000是目前sec模块寄存器基地址的固定偏移,包括PF/VF
+ PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
+ return en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) +
+ 0x7000; // 0x7000是目前sec模块寄存器基地址的固定偏移,包括PF/VF
}
-
#if 1
-static int zxdh_ipsec_cipher_id_get(u8 ealgo, char* p_alg_name,char* p_aead_name,E_HAL_SEC_IPSEC_CIPHER_ALG *p_zxdh_ealgo_id)
+static int zxdh_ipsec_cipher_id_get(u8 ealgo, char *p_alg_name,
+ char *p_aead_name,
+ E_HAL_SEC_IPSEC_CIPHER_ALG *p_zxdh_ealgo_id)
{
- int i = 0;
- T_ZXDH_EALGO atZxdhEalgo[] =
- {
- {"rfc7539esp(chacha20,poly1305)","",e_HAL_IPSEC_CIPHER_CHACHA},
- };
-
- if((NULL == p_alg_name)||(NULL == p_aead_name))
- {
- return -1;
- }
- for(i=0;iaalg)
- {
- p_aalg_alg_name = xs->aalg->alg_name;
- }
- if(NULL != xs->ealg)
- {
- p_ealg_alg_name = xs->ealg->alg_name;
- }
- if(NULL != xs->aead)
- {
- p_aead_alg_name = xs->aead->alg_name;
- }
-
- /*AH应该提前拦截*/
- /*空加密空认证应该提前拦截*/
-
- //DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs);
- //DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa:0x%llx\n",ptDtbOutSa);
- /*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/
-
- err = zxdh_ipsec_auth_id_get(xs->props.aalgo,p_aalg_alg_name,&zxdh_auth_id);
- if (err) {
- DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n",xs->props.aalgo);
- return -EINVAL;
- }
- err = zxdh_ipsec_cipher_id_get(xs->props.ealgo,p_ealg_alg_name,p_aead_alg_name,&zxdh_ealgo_id);
- if (err) {
- DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n",xs->props.aalgo);
- return -EINVAL;
- }
-
- //DH_LOG_INFO(MODULE_SEC, "replay_esn 0x%llx\n",xs->replay_esn);
- ptDtbOutSa->ucAuthkeyLen = 0; /*默认值*/
-
- /*处理单认证算法*/
- if(zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE;
- }
- else
- {
- ptDtbOutSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7)/8;
- udIcvLen = (xs->aalg->alg_trunc_len + 7)/8;
- memcpy((ptDtbOutSa->aucSaAuthKey),xs->aalg->alg_key,ptDtbOutSa->ucAuthkeyLen);
- }
-
- if((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL)&&(zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL))
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE;
- }
- /*这里处理组合算法的4字节的salt*/
- if((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM)||(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA)||(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC))
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
-
- ptDtbOutSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7)/8 - 4;
- udIcvLen = (xs->aead->alg_icv_len+ 7)/8;
- memcpy(&(ptDtbOutSa->udSalt), xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen,sizeof(ptDtbOutSa->udSalt));
- memcpy((ptDtbOutSa->aucSaCipherKey),xs->aead->alg_key,ptDtbOutSa->ucCipherkeyLen);
- }
- /*这里处理组合算法CCM,CCM的salt是3B*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
-
- ptDtbOutSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7)/8 - 3;
- udIcvLen = (xs->aead->alg_icv_len+ 7)/8;
- memcpy(&(ptDtbOutSa->udSalt), xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen,sizeof(ptDtbOutSa->udSalt));
- memcpy((ptDtbOutSa->aucSaCipherKey),xs->aead->alg_key,ptDtbOutSa->ucCipherkeyLen);
- }
- /*这里处理有salt的单加密算法CTR,salt是4B*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR)
- {
- ptDtbOutSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7)/8 - 4;
- memcpy(&(ptDtbOutSa->udSalt), xs->ealg->alg_key + ptDtbOutSa->ucCipherkeyLen,sizeof(ptDtbOutSa->udSalt));
- memcpy((ptDtbOutSa->aucSaCipherKey),xs->ealg->alg_key,ptDtbOutSa->ucCipherkeyLen);
- }
- /*空加密算法*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE;
- ptDtbOutSa->ucCipherkeyLen = 0;
- }
- /*单加密算法,且没有salt*/
- else
- {
- ptDtbOutSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7)/8;
- memcpy((ptDtbOutSa->aucSaCipherKey),xs->ealg->alg_key,ptDtbOutSa->ucCipherkeyLen);
- }
-
-
- ptDtbOutSa->udSN = xs->replay.oseq;
- ptDtbOutSa->uddProcessedByteCnt = xs->curlft.bytes; //PUB_HTON64(uddProcessedByteCnt); //没法设置,用于构造iv, 一般都是用seq构造的iv
-
- ptDtbOutSa->udSPI = xs->id.spi;
- ptDtbOutSa->udSaId = PUB_HTON32(0x80001); //PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/
-
- ptDtbOutSa->ucCiperID = zxdh_ealgo_id;
- ptDtbOutSa->ucAuthID = zxdh_auth_id;
-
- //CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam)
- //E_CMDK_SEC_ENCRYP_MODE 这个只能根据算法反推 gcm ccm gmac chacha是combine gaucSecSwanIpv6Data
- //udIcvLen
- //mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE , XFRM_MODE_TRANSPORT
- /*这个地方还要根据算法做个转换 e_SEC_ENCRYP_ESP_COMBINED_MODE 暂时用GCM*/
- CmdkBttlSecSaParamConstruct(1,xs->props.mode,0,e_SEC_SA_LIVETIME_TIME_TYPE,e_SEC_SA_DF_BYPASS_MODE,zxdh_encpy_mode,udIcvLen,&usSaParam);
- ptDtbOutSa->usSaParam = PUB_HTON16(usSaParam);
-
-
-
- ptDtbOutSa->usFrag_State = PUB_HTON16(0xd2c8);
-
- ptDtbOutSa->udLifetimeSecMax = PUB_HTON32(0xc4454766);
- ptDtbOutSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff);
-
- ptDtbOutSa->ucProtocol = xs->id.proto; //50esp协议 51ah
- ptDtbOutSa->ucTOS = 0xbb;
-
- /*esn相关*/
- ptDtbOutSa->ucEsnFlag = 0; /* 默认是非ESN模式 */
- if(xs->props.flags & XFRM_STATE_ESN)
- {
- if(NULL == xs->replay_esn)
- return 1;
- ptDtbOutSa->ucEsnFlag = 0xff; //ucEsnFlag; //0xff表示开启ESN,否则不开启
- ptDtbOutSa->udSN = xs->replay_esn->oseq;
- ptDtbOutSa->udESN = xs->replay_esn->oseq_hi; /*不需要考虑replay_esn为null的情况?*/
- }
-
- /*ipv4*/
- if(AF_INET == xs->props.family)
- {
- ptDtbOutSa->ucIpType = 1<<6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/
- ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a4;
- ptDtbOutSa->udSrcAddress1 = 0x0;
- ptDtbOutSa->udSrcAddress2 = 0x0;
- ptDtbOutSa->udSrcAddress3 = 0x0;
-
- ptDtbOutSa->udDstAddress0 = xs->id.daddr.a4;
- ptDtbOutSa->udDstAddress1 = 0x0;
- ptDtbOutSa->udDstAddress2 = 0x0;
- ptDtbOutSa->udDstAddress3 = 0x0;
- }
- /*ipv4*/
- else if(AF_INET6 == xs->props.family)
- {
- ptDtbOutSa->ucIpType = 2<<6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/
- ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a6[0];
- ptDtbOutSa->udSrcAddress1 = xs->props.saddr.a6[1];
- ptDtbOutSa->udSrcAddress2 = xs->props.saddr.a6[2];
- ptDtbOutSa->udSrcAddress3 = xs->props.saddr.a6[3];
-
- ptDtbOutSa->udDstAddress0 = xs->id.daddr.a6[0];
- ptDtbOutSa->udDstAddress1 = xs->id.daddr.a6[1];
- ptDtbOutSa->udDstAddress2 = xs->id.daddr.a6[2];
- ptDtbOutSa->udDstAddress3 = xs->id.daddr.a6[3];
- }
- else
- {
- return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/
- }
-
- ptDtbOutSa->udRSV0 = 0x0;
- ptDtbOutSa->udRSV1 = 0x0;
- ptDtbOutSa->udRSV2 = 0x0;
-
- DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthkeyLen:0x%x\n",ptDtbOutSa->ucAuthkeyLen);
- DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCipherkeyLen:0x%x\n",ptDtbOutSa->ucCipherkeyLen);
- DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n",zxdh_encpy_mode);
- DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCiperID:0x%x\n",ptDtbOutSa->ucCiperID);
- DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthID:0x%x\n",ptDtbOutSa->ucAuthID);
-
-
- return 0;
+ int err = -EINVAL;
+ u16 usSaParam = 0;
+ u32 udIcvLen = 0;
+ E_HAL_SEC_IPSEC_AUTH_ALG zxdh_auth_id;
+ E_HAL_SEC_IPSEC_CIPHER_ALG zxdh_ealgo_id;
+ E_CMDK_SEC_ENCRYP_MODE zxdh_encpy_mode = e_SEC_ENCRYP_MODE_LAST;
+ char test_alg_name[] = "zxdh_alg_test";
+ char *p_aalg_alg_name = test_alg_name;
+ char *p_ealg_alg_name = test_alg_name;
+ char *p_aead_alg_name = test_alg_name;
+
+ if (NULL != xs->aalg) {
+ p_aalg_alg_name = xs->aalg->alg_name;
+ }
+ if (NULL != xs->ealg) {
+ p_ealg_alg_name = xs->ealg->alg_name;
+ }
+ if (NULL != xs->aead) {
+ p_aead_alg_name = xs->aead->alg_name;
+ }
+
+ /*AH应该提前拦截*/
+ /*空加密空认证应该提前拦截*/
+
+ // DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs);
+ // DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa:0x%llx\n",ptDtbOutSa);
+ /*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/
+
+ err = zxdh_ipsec_auth_id_get(xs->props.aalgo, p_aalg_alg_name,
+ &zxdh_auth_id);
+ if (err) {
+ DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n",
+ xs->props.aalgo);
+ return -EINVAL;
+ }
+ err = zxdh_ipsec_cipher_id_get(xs->props.ealgo, p_ealg_alg_name,
+ p_aead_alg_name, &zxdh_ealgo_id);
+ if (err) {
+ DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n",
+ xs->props.aalgo);
+ return -EINVAL;
+ }
+
+ // DH_LOG_INFO(MODULE_SEC, "replay_esn 0x%llx\n",xs->replay_esn);
+ ptDtbOutSa->ucAuthkeyLen = 0; /*默认值*/
+
+ /*处理单认证算法*/
+ if (zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE;
+ } else {
+ ptDtbOutSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7) / 8;
+ udIcvLen = (xs->aalg->alg_trunc_len + 7) / 8;
+ memcpy((ptDtbOutSa->aucSaAuthKey), xs->aalg->alg_key,
+ ptDtbOutSa->ucAuthkeyLen);
+ }
+
+ if ((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL) &&
+ (zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL)) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE;
+ }
+ /*这里处理组合算法的4字节的salt*/
+ if ((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM) ||
+ (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA) ||
+ (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC)) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
+
+ ptDtbOutSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 4;
+ udIcvLen = (xs->aead->alg_icv_len + 7) / 8;
+ memcpy(&(ptDtbOutSa->udSalt),
+ xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen,
+ sizeof(ptDtbOutSa->udSalt));
+ memcpy((ptDtbOutSa->aucSaCipherKey), xs->aead->alg_key,
+ ptDtbOutSa->ucCipherkeyLen);
+ }
+ /*这里处理组合算法CCM,CCM的salt是3B*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
+
+ ptDtbOutSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 3;
+ udIcvLen = (xs->aead->alg_icv_len + 7) / 8;
+ memcpy(&(ptDtbOutSa->udSalt),
+ xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen,
+ sizeof(ptDtbOutSa->udSalt));
+ memcpy((ptDtbOutSa->aucSaCipherKey), xs->aead->alg_key,
+ ptDtbOutSa->ucCipherkeyLen);
+ }
+ /*这里处理有salt的单加密算法CTR,salt是4B*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR) {
+ ptDtbOutSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8 - 4;
+ memcpy(&(ptDtbOutSa->udSalt),
+ xs->ealg->alg_key + ptDtbOutSa->ucCipherkeyLen,
+ sizeof(ptDtbOutSa->udSalt));
+ memcpy((ptDtbOutSa->aucSaCipherKey), xs->ealg->alg_key,
+ ptDtbOutSa->ucCipherkeyLen);
+ }
+ /*空加密算法*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE;
+ ptDtbOutSa->ucCipherkeyLen = 0;
+ }
+ /*单加密算法,且没有salt*/
+ else {
+ ptDtbOutSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8;
+ memcpy((ptDtbOutSa->aucSaCipherKey), xs->ealg->alg_key,
+ ptDtbOutSa->ucCipherkeyLen);
+ }
+
+ ptDtbOutSa->udSN = xs->replay.oseq;
+ ptDtbOutSa->uddProcessedByteCnt =
+ xs->curlft.bytes; // PUB_HTON64(uddProcessedByteCnt);
+ // //没法设置,用于构造iv, 一般都是用seq构造的iv
+
+ ptDtbOutSa->udSPI = xs->id.spi;
+ ptDtbOutSa->udSaId = PUB_HTON32(
+ 0x80001); // PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/
+
+ ptDtbOutSa->ucCiperID = zxdh_ealgo_id;
+ ptDtbOutSa->ucAuthID = zxdh_auth_id;
+
+ // CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE
+ // eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES
+ // eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE
+ // eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam) E_CMDK_SEC_ENCRYP_MODE
+ // 这个只能根据算法反推 gcm ccm gmac chacha是combine gaucSecSwanIpv6Data
+ // udIcvLen
+ // mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE , XFRM_MODE_TRANSPORT
+ /*这个地方还要根据算法做个转换 e_SEC_ENCRYP_ESP_COMBINED_MODE 暂时用GCM*/
+ CmdkBttlSecSaParamConstruct(1, xs->props.mode, 0,
+ e_SEC_SA_LIVETIME_TIME_TYPE,
+ e_SEC_SA_DF_BYPASS_MODE, zxdh_encpy_mode,
+ udIcvLen, &usSaParam);
+ ptDtbOutSa->usSaParam = PUB_HTON16(usSaParam);
+
+ ptDtbOutSa->usFrag_State = PUB_HTON16(0xd2c8);
+
+ ptDtbOutSa->udLifetimeSecMax = PUB_HTON32(0xc4454766);
+ ptDtbOutSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff);
+
+ ptDtbOutSa->ucProtocol = xs->id.proto; // 50esp协议 51ah
+ ptDtbOutSa->ucTOS = 0xbb;
+
+ /*esn相关*/
+ ptDtbOutSa->ucEsnFlag = 0; /* 默认是非ESN模式 */
+ if (xs->props.flags & XFRM_STATE_ESN) {
+ if (NULL == xs->replay_esn)
+ return 1;
+ ptDtbOutSa->ucEsnFlag =
+ 0xff; // ucEsnFlag; //0xff表示开启ESN,否则不开启
+ ptDtbOutSa->udSN = xs->replay_esn->oseq;
+ ptDtbOutSa->udESN =
+ xs->replay_esn->oseq_hi; /*不需要考虑replay_esn为null的情况?*/
+ }
+
+ /*ipv4*/
+ if (AF_INET == xs->props.family) {
+ ptDtbOutSa->ucIpType = 1 << 6; // bit[7:6] 1:ipv4 2:ipv6 /*换成宏*/
+ ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a4;
+ ptDtbOutSa->udSrcAddress1 = 0x0;
+ ptDtbOutSa->udSrcAddress2 = 0x0;
+ ptDtbOutSa->udSrcAddress3 = 0x0;
+
+ ptDtbOutSa->udDstAddress0 = xs->id.daddr.a4;
+ ptDtbOutSa->udDstAddress1 = 0x0;
+ ptDtbOutSa->udDstAddress2 = 0x0;
+ ptDtbOutSa->udDstAddress3 = 0x0;
+ }
+ /*ipv6*/
+ else if (AF_INET6 == xs->props.family) {
+ ptDtbOutSa->ucIpType = 2 << 6; // bit[7:6] 1:ipv4 2:ipv6 /*换成宏*/
+ ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a6[0];
+ ptDtbOutSa->udSrcAddress1 = xs->props.saddr.a6[1];
+ ptDtbOutSa->udSrcAddress2 = xs->props.saddr.a6[2];
+ ptDtbOutSa->udSrcAddress3 = xs->props.saddr.a6[3];
+
+ ptDtbOutSa->udDstAddress0 = xs->id.daddr.a6[0];
+ ptDtbOutSa->udDstAddress1 = xs->id.daddr.a6[1];
+ ptDtbOutSa->udDstAddress2 = xs->id.daddr.a6[2];
+ ptDtbOutSa->udDstAddress3 = xs->id.daddr.a6[3];
+ } else {
+ return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/
+ }
+
+ ptDtbOutSa->udRSV0 = 0x0;
+ ptDtbOutSa->udRSV1 = 0x0;
+ ptDtbOutSa->udRSV2 = 0x0;
+
+ DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthkeyLen:0x%x\n",
+ ptDtbOutSa->ucAuthkeyLen);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCipherkeyLen:0x%x\n",
+ ptDtbOutSa->ucCipherkeyLen);
+ DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n", zxdh_encpy_mode);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCiperID:0x%x\n",
+ ptDtbOutSa->ucCiperID);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthID:0x%x\n",
+ ptDtbOutSa->ucAuthID);
+
+ return 0;
}
-static int zxdh_ipsec_dtb_in_sa_get(struct xfrm_state *xs,T_HAL_SA_DTB_HW_IN* ptDtbInSa)
+static int zxdh_ipsec_dtb_in_sa_get(struct xfrm_state *xs,
+ T_HAL_SA_DTB_HW_IN *ptDtbInSa)
{
- int err = -EINVAL;
- u16 usSaParam = 0;
- u32 udIcvLen = 0;
- E_HAL_SEC_IPSEC_AUTH_ALG zxdh_auth_id;
- E_HAL_SEC_IPSEC_CIPHER_ALG zxdh_ealgo_id;
- E_CMDK_SEC_ENCRYP_MODE zxdh_encpy_mode = e_SEC_ENCRYP_MODE_LAST;
- char test_alg_name[] = "zxdh_alg_test";
- char* p_aalg_alg_name = test_alg_name;
- char* p_ealg_alg_name = test_alg_name;
- char* p_aead_alg_name = test_alg_name;
-
-/*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/
- if(NULL != xs->aalg)
- {
- p_aalg_alg_name = xs->aalg->alg_name;
- }
- if(NULL != xs->ealg)
- {
- p_ealg_alg_name = xs->ealg->alg_name;
- }
- if(NULL != xs->aead)
- {
- p_aead_alg_name = xs->aead->alg_name;
- }
- err = zxdh_ipsec_auth_id_get(xs->props.aalgo,p_aalg_alg_name,&zxdh_auth_id);
- if (err) {
- DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n",xs->props.aalgo);
- return -EINVAL;
- }
- err = zxdh_ipsec_cipher_id_get(xs->props.ealgo,p_ealg_alg_name,p_aead_alg_name,&zxdh_ealgo_id);
- if (err) {
- DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n",xs->props.aalgo);
- return -EINVAL;
- }
-
- ptDtbInSa->ucAuthkeyLen = 0; /*默认值*/
-
- /*处理单认证算法*/
- if(zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE;
- }
- else
- {
- ptDtbInSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7)/8;
- udIcvLen = (xs->aalg->alg_trunc_len + 7)/8;
- memcpy((ptDtbInSa->aucSaAuthKey),xs->aalg->alg_key,ptDtbInSa->ucAuthkeyLen);
- }
-
- if((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL)&&(zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL))
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE;
- }
- /*这里处理组合算法的4字节的salt*/
- if((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM)||(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA)||(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC))
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
-
- ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7)/8 - 4;
- udIcvLen = (xs->aead->alg_icv_len+ 7)/8;
- memcpy(&(ptDtbInSa->udSalt), xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen,sizeof(ptDtbInSa->udSalt));
- memcpy((ptDtbInSa->aucSaCipherKey),xs->aead->alg_key,ptDtbInSa->ucCipherkeyLen);
- }
- /*这里处理组合算法CCM,CCM的salt是3B*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
-
- ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7)/8 - 3;
- udIcvLen = (xs->aead->alg_icv_len+ 7)/8;
- memcpy(&(ptDtbInSa->udSalt), xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen,sizeof(ptDtbInSa->udSalt));
- memcpy((ptDtbInSa->aucSaCipherKey),xs->aead->alg_key,ptDtbInSa->ucCipherkeyLen);
- }
- /*这里处理有salt的单加密算法CTR,salt是4B*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR)
- {
- ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7)/8 - 4;
- memcpy(&(ptDtbInSa->udSalt), xs->ealg->alg_key + ptDtbInSa->ucCipherkeyLen,sizeof(ptDtbInSa->udSalt));
- memcpy((ptDtbInSa->aucSaCipherKey),xs->ealg->alg_key,ptDtbInSa->ucCipherkeyLen);
- }
- /*空加密算法*/
- else if(zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL)
- {
- zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE;
- ptDtbInSa->ucCipherkeyLen = 0;
- }
- /*单加密算法,且没有salt*/
- else
- {
- ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7)/8;
- memcpy((ptDtbInSa->aucSaCipherKey),xs->ealg->alg_key,ptDtbInSa->ucCipherkeyLen);
- }
-
- ptDtbInSa->uddProcessedByteCnt = xs->curlft.bytes; //PUB_HTON64(uddProcessedByteCnt); //没法设置,用于构造iv, 一般都是用seq构造的iv
-
- ptDtbInSa->udSPI = xs->id.spi;
- ptDtbInSa->udSaId = PUB_HTON32(0x80000); //PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/
-
- ptDtbInSa->ucCiperID = zxdh_ealgo_id;
- ptDtbInSa->ucAuthID = zxdh_auth_id;
-
- //CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam)
- //CmdkBttlSecSaParamConstruct(1,x->props.mode,不确定,e_SEC_SA_LIVETIME_BYTE_TYPE(好像硬件只支持这个),E_CMDK_SEC_SA_DF_MODE(0),)
- //E_CMDK_SEC_ENCRYP_MODE 这个只能根据算法反推 gcm ccm gmac chacha是combine gaucSecSwanIpv6Data
- //udIcvLen
- //mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE , XFRM_MODE_TRANSPORT
- CmdkBttlSecSaParamConstruct(1,xs->props.mode,0,e_SEC_SA_LIVETIME_TIME_TYPE,e_SEC_SA_DF_BYPASS_MODE,zxdh_encpy_mode,udIcvLen,&usSaParam);
- ptDtbInSa->usSaParam = PUB_HTON16(usSaParam);
-
-
-
- ptDtbInSa->usFrag_State = PUB_HTON16(0xd2c8);
-
- ptDtbInSa->udLifetimeSecMax = PUB_HTON32(0xc4454766);
- ptDtbInSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff);
-
- ptDtbInSa->ucProtocol = xs->id.proto; //50esp协议 51ah
- ptDtbInSa->ucTOS = 0xbb;
-
- /*esn相关*/
- ptDtbInSa->ucEsnFlag = 0; /* 默认是非ESN模式 */
- if(xs->props.flags & XFRM_STATE_ESN)
- {
- if(NULL == xs->replay_esn)
- return 1;
- ptDtbInSa->ucEsnFlag = 0xff; //ucEsnFlag; //0xff表示开启ESN,否则不开启
- ptDtbInSa->udAntiWindowHigh = PUB_HTON32(xs->replay_esn->seq_hi); /*ESN*/
- ptDtbInSa->udAntiWindowLow = PUB_HTON32(xs->replay_esn->replay_window - 1); /*窗口上限sn,这里使用窗口大小-1*/
- memcpy((void*)ptDtbInSa->aucBitmap,(void*)xs->replay_esn->bmp,xs->replay_esn->bmp_len * sizeof(__u32)); /*需要提前判断bmp_len不能太大,避免超过64(拦截窗口大小就行)*/
- }
-
- /*ipv4*/
- if(AF_INET == xs->props.family)
- {
- ptDtbInSa->ucIpType = 1<<6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/
- ptDtbInSa->udSrcAddress0 = xs->props.saddr.a4;
- ptDtbInSa->udSrcAddress1 = 0x0;
- ptDtbInSa->udSrcAddress2 = 0x0;
- ptDtbInSa->udSrcAddress3 = 0x0;
-
- ptDtbInSa->udDstAddress0 = xs->id.daddr.a4;
- ptDtbInSa->udDstAddress1 = 0x0;
- ptDtbInSa->udDstAddress2 = 0x0;
- ptDtbInSa->udDstAddress3 = 0x0;
- }
- /*ipv4*/
- else if(AF_INET6 == xs->props.family)
- {
- ptDtbInSa->ucIpType = 2<<6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/
- ptDtbInSa->udSrcAddress0 = xs->props.saddr.a6[0];
- ptDtbInSa->udSrcAddress1 = xs->props.saddr.a6[1];
- ptDtbInSa->udSrcAddress2 = xs->props.saddr.a6[2];
- ptDtbInSa->udSrcAddress3 = xs->props.saddr.a6[3];
-
- ptDtbInSa->udDstAddress0 = xs->id.daddr.a6[0];
- ptDtbInSa->udDstAddress1 = xs->id.daddr.a6[1];
- ptDtbInSa->udDstAddress2 = xs->id.daddr.a6[2];
- ptDtbInSa->udDstAddress3 = xs->id.daddr.a6[3];
- }
- else
- {
- return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/
- }
-
- ptDtbInSa->udOutSaId = 0x0; /*内核不需要出入境sa一起下吧,固定为0*/
- ptDtbInSa->usOutSaOffset = 0x0;
-
- ptDtbInSa->udRSV0 = 0x0;
- ptDtbInSa->udRSV1 = 0x0;
-
- DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthkeyLen:0x%x\n",ptDtbInSa->ucAuthkeyLen);
- DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCipherkeyLen:0x%x\n",ptDtbInSa->ucCipherkeyLen);
- DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n",zxdh_encpy_mode);
- DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCiperID:0x%x\n",ptDtbInSa->ucCiperID);
- DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthID:0x%x\n",ptDtbInSa->ucAuthID);
-
-
- return 0;
+ int err = -EINVAL;
+ u16 usSaParam = 0;
+ u32 udIcvLen = 0;
+ E_HAL_SEC_IPSEC_AUTH_ALG zxdh_auth_id;
+ E_HAL_SEC_IPSEC_CIPHER_ALG zxdh_ealgo_id;
+ E_CMDK_SEC_ENCRYP_MODE zxdh_encpy_mode = e_SEC_ENCRYP_MODE_LAST;
+ char test_alg_name[] = "zxdh_alg_test";
+ char *p_aalg_alg_name = test_alg_name;
+ char *p_ealg_alg_name = test_alg_name;
+ char *p_aead_alg_name = test_alg_name;
+
+ /*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/
+ if (NULL != xs->aalg) {
+ p_aalg_alg_name = xs->aalg->alg_name;
+ }
+ if (NULL != xs->ealg) {
+ p_ealg_alg_name = xs->ealg->alg_name;
+ }
+ if (NULL != xs->aead) {
+ p_aead_alg_name = xs->aead->alg_name;
+ }
+ err = zxdh_ipsec_auth_id_get(xs->props.aalgo, p_aalg_alg_name,
+ &zxdh_auth_id);
+ if (err) {
+ DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n",
+ xs->props.aalgo);
+ return -EINVAL;
+ }
+ err = zxdh_ipsec_cipher_id_get(xs->props.ealgo, p_ealg_alg_name,
+ p_aead_alg_name, &zxdh_ealgo_id);
+ if (err) {
+ DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n",
+ xs->props.aalgo);
+ return -EINVAL;
+ }
+
+ ptDtbInSa->ucAuthkeyLen = 0; /*默认值*/
+
+ /*处理单认证算法*/
+ if (zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE;
+ } else {
+ ptDtbInSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7) / 8;
+ udIcvLen = (xs->aalg->alg_trunc_len + 7) / 8;
+ memcpy((ptDtbInSa->aucSaAuthKey), xs->aalg->alg_key,
+ ptDtbInSa->ucAuthkeyLen);
+ }
+
+ if ((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL) &&
+ (zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL)) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE;
+ }
+ /*这里处理组合算法的4字节的salt*/
+ if ((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM) ||
+ (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA) ||
+ (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC)) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
+
+ ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 4;
+ udIcvLen = (xs->aead->alg_icv_len + 7) / 8;
+ memcpy(&(ptDtbInSa->udSalt),
+ xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen,
+ sizeof(ptDtbInSa->udSalt));
+ memcpy((ptDtbInSa->aucSaCipherKey), xs->aead->alg_key,
+ ptDtbInSa->ucCipherkeyLen);
+ }
+ /*这里处理组合算法CCM,CCM的salt是3B*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE;
+
+ ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 3;
+ udIcvLen = (xs->aead->alg_icv_len + 7) / 8;
+ memcpy(&(ptDtbInSa->udSalt),
+ xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen,
+ sizeof(ptDtbInSa->udSalt));
+ memcpy((ptDtbInSa->aucSaCipherKey), xs->aead->alg_key,
+ ptDtbInSa->ucCipherkeyLen);
+ }
+ /*这里处理有salt的单加密算法CTR,salt是4B*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR) {
+ ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8 - 4;
+ memcpy(&(ptDtbInSa->udSalt),
+ xs->ealg->alg_key + ptDtbInSa->ucCipherkeyLen,
+ sizeof(ptDtbInSa->udSalt));
+ memcpy((ptDtbInSa->aucSaCipherKey), xs->ealg->alg_key,
+ ptDtbInSa->ucCipherkeyLen);
+ }
+ /*空加密算法*/
+ else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL) {
+ zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE;
+ ptDtbInSa->ucCipherkeyLen = 0;
+ }
+ /*单加密算法,且没有salt*/
+ else {
+ ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8;
+ memcpy((ptDtbInSa->aucSaCipherKey), xs->ealg->alg_key,
+ ptDtbInSa->ucCipherkeyLen);
+ }
+
+ ptDtbInSa->uddProcessedByteCnt =
+ xs->curlft.bytes; // PUB_HTON64(uddProcessedByteCnt);
+ // //没法设置,用于构造iv, 一般都是用seq构造的iv
+
+ ptDtbInSa->udSPI = xs->id.spi;
+ ptDtbInSa->udSaId = PUB_HTON32(
+ 0x80000); // PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/
+
+ ptDtbInSa->ucCiperID = zxdh_ealgo_id;
+ ptDtbInSa->ucAuthID = zxdh_auth_id;
+
+ // CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE
+ // eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES
+ // eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE
+ // eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam)
+ // CmdkBttlSecSaParamConstruct(1,x->props.mode,不确定,e_SEC_SA_LIVETIME_BYTE_TYPE(好像硬件只支持这个),E_CMDK_SEC_SA_DF_MODE(0),)
+ // E_CMDK_SEC_ENCRYP_MODE 这个只能根据算法反推 gcm ccm gmac chacha是combine
+ // gaucSecSwanIpv6Data udIcvLen mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE ,
+ // XFRM_MODE_TRANSPORT
+ CmdkBttlSecSaParamConstruct(1, xs->props.mode, 0,
+ e_SEC_SA_LIVETIME_TIME_TYPE,
+ e_SEC_SA_DF_BYPASS_MODE, zxdh_encpy_mode,
+ udIcvLen, &usSaParam);
+ ptDtbInSa->usSaParam = PUB_HTON16(usSaParam);
+
+ ptDtbInSa->usFrag_State = PUB_HTON16(0xd2c8);
+
+ ptDtbInSa->udLifetimeSecMax = PUB_HTON32(0xc4454766);
+ ptDtbInSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff);
+
+ ptDtbInSa->ucProtocol = xs->id.proto; // 50esp协议 51ah
+ ptDtbInSa->ucTOS = 0xbb;
+
+ /*esn相关*/
+ ptDtbInSa->ucEsnFlag = 0; /* 默认是非ESN模式 */
+ if (xs->props.flags & XFRM_STATE_ESN) {
+ if (NULL == xs->replay_esn)
+ return 1;
+ ptDtbInSa->ucEsnFlag =
+ 0xff; // ucEsnFlag; //0xff表示开启ESN,否则不开启
+ ptDtbInSa->udAntiWindowHigh =
+ PUB_HTON32(xs->replay_esn->seq_hi); /*ESN*/
+ ptDtbInSa->udAntiWindowLow =
+ PUB_HTON32(xs->replay_esn->replay_window -
+ 1); /*窗口上限sn,这里使用窗口大小-1*/
+ memcpy((void *)ptDtbInSa->aucBitmap, (void *)xs->replay_esn->bmp,
+ xs->replay_esn->bmp_len *
+ sizeof(__u32)); /*需要提前判断bmp_len不能太大,避免超过64(拦截窗口大小就行)*/
+ }
+
+ /*ipv4*/
+ if (AF_INET == xs->props.family) {
+ ptDtbInSa->ucIpType = 1 << 6; // bit[7:6] 1:ipv4 2:ipv6 /*换成宏*/
+ ptDtbInSa->udSrcAddress0 = xs->props.saddr.a4;
+ ptDtbInSa->udSrcAddress1 = 0x0;
+ ptDtbInSa->udSrcAddress2 = 0x0;
+ ptDtbInSa->udSrcAddress3 = 0x0;
+
+ ptDtbInSa->udDstAddress0 = xs->id.daddr.a4;
+ ptDtbInSa->udDstAddress1 = 0x0;
+ ptDtbInSa->udDstAddress2 = 0x0;
+ ptDtbInSa->udDstAddress3 = 0x0;
+ }
+ /*ipv6*/
+ else if (AF_INET6 == xs->props.family) {
+ ptDtbInSa->ucIpType = 2 << 6; // bit[7:6] 1:ipv4 2:ipv6 /*换成宏*/
+ ptDtbInSa->udSrcAddress0 = xs->props.saddr.a6[0];
+ ptDtbInSa->udSrcAddress1 = xs->props.saddr.a6[1];
+ ptDtbInSa->udSrcAddress2 = xs->props.saddr.a6[2];
+ ptDtbInSa->udSrcAddress3 = xs->props.saddr.a6[3];
+
+ ptDtbInSa->udDstAddress0 = xs->id.daddr.a6[0];
+ ptDtbInSa->udDstAddress1 = xs->id.daddr.a6[1];
+ ptDtbInSa->udDstAddress2 = xs->id.daddr.a6[2];
+ ptDtbInSa->udDstAddress3 = xs->id.daddr.a6[3];
+ } else {
+ return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/
+ }
+
+ ptDtbInSa->udOutSaId = 0x0; /*内核不需要出入境sa一起下吧,固定为0*/
+ ptDtbInSa->usOutSaOffset = 0x0;
+
+ ptDtbInSa->udRSV0 = 0x0;
+ ptDtbInSa->udRSV1 = 0x0;
+
+ DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthkeyLen:0x%x\n",
+ ptDtbInSa->ucAuthkeyLen);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCipherkeyLen:0x%x\n",
+ ptDtbInSa->ucCipherkeyLen);
+ DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n", zxdh_encpy_mode);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCiperID:0x%x\n",
+ ptDtbInSa->ucCiperID);
+ DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthID:0x%x\n", ptDtbInSa->ucAuthID);
+
+ return 0;
}
#endif
-
VOID RdlSecWrite(UINT64 uddSecBase, UINT32 udRegOff, UINT32 udRegVal)
{
- PUB_WRITE_REG32(uddSecBase + udRegOff, udRegVal);
+ PUB_WRITE_REG32(uddSecBase + udRegOff, udRegVal);
}
-UINT32 HalSecWrite(struct zxdh_en_device *en_dev, UINT32 udSecEngineId, UINT32 udRegOff, UINT32 udRegVal)
+UINT32 HalSecWrite(struct zxdh_en_device *en_dev, UINT32 udSecEngineId,
+ UINT32 udRegOff, UINT32 udRegVal)
{
- UINT64 uddBttlSecBase = 0;
- UINT32 udSecnBaseOff = 0;
- UINT64 uddSecnBase = 0;
+ UINT64 uddBttlSecBase = 0;
+ UINT32 udSecnBaseOff = 0;
+ UINT64 uddSecnBase = 0;
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
+ PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
- uddBttlSecBase = HalBttlSecRegBaseGet(en_dev);
- //udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET; //host不允许操作第二套
- uddSecnBase = uddBttlSecBase + udSecnBaseOff;
- //DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet regBase vir:0x%llx\n",uddSecnBase);
- //DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet regBase pa:0x%llx\n",virt_to_phys((void*)uddSecnBase));
- RdlSecWrite(uddSecnBase, udRegOff, udRegVal);
+ uddBttlSecBase = HalBttlSecRegBaseGet(en_dev);
+ // udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET; //host不允许操作第二套
+ uddSecnBase = uddBttlSecBase + udSecnBaseOff;
+ // DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet regBase
+ // vir:0x%llx\n",uddSecnBase); DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet
+ // regBase pa:0x%llx\n",virt_to_phys((void*)uddSecnBase));
+ RdlSecWrite(uddSecnBase, udRegOff, udRegVal);
- return 0;
+ return 0;
}
UINT32 RdlSecRead(UINT64 uddSecBase, UINT32 udRegOff)
{
- return PUB_READ_REG32(uddSecBase + udRegOff);
+ return PUB_READ_REG32(uddSecBase + udRegOff);
}
-UINT32 HalSecRead(struct zxdh_en_device *en_dev, UINT32 udSecEngineId, UINT32 udRegOff)
+UINT32 HalSecRead(struct zxdh_en_device *en_dev, UINT32 udSecEngineId,
+ UINT32 udRegOff)
{
- UINT64 uddBttlSecBase = 0;
- UINT32 udSecnBaseOff = 0;
- UINT64 uddSecnBase = 0;
+ UINT64 uddBttlSecBase = 0;
+ UINT32 udSecnBaseOff = 0;
+ UINT64 uddSecnBase = 0;
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
- uddBttlSecBase = HalBttlSecRegBaseGet(en_dev);
- udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET;
- uddSecnBase = uddBttlSecBase + udSecnBaseOff;
+ PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
+ uddBttlSecBase = HalBttlSecRegBaseGet(en_dev);
+ udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET;
+ uddSecnBase = uddBttlSecBase + udSecnBaseOff;
- return RdlSecRead(uddSecnBase, udRegOff);
+ return RdlSecRead(uddSecnBase, udRegOff);
}
-
UINT64 HalBttlVaToVpa(struct zxdh_en_device *en_dev, UINT64 pVaAddr)
{
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
- return (UINT64)virt_to_phys((void*)pVaAddr);
+ PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
+ return (UINT64)virt_to_phys((void *)pVaAddr);
}
UINT64 HalBttlVpaToVa(struct zxdh_en_device *en_dev, UINT64 pVpaAddr)
{
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
- return (UINT64)phys_to_virt(pVpaAddr);
-}
-
-#if 0
-VOID PubDumpBuf(UINT8 *pucBuf, UINT32 udLen)
-{
- UINT32 i = 0;
- UINT32 j = 0;
- UINT8 *pucPtr =NULL;
-
- pucPtr = pucBuf;
- for( j=0; j<48; j++ )
- {
- PUB_PRINTF("-");
- }
- PUB_PRINTF("\n");
-
- for( i=0; iops->get_vport(en_dev->parent);
-
- //写sa的队列锁状态寄存器CFG_DTB_QUEUE_LOCK_STATE,共128个队列,理论上应该查询
- //udRet = HalSecWrite(en_dev, udSecEngineId, REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(0), PUB_BIT_SET(udLockMask,udQueIndex));
-
-
- //暂时沟通是,只需要将epid配置为0,下表模块就会去riscv侧下表,暂时可以不配
- udEpid = EPID(usVport) + 5;
- udVfuncNum = VFUNC_NUM(usVport);
- udFuncNum = FUNC_NUM(usVport);
- udVfuncActive = VF_ACTIVE(usVport);
-
- DH_LOG_INFO(MODULE_SEC, "udEpid:0x%x,udVfuncNum:0x%x\n",udEpid,udVfuncNum);
- DH_LOG_INFO(MODULE_SEC, "udFuncNum:0x%x,udVfuncActive:0x%x\n",udFuncNum,udVfuncActive);
-
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udVfuncActive,0,1);
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udFuncNum,5,3);
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udCfgMsixVector,8,7);
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udVfuncNum,16,8);
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udEpid,24,4);
- PUB_BIT_FIELD_SET64(udEpldVfunNum,udPcieDbiEn,31,1);
-
- //return 0;
- DH_LOG_INFO(MODULE_SEC, "udEpldVfunNum = 0x%x\n",udEpldVfunNum);
- HalSecWrite(en_dev, udSecEngineId, REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(udQueIndex), udEpldVfunNum);
-
- //查询所申请队列剩余空间,如果队列剩余空间大于0则可入队;
- udRegVal = HalSecRead(en_dev, udSecEngineId, REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(udQueIndex));
- if(udRegVal < 2)
- {
- BTTL_PRINTF("queue:%u buf empty left:%u\n",udQueIndex,udRegVal);
- return 1;
- }
- if(udRegVal > 0x20)
- {
- BTTL_PRINTF("queue:%u buf left:%u\n",udQueIndex,udRegVal);
- return 1;
- }
-
- //先写DTB_ADDR[63:32],接着写DTB_ADDR[31:0],最后写usdtb_len(软件需严格遵守该顺序)
- udRet = HalSecWrite(en_dev, udSecEngineId, REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(udQueIndex), pt->DtbAddrH);
-
- //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrH = 0x%x\n",pt->DtbAddrH);
- udRet = HalSecWrite(en_dev, udSecEngineId, REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(udQueIndex), pt->DtbAddrL);
-
- //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrL = 0x%x\n",pt->DtbAddrL);
- //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrVir = 0x%llx\n",HalBttlVpaToVa(en_dev,(UINT64)((UINT64)pt->DtbAddrH)<<32)+pt->DtbAddrL);
- // CMD寄存器最后配
- udRet = HalSecWrite(en_dev, udSecEngineId, REG_SEC_CFG_QUEUE_DTB_LEN_0_127(udQueIndex), pt->DtbCmd);
-
- //DH_LOG_INFO(MODULE_SEC, "pt->DtbCmd = 0x%x\n",pt->DtbCmd);
- return 0;
+ UINT32 udRet;
+ UINT32 udRegVal;
+ // UINT32 udQueIndex = 1;
+ UINT32 udEpldVfunNum = 0;
+ UINT32 udPcieDbiEn =
+ 1; /*1为dbi中断,0为match中断,NP这里是全局变量,暂时写死*/
+ UINT32 udEpid = 5;
+ UINT32 udVfuncNum = 0;
+ UINT32 udCfgMsixVector = 2; /*以前host驱动写的是2,暂时写死*/
+ UINT32 udFuncNum = 2;
+ UINT32 udVfuncActive = 0;
+ UINT16 usVport = 0;
+
+ // BTTL_PUB_ID_CHECK(en_dev, CMDK_BTTL_PUB_CHIP_MAX);
+ // BTTL_PUB_ID_CHECK(udSecEngineId, HAL_SEC_MAX_ENGINE);
+ // BTTL_PUB_NULL_CHECK(pt);
+ PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
+
+ usVport = en_dev->ops->get_vport(en_dev->parent);
+
+ //写sa的队列锁状态寄存器CFG_DTB_QUEUE_LOCK_STATE,共128个队列,理论上应该查询
+ // udRet = HalSecWrite(en_dev, udSecEngineId,
+ // REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(0), PUB_BIT_SET(udLockMask,udQueIndex));
+
+ //暂时沟通是,只需要将epid配置为0,下表模块就会去riscv侧下表,暂时可以不配
+ udEpid = EPID(usVport) + 5;
+ udVfuncNum = VFUNC_NUM(usVport);
+ udFuncNum = FUNC_NUM(usVport);
+ udVfuncActive = VF_ACTIVE(usVport);
+
+ DH_LOG_INFO(MODULE_SEC, "udEpid:0x%x,udVfuncNum:0x%x\n", udEpid,
+ udVfuncNum);
+ DH_LOG_INFO(MODULE_SEC, "udFuncNum:0x%x,udVfuncActive:0x%x\n", udFuncNum,
+ udVfuncActive);
+
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udVfuncActive, 0, 1);
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udFuncNum, 5, 3);
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udCfgMsixVector, 8, 7);
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udVfuncNum, 16, 8);
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udEpid, 24, 4);
+ PUB_BIT_FIELD_SET64(udEpldVfunNum, udPcieDbiEn, 31, 1);
+
+ // return 0;
+ DH_LOG_INFO(MODULE_SEC, "udEpldVfunNum = 0x%x\n", udEpldVfunNum);
+ HalSecWrite(en_dev, udSecEngineId,
+ REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(udQueIndex), udEpldVfunNum);
+
+ //查询所申请队列剩余空间,如果队列剩余空间大于0则可入队;
+ udRegVal = HalSecRead(en_dev, udSecEngineId,
+ REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(udQueIndex));
+ if (udRegVal < 2) {
+ BTTL_PRINTF("queue:%u buf empty left:%u\n", udQueIndex, udRegVal);
+ return 1;
+ }
+ if (udRegVal > 0x20) {
+ BTTL_PRINTF("queue:%u buf left:%u\n", udQueIndex, udRegVal);
+ return 1;
+ }
+
+ //先写DTB_ADDR[63:32],接着写DTB_ADDR[31:0],最后写usdtb_len(软件需严格遵守该顺序)
+ udRet = HalSecWrite(en_dev, udSecEngineId,
+ REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(udQueIndex),
+ pt->DtbAddrH);
+
+ // DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrH = 0x%x\n",pt->DtbAddrH);
+ udRet = HalSecWrite(en_dev, udSecEngineId,
+ REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(udQueIndex),
+ pt->DtbAddrL);
+
+ // DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrL = 0x%x\n",pt->DtbAddrL);
+ // DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrVir =
+ // 0x%llx\n",HalBttlVpaToVa(en_dev,(UINT64)((UINT64)pt->DtbAddrH)<<32)+pt->DtbAddrL);
+ // CMD寄存器最后配
+ udRet = HalSecWrite(en_dev, udSecEngineId,
+ REG_SEC_CFG_QUEUE_DTB_LEN_0_127(udQueIndex),
+ pt->DtbCmd);
+
+ // DH_LOG_INFO(MODULE_SEC, "pt->DtbCmd = 0x%x\n",pt->DtbCmd);
+ return 0;
}
/*
sa下表模块测试
- SA存放地址,第二套L2D uddSaL2DPhyAddr= 0x6201000000;理论上为68位,目前场景为64位
- usdtb_len =30;
+ SA存放地址,第二套L2D uddSaL2DPhyAddr=
+ 0x6201000000;理论上为68位,目前场景为64位 usdtb_len =30;
*/
-//E_SA_TYPE geSaType;
-
-UINT32 gudTestCnt = 0;
-UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev,E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType,E_SA_TYPE eSaType,UINT64 uddSaVirAddr,UINT32 udDtbSaIsIntEn,UINT32 udDtbLen,UINT32 udQueIndex)
+// E_SA_TYPE geSaType;
+
+UINT32 gudTestCnt;
+UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev,
+ E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType,
+ E_SA_TYPE eSaType, UINT64 uddSaVirAddr,
+ UINT32 udDtbSaIsIntEn, UINT32 udDtbLen,
+ UINT32 udQueIndex)
{
- /* int_en指示是否产生需要中断 第29位,cmd_type=0指示为流表下发命令,cmd_type=1指示为流表dump命令 第30位 一对sa表的大小为480字节,以16字节为单位*/
- T_QUEUE_DTB_REG tDtbReg = {0};
- UINT32 udDtbCmd = 0;
- UINT64 uddSaPhaAddr = 0;
- UINT32 udIsDtbAckFinish = 0;
- UINT32 udDtbAckRsl = 0;
- UINT32 udRet = 0;
- int i = 0;
-
-
- /*入参检查*/
- //BTTL_PUB_ID_CHECK(en_dev, CMDK_BTTL_PUB_CHIP_MAX);
- //BTTL_PUB_ID_CHECK(eDtbSaCmdType, E_DTB_SA_CMD_LAST);
- PUB_CHECK_NULL_PTR_RET_ERR(en_dev);
-
- uddSaPhaAddr = (UINT64)HalBttlVaToVpa(en_dev, uddSaVirAddr);
- //BTTL_PUB_0_CHECK(uddSaPhaAddr);
- DH_LOG_INFO(MODULE_SEC, "uddSaVirAddr:0x%llx,uddSaPhaAddr:0x%llx\n",uddSaVirAddr,uddSaPhaAddr);
-
- //构造udDtbCmd
- PUB_BIT_FIELD_SET64(udDtbCmd,udDtbLen>>4,0,10);
- PUB_BIT_FIELD_SET64(udDtbCmd,eSaType,27,2);
- PUB_BIT_FIELD_SET64(udDtbCmd,udDtbSaIsIntEn,29,1);
- PUB_BIT_FIELD_SET64(udDtbCmd,eDtbSaCmdType,30,1);
-
- tDtbReg.DtbAddrH = (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr,32,32);
- tDtbReg.DtbAddrL = (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr,0,32);
- tDtbReg.DtbCmd = udDtbCmd;
-
- /* 配置下表寄存器 */
- for(i=0;i> 4, 0, 10);
+ PUB_BIT_FIELD_SET64(udDtbCmd, eSaType, 27, 2);
+ PUB_BIT_FIELD_SET64(udDtbCmd, udDtbSaIsIntEn, 29, 1);
+ PUB_BIT_FIELD_SET64(udDtbCmd, eDtbSaCmdType, 30, 1);
+
+ tDtbReg.DtbAddrH =
+ (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr, 32, 32);
+ tDtbReg.DtbAddrL =
+ (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr, 0, 32);
+ tDtbReg.DtbCmd = udDtbCmd;
+
+ /* 配置下表寄存器 */
+ for (i = 0; i < gudDtbSaNum; i++) {
+ // gudTestCnt++;
+ //(*(volatile UINT32*)(uddSaVirAddr + 16)) = PUB_NTOH32(gudTestCnt);
+ udRet = CmdkBttlSecSaDownload(en_dev, 0, &tDtbReg, udQueIndex);
+ // PUB_CHECK_RET_VAL_RV(udRet);
+ }
+ /* 等待 */
+ msleep(1000);
+ // PubUsDelay(10); //等待多久需要微院确认
+
+ udRet = CmdkBttlTestSaAckRslGet(uddSaVirAddr, eDtbSaCmdType,
+ &udIsDtbAckFinish, &udDtbAckRsl);
+ PUB_CHECK_RET_VAL_RV(udRet);
+
+ if ((1 == udIsDtbAckFinish) && (0xff == udDtbAckRsl)) {
+ return 0;
+ } else {
+ BTTL_PRINTF("CmdkBttlTestSa Dtb Ack is error!! "
+ "udIsDtbAckFinish:%u,udDtbAckRsl:%u\n",
+ udIsDtbAckFinish, udDtbAckRsl);
+ BttlPubDump((unsigned char *)uddSaVirAddr, 0x60);
+ return 1;
+ }
+
+ return 0;
}
#if 1
static int zxdh_ipsec_add_sa(struct xfrm_state *xs)
{
- struct xfrm_state_offload *xso = &xs->xso;
- struct net_device *netdev = xso->dev;
- struct zxdh_en_priv *en_priv = NULL;
- //struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_device *en_dev = NULL;
- dma_addr_t dma_handle;
- UINT32 dma_size = 0x1000; //暂定4K,批量下表情况下需要更多
-
- UINT64 uddDtbSaVirAddr = 0;
- UINT32 udSaTblLen = 0;
- int ret = 0;
-
- en_priv = netdev_priv(netdev);
- en_dev = &(en_priv->edev);
-
- if(unlikely(en_dev->drs_sec_pri.SecVAddr == 0))
- {
- en_dev->drs_sec_pri.SecVAddr = (uint64_t)dma_alloc_coherent(netdev->dev.parent,dma_size,&dma_handle,GFP_KERNEL);
- if(en_dev->drs_sec_pri.SecVAddr == 0)
- {
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_add_sa dma_alloc_coherent fail\n");
- return -1;
- }
- en_dev->drs_sec_pri.SecPAddr = dma_handle;
- en_dev->drs_sec_pri.SecMemSize = dma_size;
- }
- uddDtbSaVirAddr = en_dev->drs_sec_pri.SecVAddr;
-
- DH_LOG_INFO(MODULE_SEC, "uddDtbSaVirAddr:0x%llx\n",uddDtbSaVirAddr);
- //DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs);
-
- memset((void*)uddDtbSaVirAddr,0,1024);
-
- //else if(1 == xs->xso.dir)
- if(xso->flags & XFRM_OFFLOAD_INBOUND)
- {
- ret = zxdh_ipsec_dtb_in_sa_get(xs,(T_HAL_SA_DTB_HW_IN*)(uddDtbSaVirAddr+16));
- if(ret != 0)
- {
- return 1;
- }
- BttlPubDump((unsigned char *)uddDtbSaVirAddr, 0x210); //传入时加了16字节的回写空间
-
- #if 1
- udSaTblLen = 512 - 16;
- CmdkBttlTestSecDtbSaAdd(en_dev,E_DTB_SA_CMD_FLOW_DOWN,E_SATYPE_IN,uddDtbSaVirAddr,0,udSaTblLen,2);
-
- #endif
- }
- //if(2 == xs->xso.dir)
- else
- {
- ret = zxdh_ipsec_dtb_out_sa_get(xs,(T_HAL_SA_DTB_HW_OUT*)(uddDtbSaVirAddr+16));
- if(ret != 0)
- {
- return 1;
- }
- BttlPubDump((unsigned char *)uddDtbSaVirAddr, 0x110); //传入时加了16字节的回写空间
-
- #if 1
- udSaTblLen = 256 - 16;
- CmdkBttlTestSecDtbSaAdd(en_dev,E_DTB_SA_CMD_FLOW_DOWN,E_SATYPE_OUT,uddDtbSaVirAddr,0,udSaTblLen,2);
- #endif
- }
-
- return 0;
+ struct xfrm_state_offload *xso = &xs->xso;
+ struct net_device *netdev = xso->dev;
+ struct zxdh_en_priv *en_priv = NULL;
+ // struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ dma_addr_t dma_handle;
+ UINT32 dma_size = 0x1000; //暂定4K,批量下表情况下需要更多
+
+ UINT64 uddDtbSaVirAddr = 0;
+ UINT32 udSaTblLen = 0;
+ int ret = 0;
+
+ en_priv = netdev_priv(netdev);
+ en_dev = &(en_priv->edev);
+
+ if (unlikely(en_dev->drs_sec_pri.SecVAddr == 0)) {
+ en_dev->drs_sec_pri.SecVAddr = (uint64_t)dma_alloc_coherent(
+ netdev->dev.parent, dma_size, &dma_handle, GFP_KERNEL);
+ if (en_dev->drs_sec_pri.SecVAddr == 0) {
+ DH_LOG_INFO(MODULE_SEC,
+ "zxdh_ipsec_add_sa dma_alloc_coherent fail\n");
+ return -1;
+ }
+ en_dev->drs_sec_pri.SecPAddr = dma_handle;
+ en_dev->drs_sec_pri.SecMemSize = dma_size;
+ }
+ uddDtbSaVirAddr = en_dev->drs_sec_pri.SecVAddr;
+
+ DH_LOG_INFO(MODULE_SEC, "uddDtbSaVirAddr:0x%llx\n", uddDtbSaVirAddr);
+ // DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs);
+
+ memset((void *)uddDtbSaVirAddr, 0, 1024);
+
+ // else if(1 == xs->xso.dir)
+ if (xso->flags & XFRM_OFFLOAD_INBOUND) {
+ ret = zxdh_ipsec_dtb_in_sa_get(
+ xs, (T_HAL_SA_DTB_HW_IN *)(uddDtbSaVirAddr + 16));
+ if (ret != 0) {
+ return 1;
+ }
+ BttlPubDump((unsigned char *)uddDtbSaVirAddr,
+ 0x210); //传入时加了16字节的回写空间
+
+ udSaTblLen = 512 - 16;
+ CmdkBttlTestSecDtbSaAdd(en_dev, E_DTB_SA_CMD_FLOW_DOWN, E_SATYPE_IN,
+ uddDtbSaVirAddr, 0, udSaTblLen, 2);
+ }
+ // if(2 == xs->xso.dir)
+ else {
+ ret = zxdh_ipsec_dtb_out_sa_get(
+ xs, (T_HAL_SA_DTB_HW_OUT *)(uddDtbSaVirAddr + 16));
+ if (ret != 0) {
+ return 1;
+ }
+ BttlPubDump((unsigned char *)uddDtbSaVirAddr,
+ 0x110); //传入时加了16字节的回写空间
+
+ udSaTblLen = 256 - 16;
+ CmdkBttlTestSecDtbSaAdd(en_dev, E_DTB_SA_CMD_FLOW_DOWN, E_SATYPE_OUT,
+ uddDtbSaVirAddr, 0, udSaTblLen, 2);
+ }
+
+ return 0;
}
void zxdh_ipsec_del_sa(struct xfrm_state *xs)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_del_sa\n");
- return;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_del_sa\n");
+ return;
}
bool zxdh_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_offload_ok\n");
- return true;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_offload_ok\n");
+ return true;
}
-void zxdh_ipsec_state_advance_esn (struct xfrm_state *x)
+void zxdh_ipsec_state_advance_esn(struct xfrm_state *x)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_advance_esn\n");
- return;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_advance_esn\n");
+ return;
}
-void zxdh_ipsec_state_update_curlft (struct xfrm_state *x)
+void zxdh_ipsec_state_update_curlft(struct xfrm_state *x)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_update_curlft\n");
- return ;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_update_curlft\n");
+ return;
}
-int zxdh_ipsec_policy_add (struct xfrm_policy *x)
+int zxdh_ipsec_policy_add(struct xfrm_policy *x)
{
-#if 1
- int32_t ret = 0;
- UINT8 aucSip[4] = {0xc8,0xfe,0x00,0x1};
- UINT8 aucDip[4] = {0xc8,0xfe,0x00,0x2};
- UINT8 aucSipMask[4] = {0xff,0xff,0x00,0x0};
- UINT8 aucDipMask[4] = {0xff,0xff,0x00,0x0};
- /*6.2的内核才有*/
- //struct xfrm_dev_offload *xdo = &x->xdo;
- //struct net_device *netdev = xdo->dev;
- struct net_device *netdev = NULL; //低版本内核仅编译通过
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- DPP_PF_INFO_T pf_info = {0};
-
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_add\n");
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /*np下表 inline sec模式 打开*/
- ret = dpp_egr_port_attr_set(&pf_info,EGR_FLAG_INLINE_SEC_OFFLOAD,1);
- if (ret != 0)
- {
- LOG_ERR("Failed to set port_attr EGR_FLAG_INLINE_SEC_OFFLOAD !\n");
- }
-
- /*配置np ipset加密表*/
- ret = dpp_ipsec_enc_entry_add(&pf_info,0,aucSip,aucDip,aucSipMask,aucDipMask,1,0x80001);
- if (ret != 0)
- {
- LOG_ERR("xfrm policy dpp_ipsec_enc_entry_add Failed!\n");
- }
-#endif
-
- return 0;
+ int32_t ret = 0;
+ UINT8 aucSip[4] = { 0xc8, 0xfe, 0x00, 0x1 };
+ UINT8 aucDip[4] = { 0xc8, 0xfe, 0x00, 0x2 };
+ UINT8 aucSipMask[4] = { 0xff, 0xff, 0x00, 0x0 };
+ UINT8 aucDipMask[4] = { 0xff, 0xff, 0x00, 0x0 };
+ /*6.2的内核才有*/
+ // struct xfrm_dev_offload *xdo = &x->xdo;
+ // struct net_device *netdev = xdo->dev;
+ struct net_device *netdev = NULL; //低版本内核仅编译通过
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_add\n");
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /*np下表 inline sec模式 打开*/
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_INLINE_SEC_OFFLOAD, 1);
+ if (ret != 0) {
+ LOG_ERR("Failed to set port_attr EGR_FLAG_INLINE_SEC_OFFLOAD !\n");
+ }
+
+ /*配置np ipset加密表*/
+ ret = dpp_ipsec_enc_entry_add(&pf_info, 0, aucSip, aucDip, aucSipMask,
+ aucDipMask, 1, 0x80001);
+ if (ret != 0) {
+ LOG_ERR("xfrm policy dpp_ipsec_enc_entry_add Failed!\n");
+ }
+
+ return 0;
}
-void zxdh_ipsec_policy_delete (struct xfrm_policy *x)
+void zxdh_ipsec_policy_delete(struct xfrm_policy *x)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_delete\n");
- return;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_delete\n");
+ return;
}
-void zxdh_ipsec_policy_free (struct xfrm_policy *x)
+void zxdh_ipsec_policy_free(struct xfrm_policy *x)
{
- DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_free\n");
- return;
+ DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_free\n");
+ return;
}
-const struct xfrmdev_ops zxdh_xfrmdev_ops =
-{
- .xdo_dev_state_add = zxdh_ipsec_add_sa,
- .xdo_dev_state_delete = zxdh_ipsec_del_sa,
- .xdo_dev_offload_ok = zxdh_ipsec_offload_ok,
- //.xdo_dev_state_advance_esn = zxdh_ipsec_state_advance_esn,
- //.xdo_dev_state_update_curlft = zxdh_ipsec_state_update_curlft,
- //.xdo_dev_policy_add = zxdh_ipsec_policy_add,
- //.xdo_dev_policy_free = zxdh_ipsec_policy_free,
+const struct xfrmdev_ops zxdh_xfrmdev_ops = {
+ .xdo_dev_state_add = zxdh_ipsec_add_sa,
+ .xdo_dev_state_delete = zxdh_ipsec_del_sa,
+ .xdo_dev_offload_ok = zxdh_ipsec_offload_ok,
+ //.xdo_dev_state_advance_esn = zxdh_ipsec_state_advance_esn,
+ //.xdo_dev_state_update_curlft = zxdh_ipsec_state_update_curlft,
+ //.xdo_dev_policy_add = zxdh_ipsec_policy_add,
+ //.xdo_dev_policy_free = zxdh_ipsec_policy_free,
};
#endif
-
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h b/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h
old mode 100755
new mode 100644
index ed07b7ffca144df3c21b029bf7211f9a411162e2..346870bb8efc3976ca6aee6fe63b5e7d75598240
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h
@@ -19,67 +19,62 @@
#ifndef DRS_SEC_DTB_H
#define DRS_SEC_DTB_H
/*同步pub宏定义*/
-typedef void VOID;
-typedef float FLOAT;
-typedef double DOUBLE;
-
-typedef signed char INT8;
-typedef unsigned char UINT8;
-
-typedef char CHAR;
-
-
-typedef signed short INT16;
-typedef unsigned short UINT16;
-
-typedef signed int INT32;
-typedef unsigned int UINT32;
-
-
-typedef signed long long INT64;
-typedef unsigned long long UINT64;
-
-#define BITWIDTH1 ((UINT32)0x00000001)
-#define BITWIDTH2 ((UINT32)0x00000003)
-#define BITWIDTH3 ((UINT32)0x00000007)
-#define BITWIDTH4 ((UINT32)0x0000000f)
-#define BITWIDTH5 ((UINT32)0x0000001f)
-#define BITWIDTH6 ((UINT32)0x0000003f)
-#define BITWIDTH7 ((UINT32)0x0000007f)
-#define BITWIDTH8 ((UINT32)0x000000ff)
-#define BITWIDTH9 ((UINT32)0x000001ff)
-#define BITWIDTH10 ((UINT32)0x000003ff)
-#define BITWIDTH11 ((UINT32)0x000007ff)
-#define BITWIDTH12 ((UINT32)0x00000fff)
-#define BITWIDTH13 ((UINT32)0x00001fff)
-#define BITWIDTH14 ((UINT32)0x00003fff)
-#define BITWIDTH15 ((UINT32)0x00007fff)
-#define BITWIDTH16 ((UINT32)0x0000ffff)
-#define BITWIDTH17 ((UINT32)0x0001ffff)
-#define BITWIDTH18 ((UINT32)0x0003ffff)
-#define BITWIDTH19 ((UINT32)0x0007ffff)
-#define BITWIDTH20 ((UINT32)0x000fffff)
-#define BITWIDTH21 ((UINT32)0x001fffff)
-#define BITWIDTH22 ((UINT32)0x003fffff)
-#define BITWIDTH23 ((UINT32)0x007fffff)
-#define BITWIDTH24 ((UINT32)0x00ffffff)
-#define BITWIDTH25 ((UINT32)0x01ffffff)
-#define BITWIDTH26 ((UINT32)0x03ffffff)
-#define BITWIDTH27 ((UINT32)0x07ffffff)
-#define BITWIDTH28 ((UINT32)0x0fffffff)
-#define BITWIDTH29 ((UINT32)0x1fffffff)
-#define BITWIDTH30 ((UINT32)0x3fffffff)
-#define BITWIDTH31 ((UINT32)0x7fffffff)
-#define BITWIDTH32 ((UINT32)0xffffffff)
-
-
-
-#define PUB_OK (0)
-#define PUB_ERROR (0xffffffff)/*直接定义为0xffffffff*/
-
-#define BTTL_PRINTF(fmt, arg...) DH_LOG_INFO(MODULE_SEC, fmt, ##arg)
-#define BTTL_PUB_PRINT_ERROR(fmt, arg...) DH_LOG_ERR(MODULE_SEC, fmt, ##arg)
-
+typedef void VOID;
+typedef float FLOAT;
+typedef double DOUBLE;
+
+typedef signed char INT8;
+typedef unsigned char UINT8;
+
+typedef char CHAR;
+
+typedef signed short INT16;
+typedef unsigned short UINT16;
+
+typedef signed int INT32;
+typedef unsigned int UINT32;
+
+typedef signed long long INT64;
+typedef unsigned long long UINT64;
+
+#define BITWIDTH1 ((UINT32)0x00000001)
+#define BITWIDTH2 ((UINT32)0x00000003)
+#define BITWIDTH3 ((UINT32)0x00000007)
+#define BITWIDTH4 ((UINT32)0x0000000f)
+#define BITWIDTH5 ((UINT32)0x0000001f)
+#define BITWIDTH6 ((UINT32)0x0000003f)
+#define BITWIDTH7 ((UINT32)0x0000007f)
+#define BITWIDTH8 ((UINT32)0x000000ff)
+#define BITWIDTH9 ((UINT32)0x000001ff)
+#define BITWIDTH10 ((UINT32)0x000003ff)
+#define BITWIDTH11 ((UINT32)0x000007ff)
+#define BITWIDTH12 ((UINT32)0x00000fff)
+#define BITWIDTH13 ((UINT32)0x00001fff)
+#define BITWIDTH14 ((UINT32)0x00003fff)
+#define BITWIDTH15 ((UINT32)0x00007fff)
+#define BITWIDTH16 ((UINT32)0x0000ffff)
+#define BITWIDTH17 ((UINT32)0x0001ffff)
+#define BITWIDTH18 ((UINT32)0x0003ffff)
+#define BITWIDTH19 ((UINT32)0x0007ffff)
+#define BITWIDTH20 ((UINT32)0x000fffff)
+#define BITWIDTH21 ((UINT32)0x001fffff)
+#define BITWIDTH22 ((UINT32)0x003fffff)
+#define BITWIDTH23 ((UINT32)0x007fffff)
+#define BITWIDTH24 ((UINT32)0x00ffffff)
+#define BITWIDTH25 ((UINT32)0x01ffffff)
+#define BITWIDTH26 ((UINT32)0x03ffffff)
+#define BITWIDTH27 ((UINT32)0x07ffffff)
+#define BITWIDTH28 ((UINT32)0x0fffffff)
+#define BITWIDTH29 ((UINT32)0x1fffffff)
+#define BITWIDTH30 ((UINT32)0x3fffffff)
+#define BITWIDTH31 ((UINT32)0x7fffffff)
+#define BITWIDTH32 ((UINT32)0xffffffff)
+
+#define PUB_OK (0)
+#define PUB_ERROR (0xffffffff) /*直接定义为0xffffffff*/
+
+#define BTTL_PRINTF(fmt, arg...) DH_LOG_INFO(MODULE_SEC, fmt, ##arg)
+#define BTTL_PUB_PRINT_ERROR(fmt, arg...) DH_LOG_ERR(MODULE_SEC, fmt, ##arg)
/* 寄存器单bit位操作 */
@@ -90,7 +85,7 @@ typedef unsigned long long UINT64;
#define PUB_BIT_CLEAR(reg, bit) ((reg) = ((reg) & (~(1u << (bit)))))
/** 获取某bit的值 (0/1) */
-#define PUB_GET_BIT_VAL(reg, bit) (((reg)>> (bit)) & 1u)
+#define PUB_GET_BIT_VAL(reg, bit) (((reg) >> (bit)) & 1u)
/** 判断某bit的值是否为1 */
#define PUB_IS_BIT_SET(reg, pos) (((reg) & (1u << (pos))) != 0x0u)
@@ -99,439 +94,436 @@ typedef unsigned long long UINT64;
#define PUB_IS_BIT_CLEAR(reg, pos) (((reg) & (1u << (pos))) == 0x0u)
/** 某bit位填写值val,其他bit不变 */
-#define PUB_BIT_INSR(reg, bit, val) \
- ((reg) = (((reg) & (~(1u << (bit)))) | (((val) & 1u) << (bit))))
-
+#define PUB_BIT_INSR(reg, bit, val) \
+ ((reg) = (((reg) & (~(1u << (bit)))) | (((val)&1u) << (bit))))
#define PUB_BIT_FIELD_MASK_GET64(bitoff, bitfieldlen) \
-((((UINT64)0x01 << (bitfieldlen)) - 1) << (bitoff))
+ ((((UINT64)0x01 << (bitfieldlen)) - 1) << (bitoff))
#define PUB_BIT_FIELD_GET64(val, bitoff, bitfieldlen) \
-((val) & PUB_BIT_FIELD_MASK_GET64(bitoff, bitfieldlen))
+ ((val)&PUB_BIT_FIELD_MASK_GET64(bitoff, bitfieldlen))
-#define PUB_BIT_FIELD_SET64(var, val, bitoff, bitlen) \
-((var) = (((var) & (~ PUB_BIT_FIELD_MASK_GET64(bitoff, bitlen))) | (((UINT64)val) << (bitoff))))
+#define PUB_BIT_FIELD_SET64(var, val, bitoff, bitlen) \
+ ((var) = (((var) & (~PUB_BIT_FIELD_MASK_GET64(bitoff, bitlen))) | \
+ (((UINT64)val) << (bitoff))))
#define PUB_BIT_FIELD_RIGHT_JUST_GET64(val, bitoff, bitfieldlen) \
-(((val) >> (bitoff)) & (((UINT64)0x01 << (bitfieldlen)) - 1))
+ (((val) >> (bitoff)) & (((UINT64)0x01 << (bitfieldlen)) - 1))
/** 检查空指针,返回错误 */
-#define PUB_CHECK_NULL_PTR_RET_ERR(ptr) \
- do{\
- if(NULL == ptr){\
- DH_LOG_INFO(MODULE_SEC, "Null Ptr Err! Fuc:%s,Line:%d,File:%s\n", __FUNCTION__,__LINE__,__FILE__);\
- return PUB_ERROR;\
- }\
- }while(0)
+#define PUB_CHECK_NULL_PTR_RET_ERR(ptr) \
+ do { \
+ if (NULL == ptr) { \
+ DH_LOG_INFO(MODULE_SEC, "Null Ptr Err! Fuc:%s,Line:%d,File:%s\n", \
+ __FUNCTION__, __LINE__, __FILE__); \
+ return PUB_ERROR; \
+ } \
+ } while (0)
/** 检查空指针,返回VOID */
-#define PUB_CHECK_NULL_PTR_RET_VOID(ptr) \
- do{\
- if(NULL == ptr){\
- DH_LOG_INFO(MODULE_SEC, "Null Ptr Err! Fuc:%s,Line:%d,File:%s\n", __FUNCTION__,__LINE__,__FILE__);\
- return;\
- }\
- }while(0)
-
-#define PUB_CHECK_RET_VAL_RV(expr) \
- do { \
- UINT32 _ret = (expr); \
- if (PUB_OK != _ret) \
- { \
- DH_LOG_INFO(MODULE_SEC, "%s Error,Line:%d,Ret:0x%x\n", __FUNCTION__,__LINE__,_ret); \
- return _ret; \
- } \
- } while (0)
-
-#define BTTL_PUB_ID_CHECK(id, cmpid) \
- do{\
- if(cmpid <= (id)){\
- DH_LOG_INFO(MODULE_SEC, " ID %d <= %d check Err! Fuc:%s,Line:%d,File:%s\n", id, cmpid, __FUNCTION__,__LINE__,__FILE__);\
- return 1;\
- }\
- }while(0)
-
-#define BTTL_PUB_0_CHECK(value) \
- do{\
- if(0 == (value)){\
- DH_LOG_INFO(MODULE_SEC, " value %x 0 check Err! Fuc:%s,Line:%d,File:%s\n", value, __FUNCTION__,__LINE__,__FILE__);\
- return E_INVALID_VALUE;\
- }\
- }while(0)
+#define PUB_CHECK_NULL_PTR_RET_VOID(ptr) \
+ do { \
+ if (NULL == ptr) { \
+ DH_LOG_INFO(MODULE_SEC, "Null Ptr Err! Fuc:%s,Line:%d,File:%s\n", \
+ __FUNCTION__, __LINE__, __FILE__); \
+ return; \
+ } \
+ } while (0)
+
+#define PUB_CHECK_RET_VAL_RV(expr) \
+ do { \
+ UINT32 _ret = (expr); \
+ if (PUB_OK != _ret) { \
+ DH_LOG_INFO(MODULE_SEC, "%s Error,Line:%d,Ret:0x%x\n", \
+ __FUNCTION__, __LINE__, _ret); \
+ return _ret; \
+ } \
+ } while (0)
+
+#define BTTL_PUB_ID_CHECK(id, cmpid) \
+ do { \
+ if (cmpid <= (id)) { \
+ DH_LOG_INFO(MODULE_SEC, \
+ " ID %d <= %d check Err! Fuc:%s,Line:%d,File:%s\n", \
+ id, cmpid, __FUNCTION__, __LINE__, __FILE__); \
+ return 1; \
+ } \
+ } while (0)
+
+#define BTTL_PUB_0_CHECK(value) \
+ do { \
+ if (0 == (value)) { \
+ DH_LOG_INFO(MODULE_SEC, \
+ " value %x 0 check Err! Fuc:%s,Line:%d,File:%s\n", \
+ value, __FUNCTION__, __LINE__, __FILE__); \
+ return E_INVALID_VALUE; \
+ } \
+ } while (0)
/* 大小端操作 */
/** 16位数据大小端转换 */
-#define PUB_SWAP16(x) ((UINT16)((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#define PUB_SWAP16(x) ((UINT16)((((x) >> 8) & 0xffu) | (((x)&0xffu) << 8)))
/** 32位数据大小端转换 */
-#define PUB_SWAP32(x) \
- ((UINT32)( \
- (((UINT32)(x) & (UINT32)0x000000ffUL) << 24) | \
- (((UINT32)(x) & (UINT32)0x0000ff00UL) << 8) | \
- (((UINT32)(x) & (UINT32)0x00ff0000UL) >> 8) | \
- (((UINT32)(x) & (UINT32)0xff000000UL) >> 24) ))
+#define PUB_SWAP32(x) \
+ ((UINT32)((((UINT32)(x) & (UINT32)0x000000ffUL) << 24) | \
+ (((UINT32)(x) & (UINT32)0x0000ff00UL) << 8) | \
+ (((UINT32)(x) & (UINT32)0x00ff0000UL) >> 8) | \
+ (((UINT32)(x) & (UINT32)0xff000000UL) >> 24)))
/** 64位数据大小端转换 */
-#define PUB_SWAP64(x) \
- ((UINT64)( \
- (((UINT64)(x) & (UINT64)0x00000000000000ffUL) << 56) | \
- (((UINT64)(x) & (UINT64)0x000000000000ff00UL) << 40) | \
- (((UINT64)(x) & (UINT64)0x0000000000ff0000UL) << 24) | \
- (((UINT64)(x) & (UINT64)0x00000000ff000000UL) << 8 ) | \
- (((UINT64)(x) & (UINT64)0x000000ff00000000UL) >> 8 ) | \
- (((UINT64)(x) & (UINT64)0x0000ff0000000000UL) >> 24) | \
- (((UINT64)(x) & (UINT64)0x00ff000000000000UL) >> 40) | \
- (((UINT64)(x) & (UINT64)0xff00000000000000UL) >> 56) ))
-
+#define PUB_SWAP64(x) \
+ ((UINT64)((((UINT64)(x) & (UINT64)0x00000000000000ffUL) << 56) | \
+ (((UINT64)(x) & (UINT64)0x000000000000ff00UL) << 40) | \
+ (((UINT64)(x) & (UINT64)0x0000000000ff0000UL) << 24) | \
+ (((UINT64)(x) & (UINT64)0x00000000ff000000UL) << 8) | \
+ (((UINT64)(x) & (UINT64)0x000000ff00000000UL) >> 8) | \
+ (((UINT64)(x) & (UINT64)0x0000ff0000000000UL) >> 24) | \
+ (((UINT64)(x) & (UINT64)0x00ff000000000000UL) >> 40) | \
+ (((UINT64)(x) & (UINT64)0xff00000000000000UL) >> 56)))
/* 已知数据的大小端,转换为网络序 */
-#define PUB_LE_TO_NET16(x) PUB_SWAP16(x) /**< 将小端数据转换为网络序 */
-#define PUB_LE_TO_NET32(x) PUB_SWAP32(x) /**< 将小端数据转换为网络序 */
-#define PUB_LE_TO_NET64(x) PUB_SWAP64(x) /**< 将小端数据转换为网络序 */
-#define PUB_DE_TO_NET16(x) (x) /**< 将大端数据转换为网络序 */
-#define PUB_DE_TO_NET32(x) (x) /**< 将大端数据转换为网络序 */
-#define PUB_DE_TO_NET64(x) (x) /**< 将大端数据转换为网络序 */
-
+#define PUB_LE_TO_NET16(x) PUB_SWAP16(x) /**< 将小端数据转换为网络序 */
+#define PUB_LE_TO_NET32(x) PUB_SWAP32(x) /**< 将小端数据转换为网络序 */
+#define PUB_LE_TO_NET64(x) PUB_SWAP64(x) /**< 将小端数据转换为网络序 */
+#define PUB_DE_TO_NET16(x) (x) /**< 将大端数据转换为网络序 */
+#define PUB_DE_TO_NET32(x) (x) /**< 将大端数据转换为网络序 */
+#define PUB_DE_TO_NET64(x) (x) /**< 将大端数据转换为网络序 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define PUB_LE_TO_HOST16(x) PUB_SWAP16(x) /**< 小端16位数据转换为主机序 */
-#define PUB_LE_TO_HOST32(x) PUB_SWAP32(x) /**< 小端32位数据转换为主机序 */
-#define PUB_LE_TO_HOST64(x) PUB_SWAP64(x) /**< 小端64位数据转换为主机序 */
-#define PUB_DE_TO_HOST16(x) (x) /**< 大端16位数据转换为主机序 */
-#define PUB_DE_TO_HOST32(x) (x) /**< 大端32位数据转换为主机序 */
-#define PUB_DE_TO_HOST64(x) (x) /**< 大端64位数据转换为主机序 */
-#define PUB_HTON16(x) (x) /**< 16位数据主机序转换为网络序 */
-#define PUB_HTON32(x) (x) /**< 32位数据主机序转换为网络序 */
-#define PUB_HTON64(x) (x) /**< 64位数据主机序转换为网络序 */
-#define PUB_NTOH16(x) (x) /**< 16位数据网络序转换为主机序 */
-#define PUB_NTOH32(x) (x) /**< 32位数据网络序转换为主机序 */
-#define PUB_NTOH64(x) (x) /**< 64位数据网络序转换为主机序 */
-
+#define PUB_LE_TO_HOST16(x) PUB_SWAP16(x) /**< 小端16位数据转换为主机序 */
+#define PUB_LE_TO_HOST32(x) PUB_SWAP32(x) /**< 小端32位数据转换为主机序 */
+#define PUB_LE_TO_HOST64(x) PUB_SWAP64(x) /**< 小端64位数据转换为主机序 */
+#define PUB_DE_TO_HOST16(x) (x) /**< 大端16位数据转换为主机序 */
+#define PUB_DE_TO_HOST32(x) (x) /**< 大端32位数据转换为主机序 */
+#define PUB_DE_TO_HOST64(x) (x) /**< 大端64位数据转换为主机序 */
+#define PUB_HTON16(x) (x) /**< 16位数据主机序转换为网络序 */
+#define PUB_HTON32(x) (x) /**< 32位数据主机序转换为网络序 */
+#define PUB_HTON64(x) (x) /**< 64位数据主机序转换为网络序 */
+#define PUB_NTOH16(x) (x) /**< 16位数据网络序转换为主机序 */
+#define PUB_NTOH32(x) (x) /**< 32位数据网络序转换为主机序 */
+#define PUB_NTOH64(x) (x) /**< 64位数据网络序转换为主机序 */
#else
-#define PUB_LE_TO_HOST16(x) (x)
-#define PUB_LE_TO_HOST32(x) (x)
-#define PUB_LE_TO_HOST64(x) (x)
-#define PUB_DE_TO_HOST16(x) PUB_SWAP16(x)
-#define PUB_DE_TO_HOST32(x) PUB_SWAP32(x)
-#define PUB_DE_TO_HOST64(x) PUB_SWAP64(x)
-#define PUB_HTON16(x) PUB_SWAP16(x)
-#define PUB_HTON32(x) PUB_SWAP32(x)
-#define PUB_HTON64(x) PUB_SWAP64(x)
-#define PUB_NTOH16(x) PUB_SWAP16(x)
-#define PUB_NTOH32(x) PUB_SWAP32(x)
-#define PUB_NTOH64(x) PUB_SWAP64(x)
-
+#define PUB_LE_TO_HOST16(x) (x)
+#define PUB_LE_TO_HOST32(x) (x)
+#define PUB_LE_TO_HOST64(x) (x)
+#define PUB_DE_TO_HOST16(x) PUB_SWAP16(x)
+#define PUB_DE_TO_HOST32(x) PUB_SWAP32(x)
+#define PUB_DE_TO_HOST64(x) PUB_SWAP64(x)
+#define PUB_HTON16(x) PUB_SWAP16(x)
+#define PUB_HTON32(x) PUB_SWAP32(x)
+#define PUB_HTON64(x) PUB_SWAP64(x)
+#define PUB_NTOH16(x) PUB_SWAP16(x)
+#define PUB_NTOH32(x) PUB_SWAP32(x)
+#define PUB_NTOH64(x) PUB_SWAP64(x)
#endif
/*因为SEC下表和NP下表硬件基本一样,这里同步NP关于EPID等定义*/
-#define VF_ACTIVE(VPORT) ((VPORT & 0x0800) >> 11)
-#define EPID(VPORT) ((VPORT & 0x7000) >> 12)
-#define FUNC_NUM(VPORT) ((VPORT & 0x0700) >> 8)
-#define VFUNC_NUM(VPORT) ((VPORT & 0x00FF))
+#define VF_ACTIVE(VPORT) ((VPORT & 0x0800) >> 11)
+#define EPID(VPORT) ((VPORT & 0x7000) >> 12)
+#define FUNC_NUM(VPORT) ((VPORT & 0x0700) >> 8)
+#define VFUNC_NUM(VPORT) ((VPORT & 0x00FF))
-#define PF_VQM_VFID_OFFSET (1152)
-#define IS_PF(VPORT) (!VF_ACTIVE(VPORT))
-#define VQM_VFID(VPORT) (IS_PF(VPORT) ? \
- (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT)) : \
- (EPID(VPORT) * 256 + VFUNC_NUM(VPORT)))
+#define PF_VQM_VFID_OFFSET (1152)
+#define IS_PF(VPORT) (!VF_ACTIVE(VPORT))
+#define VQM_VFID(VPORT) \
+ (IS_PF(VPORT) ? (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT)) : \
+ (EPID(VPORT) * 256 + VFUNC_NUM(VPORT)))
-#define OWNER_PF_VQM_VFID(VPORT) (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT))
-#define OWNER_PF_VPORT(VPORT) (((EPID(VPORT)) << 12) | ((FUNC_NUM(VPORT)) << 8))
+#define OWNER_PF_VQM_VFID(VPORT) \
+ (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT))
+#define OWNER_PF_VPORT(VPORT) (((EPID(VPORT)) << 12) | ((FUNC_NUM(VPORT)) << 8))
-#define VQM_VFID_MAX_NUM (2048)
+#define VQM_VFID_MAX_NUM (2048)
/*vport格式
15 |14 13 12 | 11 |10 9 8|7 6 5 4 3 2 1 0|
rsv| ep_id |func_active|func_num| vfunc_num |
*/
-#define VPORT_EPID_BT_START (12) /*EPID起始位*/
-#define VPORT_EPID_BT_LEN (3) /*EPID长度*/
-#define VPORT_FUNC_ACTIVE_BT_START (11) /*FUNC_ACTIVE起始位*/
-#define VPORT_FUNC_ACTIVE_BT_LEN (1) /*FUNC_ACTIVE长度*/
-#define VPORT_FUNC_NUM_BT_START (8) /*FUNC_NUM起始位*/
-#define VPORT_FUNC_NUM_BT_LEN (3) /*FUNC_NUM长度*/
-#define VPORT_VFUNC_NUM_BT_START (0) /*FUNC_NUM起始位*/
-#define VPORT_VFUNC_NUM_BT_LEN (8) /*FUNC_NUM长度*/
+#define VPORT_EPID_BT_START (12) /*EPID起始位*/
+#define VPORT_EPID_BT_LEN (3) /*EPID长度*/
+#define VPORT_FUNC_ACTIVE_BT_START (11) /*FUNC_ACTIVE起始位*/
+#define VPORT_FUNC_ACTIVE_BT_LEN (1) /*FUNC_ACTIVE长度*/
+#define VPORT_FUNC_NUM_BT_START (8) /*FUNC_NUM起始位*/
+#define VPORT_FUNC_NUM_BT_LEN (3) /*FUNC_NUM长度*/
+#define VPORT_VFUNC_NUM_BT_START (0) /*FUNC_NUM起始位*/
+#define VPORT_VFUNC_NUM_BT_LEN (8) /*FUNC_NUM长度*/
/**
-* @name 通用寄存器操作宏
-* @brief 读寄存器宏定义
-* @{
-*/
-#define PUB_READ_REG8(addr) (*(volatile UINT8 *)(addr)) /**< 读8位寄存器 */
-#define PUB_READ_REG16(addr) (*(volatile UINT16 *)(addr)) /**< 读16位寄存器 */
-#define PUB_READ_REG32(addr) (*(volatile UINT32 *)(addr)) /**< 读32位寄存器 */
+ * @name 通用寄存器操作宏
+ * @brief 读寄存器宏定义
+ * @{
+ */
+#define PUB_READ_REG8(addr) (*(volatile UINT8 *)(addr)) /**< 读8位寄存器 */
+#define PUB_READ_REG16(addr) (*(volatile UINT16 *)(addr)) /**< 读16位寄存器 */
+#define PUB_READ_REG32(addr) (*(volatile UINT32 *)(addr)) /**< 读32位寄存器 */
/** @} 通用寄存器操作宏 */
/**
-* @name 通用寄存器操作宏
-* @brief 写寄存器宏定义
-* @{
-*/
-#define PUB_WRITE_REG8(addr, val_8) (*(volatile UINT8 *)(addr) = val_8) /**< 写8位寄存器 */
-#define PUB_WRITE_REG16(addr, val_16) (*(volatile UINT16 *)(addr) = val_16) /**< 写16位寄存器 */
-#define PUB_WRITE_REG32(addr, val_32) (*(volatile UINT32 *)(addr) = val_32) /**< 写32位寄存器 */
+ * @name 通用寄存器操作宏
+ * @brief 写寄存器宏定义
+ * @{
+ */
+#define PUB_WRITE_REG8(addr, val_8) \
+ (*(volatile UINT8 *)(addr) = val_8) /**< 写8位寄存器 */
+#define PUB_WRITE_REG16(addr, val_16) \
+ (*(volatile UINT16 *)(addr) = val_16) /**< 写16位寄存器 */
+#define PUB_WRITE_REG32(addr, val_32) \
+ (*(volatile UINT32 *)(addr) = val_32) /**< 写32位寄存器 */
/** @} 通用寄存器操作宏 */
-
/*寄存器偏移定义*/
-#define REG_SEC_IDX_OFFSET (0x800000) /* SEC内部基地址偏移 */
+#define REG_SEC_IDX_OFFSET (0x800000) /* SEC内部基地址偏移 */
-#define REG_SEC_TOP_DTB_OFFSET (0) /*host驱动 这里为0,因为就是从dtb开始映射的*/
+#define REG_SEC_TOP_DTB_OFFSET (0) /*host驱动 这里为0,因为就是从dtb开始映射的*/
/* CFG_QUEUE_DTB_ADDR_H_0_127 虚机队列入队的高地址寄存器 n=0~127 */
-#define REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(n) (REG_SEC_TOP_DTB_OFFSET + 0x0000 + n*32)
+#define REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x0000 + n * 32)
/* CFG_QUEUE_DTB_ADDR_L_0_127 虚机队列入队的低地址寄存器 n=0~127*/
-#define REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(n) (REG_SEC_TOP_DTB_OFFSET + 0x0004 + n*32)
+#define REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x0004 + n * 32)
/* CFG_QUEUE_DTB_LEN_0_127 虚机队列入队的长度寄存器 n=0~127*/
-#define REG_SEC_CFG_QUEUE_DTB_LEN_0_127(n) (REG_SEC_TOP_DTB_OFFSET + 0x0008 + n*32)
+#define REG_SEC_CFG_QUEUE_DTB_LEN_0_127(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x0008 + n * 32)
/* INFO_QUEUE_BUF_SPACE_LEFT_0_127 靠靠靠靠靠靠?n=0~127*/
-#define REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(n) (REG_SEC_TOP_DTB_OFFSET + 0x000C + n*32)
+#define REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x000C + n * 32)
/* CFG_EPID_V_FUNC_NUM_0_127 SOC虚机信息配置寄存器 n=0~127*/
-#define REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(n) (REG_SEC_TOP_DTB_OFFSET + 0x0010 + n*32)
-
-/* DTB_QUEUE_LOCK_STATE_0_3 队列锁状态寄存器,4个寄存器共128bit,对应队列0~127 n=0~3 */
-#define REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(n) (REG_SEC_TOP_DTB_OFFSET + 0x4080 + n*4)
-
-typedef enum
-{
- e_SEC_IPSEC_TRANSPORT_MODE = 0, /*传输模式*/
- e_SEC_IPSEC_TUNNEL_MODE, /*隧道模式*/
- e_SEC_IPSEC_MODE_LAST,
+#define REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x0010 + n * 32)
+
+/* DTB_QUEUE_LOCK_STATE_0_3 队列锁状态寄存器,4个寄存器共128bit,对应队列0~127
+ * n=0~3 */
+#define REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(n) \
+ (REG_SEC_TOP_DTB_OFFSET + 0x4080 + n * 4)
+
+typedef enum {
+ e_SEC_IPSEC_TRANSPORT_MODE = 0, /*传输模式*/
+ e_SEC_IPSEC_TUNNEL_MODE, /*隧道模式*/
+ e_SEC_IPSEC_MODE_LAST,
} E_CMDK_SEC_IPSEC_MODE;
-typedef enum
-{
- e_SEC_SA_DF_BYPASS_MODE = 0, /*00 bypass DF bit*/
- e_SEC_SA_DF_CLEAR_MODE, /*01 clear*/
- e_SEC_SA_DF_SET_MODE, /*10 set*/
- e_SEC_SA_DF_COPY_MODE, /*11 copy*/
- e_SEC_SA_DF_MODE_LAST,
+typedef enum {
+ e_SEC_SA_DF_BYPASS_MODE = 0, /*00 bypass DF bit*/
+ e_SEC_SA_DF_CLEAR_MODE, /*01 clear*/
+ e_SEC_SA_DF_SET_MODE, /*10 set*/
+ e_SEC_SA_DF_COPY_MODE, /*11 copy*/
+ e_SEC_SA_DF_MODE_LAST,
} E_CMDK_SEC_SA_DF_MODE;
-typedef enum
-{
- E_DTB_SA_CMD_FLOW_DOWN = 0,
- E_DTB_SA_CMD_DUMP,
- E_DTB_SA_CMD_LAST,
+typedef enum {
+ E_DTB_SA_CMD_FLOW_DOWN = 0,
+ E_DTB_SA_CMD_DUMP,
+ E_DTB_SA_CMD_LAST,
} E_CMDK_DTB_SA_CMD_TYPE;
-typedef enum
-{
- E_SATYPE_IN = 1,
- E_SATYPE_OUT ,
- E_SATYPE_IN_AND_OUT = 3,
-}E_SA_TYPE;
-
-typedef enum
-{
- E_INLINE_IN,
- E_INLINE_OUT,
- E_INLINE_IN_AND_OUT,
-}E_INLINE_TYPE;
-
-typedef enum
-{
- e_SEC_ENCRYP_AH_MODE = 0, /*000 AH认证*/
- e_SEC_ENCRYP_ESP_AUTH_MODE, /*001 ESP完整性*/
- e_SEC_ENCRYP_ESP_ENCRYP_MODE, /*010 ESP加密*/
- e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE, /*011 ESP加密+ESP完整*/
- e_SEC_ENCRYP_ESP_COMBINED_MODE, /*100 ESP组合模式*/
- e_SEC_ENCRYP_MODE_LAST,
+typedef enum {
+ E_SATYPE_IN = 1,
+ E_SATYPE_OUT,
+ E_SATYPE_IN_AND_OUT = 3,
+} E_SA_TYPE;
+
+typedef enum {
+ E_INLINE_IN,
+ E_INLINE_OUT,
+ E_INLINE_IN_AND_OUT,
+} E_INLINE_TYPE;
+
+typedef enum {
+ e_SEC_ENCRYP_AH_MODE = 0, /*000 AH认证*/
+ e_SEC_ENCRYP_ESP_AUTH_MODE, /*001 ESP完整性*/
+ e_SEC_ENCRYP_ESP_ENCRYP_MODE, /*010 ESP加密*/
+ e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE, /*011 ESP加密+ESP完整*/
+ e_SEC_ENCRYP_ESP_COMBINED_MODE, /*100 ESP组合模式*/
+ e_SEC_ENCRYP_MODE_LAST,
} E_CMDK_SEC_ENCRYP_MODE;
-typedef enum
-{
- e_SEC_SA_LIVETIME_NONE_TYPE = 0, /*00 none*/
- e_SEC_SA_LIVETIME_TIME_TYPE, /*01 生存时间*/
- e_SEC_SA_LIVETIME_BYTE_TYPE, /*10 byte数*/
- e_SEC_SA_LIVETIME_PKT_TYPE, /*11 pkt数(预留,目前不支持)*/
- e_SEC_SA_LIVETIME_TYPE_LAST,
+typedef enum {
+ e_SEC_SA_LIVETIME_NONE_TYPE = 0, /*00 none*/
+ e_SEC_SA_LIVETIME_TIME_TYPE, /*01 生存时间*/
+ e_SEC_SA_LIVETIME_BYTE_TYPE, /*10 byte数*/
+ e_SEC_SA_LIVETIME_PKT_TYPE, /*11 pkt数(预留,目前不支持)*/
+ e_SEC_SA_LIVETIME_TYPE_LAST,
} E_CMDK_LIVETIME_TYPES;
#pragma pack(1)
-typedef struct IPV4_HEAD
-{
- UINT8 ip_headlen_version;
- UINT8 ip_tos;
- UINT16 usTotallen;
-
- UINT16 usIdentify;
- UINT16 ip_fragoff;
-
- UINT8 uclive_time;
- UINT8 ucProtocal;
- UINT16 usHeadChecksum;
-
- UINT32 udSrcIpAddr;
- UINT32 udDstIpAddr;
-}T_IPV4_HEAD;
-#pragma pack()
+typedef struct IPV4_HEAD {
+ UINT8 ip_headlen_version;
+ UINT8 ip_tos;
+ UINT16 usTotallen;
-typedef struct
-{
- UINT32 DtbAddrH; /*地址的高32位*/
- UINT32 DtbAddrL; /*地址的低32位,两个地址组成64位然后左移4位得到68位的真实地址*/
- UINT32 DtbCmd; /*研规上的DTB_LEN字段 */
-}T_QUEUE_DTB_REG;
-
-//SA下表模块使用的结构体
-typedef struct
-{
- UINT32 udSPI;
- UINT32 udSaId;
- UINT16 usSaParam;
- UINT8 ucCiperID;
- UINT8 ucAuthID;
- UINT8 ucCipherkeyLen;
- UINT8 ucAuthkeyLen;
- UINT16 usFrag_State;
-
- UINT32 udESN;
- UINT32 udSN;
- UINT64 uddProcessedByteCnt;
-
- UINT32 udSalt;
- UINT32 udLifetimeSecMax;
- UINT64 uddLifetimByteCntMax;
-
- UINT8 ucProtocol;
- UINT8 ucTOS;
- UINT8 ucEsnFlag;
- UINT8 ucIpType;
- UINT32 udRSV0;
- UINT32 udRSV1;
- UINT32 udRSV2;
-
- UINT32 udSrcAddress0;
- UINT32 udSrcAddress1;
- UINT32 udSrcAddress2;
- UINT32 udSrcAddress3;
-
- UINT32 udDstAddress0;
- UINT32 udDstAddress1;
- UINT32 udDstAddress2;
- UINT32 udDstAddress3;
-
- UINT8 aucSaCipherKey[32];
- UINT8 aucSaAuthKey[128];
-}__attribute__((packed))T_HAL_SA_DTB_HW_OUT;
-
- typedef struct
-{
-
- UINT32 udSrcAddress0;
- UINT32 udSrcAddress1;
- UINT32 udSrcAddress2;
- UINT32 udSrcAddress3;
-
- UINT32 udDstAddress0;
- UINT32 udDstAddress1;
- UINT32 udDstAddress2;
- UINT32 udDstAddress3;
-
- UINT32 udSPI;
- UINT32 udSaId;
- UINT16 usSaParam;
- UINT8 ucCiperID;
- UINT8 ucAuthID;
- UINT8 ucCipherkeyLen;
- UINT8 ucAuthkeyLen;
- UINT16 usFrag_State;
-
- UINT32 udSalt;
- UINT32 udLifetimeSecMax;
- UINT64 uddLifetimByteCntMax;
-
- UINT8 ucProtocol;
- UINT8 ucTOS;
- UINT8 ucEsnFlag;
- UINT8 ucIpType;
- UINT16 usOutSaOffset;
- UINT16 udRSV0;
- UINT32 udOutSaId;
- UINT32 udRSV1;
-
- UINT8 aucBitmap[256];
-
- UINT32 udAntiWindowHigh;
- UINT32 udAntiWindowLow;
- UINT64 uddProcessedByteCnt;
-
- UINT8 aucSaCipherKey[32];
- UINT8 aucSaAuthKey[128];
-}__attribute__((packed))T_HAL_SA_DTB_HW_IN;
-
-typedef enum
-{
- e_HAL_IPSEC_CIPHER_NULL = 0x00,
- e_HAL_IPSEC_CIPHER_AES_CTR = 0x11,
- e_HAL_IPSEC_CIPHER_AES_CBC = 0x12,
- e_HAL_IPSEC_CIPHER_AES_ECB = 0x13,
- e_HAL_IPSEC_CIPHER_AES_GCM = 0x14,
- e_HAL_IPSEC_CIPHER_AES_CCM = 0x15,
- e_HAL_IPSEC_CIPHER_AES_GMAC = 0x16,
- /* 新增SM4算法 */
- e_HAL_IPSEC_CIPHER_SM4_CTR = 0x17,
- e_HAL_IPSEC_CIPHER_SM4_CBC = 0x18,
- e_HAL_IPSEC_CIPHER_SM4_ECB = 0x19,
- /* 新增XTS算法 */
- e_HAL_IPSEC_CIPHER_AES_XTS = 0x1a,
- e_HAL_IPSEC_CIPHER_SM4_XTS = 0x1b,
-
- e_HAL_IPSEC_CIPHER_DES_CBC = 0x31,
- e_HAL_IPSEC_CIPHER_3DES_CBC = 0x32,
- e_HAL_IPSEC_CIPHER_CHACHA = 0x50,
-}E_HAL_SEC_IPSEC_CIPHER_ALG;
-
-typedef enum
-{
- e_HAL_IPSEC_AUTH_NULL = 0x00,
-
- /* 新增 */
- e_HAL_IPSEC_AUTH_AES_GMAC = 0x16, /* 1 */
- e_HAL_IPSEC_AUTH_SM4_GMAC = 0x1e,
-
- e_HAL_IPSEC_AUTH_AES_CMAC32 = 0x22, /* 3 */
- e_HAL_IPSEC_AUTH_AES_CMAC96 = 0x23,
- e_HAL_IPSEC_AUTH_AES_XCBCMAC = 0x21,
- e_HAL_IPSEC_AUTH_AES_SHA1 = 0x41, /* 6 */
- e_HAL_IPSEC_AUTH_AES_SHA224 = 0x42,
- e_HAL_IPSEC_AUTH_AES_SHA256 = 0x44,
- e_HAL_IPSEC_AUTH_AES_SHA384 = 0x45,
- e_HAL_IPSEC_AUTH_AES_SHA512 = 0x46,
- e_HAL_IPSEC_AUTH_AES_MD5 = 0x43,
- e_HAL_IPSEC_AUTH_SM3 = 0x47,
-}E_HAL_SEC_IPSEC_AUTH_ALG;
-
-typedef struct
-{
- char alg_name[64];
- char compat_name[64];
- E_HAL_SEC_IPSEC_CIPHER_ALG e_zxdh_ealgo_id;
-}T_ZXDH_EALGO;
-
-typedef struct
-{
- char alg_name[64];
- char compat_name[64];
- E_HAL_SEC_IPSEC_AUTH_ALG e_zxdh_auth_id;
-}T_ZXDH_ALGO;
+ UINT16 usIdentify;
+ UINT16 ip_fragoff;
+ UINT8 uclive_time;
+ UINT8 ucProtocal;
+ UINT16 usHeadChecksum;
+
+ UINT32 udSrcIpAddr;
+ UINT32 udDstIpAddr;
+} T_IPV4_HEAD;
+#pragma pack()
+
+typedef struct {
+ UINT32 DtbAddrH; /*地址的高32位*/
+ UINT32
+ DtbAddrL; /*地址的低32位,两个地址组成64位然后左移4位得到68位的真实地址*/
+ UINT32 DtbCmd; /*研规上的DTB_LEN字段 */
+} T_QUEUE_DTB_REG;
+
+// SA下表模块使用的结构体
+typedef struct {
+ UINT32 udSPI;
+ UINT32 udSaId;
+ UINT16 usSaParam;
+ UINT8 ucCiperID;
+ UINT8 ucAuthID;
+ UINT8 ucCipherkeyLen;
+ UINT8 ucAuthkeyLen;
+ UINT16 usFrag_State;
+
+ UINT32 udESN;
+ UINT32 udSN;
+ UINT64 uddProcessedByteCnt;
+
+ UINT32 udSalt;
+ UINT32 udLifetimeSecMax;
+ UINT64 uddLifetimByteCntMax;
+
+ UINT8 ucProtocol;
+ UINT8 ucTOS;
+ UINT8 ucEsnFlag;
+ UINT8 ucIpType;
+ UINT32 udRSV0;
+ UINT32 udRSV1;
+ UINT32 udRSV2;
+
+ UINT32 udSrcAddress0;
+ UINT32 udSrcAddress1;
+ UINT32 udSrcAddress2;
+ UINT32 udSrcAddress3;
+
+ UINT32 udDstAddress0;
+ UINT32 udDstAddress1;
+ UINT32 udDstAddress2;
+ UINT32 udDstAddress3;
+
+ UINT8 aucSaCipherKey[32];
+ UINT8 aucSaAuthKey[128];
+} __attribute__((packed)) T_HAL_SA_DTB_HW_OUT;
+
+typedef struct {
+ UINT32 udSrcAddress0;
+ UINT32 udSrcAddress1;
+ UINT32 udSrcAddress2;
+ UINT32 udSrcAddress3;
+
+ UINT32 udDstAddress0;
+ UINT32 udDstAddress1;
+ UINT32 udDstAddress2;
+ UINT32 udDstAddress3;
+
+ UINT32 udSPI;
+ UINT32 udSaId;
+ UINT16 usSaParam;
+ UINT8 ucCiperID;
+ UINT8 ucAuthID;
+ UINT8 ucCipherkeyLen;
+ UINT8 ucAuthkeyLen;
+ UINT16 usFrag_State;
+
+ UINT32 udSalt;
+ UINT32 udLifetimeSecMax;
+ UINT64 uddLifetimByteCntMax;
+
+ UINT8 ucProtocol;
+ UINT8 ucTOS;
+ UINT8 ucEsnFlag;
+ UINT8 ucIpType;
+ UINT16 usOutSaOffset;
+ UINT16 udRSV0;
+ UINT32 udOutSaId;
+ UINT32 udRSV1;
+
+ UINT8 aucBitmap[256];
+
+ UINT32 udAntiWindowHigh;
+ UINT32 udAntiWindowLow;
+ UINT64 uddProcessedByteCnt;
+
+ UINT8 aucSaCipherKey[32];
+ UINT8 aucSaAuthKey[128];
+} __attribute__((packed)) T_HAL_SA_DTB_HW_IN;
+
+typedef enum {
+ e_HAL_IPSEC_CIPHER_NULL = 0x00,
+ e_HAL_IPSEC_CIPHER_AES_CTR = 0x11,
+ e_HAL_IPSEC_CIPHER_AES_CBC = 0x12,
+ e_HAL_IPSEC_CIPHER_AES_ECB = 0x13,
+ e_HAL_IPSEC_CIPHER_AES_GCM = 0x14,
+ e_HAL_IPSEC_CIPHER_AES_CCM = 0x15,
+ e_HAL_IPSEC_CIPHER_AES_GMAC = 0x16,
+ /* 新增SM4算法 */
+ e_HAL_IPSEC_CIPHER_SM4_CTR = 0x17,
+ e_HAL_IPSEC_CIPHER_SM4_CBC = 0x18,
+ e_HAL_IPSEC_CIPHER_SM4_ECB = 0x19,
+ /* 新增XTS算法 */
+ e_HAL_IPSEC_CIPHER_AES_XTS = 0x1a,
+ e_HAL_IPSEC_CIPHER_SM4_XTS = 0x1b,
+
+ e_HAL_IPSEC_CIPHER_DES_CBC = 0x31,
+ e_HAL_IPSEC_CIPHER_3DES_CBC = 0x32,
+ e_HAL_IPSEC_CIPHER_CHACHA = 0x50,
+} E_HAL_SEC_IPSEC_CIPHER_ALG;
+
+typedef enum {
+ e_HAL_IPSEC_AUTH_NULL = 0x00,
+
+ /* 新增 */
+ e_HAL_IPSEC_AUTH_AES_GMAC = 0x16, /* 1 */
+ e_HAL_IPSEC_AUTH_SM4_GMAC = 0x1e,
+
+ e_HAL_IPSEC_AUTH_AES_CMAC32 = 0x22, /* 3 */
+ e_HAL_IPSEC_AUTH_AES_CMAC96 = 0x23,
+ e_HAL_IPSEC_AUTH_AES_XCBCMAC = 0x21,
+ e_HAL_IPSEC_AUTH_AES_SHA1 = 0x41, /* 6 */
+ e_HAL_IPSEC_AUTH_AES_SHA224 = 0x42,
+ e_HAL_IPSEC_AUTH_AES_SHA256 = 0x44,
+ e_HAL_IPSEC_AUTH_AES_SHA384 = 0x45,
+ e_HAL_IPSEC_AUTH_AES_SHA512 = 0x46,
+ e_HAL_IPSEC_AUTH_AES_MD5 = 0x43,
+ e_HAL_IPSEC_AUTH_SM3 = 0x47,
+} E_HAL_SEC_IPSEC_AUTH_ALG;
+
+typedef struct {
+ char alg_name[64];
+ char compat_name[64];
+ E_HAL_SEC_IPSEC_CIPHER_ALG e_zxdh_ealgo_id;
+} T_ZXDH_EALGO;
+
+typedef struct {
+ char alg_name[64];
+ char compat_name[64];
+ E_HAL_SEC_IPSEC_AUTH_ALG e_zxdh_auth_id;
+} T_ZXDH_ALGO;
void BttlPubDump(unsigned char *ucBuf, UINT32 udLen);
-UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev,E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType,E_SA_TYPE eSaType,UINT64 uddSaVirAddr,UINT32 udDtbSaIsIntEn,UINT32 udDtbLen,UINT32 udQueIndex);
+UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev,
+ E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType,
+ E_SA_TYPE eSaType, UINT64 uddSaVirAddr,
+ UINT32 udDtbSaIsIntEn, UINT32 udDtbLen,
+ UINT32 udQueIndex);
void zxdh_ipsec_del_sa(struct xfrm_state *xs);
bool zxdh_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs);
-void zxdh_ipsec_state_advance_esn (struct xfrm_state *x);
-void zxdh_ipsec_state_update_curlft (struct xfrm_state *x);
-int zxdh_ipsec_policy_add (struct xfrm_policy *x);
-void zxdh_ipsec_policy_delete (struct xfrm_policy *x);
-void zxdh_ipsec_policy_free (struct xfrm_policy *x);
+void zxdh_ipsec_state_advance_esn(struct xfrm_state *x);
+void zxdh_ipsec_state_update_curlft(struct xfrm_state *x);
+int zxdh_ipsec_policy_add(struct xfrm_policy *x);
+void zxdh_ipsec_policy_delete(struct xfrm_policy *x);
+void zxdh_ipsec_policy_free(struct xfrm_policy *x);
#endif
-
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c
index 79f1a12ec783f00f0c2ecd130b59bbeb38ad5ff9..d438de57e91ad8c9d7202be1569598cbfc5ce0dc 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c
@@ -1,489 +1,476 @@
-/*****************************************************************************
-(C) 2023 ZTE Corporation. 版权所有.
-
-文件名 : en_1588_pkt_proc.c
-内容摘要 : 提供PTP数据包处理相关接口
-作者/日期 : Limin / 2023.10.12
-版本 : 1.0
-*****************************************************************************/
-
-#include "en_1588_pkt_proc.h"
-#include "en_ioctl.h"
-#include "queue.h"
-
-#define PTP_MESSAGE_HRD_LEN 34
-#define IPV6_HDR_LEN 40
-#define IPV6_PROT_OFFSET 6
-#define UDP_DEST_PORT_OFFSET 2
-#define VLAN_TPID 0x8100
-
-/* pi头中pkt_type字段值 */
-#define PTP_EVENT_TYPE_NOSECURITY 2
-#define PTP_EVENT_TYPE_SECURITY 3
-#define PTP_GENERAL_TYPE 0
-#define PTP_TYPE_OFFSET 4
-/* 下行层四1588微码是否需要查询ipsec表 */
-#define PTP_L4_NEED_QUERY_IPSEC_TABLE 1
-#define PTP_TYPE_L4_SECURITY_OFFSET 3
-
-/* L3报文类型 */
-#define ETH_TYPE_PTP 0x88f7
-#define ETH_TYPE_IPV4 0x0800
-#define ETH_TYPE_IPV6 0x86dd
-
-/* L4报文类型 */
-#define ETH_TYPE_UDP 0x11
-#define ETH_TYPE_TCP 0x06
-
-#define UDP_HDR_LEN 0x08
-#define TCP_HDR_LEN 0x14
-
-/* 报文中关键字段的长度 */
-#define ETHER_TYPE_LEN 2
-#define ETHER_MAC_LEN 6
-#define L2_PKT_HDR_LEN ((2 * ETHER_MAC_LEN) + ETHER_TYPE_LEN)
-
-#define IP_PROT_OFFSET 9 /* IP头中protocol字段的偏移 */
-
-#define PTP_MSG_ERROR_TYPE 0xff
-#define PTPHDR_CF_OFFSET 8
-
-#define VLAN_LEN 4
-
-extern int get_hw_timestamp(struct zxdh_en_device *en_dev, u32 *hwts);
-/* PTP报文类型和处理函数对应关系结构体 */
-typedef struct
-{
- uint8_t type;
- int32_t (*proc_func)(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-} MsgProc_t;
-
-typedef struct
-{
- uint8_t type;
- int32_t (*proc_func)(struct zxdh_net_hdr_rcv *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct skb_shared_info *ptSkbSharedInfo,
- struct zxdh_en_device *en_dev);
-} MsgRcv_t;
-
-/* PTP报文类型和处理函数对应关系表 */
-MsgProc_t g_MsgProcTable[] =
-{
- {PTP_MSG_TYPE_SYNC, pkt_proc_type_sync },
- {PTP_MSG_TYPE_DELAY_REQ, pkt_proc_type_delay_req },
- {PTP_MSG_TYPE_PDELAY_REQ, pkt_proc_type_pdelay_req },
- {PTP_MSG_TYPE_PDELAY_RESP, pkt_proc_type_pdelay_resp },
-
- {PTP_MSG_TYPE_FOLLOW_UP, pkt_proc_type_follow_up },
- {PTP_MSG_TYPE_DELAY_RESP, pkt_proc_type_delay_resp },
- {PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP, pkt_proc_type_pdelay_resp_follow_up},
- {PTP_MSG_TYPE_ANNOUNCE, pkt_proc_type_announce },
- {PTP_MSG_TYPE_SIGNALING, pkt_proc_type_signaling },
- {PTP_MSG_TYPE_MANAGEMENT, pkt_proc_type_management },
-
- {PTP_MSG_ERROR_TYPE, NULL }
-};
-
-MsgRcv_t g_MsgRcvTable[] =
-{
- {PTP_MSG_TYPE_SYNC, pkt_rcv_type_event },
- {PTP_MSG_TYPE_DELAY_REQ, pkt_rcv_type_event },
- {PTP_MSG_TYPE_PDELAY_REQ, pkt_rcv_type_event },
- {PTP_MSG_TYPE_PDELAY_RESP, pkt_rcv_type_event },
-
- {PTP_MSG_TYPE_DELAY_RESP, pkt_rcv_type_delay_resp },
-
- {PTP_MSG_ERROR_TYPE, NULL }
-};
-
-/* 判断是否为事件报文 */
-bool is_event_message(const uint8_t msg_type)
-{
- if (msg_type <= PTP_MSG_TYPE_PDELAY_RESP)
- {
- return true;
- }
- return false;
-}
-
-/* 判断是否为普通报文 */
-bool is_general_message(const uint8_t msg_type)
-{
- if ((PTP_MSG_TYPE_FOLLOW_UP <= msg_type) && (msg_type <= PTP_MSG_TYPE_MANAGEMENT))
- {
- return true;
- }
- return false;
-}
-
-/* p得到PTP报文头位置 */
-int32_t get_hdr_point(uint8_t *pData, uint8_t *piTs0ffset, uint8_t **ptpHdr)
-{
- uint16_t udp_dest_port_ptp = 0;
- uint16_t offset = 0;
- uint16_t temp_len = 0;
- uint16_t eth_type_lay3 = ntohs(*((uint16_t*)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */
- uint8_t eth_type_lay4 = 0;
- uint8_t eth_type_lay4_ipv6 = 0;
- uint16_t eth_type_vlan_lay3 = ntohs(*((uint16_t*)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN)));
-
- /* 计算PTP头的偏移 */
- offset = L2_PKT_HDR_LEN;
-
- if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID != eth_type_vlan_lay3)) /* 单vlan偏移 */
- {
- offset += VLAN_LEN;
- }
- else if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID == eth_type_vlan_lay3)) /* 双vlan偏移 */
- {
- offset += (VLAN_LEN * 2);
- }
-
- eth_type_lay3 = ntohs(*((uint16_t*)(pData + offset - ETHER_TYPE_LEN)));
- eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET);
-
- eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET);
-
- if ((ETH_TYPE_PTP != eth_type_lay3) && (ETH_TYPE_IPV4 != eth_type_lay3) && (ETH_TYPE_IPV6 != eth_type_lay3))
- {
- LOG_ERR("unknown L3 eth type: %d\n", eth_type_lay3);
- return IS_NOT_PTP_MSG;
- }
-
- if (ETH_TYPE_IPV4 == eth_type_lay3)
- {
- /* IP首部第一字节: 版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位 */
- temp_len = *(pData + offset);
- temp_len = (temp_len & 0x0f) * 4;
- offset += temp_len;
-
- /* L4类型PTP只有UDP */
- if (ETH_TYPE_UDP == eth_type_lay4)
- {
- udp_dest_port_ptp = ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
- if ((udp_dest_port_ptp != 319) && (udp_dest_port_ptp != 320))
- {
- LOG_ERR("UDP destination port(%hd) is not 319 or 320!!\n", udp_dest_port_ptp);
- return IS_NOT_PTP_MSG;
- }
- temp_len = UDP_HDR_LEN;
- offset += temp_len;
- }
- else
- {
- LOG_ERR("eth_type_lay4 = %c, is not UDP!!!!!\n", eth_type_lay4);
- return IS_NOT_PTP_MSG;
- }
- }
- else if(ETH_TYPE_IPV6 == eth_type_lay3)
- {
- temp_len = IPV6_HDR_LEN;
- offset += temp_len;
-
- /* L4类型PTP只有UDP */
- if (ETH_TYPE_UDP == eth_type_lay4_ipv6)
- {
- udp_dest_port_ptp = ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
- if ((udp_dest_port_ptp != 319) && (udp_dest_port_ptp != 320))
- {
- LOG_ERR("UDP destination port(%hd) is not 319 or 320!!\n", udp_dest_port_ptp);
- return IS_NOT_PTP_MSG;
- }
- temp_len = UDP_HDR_LEN;
- offset += temp_len;
- }
- else
- { LOG_ERR("eth_type_lay4_ipv6 = %c, is not UDP!!!!!!\n",eth_type_lay4_ipv6);
- return IS_NOT_PTP_MSG;
- }
- }
-
- *ptpHdr = pData + offset;
-
- /* 赋值pd头的ts_offset字段 */
- *piTs0ffset = offset;
-
- return PTP_SUCCESS;
-}
-
-/* 从PTP报文头中解析出报文类型 */
-uint8_t get_msgtype_from_hrd(uint8_t *hrd, const uint8_t len)
-{
- uint8_t msg_type = PTP_MSG_ERROR_TYPE;
-
- CHECK_UNEQUAL_ERR(len, PTP_MESSAGE_HRD_LEN, -EFAULT, "error len %d!", len);
-
- msg_type = hrd[0] & 0x0f;
- if (is_event_message(msg_type) || is_general_message(msg_type))
- {
- return msg_type;
- }
-
- LOG_ERR("error message type %d", msg_type);
- return PTP_MSG_ERROR_TYPE;
-}
-
-/* 调用PTP模块驱动接口,读取3个时间戳:两个80bit(T1,T2),一个32bit(T3) */
-#ifdef PTP_DRIVER_INTERFACE_EN
-extern int get_pkt_timestamp(int32_t clock_no, struct zxdh_en_device *en_dev, struct time_stamps *ts, u32 *hwts);
-#endif /* PTP_DRIVER_INTERFACE_EN */
-
-int32_t get_tstamps_from_ptp(int32_t clock_no, struct time_stamps *t5g, struct time_stamps *tsn, uint32_t *thw, struct zxdh_en_device *en_dev)
-{
- uint32_t hwts = 0;
- struct time_stamps ts[2] = {};
-
-#ifdef PTP_DRIVER_INTERFACE_EN
- int32_t ret = 0;
- ret = get_pkt_timestamp(clock_no, en_dev, ts, &hwts);//todo
- if (unlikely(ret != 0))
- {
- LOG_ERR("netdev %s get tsn clock %d failed!, ret = %d", en_dev->netdev->name,clock_no,ret);
- return -1;
- }
-#endif /* PTP_DRIVER_INTERFACE_EN */
-
- LOG_DEBUG("===GET-PTP===: hwts=%u", hwts);
- LOG_DEBUG("===GET-PTP===: ts[0].s=%llu, ts[0].ns=%u", ts[0].s, ts[0].ns);
- LOG_DEBUG("===GET-PTP===: ts[1].s=%llu, ts[1].ns=%u", ts[1].s, ts[1].ns);
-
- *t5g = ts[1];
- *tsn = ts[1];
- *thw = hwts;
-
- return 0;
-}
-#ifdef TIME_STAMP_1588
-/* 发送流程中的报文时间戳处理 */
-int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_net_hdr *hdr, int32_t clock_no, struct zxdh_en_device *en_dev)
-{
- struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */
- struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */
- uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */
- uint8_t *pData = NULL;
- uint8_t *ptpHdr = NULL;
- uint8_t ret = 0;
- uint8_t i = 0;
- uint8_t cnt = 0;
- uint8_t msg_type = 0xff;
- struct ptpHdr_t *ptPtpHdr = NULL;
-
- memset(&ts_5g, 0, sizeof(struct time_stamps));
- memset(&ts_tsn, 0, sizeof(struct time_stamps));
-
- CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
- CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
-
- pData = skb->data;
-
- /* 获得ptp报文头指针&赋值pd头ts_offset字段 */
- ret = get_hdr_point(pData, &(hdr->ts_offset), &ptpHdr);
- CHECK_EQUAL_ERR(ptpHdr, NULL, -EADDRNOTAVAIL, "get ptp hdr failed!\n");
- CHECK_UNEQUAL_ERR(ret, 0, ret, "is not ptp msg or get hdr err!!\n");
-
- ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
- char * phdr = (char *)ptPtpHdr;
-
- /* 解析PTP报文类型 */
- msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN);
- CHECK_EQUAL_ERR(msg_type, PTP_MSG_ERROR_TYPE, -EFAULT, "unknow PTP msg type!\n");
-
- /* 如果是事件报文,提取时间戳 */
- if (is_event_message(msg_type))
- {
- ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, en_dev);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get tstamps from ptp failed!\n");
-
- /* ptp_type[2]的低bit2-4表示pkt_type,加密事件报文类型为2,非加密事件报文为3, */
- hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) + (PTP_EVENT_TYPE_NOSECURITY << PTP_TYPE_OFFSET);
- if (0 != ((ptPtpHdr->flagField) & 0x0080))
- {
- hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) + (PTP_EVENT_TYPE_SECURITY << PTP_TYPE_OFFSET);
- }
- }
- else
- {
- /* 普通报文类型为0 */
- hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) + (PTP_GENERAL_TYPE << PTP_TYPE_OFFSET);
- }
- /* 层四1588报文,下行微码处理时,是否需要查ipsec表, 加密报文需要 */
- LOG_INFO("ptPtpHdr->flagField: 0x%x\n", ptPtpHdr->flagField);
- if (0 != ((ptPtpHdr->flagField) & 0x0080))
- {
- hdr->ptp_type[2] = (hdr->ptp_type[2] & 0xF7) + (PTP_L4_NEED_QUERY_IPSEC_TABLE << PTP_TYPE_L4_SECURITY_OFFSET);
- LOG_INFO("hdr->ptp_type[2]: 0x%x\n", hdr->ptp_type[2]);
- }
- /* 层二发送方向的出端口需要这里指示 */
- hdr->port = en_dev->phy_port;
-
- /* 根据不同报文类型做不同处理 */
- cnt = sizeof(g_MsgProcTable) / sizeof(MsgProc_t);
- for (i = 0; i < cnt; i++)
- {
- if (g_MsgProcTable[i].type == msg_type)
- {
- if (likely(g_MsgProcTable[i].proc_func != NULL))
- {
- ret = g_MsgProcTable[i].proc_func(skb, hdr, ptpHdr, &ts_5g, &ts_tsn, &ts_thw, en_dev);
- }
- }
- }
-
- return ret;
-}
-#endif
-/* 接收流程中的报文时间戳处理 */
-int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_net_hdr_rcv *hdr, int32_t clock_no, struct zxdh_en_device *en_dev)
-{
- struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */
- struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */
- uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */
- uint8_t *pData = NULL;
- uint8_t *ptpHdr = NULL;
- int32_t ret = 0;
- uint8_t i = 0;
- uint8_t cnt = 0;
- uint8_t msg_type = 0xff;
- uint8_t piTsOffset = 0;
-
- memset(&ts_5g, 0, sizeof(struct time_stamps));
- memset(&ts_tsn, 0, sizeof(struct time_stamps));
-
- CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
- CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
-
- pData = skb->data;//TODO,大包data可能会填到非线性区,此处需要修改。
-
- print_data((uint8_t *)hdr, sizeof(struct zxdh_net_hdr)+16); //todo
- print_data(skb->data, skb->len);
-
- /* 获得ptp报文头指针&赋值pi头ts_offset字段 */
- ret = get_hdr_point(pData, &piTsOffset, &ptpHdr);
- CHECK_EQUAL_ERR(ptpHdr, NULL, -EADDRNOTAVAIL, "get ptp hdr failed!\n");
- CHECK_UNEQUAL_ERR(ret, 0, ret, "is not ptp msg or get hdr err!!\n");
-
- /* 解析PTP报文类型 */
- msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN);
- CHECK_EQUAL_ERR(msg_type, PTP_MSG_ERROR_TYPE, -EFAULT, "unknow PTP msg type!\n");
-
- /* 如果是事件报文,提取时间戳 */
- if (is_event_message(msg_type))
- {
- ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, en_dev);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get tstamps from ptp failed!\n");
- }
-
- /* 根据不同报文类型做不同处理 */
- cnt = sizeof(g_MsgRcvTable) / sizeof(MsgRcv_t);
- for (i = 0; i < cnt; i++)
- {
- if (g_MsgRcvTable[i].type == msg_type)
- {
- if (likely(g_MsgRcvTable[i].proc_func != NULL))
- {
- ret = g_MsgRcvTable[i].proc_func(hdr, ptpHdr, &ts_5g, &ts_tsn, &ts_thw, skb_shinfo(skb), en_dev);
- }
- }
- }
-
- return ret;
-}
-
-int32_t is_delay_statistics_pkt(uint8_t *pData)
-{
- uint16_t udp_dest_port = 0;
- uint16_t offset = 0;
- uint16_t temp_len = 0;
- uint16_t eth_type_lay3 = ntohs(*((uint16_t*)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */
- uint8_t eth_type_lay4 = 0;
- uint8_t eth_type_lay4_ipv6 = 0;
- uint16_t eth_type_vlan_lay3 = ntohs(*((uint16_t*)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN)));
-
- /* 计算PTP头的偏移 */
- offset = L2_PKT_HDR_LEN;
-
- if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID != eth_type_vlan_lay3)) /* 单vlan偏移 */
- {
- offset += VLAN_LEN;
- }
- else if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID == eth_type_vlan_lay3)) /* 双vlan偏移 */
- {
- offset += (VLAN_LEN * 2);
- }
-
- eth_type_lay3 = ntohs(*((uint16_t*)(pData + offset - ETHER_TYPE_LEN)));
- eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET);
-
- eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET);
-
- if (ETH_TYPE_IPV4 != eth_type_lay3)
- {
- // LOG_ERR("unknown L4 eth type: %d\n", eth_type_lay3);
- return IS_NOT_STATISTICS_PKT;
- }
-
- if (ETH_TYPE_IPV4 == eth_type_lay3)
- {
- /* IP首部第一字节: 版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位 */
- temp_len = *(pData + offset);
- temp_len = (temp_len & 0x0f) * 4;
- offset += temp_len;
-
- /* L4类型PTP只有UDP */
- if (ETH_TYPE_UDP == eth_type_lay4)
- {
- udp_dest_port = ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
- if (udp_dest_port != 49184)
- {
- // LOG_ERR("UDP destination port(%hd) is not 49184!!\n", udp_dest_port);
- return IS_NOT_STATISTICS_PKT;
- }
- }
- else
- {
- // LOG_ERR("eth_type_lay4 = %c, is not UDP!!!!!\n", eth_type_lay4);
- return IS_NOT_STATISTICS_PKT;
- }
- }
-
- return PTP_SUCCESS;
-}
-
-/* delay统计报文发送流程中的时间戳处理 */
-int32_t pkt_delay_statistics_proc(struct sk_buff *skb, struct zxdh_net_hdr *hdr, struct zxdh_en_device *en_dev)
-{
- uint8_t *pData = NULL;
- uint8_t ret = 0;
- uint32_t ts_thw = 0;
-
- CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
- CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
-
- pData = skb->data;
-
- /* 检查是否是delay统计报文: udp端口号:49184 */
- if(IS_NOT_STATISTICS_PKT == is_delay_statistics_pkt(pData))
- {
- return DELAY_STATISTICS_FAILED;
- }
- /* 时延统计使能 */
- hdr->pd_hdr.ol_flag |= htons(DELAY_STATISTICS_INSERT_EN_BIT);
-
- ret = get_hw_timestamp(en_dev, &ts_thw);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get_hw_timestamp failed!\n");
-
- /*hw的时间戳,写到 PD头的5~8字节:高29位为ns位,低3bits位为小数ns位 */
- *(uint32_t*)(&(hdr->pd_hdr.tag_idx)) = htonl(ts_thw << CPU_TX_DECIMAL_NS); /* 大端对齐 */
-
- return ret;
-}
+/*****************************************************************************
+(C) 2023 ZTE Corporation. 版权所有.
+
+文件名 : en_1588_pkt_proc.c
+内容摘要 : 提供PTP数据包处理相关接口
+作者/日期 : Limin / 2023.10.12
+版本 : 1.0
+*****************************************************************************/
+
+#include "en_1588_pkt_proc.h"
+#include "en_ioctl.h"
+#include "queue.h"
+
+#define PTP_MESSAGE_HRD_LEN 34
+#define IPV6_HDR_LEN 40
+#define IPV6_PROT_OFFSET 6
+#define UDP_DEST_PORT_OFFSET 2
+#define VLAN_TPID 0x8100
+
+/* pi头中pkt_type字段值 */
+#define PTP_EVENT_TYPE_NOSECURITY 2
+#define PTP_EVENT_TYPE_SECURITY 3
+#define PTP_GENERAL_TYPE 0
+#define PTP_TYPE_OFFSET 4
+/* 下行层四1588微码是否需要查询ipsec表 */
+#define PTP_L4_NEED_QUERY_IPSEC_TABLE 1
+#define PTP_TYPE_L4_SECURITY_OFFSET 3
+
+/* L3报文类型 */
+#define ETH_TYPE_PTP 0x88f7
+#define ETH_TYPE_IPV4 0x0800
+#define ETH_TYPE_IPV6 0x86dd
+
+/* L4报文类型 */
+#define ETH_TYPE_UDP 0x11
+#define ETH_TYPE_TCP 0x06
+
+#define UDP_HDR_LEN 0x08
+#define TCP_HDR_LEN 0x14
+
+/* 报文中关键字段的长度 */
+#define ETHER_TYPE_LEN 2
+#define ETHER_MAC_LEN 6
+#define L2_PKT_HDR_LEN ((2 * ETHER_MAC_LEN) + ETHER_TYPE_LEN)
+
+#define IP_PROT_OFFSET 9 /* IP头中protocol字段的偏移 */
+
+#define PTP_MSG_ERROR_TYPE 0xff
+#define PTPHDR_CF_OFFSET 8
+
+#define VLAN_LEN 4
+
+extern int get_hw_timestamp(struct zxdh_en_device *en_dev, u32 *hwts);
+/* PTP报文类型和处理函数对应关系结构体 */
+typedef struct {
+ uint8_t type;
+ int32_t (*proc_func)(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+} MsgProc_t;
+
+typedef struct {
+ uint8_t type;
+ int32_t (*proc_func)(struct zxdh_net_hdr_rcv *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g, struct time_stamps *tsn,
+ uint32_t *thw, struct skb_shared_info *ptSkbSharedInfo,
+ struct zxdh_en_device *en_dev);
+} MsgRcv_t;
+
+/* PTP报文类型和处理函数对应关系表 */
+MsgProc_t g_MsgProcTable[] = {
+ { PTP_MSG_TYPE_SYNC, pkt_proc_type_sync },
+ { PTP_MSG_TYPE_DELAY_REQ, pkt_proc_type_delay_req },
+ { PTP_MSG_TYPE_PDELAY_REQ, pkt_proc_type_pdelay_req },
+ { PTP_MSG_TYPE_PDELAY_RESP, pkt_proc_type_pdelay_resp },
+
+ { PTP_MSG_TYPE_FOLLOW_UP, pkt_proc_type_follow_up },
+ { PTP_MSG_TYPE_DELAY_RESP, pkt_proc_type_delay_resp },
+ { PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP, pkt_proc_type_pdelay_resp_follow_up },
+ { PTP_MSG_TYPE_ANNOUNCE, pkt_proc_type_announce },
+ { PTP_MSG_TYPE_SIGNALING, pkt_proc_type_signaling },
+ { PTP_MSG_TYPE_MANAGEMENT, pkt_proc_type_management },
+
+ { PTP_MSG_ERROR_TYPE, NULL }
+};
+
+MsgRcv_t g_MsgRcvTable[] = { { PTP_MSG_TYPE_SYNC, pkt_rcv_type_event },
+ { PTP_MSG_TYPE_DELAY_REQ, pkt_rcv_type_event },
+ { PTP_MSG_TYPE_PDELAY_REQ, pkt_rcv_type_event },
+ { PTP_MSG_TYPE_PDELAY_RESP, pkt_rcv_type_event },
+
+ { PTP_MSG_TYPE_DELAY_RESP,
+ pkt_rcv_type_delay_resp },
+
+ { PTP_MSG_ERROR_TYPE, NULL } };
+
+/* 判断是否为事件报文 */
+bool is_event_message(const uint8_t msg_type)
+{
+ if (msg_type <= PTP_MSG_TYPE_PDELAY_RESP) {
+ return true;
+ }
+ return false;
+}
+
+/* 判断是否为普通报文 */
+bool is_general_message(const uint8_t msg_type)
+{
+ if ((PTP_MSG_TYPE_FOLLOW_UP <= msg_type) &&
+ (msg_type <= PTP_MSG_TYPE_MANAGEMENT)) {
+ return true;
+ }
+ return false;
+}
+
+/* p得到PTP报文头位置 */
+int32_t get_hdr_point(uint8_t *pData, uint8_t *piTs0ffset, uint8_t **ptpHdr)
+{
+ uint16_t udp_dest_port_ptp = 0;
+ uint16_t offset = 0;
+ uint16_t temp_len = 0;
+ uint16_t eth_type_lay3 = ntohs(
+ *((uint16_t *)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */
+ uint8_t eth_type_lay4 = 0;
+ uint8_t eth_type_lay4_ipv6 = 0;
+ uint16_t eth_type_vlan_lay3 =
+ ntohs(*((uint16_t *)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN)));
+
+ /* 计算PTP头的偏移 */
+ offset = L2_PKT_HDR_LEN;
+
+ if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID != eth_type_vlan_lay3)) {
+ /* 单vlan偏移 */
+ offset += VLAN_LEN;
+ } else if ((VLAN_TPID == eth_type_lay3) &&
+ (VLAN_TPID == eth_type_vlan_lay3)) {
+ /* 双vlan偏移 */
+ offset += (VLAN_LEN * 2);
+ }
+
+ eth_type_lay3 = ntohs(*((uint16_t *)(pData + offset - ETHER_TYPE_LEN)));
+ eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET);
+
+ eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET);
+
+ if ((ETH_TYPE_PTP != eth_type_lay3) && (ETH_TYPE_IPV4 != eth_type_lay3) &&
+ (ETH_TYPE_IPV6 != eth_type_lay3)) {
+ LOG_ERR("unknown L3 eth type: %d\n", eth_type_lay3);
+ return IS_NOT_PTP_MSG;
+ }
+
+ if (ETH_TYPE_IPV4 == eth_type_lay3) {
+ /* IP首部第一字节: 版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位 */
+ temp_len = *(pData + offset);
+ temp_len = (temp_len & 0x0f) * 4;
+ offset += temp_len;
+
+ /* L4类型PTP只有UDP */
+ if (ETH_TYPE_UDP == eth_type_lay4) {
+ udp_dest_port_ptp =
+ ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
+ if ((udp_dest_port_ptp != 319) && (udp_dest_port_ptp != 320)) {
+                LOG_ERR("UDP destination port(%hu) is not 319 or 320!!\n",
+ udp_dest_port_ptp);
+ return IS_NOT_PTP_MSG;
+ }
+ temp_len = UDP_HDR_LEN;
+ offset += temp_len;
+ } else {
+            LOG_ERR("eth_type_lay4 = %u, is not UDP!!!!!\n", eth_type_lay4);
+ return IS_NOT_PTP_MSG;
+ }
+ } else if (ETH_TYPE_IPV6 == eth_type_lay3) {
+ temp_len = IPV6_HDR_LEN;
+ offset += temp_len;
+
+ /* L4类型PTP只有UDP */
+ if (ETH_TYPE_UDP == eth_type_lay4_ipv6) {
+ udp_dest_port_ptp =
+ ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
+ if ((udp_dest_port_ptp != 319) && (udp_dest_port_ptp != 320)) {
+                LOG_ERR("UDP destination port(%hu) is not 319 or 320!!\n",
+ udp_dest_port_ptp);
+ return IS_NOT_PTP_MSG;
+ }
+ temp_len = UDP_HDR_LEN;
+ offset += temp_len;
+ } else {
+            LOG_ERR("eth_type_lay4_ipv6 = %u, is not UDP!!!!!!\n",
+ eth_type_lay4_ipv6);
+ return IS_NOT_PTP_MSG;
+ }
+ }
+
+ *ptpHdr = pData + offset;
+
+ /* 赋值pd头的ts_offset字段 */
+ *piTs0ffset = offset;
+
+ return PTP_SUCCESS;
+}
+
+/* 从PTP报文头中解析出报文类型 */
+uint8_t get_msgtype_from_hrd(uint8_t *hrd, const uint8_t len)
+{
+ uint8_t msg_type = PTP_MSG_ERROR_TYPE;
+
+    CHECK_UNEQUAL_ERR(len, PTP_MESSAGE_HRD_LEN, PTP_MSG_ERROR_TYPE, "error len %d!", len);
+
+ msg_type = hrd[0] & 0x0f;
+ if (is_event_message(msg_type) || is_general_message(msg_type)) {
+ return msg_type;
+ }
+
+ LOG_ERR("error message type %d", msg_type);
+ return PTP_MSG_ERROR_TYPE;
+}
+
+/* 调用PTP模块驱动接口,读取3个时间戳:两个80bit(T1,T2),一个32bit(T3) */
+#ifdef PTP_DRIVER_INTERFACE_EN
+extern int get_pkt_timestamp(int32_t clock_no, struct zxdh_en_device *en_dev,
+ struct time_stamps *ts, u32 *hwts);
+#endif /* PTP_DRIVER_INTERFACE_EN */
+
+int32_t get_tstamps_from_ptp(int32_t clock_no, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ uint32_t hwts = 0;
+ struct time_stamps ts[2] = {};
+
+#ifdef PTP_DRIVER_INTERFACE_EN
+ int32_t ret = 0;
+ ret = get_pkt_timestamp(clock_no, en_dev, ts, &hwts); // todo
+ if (unlikely(ret != 0)) {
+ LOG_ERR("netdev %s get tsn clock %d failed!, ret = %d",
+ en_dev->netdev->name, clock_no, ret);
+ return -1;
+ }
+#endif /* PTP_DRIVER_INTERFACE_EN */
+
+ LOG_DEBUG("===GET-PTP===: hwts=%u", hwts);
+ LOG_DEBUG("===GET-PTP===: ts[0].s=%llu, ts[0].ns=%u", ts[0].s, ts[0].ns);
+ LOG_DEBUG("===GET-PTP===: ts[1].s=%llu, ts[1].ns=%u", ts[1].s, ts[1].ns);
+
+ *t5g = ts[1];
+ *tsn = ts[1];
+ *thw = hwts;
+
+ return 0;
+}
+#ifdef TIME_STAMP_1588
+/* 发送流程中的报文时间戳处理 */
+int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ int32_t clock_no, struct zxdh_en_device *en_dev)
+{
+ struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */
+ struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */
+ uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */
+ uint8_t *pData = NULL;
+ uint8_t *ptpHdr = NULL;
+    int32_t ret = 0;
+ uint8_t i = 0;
+ uint8_t cnt = 0;
+ uint8_t msg_type = 0xff;
+ struct ptpHdr_t *ptPtpHdr = NULL;
+
+ memset(&ts_5g, 0, sizeof(struct time_stamps));
+ memset(&ts_tsn, 0, sizeof(struct time_stamps));
+
+ CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
+ CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
+
+ pData = skb->data;
+
+ /* 获得ptp报文头指针&赋值pd头ts_offset字段 */
+ ret = get_hdr_point(pData, &(hdr->ts_offset), &ptpHdr);
+ CHECK_EQUAL_ERR(ptpHdr, NULL, -EADDRNOTAVAIL, "get ptp hdr failed!\n");
+ CHECK_UNEQUAL_ERR(ret, 0, ret, "is not ptp msg or get hdr err!!\n");
+
+ ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
+ char *phdr = (char *)ptPtpHdr;
+
+ /* 解析PTP报文类型 */
+ msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN);
+ CHECK_EQUAL_ERR(msg_type, PTP_MSG_ERROR_TYPE, -EFAULT,
+ "unknow PTP msg type!\n");
+
+ /* 如果是事件报文,提取时间戳 */
+ if (is_event_message(msg_type)) {
+ ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, en_dev);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get tstamps from ptp failed!\n");
+
+ /* ptp_type[2]的低bit2-4表示pkt_type,加密事件报文类型为2,非加密事件报文为3,
+ */
+ hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) +
+ (PTP_EVENT_TYPE_NOSECURITY << PTP_TYPE_OFFSET);
+ if (0 != ((ptPtpHdr->flagField) & 0x0080)) {
+ hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) +
+ (PTP_EVENT_TYPE_SECURITY << PTP_TYPE_OFFSET);
+ }
+ } else {
+ /* 普通报文类型为0 */
+ hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) +
+ (PTP_GENERAL_TYPE << PTP_TYPE_OFFSET);
+ }
+ /* 层四1588报文,下行微码处理时,是否需要查ipsec表, 加密报文需要 */
+ LOG_INFO("ptPtpHdr->flagField: 0x%x\n", ptPtpHdr->flagField);
+ if (0 != ((ptPtpHdr->flagField) & 0x0080)) {
+ hdr->ptp_type[2] =
+ (hdr->ptp_type[2] & 0xF7) +
+ (PTP_L4_NEED_QUERY_IPSEC_TABLE << PTP_TYPE_L4_SECURITY_OFFSET);
+ LOG_INFO("hdr->ptp_type[2]: 0x%x\n", hdr->ptp_type[2]);
+ }
+ /* 层二发送方向的出端口需要这里指示 */
+ hdr->port = en_dev->phy_port;
+
+ /* 根据不同报文类型做不同处理 */
+ cnt = sizeof(g_MsgProcTable) / sizeof(MsgProc_t);
+ for (i = 0; i < cnt; i++) {
+ if (g_MsgProcTable[i].type == msg_type) {
+ if (likely(g_MsgProcTable[i].proc_func != NULL)) {
+ ret = g_MsgProcTable[i].proc_func(skb, hdr, ptpHdr, &ts_5g,
+ &ts_tsn, &ts_thw, en_dev);
+ }
+ }
+ }
+
+ return ret;
+}
+#endif
+/* 接收流程中的报文时间戳处理 */
+int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_net_hdr_rcv *hdr,
+ int32_t clock_no, struct zxdh_en_device *en_dev)
+{
+ struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */
+ struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */
+ uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */
+ uint8_t *pData = NULL;
+ uint8_t *ptpHdr = NULL;
+ int32_t ret = 0;
+ uint8_t i = 0;
+ uint8_t cnt = 0;
+ uint8_t msg_type = 0xff;
+ uint8_t piTsOffset = 0;
+
+ memset(&ts_5g, 0, sizeof(struct time_stamps));
+ memset(&ts_tsn, 0, sizeof(struct time_stamps));
+
+ CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
+ CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
+
+ pData = skb->data; // TODO,大包data可能会填到非线性区,此处需要修改。
+
+ print_data((uint8_t *)hdr, sizeof(struct zxdh_net_hdr) + 16); // todo
+ print_data(skb->data, skb->len);
+
+ /* 获得ptp报文头指针&赋值pi头ts_offset字段 */
+ ret = get_hdr_point(pData, &piTsOffset, &ptpHdr);
+ CHECK_EQUAL_ERR(ptpHdr, NULL, -EADDRNOTAVAIL, "get ptp hdr failed!\n");
+ CHECK_UNEQUAL_ERR(ret, 0, ret, "is not ptp msg or get hdr err!!\n");
+
+ /* 解析PTP报文类型 */
+ msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN);
+ CHECK_EQUAL_ERR(msg_type, PTP_MSG_ERROR_TYPE, -EFAULT,
+ "unknow PTP msg type!\n");
+
+ /* 如果是事件报文,提取时间戳 */
+ if (is_event_message(msg_type)) {
+ ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, en_dev);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get tstamps from ptp failed!\n");
+ }
+
+ /* 根据不同报文类型做不同处理 */
+ cnt = sizeof(g_MsgRcvTable) / sizeof(MsgRcv_t);
+ for (i = 0; i < cnt; i++) {
+ if (g_MsgRcvTable[i].type == msg_type) {
+ if (likely(g_MsgRcvTable[i].proc_func != NULL)) {
+ ret = g_MsgRcvTable[i].proc_func(hdr, ptpHdr, &ts_5g, &ts_tsn,
+ &ts_thw, skb_shinfo(skb),
+ en_dev);
+ }
+ }
+ }
+
+ return ret;
+}
+
+int32_t is_delay_statistics_pkt(uint8_t *pData)
+{
+ uint16_t udp_dest_port = 0;
+ uint16_t offset = 0;
+ uint16_t temp_len = 0;
+ uint16_t eth_type_lay3 = ntohs(
+ *((uint16_t *)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */
+ uint8_t eth_type_lay4 = 0;
+ uint8_t eth_type_lay4_ipv6 = 0;
+ uint16_t eth_type_vlan_lay3 =
+ ntohs(*((uint16_t *)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN)));
+
+ /* 计算PTP头的偏移 */
+ offset = L2_PKT_HDR_LEN;
+
+ if ((VLAN_TPID == eth_type_lay3) && (VLAN_TPID != eth_type_vlan_lay3)) {
+ /* 单vlan偏移 */
+ offset += VLAN_LEN;
+ } else if ((VLAN_TPID == eth_type_lay3) &&
+ (VLAN_TPID == eth_type_vlan_lay3)) {
+ /* 双vlan偏移 */
+ offset += (VLAN_LEN * 2);
+ }
+
+ eth_type_lay3 = ntohs(*((uint16_t *)(pData + offset - ETHER_TYPE_LEN)));
+ eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET);
+
+ eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET);
+
+ if (ETH_TYPE_IPV4 != eth_type_lay3) {
+ // LOG_ERR("unknown L4 eth type: %d\n", eth_type_lay3);
+ return IS_NOT_STATISTICS_PKT;
+ }
+
+ if (ETH_TYPE_IPV4 == eth_type_lay3) {
+ /* IP首部第一字节: 版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位
+ */
+ temp_len = *(pData + offset);
+ temp_len = (temp_len & 0x0f) * 4;
+ offset += temp_len;
+
+ /* L4类型PTP只有UDP */
+ if (ETH_TYPE_UDP == eth_type_lay4) {
+ udp_dest_port =
+ ntohs(*(uint16_t *)(pData + offset + UDP_DEST_PORT_OFFSET));
+ if (udp_dest_port != 49184) {
+ // LOG_ERR("UDP destination port(%hd) is not 49184!!\n", udp_dest_port);
+ return IS_NOT_STATISTICS_PKT;
+ }
+ } else {
+ // LOG_ERR("eth_type_lay4 = %c, is not UDP!!!!!\n", eth_type_lay4);
+ return IS_NOT_STATISTICS_PKT;
+ }
+ }
+
+ return PTP_SUCCESS;
+}
+
+/* delay统计报文发送流程中的时间戳处理 */
+int32_t pkt_delay_statistics_proc(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ struct zxdh_en_device *en_dev)
+{
+ uint8_t *pData = NULL;
+    int32_t ret = 0;
+ uint32_t ts_thw = 0;
+
+ CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n");
+ CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n");
+
+ pData = skb->data;
+
+ /* 检查是否是delay统计报文: udp端口号:49184 */
+ if (IS_NOT_STATISTICS_PKT == is_delay_statistics_pkt(pData)) {
+ return DELAY_STATISTICS_FAILED;
+ }
+ /* 时延统计使能 */
+ hdr->pd_hdr.ol_flag |= htons(DELAY_STATISTICS_INSERT_EN_BIT);
+
+ ret = get_hw_timestamp(en_dev, &ts_thw);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get_hw_timestamp failed!\n");
+
+ /*hw的时间戳,写到 PD头的5~8字节:高29位为ns位,低3bits位为小数ns位 */
+ *(uint32_t *)(&(hdr->pd_hdr.tag_idx)) =
+ htonl(ts_thw << CPU_TX_DECIMAL_NS); /* 大端对齐 */
+
+ return ret;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h
index 63af6286aa6666417d2c836e5cf38d2b393a47f0..ac59029818c4fd603f0b3fe1771034d90621d548 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h
@@ -1,34 +1,38 @@
-/*****************************************************************************
-(C) 2023 ZTE Corporation. 版权所有.
-
-文件名 : en_1588_pkt_proc.h
-内容摘要 : 提供PTP数据包处理相关接口
-作者/日期 : Limin / 2023.10.12
-版本 : 1.0
-*****************************************************************************/
-
-#ifndef _EN_1588_PKT_PROC_H_
-#define _EN_1588_PKT_PROC_H_
-
-#ifdef __cplusplus
- extern "C" {
-#endif /* __cplusplus */
-
-#include "en_1588_pkt_proc_func.h"
-
-#define PTP_SUCCESS 0
-#define PTP_FAILED (-1)
-#define IS_NOT_PTP_MSG 1
-#define IS_NOT_STATISTICS_PKT 1
-#define DELAY_STATISTICS_FAILED (-1)
-
-int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_net_hdr *hdr, int32_t clock_no, struct zxdh_en_device *en_dev);
-int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_net_hdr_rcv *hdr, int32_t clock_no, struct zxdh_en_device *en_dev);
-int32_t pi_1588_net_hdr_add(struct sk_buff *skb, struct zxdh_net_hdr *hdr, int32_t clock_no, struct zxdh_en_device *en_dev);
-int32_t pkt_delay_statistics_proc(struct sk_buff *skb, struct zxdh_net_hdr *hdr, struct zxdh_en_device *en_dev);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
+/*****************************************************************************
+(C) 2023 ZTE Corporation. 版权所有.
+
+文件名 : en_1588_pkt_proc.h
+内容摘要 : 提供PTP数据包处理相关接口
+作者/日期 : Limin / 2023.10.12
+版本 : 1.0
+*****************************************************************************/
+
+#ifndef _EN_1588_PKT_PROC_H_
+#define _EN_1588_PKT_PROC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "en_1588_pkt_proc_func.h"
+
+#define PTP_SUCCESS 0
+#define PTP_FAILED (-1)
+#define IS_NOT_PTP_MSG 1
+#define IS_NOT_STATISTICS_PKT 1
+#define DELAY_STATISTICS_FAILED (-1)
+
+int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ int32_t clock_no, struct zxdh_en_device *en_dev);
+int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_net_hdr_rcv *hdr,
+ int32_t clock_no, struct zxdh_en_device *en_dev);
+int32_t pi_1588_net_hdr_add(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ int32_t clock_no, struct zxdh_en_device *en_dev);
+int32_t pkt_delay_statistics_proc(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ struct zxdh_en_device *en_dev);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
#endif /* _EN_1588_PKT_PROC_H_ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c
index 53786dcf4c333d8ca82292d7ef5e69eb1ce6d30b..e698b80a59d23127911b2f0e5fdf6aa53b2ea083 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c
@@ -1,673 +1,631 @@
-/*****************************************************************************
-(C) 2023 ZTE Corporation. 版权所有.
-
-文件名 : en_1588_pkt_proc_func.c
-内容摘要 : 不同数据类型包的处理接口实现
-作者/日期 : Limin / 2023.10.12
-版本 : 1.0
-*****************************************************************************/
-
-#include "en_1588_pkt_proc_func.h"
-#include "en_cmd.h"
-#include "en_ioctl.h"
-
-struct ptp_update_buff tGlobalPtpBuff = {0};
-
-uint64_t htonll(uint64_t u64_host)
-{
- uint64_t u64_net = 0;
- uint32_t u32_host_h = 0;
- uint32_t u32_host_l = 0;
-
- u32_host_l = u64_host & 0xffffffff;
- u32_host_h = (u64_host >> 32) & 0xffffffff;
-
- u64_net = htonl(u32_host_l);
- u64_net = ( u64_net << 32 ) | htonl(u32_host_h);
-
- return u64_net;
-}
-
-/**
-* @brief 计算两时间戳subtraction和minuend之差,并将差值赋值给*ptMinusRet
-* @param minuend 高48bit为s位,低32bit为ns位
-*/
-int32_t bits_80_minus(struct time_stamps subtraction, Bits80_t minuend, struct time_stamps *ptMinusRet)
-{
- uint64_t minusHigh48_s = 0;
- uint32_t minusLow32_ns = 0;
-
- /* 取出80bits被减数的ns位值和s位值 */
- memcpy((uint8_t *)(&minusHigh48_s), &minuend, S_SIZE);
- memcpy(&minusLow32_ns, (uint8_t *)(&minuend) + S_SIZE, NS_SIZE);
-
- /* minuend大端 */
- minusHigh48_s = htonll(minusHigh48_s) >> 16;
- minusLow32_ns = htonl(minusLow32_ns);
-
- /* 如果减数值小于被减数值 */
- if ((subtraction.s < minusHigh48_s) || ((subtraction.s == minusHigh48_s) && (subtraction.ns < minusLow32_ns)))
- {
- LOG_ERR("The difference between the two times is negative!!");
- return PTP_RET_TIME_ERR;
- }
-
- if (subtraction.ns > minusLow32_ns)
- {
- ptMinusRet->ns = subtraction.ns - minusLow32_ns; /* 赋值ns位 */
- ptMinusRet->s = subtraction.s - minusHigh48_s; /* 赋值s位 */
- }
- else
- {
- ptMinusRet->ns = S_HOLD - (minusLow32_ns - subtraction.ns); /* 赋值ns位 */
- ptMinusRet->s = subtraction.s - minusHigh48_s - 1; /* 赋值s位 */
- }
-
- return PTP_RET_SUCCESS;
-}
-
-int32_t pkt_proc_type_sync(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- struct SkbSharedHwtstamps_t tShhwtstamps;
- struct time_stamps tMinusRet;
- struct skb_shared_hwtstamps tHwtstamps5g;
- struct skb_shared_hwtstamps tHwtstampsTsn;
- Bits80_t tTsi;
- struct ptpHdr_t * ptPtpHdr = NULL;
- uint8_t *pOriginTimeStamp = NULL;
- uint8_t majorSdoId = 0;
- uint32_t t5gNsBig = 0;
- uint64_t t5gSBig = 0;
- uint32_t tsnNsBig = 0;
- uint64_t tsnSBig = 0;
-#ifdef TIME_STAMP_1588
- uint32_t frequency = 0;
- uint64_t cfAddedVal = 0;
- uint8_t *tsiTlv = NULL;
- uint32_t cpuTx_ns = 0;
- uint32_t cpuTx_frac_ns = 0;
- uint64_t cfNs = 0;
-#endif
-
- ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
- majorSdoId = ((ptPtpHdr->majorType) & 0xf0) >> 4;
- pOriginTimeStamp = ptpHdr + sizeof(struct ptpHdr_t);
- t5gSBig = (htonll(t5g->s)) >> 16;
- t5gNsBig = htonl(t5g->ns);
- tsnSBig = (htonll(tsn->s)) >> 16;
- tsnNsBig = htonl(tsn->ns);
-
- memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
- memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
- memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
- memset(&tMinusRet, 0, sizeof(struct time_stamps));
- memset(&tTsi, 0, sizeof(Bits80_t));
-#ifdef TIME_STAMP_1588
- /* 解析PTP Header的majorSdoId字段,如果是0,表示PTP消息由1588使用 */
- if (0 == majorSdoId)
- {
- /* 解析Flag字段低一字节bit1,如果为0,则是一步法,为1不做处理 */
- if (0 == ((ptPtpHdr->flagField) & 0x0002))
- {
- memcpy(pOriginTimeStamp, &t5gSBig, S_SIZE);
- memcpy(pOriginTimeStamp + S_SIZE, &t5gNsBig, NS_SIZE);
- }
- }
- else if (1 == majorSdoId) /* 如果是1,表示PTP消息由802.1AS协议使用 */
- {
- /* 解析Flag字段低一字节bit1,如果为0,则是一步法 */
- if (0 == ((ptPtpHdr->flagField) & 0x0002))
- {
- /* tsn时间戳放到Sync报文的originTimestamp字段 */
- memcpy(pOriginTimeStamp, &tsnSBig, S_SIZE);
- memcpy(pOriginTimeStamp + S_SIZE, &tsnNsBig, NS_SIZE);
-
- if (0 != ((ptPtpHdr->flagField) & 0x8000)) /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理*/
- {
- frequency = *(uint32_t *)(ptpHdr + PTPHDR_FREQUENCY_OFFSET);
- frequency = htonl(frequency);/* 频率比 */
-
- memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, sizeof(Bits80_t));
-
- bits_80_minus(*t5g, tTsi, &tMinusRet);
-
- /* (*t5g-*tsi)*频率比 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */
- cfAddedVal = (tMinusRet.s * S_HOLD + tMinusRet.ns) * frequency;
- memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
- cfNs = htonll(cfNs) >> 16;
- cfNs += cfAddedVal;
- cfNs = htonll(cfNs) >> 16;
- memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
-
- /* flagField字段的高1字节bit7清0 */
- ptPtpHdr->flagField = (ptPtpHdr->flagField) & 0x7f;
-
- /* 清除20 byte tTsi TLV为0 */
- tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET;
- memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN);
-
- /* 把Header中的messageLength值减去20 */
- ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
- ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN;
- ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
- }
- }
- else /* 如果为1,则是两步法 */
- {
- /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理 */
- if (0 != ((ptPtpHdr->flagField) & 0x8000))
- {
- memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, sizeof(Bits80_t));
-
- bits_80_minus(*t5g, tTsi, &tMinusRet);
-
- /* (*t5g-*tsi)*频率比 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */
- frequency = htonl(ptPtpHdr-> msgTypeSpecific);
- cfAddedVal = (tMinusRet.s * S_HOLD + tMinusRet.ns) * frequency;
- memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
- cfNs = htonll(cfNs) >> 16;
- cfNs += cfAddedVal;
- cfNs = htonll(cfNs) >> 16;
- memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
-
- /* 将messagetypespecific清0 */
- memset(&(ptPtpHdr->msgTypeSpecific), 0, sizeof(uint32_t));
-
- /* flagField字段的高1字节bit7清0 */
- ptPtpHdr->flagField = (ptPtpHdr->flagField) & 0x7f;
-
- /* 清除20 byte tTsi TLV为0 */
- tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET_TWO;
- memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN);
-
- /* 把Header中的messageLength值减去20 */
- ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
- ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN;
- ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
- }
- }
- }
-
- /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */
- cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
- cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
- hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns); /* 大端对齐 */
-
- /* 两个80bit时间戳(T1,T2)放到socket的ERR_QUEUE中 */
- tShhwtstamps.ts_5g_t = *t5g;
- tShhwtstamps.ts_tsn_t = *tsn;
- tHwtstamps5g.hwtstamp = tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
- tHwtstampsTsn.hwtstamp = tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
- skb_tstamp_tx(skb, &tHwtstamps5g);
-#ifdef CGEL_TSTAMP_2_PATCH_EN
- skb_tstamp_tx_2(skb, &tHwtstampsTsn);
-#endif /* CGEL_TSTAMP_2_PATCH_EN */
-#endif
- return PTP_RET_SUCCESS;
-}
-
-int32_t delay_and_pdelay_req_proc(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw)
-{
-#ifdef TIME_STAMP_1588
- struct SkbSharedHwtstamps_t tShhwtstamps;
- struct skb_shared_hwtstamps tHwtstamps5g;
- struct skb_shared_hwtstamps tHwtstampsTsn;
- uint32_t cpuTx_ns = 0;
- uint32_t cpuTx_frac_ns = 0;
-
- memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
- memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
- memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
- /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */
- cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
- cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
- hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns);
-
- tShhwtstamps.ts_5g_t = *t5g;
- tShhwtstamps.ts_tsn_t = *tsn;
-
- /* 2个80bit放到socket error queue中 */
- tHwtstamps5g.hwtstamp = tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
- tHwtstampsTsn.hwtstamp = tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
- skb_tstamp_tx(skb, &tHwtstamps5g);
-#ifdef CGEL_TSTAMP_2_PATCH_EN
- skb_tstamp_tx_2(skb, &tHwtstampsTsn);
-#endif /* CGEL_TSTAMP_2_PATCH_EN */
-#endif
- return PTP_RET_SUCCESS;
-}
-
-int32_t pkt_proc_type_delay_req(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- ret = delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw);
- return ret;
-}
-
-int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- ret = delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw);
- return ret;
-}
-
-int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
-#ifdef TIME_STAMP_1588
- Bits80_t tReqReceTs;
- struct SkbSharedHwtstamps_t tShhwtstamps;
- struct skb_shared_hwtstamps tHwtstamps5g;
- struct skb_shared_hwtstamps tHwtstampsTsn;
- struct time_stamps tMinusRet;
- struct ptpHdr_t *ptPtpHdr = NULL;
- uint64_t MinusVal = 0;
- uint32_t cpuTx_ns = 0;
- uint32_t cpuTx_frac_ns = 0;
- uint64_t cfNs = 0;
-
- memset(&tReqReceTs, 0, sizeof(Bits80_t));
- memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
- memset(&tMinusRet, 0, sizeof(struct time_stamps));
- memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
- memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
- ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
-
- /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */
- cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
- cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
- hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns);
-
- /* 解析Header中flagField的低1字节的bit1,如果是0(一步法) */
- if (0 == (ptPtpHdr->flagField & 0x0002))
- {
- /* 提取requestRecieptTimestamp */
- tReqReceTs = *(Bits80_t *)(ptpHdr + sizeof(struct ptpHdr_t)); /* 记为T2 */
-
- /* 将*tsn-T2的差值加到CorrectionField字段的高48bit ns位上 */
- bits_80_minus(*tsn, tReqReceTs, &tMinusRet);
- MinusVal = tMinusRet.ns + tMinusRet.s * S_HOLD;
- memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
- cfNs = htonll(cfNs) >> 16;
- cfNs += MinusVal;
- cfNs = htonll(cfNs) >> 16;
- memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
- }
-
- tShhwtstamps.ts_5g_t = *t5g;
- tShhwtstamps.ts_tsn_t = *tsn;
-
- /* 2个80bit放到socket error queue中 */
- tHwtstamps5g.hwtstamp = tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
- tHwtstampsTsn.hwtstamp = tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
- skb_tstamp_tx(skb, &tHwtstamps5g);
-#ifdef CGEL_TSTAMP_2_PATCH_EN
- skb_tstamp_tx_2(skb, &tHwtstampsTsn);
-#endif /* CGEL_TSTAMP_2_PATCH_EN */
-#endif
- return PTP_RET_SUCCESS;
-}
-
-/* 接收方向的事件报文的时间戳处理函数:在1588驱动中对接收方向的事件报文的时间戳处理是一致的 */
-int32_t pkt_rcv_type_event(struct zxdh_net_hdr_rcv *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct skb_shared_info *ptSkbSharedInfo, \
- struct zxdh_en_device *en_dev)
-{
-#ifdef TIME_STAMP_1588
- struct SkbSharedHwtstamps_t tShhwtstamps;
- uint32_t tsRx = 0;
- uint32_t tsRx_ns = 0;
- uint32_t tsRx_frac_ns = 0;
- int32_t MinusRetThwCpu = 0;
- uint64_t temp = 0x20000000;
- uint32_t i = 0;
-
- memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
- /* cpu_tx高29bits ns位,低3bits小数ns位 */
- tsRx = htonl(hdr->rx_ts);
-
- tsRx_frac_ns = tsRx & 0x07;
- tsRx_ns = tsRx >> 3;
- // LOG_DEBUG("hdr->rx_ts = %d, tsRx = %d, tsRx_ns = %d\n", hdr->rx_ts, tsRx, tsRx_ns);
-
- if (tsRx_frac_ns > 4)
- {
- tsRx_ns += 1;
- }
- // LOG_DEBUG("thw = %d, tsRx_ns = %d\n", *thw, tsRx_ns);
- MinusRetThwCpu = (*thw & 0x1fffffff) - tsRx_ns;
-
- if(MinusRetThwCpu < 0)
- {
- MinusRetThwCpu += temp;
- }
-
- LOG_DEBUG("MinusRetThwCpu = %d\n", MinusRetThwCpu);
-
- tShhwtstamps.ts_5g_t = *t5g;
- tShhwtstamps.ts_tsn_t = *tsn;
-
- /* 更新两个80bits时间戳 */
- if (tShhwtstamps.ts_5g_t.ns > MinusRetThwCpu)
- {
- tShhwtstamps.ts_5g_t.ns -= MinusRetThwCpu;
- }
- else
- {
- for (i = 1; i < tShhwtstamps.ts_5g_t.s + 1; i++)
- {
- temp = i * S_HOLD + tShhwtstamps.ts_5g_t.ns;
-
- if (temp > MinusRetThwCpu)
- {
- tShhwtstamps.ts_5g_t.ns = temp - MinusRetThwCpu;
- tShhwtstamps.ts_5g_t.s -= i;
- break;
- }
- }
- if (temp < MinusRetThwCpu)
- {
- LOG_ERR("ts_5g_t < MinusRetThwCpu!!!\n");
- }
- }
-
- if (tShhwtstamps.ts_tsn_t.ns > MinusRetThwCpu)
- {
- tShhwtstamps.ts_tsn_t.ns -= MinusRetThwCpu;
- }
- else
- {
- for (i = 1; i < tShhwtstamps.ts_tsn_t.s + 1; i++)
- {
- temp = i * S_HOLD + tShhwtstamps.ts_tsn_t.ns;
- if(temp > MinusRetThwCpu)
- {
- tShhwtstamps.ts_tsn_t.ns = temp - MinusRetThwCpu;
- tShhwtstamps.ts_tsn_t.s -= i;
- break;
- }
- }
- if (temp < MinusRetThwCpu)
- {
- LOG_ERR("ts_tsn_t < MinusRetThwCpu!!!\n");
- }
- }
-
- LOG_DEBUG("enter in pkt_rcv_type_event!!!!\n");
- LOG_DEBUG("tShhwtstamps.ts_5g_t.s = %llu, tShhwtstamps.ts_5g_t.ns = %d\n", tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns);
- LOG_DEBUG("tShhwtstamps.ts_tsn_t.s = %llu, tShhwtstamps.ts_tsn_t.ns = %d\n", tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns);
-
- /* 2个80bit放到socket cmsg中。连同报文返回给应用 */
- ptSkbSharedInfo->hwtstamps.hwtstamp = ktime_set(tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns);
-#ifdef CGEL_TSTAMP_2_PATCH_EN
- ptSkbSharedInfo->hwtstamps2.hwtstamp = ktime_set(tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns);
-#endif /* CGEL_TSTAMP_2_PATCH_EN */
-#endif
- return PTP_RET_SUCCESS;
-}
-
-/**
-* @fn read_ts_match_info
-* @brief 查询时间戳匹配信息,查询到匹配信息后更新cf字段和本地时间戳信息
-* @param msgType ptp事件报文类型
-* @return 返回值为0表示查询时间戳匹配信息成功
-*/
-int32_t read_ts_match_info(uint32_t msgType, uint8_t *ptpHdr)
-{
- uint32_t mssageType = 0;
- int32_t cfNum = 0;
- uint32_t srcPortIdFifo = 0;
- uint32_t sequeIdFifo = 0;
- struct ptpHdr_t *ptPtpHdr = NULL;
- uint32_t matchInfo = 0;
- uint8_t srcPortId = 0;
- uint64_t cfVal = 0;
-
- ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
-
- CHECK_EQUAL_ERR(ptPtpHdr, NULL, -EADDRNOTAVAIL, "tPtpBuff is NULL\n");
-
- srcPortId = *(uint8_t *)(ptPtpHdr->srcPortIdentity + SRCPORTID_LEN - 1); /* 只取srcPortIdentity最后一字节值 */
-
- for (cfNum = 0; cfNum < tGlobalPtpBuff.cfCount; cfNum++)
- {
- matchInfo = tGlobalPtpBuff.ptpRegInfo[cfNum].matchInfo;
- mssageType = (matchInfo >> MSGTYPE_OFFSET) & 0xf;
- srcPortIdFifo = (matchInfo >> SRCPORTID_OFFSET) & 0xf;
- sequeIdFifo = htons(matchInfo & 0xffff);
-
- if((mssageType == msgType) && \
- (srcPortIdFifo == (srcPortId & 0xf)) && \
- (sequeIdFifo == ptPtpHdr->sequenceId))
- {
- LOG_DEBUG("read the match info successfully!!!\n");
- LOG_DEBUG("mssageType: %u, srcPortIdFifo: %u, sequeIdFifo: %u\n", mssageType, srcPortIdFifo, sequeIdFifo);
- memcpy(&cfVal, &(tGlobalPtpBuff.ptpRegInfo[cfNum].cfVal[0]), CF_SIZE);
- cfVal = htonll(cfVal);
- memcpy(&(ptPtpHdr->correctionField[0]), &cfVal, CF_SIZE);
-
- /* 将匹配到的信息从本地buff去除 */
- tGlobalPtpBuff.cfCount--;
- if (cfNum == MAX_PTP_REG_INFO_NUM - 1)
- {
- memset(&(tGlobalPtpBuff.ptpRegInfo[cfNum]), 0, sizeof(struct ptp_reg_info));
- return 0;
- }
- memcpy(&(tGlobalPtpBuff.ptpRegInfo[cfNum]), &(tGlobalPtpBuff.ptpRegInfo[cfNum + 1]), \
- (MAX_PTP_REG_INFO_NUM - cfNum - 1) * sizeof(struct ptp_reg_info));
- memset(&(tGlobalPtpBuff.ptpRegInfo[MAX_PTP_REG_INFO_NUM - 1]), 0, sizeof(struct ptp_reg_info));
-
- return 0;
- }
- }
-
- return -1;
-}
-
-#ifdef PTP_DRIVER_INTERFACE_EN
-extern int32_t get_event_ts_info(struct zxdh_en_device *en_dev, struct ptp_buff* p_tsInfo, int32_t mac_number);
-#endif /* PTP_DRIVER_INTERFACE_EN */
-
-/**
-* @fn general_encrypt_msg_proc
-* @brief 使用两步法,获取、存储和处理不同的ptp加密事件报文的时间戳信息
-* @param msgType ptp事件报文类型
-*/
-int32_t general_encrypt_msg_proc(uint32_t msgType, uint8_t *ptpHdr, struct zxdh_en_device *en_dev)
-{
- int32_t num = 0;
- int32_t macNum = 0;
- int32_t ret = 0;
- struct ptpHdr_t *ptPtpHdr = NULL;
- struct ptp_buff tempBuff;
-
- memset(&tempBuff, 0, sizeof(struct ptp_buff));
- ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
-
- /* 判断报文是否是加密报文 */
- if (!(0x0080 == ((ptPtpHdr->flagField) & 0x0080)))
- {
- return ret;
- }
-
- macNum = zxdh_pf_macpcs_num_get(en_dev);
- if (macNum < 0)
- {
- LOG_ERR("get mac num %d err, its value should is 0-2!\n", macNum);
- return -1;
- }
-
- // LOG_INFO("ptp buff:\n ");
- // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff));
-
- /* 1、从本地buff查询和处理时间戳匹配信息,并更新本地buff */
- ret = read_ts_match_info(msgType, ptpHdr);
-
- /* 2、从本地没匹配到信息,则读取FIFO中信息,将读取到的信息更新到本地,重新匹配 */
- if (ret != 0)
- {
- // LOG_INFO("cannot read the matchInfo from the BUFF!---------------");
-
- #ifdef PTP_DRIVER_INTERFACE_EN
- ret = get_event_ts_info(en_dev, &tempBuff, macNum);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "read FIFO form ptpDriver failed!!!");
- #endif /* PTP_DRIVER_INTERFACE_EN */
-
- /* 2.1 将读取到到的FIFO信息,添加到本地全局buff */
- if (tempBuff.cfCount > 0)
- {
- if (tempBuff.cfCount + tGlobalPtpBuff.cfCount < MAX_PTP_REG_INFO_NUM)
- {
- memcpy(&(tGlobalPtpBuff.ptpRegInfo[tGlobalPtpBuff.cfCount]),
- tempBuff.ptpRegInfo, sizeof(struct ptp_reg_info) * tempBuff.cfCount);
-
- tGlobalPtpBuff.cfCount += tempBuff.cfCount;
- // LOG_INFO("tGlobalPtpBuff.cfCount: %u\n", tGlobalPtpBuff.cfCount);
- }
- else /* 当超过64组时间戳信息时 */
- {
- num = tempBuff.cfCount + tGlobalPtpBuff.cfCount - MAX_PTP_REG_INFO_NUM;
-
- /* 丢弃掉最先存在本地的信息(此信息更大的概率匹配不上) */
- memcpy(&(tGlobalPtpBuff.ptpRegInfo[0]), &(tGlobalPtpBuff.ptpRegInfo[num]),
- sizeof(struct ptp_reg_info) * (MAX_PTP_REG_INFO_NUM - num));
- tGlobalPtpBuff.cfCount -= num;
-
- /* 添加新的信息到本地 */
- memcpy(&(tGlobalPtpBuff.ptpRegInfo[tGlobalPtpBuff.cfCount]),
- tempBuff.ptpRegInfo, sizeof(struct ptp_reg_info) * tempBuff.cfCount);
- tGlobalPtpBuff.cfCount = MAX_PTP_REG_INFO_NUM;
- }
-
- /* 2.2 在更新后的本地全局buff查询和处理匹配信息,并更新本地buff*/
- ret = read_ts_match_info(msgType, ptpHdr);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "cannot read the matchInfo from the local BUFF!");
- }
- }
-
- // LOG_INFO("ptp buff:\n ");
- // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff));
-
- return ret;
-}
-
-
-int32_t pkt_proc_type_follow_up(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- ret = general_encrypt_msg_proc(PTP_MSG_TYPE_SYNC, ptpHdr, en_dev);
-
- return ret;
-}
-
-int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- return 0;
-}
-
-int32_t pkt_rcv_type_delay_resp(struct zxdh_net_hdr_rcv *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct skb_shared_info *ptSkbSharedInfo, \
- struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- ret = general_encrypt_msg_proc(PTP_MSG_TYPE_DELAY_REQ, ptpHdr, en_dev);
-
- return ret;
-}
-
-int32_t pkt_proc_type_pdelay_resp_follow_up(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- ret = general_encrypt_msg_proc(PTP_MSG_TYPE_PDELAY_RESP, ptpHdr, en_dev);
-
- return ret;
-}
-
-int32_t pkt_proc_type_announce(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- /* 驱动不做处理 */
- return 0;
-}
-
-int32_t pkt_proc_type_signaling(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- /* 驱动不做处理 */
- return 0;
-}
-
-int32_t pkt_proc_type_management(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev)
-{
- /* 驱动不做处理 */
- return 0;
-}
+/*****************************************************************************
+(C) 2023 ZTE Corporation. 版权所有.
+
+文件名 : en_1588_pkt_proc_func.c
+内容摘要 : 不同数据类型包的处理接口实现
+作者/日期 : Limin / 2023.10.12
+版本 : 1.0
+*****************************************************************************/
+
+#include "en_1588_pkt_proc_func.h"
+#include "en_cmd.h"
+#include "en_ioctl.h"
+
+struct ptp_update_buff tGlobalPtpBuff = { 0 };
+
/**
 * @brief 64-bit host/network byte-order conversion built from htonl():
 *        the two 32-bit halves are each converted and swapped.
 * @param u64_host value in host byte order
 * @return the converted 64-bit value
 */
uint64_t htonll(uint64_t u64_host)
{
    uint32_t high_half = (uint32_t)((u64_host >> 32) & 0xffffffff);
    uint32_t low_half = (uint32_t)(u64_host & 0xffffffff);

    /* Converted low half becomes the high word, and vice versa. */
    return (((uint64_t)htonl(low_half)) << 32) | (uint64_t)htonl(high_half);
}
+
+/**
+ * @brief 计算两时间戳subtraction和minuend之差,并将差值赋值给*ptMinusRet
+ * @param minuend 高48bit为s位,低32bit为ns位
+ */
+int32_t bits_80_minus(struct time_stamps subtraction, Bits80_t minuend,
+ struct time_stamps *ptMinusRet)
+{
+ uint64_t minusHigh48_s = 0;
+ uint32_t minusLow32_ns = 0;
+
+ /* 取出80bits被减数的ns位值和s位值 */
+ memcpy((uint8_t *)(&minusHigh48_s), &minuend, S_SIZE);
+ memcpy(&minusLow32_ns, (uint8_t *)(&minuend) + S_SIZE, NS_SIZE);
+
+ /* minuend大端 */
+ minusHigh48_s = htonll(minusHigh48_s) >> 16;
+ minusLow32_ns = htonl(minusLow32_ns);
+
+ /* 如果减数值小于被减数值 */
+ if ((subtraction.s < minusHigh48_s) || ((subtraction.s == minusHigh48_s) &&
+ (subtraction.ns < minusLow32_ns))) {
+ LOG_ERR("The difference between the two times is negative!!");
+ return PTP_RET_TIME_ERR;
+ }
+
+ if (subtraction.ns > minusLow32_ns) {
+ ptMinusRet->ns = subtraction.ns - minusLow32_ns; /* 赋值ns位 */
+ ptMinusRet->s = subtraction.s - minusHigh48_s; /* 赋值s位 */
+ } else {
+ ptMinusRet->ns =
+ S_HOLD - (minusLow32_ns - subtraction.ns); /* 赋值ns位 */
+ ptMinusRet->s = subtraction.s - minusHigh48_s - 1; /* 赋值s位 */
+ }
+
+ return PTP_RET_SUCCESS;
+}
+
+int32_t pkt_proc_type_sync(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ struct SkbSharedHwtstamps_t tShhwtstamps;
+ struct time_stamps tMinusRet;
+ struct skb_shared_hwtstamps tHwtstamps5g;
+ struct skb_shared_hwtstamps tHwtstampsTsn;
+ Bits80_t tTsi;
+ struct ptpHdr_t *ptPtpHdr = NULL;
+ uint8_t *pOriginTimeStamp = NULL;
+ uint8_t majorSdoId = 0;
+ uint32_t t5gNsBig = 0;
+ uint64_t t5gSBig = 0;
+ uint32_t tsnNsBig = 0;
+ uint64_t tsnSBig = 0;
+#ifdef TIME_STAMP_1588
+ uint32_t frequency = 0;
+ uint64_t cfAddedVal = 0;
+ uint8_t *tsiTlv = NULL;
+ uint32_t cpuTx_ns = 0;
+ uint32_t cpuTx_frac_ns = 0;
+ uint64_t cfNs = 0;
+#endif
+
+ ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
+ majorSdoId = ((ptPtpHdr->majorType) & 0xf0) >> 4;
+ pOriginTimeStamp = ptpHdr + sizeof(struct ptpHdr_t);
+ t5gSBig = (htonll(t5g->s)) >> 16;
+ t5gNsBig = htonl(t5g->ns);
+ tsnSBig = (htonll(tsn->s)) >> 16;
+ tsnNsBig = htonl(tsn->ns);
+
+ memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
+ memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
+ memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
+ memset(&tMinusRet, 0, sizeof(struct time_stamps));
+ memset(&tTsi, 0, sizeof(Bits80_t));
+#ifdef TIME_STAMP_1588
+ /* 解析PTP Header的majorSdoId字段,如果是0,表示PTP消息由1588使用 */
+ if (0 == majorSdoId) {
+ /* 解析Flag字段低一字节bit1,如果为0,则是一步法,为1不做处理 */
+ if (0 == ((ptPtpHdr->flagField) & 0x0002)) {
+ memcpy(pOriginTimeStamp, &t5gSBig, S_SIZE);
+ memcpy(pOriginTimeStamp + S_SIZE, &t5gNsBig, NS_SIZE);
+ }
+ /* 如果是1,表示PTP消息由802.1AS协议使用 */
+ } else if (1 == majorSdoId) {
+ /* 解析Flag字段低一字节bit1,如果为0,则是一步法 */
+ if (0 == ((ptPtpHdr->flagField) & 0x0002)) {
+ /* tsn时间戳放到Sync报文的originTimestamp字段 */
+ memcpy(pOriginTimeStamp, &tsnSBig, S_SIZE);
+ memcpy(pOriginTimeStamp + S_SIZE, &tsnNsBig, NS_SIZE);
+
+ /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理*/
+ if (0 != ((ptPtpHdr->flagField) & 0x8000)) {
+ frequency = *(uint32_t *)(ptpHdr + PTPHDR_FREQUENCY_OFFSET);
+ frequency = htonl(frequency); /* 频率比 */
+
+ memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, sizeof(Bits80_t));
+
+ bits_80_minus(*t5g, tTsi, &tMinusRet);
+
+ /* (*t5g-*tsi)*频率比* 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */
+ cfAddedVal = (tMinusRet.s * S_HOLD + tMinusRet.ns) * frequency;
+ memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
+ cfNs = htonll(cfNs) >> 16;
+ cfNs += cfAddedVal;
+ cfNs = htonll(cfNs) >> 16;
+ memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
+
+ /* flagField字段的高1字节bit7清0 */
+ ptPtpHdr->flagField = (ptPtpHdr->flagField) & 0x7f;
+
+ /* 清除20 byte tTsi TLV为0 */
+ tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET;
+ memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN);
+
+ /* 把Header中的messageLength值减去20 */
+ ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
+ ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN;
+ ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
+ }
+ /* 如果为1,则是两步法 */
+ } else {
+ /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理 */
+ if (0 != ((ptPtpHdr->flagField) & 0x8000)) {
+ memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, sizeof(Bits80_t));
+
+ bits_80_minus(*t5g, tTsi, &tMinusRet);
+
+ /* (*t5g-*tsi)*频率比* 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */
+ frequency = htonl(ptPtpHdr->msgTypeSpecific);
+ cfAddedVal = (tMinusRet.s * S_HOLD + tMinusRet.ns) * frequency;
+ memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
+ cfNs = htonll(cfNs) >> 16;
+ cfNs += cfAddedVal;
+ cfNs = htonll(cfNs) >> 16;
+ memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
+
+ /* 将messagetypespecific清0 */
+ memset(&(ptPtpHdr->msgTypeSpecific), 0, sizeof(uint32_t));
+
+ /* flagField字段的高1字节bit7清0 */
+ ptPtpHdr->flagField = (ptPtpHdr->flagField) & 0x7f;
+
+ /* 清除20 byte tTsi TLV为0 */
+ tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET_TWO;
+ memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN);
+
+ /* 把Header中的messageLength值减去20 */
+ ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
+ ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN;
+ ptPtpHdr->msglen = htons(ptPtpHdr->msglen);
+ }
+ }
+ }
+
+ /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位
+ */
+ cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
+ cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
+ hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns); /* 大端对齐 */
+
+ /* 两个80bit时间戳(T1,T2)放到socket的ERR_QUEUE中 */
+ tShhwtstamps.ts_5g_t = *t5g;
+ tShhwtstamps.ts_tsn_t = *tsn;
+ tHwtstamps5g.hwtstamp =
+ tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
+ tHwtstampsTsn.hwtstamp =
+ tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
+ skb_tstamp_tx(skb, &tHwtstamps5g);
+#ifdef CGEL_TSTAMP_2_PATCH_EN
+ skb_tstamp_tx_2(skb, &tHwtstampsTsn);
+#endif /* CGEL_TSTAMP_2_PATCH_EN */
+#endif
+ return PTP_RET_SUCCESS;
+}
+
+int32_t delay_and_pdelay_req_proc(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw)
+{
+#ifdef TIME_STAMP_1588
+ struct SkbSharedHwtstamps_t tShhwtstamps;
+ struct skb_shared_hwtstamps tHwtstamps5g;
+ struct skb_shared_hwtstamps tHwtstampsTsn;
+ uint32_t cpuTx_ns = 0;
+ uint32_t cpuTx_frac_ns = 0;
+
+ memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
+ memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
+ memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
+ /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位
+ */
+ cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
+ cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
+ hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns);
+
+ tShhwtstamps.ts_5g_t = *t5g;
+ tShhwtstamps.ts_tsn_t = *tsn;
+
+ /* 2个80bit放到socket error queue中 */
+ tHwtstamps5g.hwtstamp =
+ tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
+ tHwtstampsTsn.hwtstamp =
+ tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
+ skb_tstamp_tx(skb, &tHwtstamps5g);
+#ifdef CGEL_TSTAMP_2_PATCH_EN
+ skb_tstamp_tx_2(skb, &tHwtstampsTsn);
+#endif /* CGEL_TSTAMP_2_PATCH_EN */
+#endif
+ return PTP_RET_SUCCESS;
+}
+
/**
 * @brief TX handler for Delay_Req packets; delegates to the shared
 *        Delay_Req/Pdelay_Req timestamp processing.
 */
int32_t pkt_proc_type_delay_req(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
                                uint8_t *ptpHdr, struct time_stamps *t5g,
                                struct time_stamps *tsn, uint32_t *thw,
                                struct zxdh_en_device *en_dev)
{
    return delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw);
}
+
/**
 * @brief TX handler for Pdelay_Req packets; delegates to the shared
 *        Delay_Req/Pdelay_Req timestamp processing.
 */
int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
                                 uint8_t *ptpHdr, struct time_stamps *t5g,
                                 struct time_stamps *tsn, uint32_t *thw,
                                 struct zxdh_en_device *en_dev)
{
    return delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw);
}
+
/**
 * @brief TX handling for Pdelay_Resp packets: write the PTPM timestamp into
 *        the PI header and, for one-step operation, add the turnaround time
 *        (*tsn - requestReceiptTimestamp) to the correctionField; both
 *        80-bit hardware timestamps are then queued on the socket error
 *        queue.
 * @return PTP_RET_SUCCESS always
 */
int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
                                  uint8_t *ptpHdr, struct time_stamps *t5g,
                                  struct time_stamps *tsn, uint32_t *thw,
                                  struct zxdh_en_device *en_dev)
{
#ifdef TIME_STAMP_1588
    Bits80_t tReqReceTs;
    struct SkbSharedHwtstamps_t tShhwtstamps;
    struct skb_shared_hwtstamps tHwtstamps5g;
    struct skb_shared_hwtstamps tHwtstampsTsn;
    struct time_stamps tMinusRet;
    struct ptpHdr_t *ptPtpHdr = NULL;
    uint64_t MinusVal = 0;
    uint32_t cpuTx_ns = 0;
    uint32_t cpuTx_frac_ns = 0;
    uint64_t cfNs = 0;

    memset(&tReqReceTs, 0, sizeof(Bits80_t));
    memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
    memset(&tMinusRet, 0, sizeof(struct time_stamps));
    memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps));
    memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps));
    ptPtpHdr = (struct ptpHdr_t *)ptpHdr;

    /* Write the 32-bit PTPM timestamp into the PI header cpu_tx field:
     * high 29 bits are ns, low 3 bits fractional ns. */
    cpuTx_frac_ns = (hdr->cpu_tx) & 0x07;
    cpuTx_ns = *thw << CPU_TX_DECIMAL_NS;
    hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns);

    /* flagField low-byte bit1 clear -> one-step operation */
    if (0 == (ptPtpHdr->flagField & 0x0002)) {
        /* Extract requestReceiptTimestamp, denoted T2 */
        tReqReceTs =
            *(Bits80_t *)(ptpHdr + sizeof(struct ptpHdr_t));

        /* Add (*tsn - T2) to the high-48-bit ns part of correctionField.
         * NOTE(review): the bits_80_minus return value is ignored; on a
         * negative difference tMinusRet stays zeroed and cf is unchanged
         * -- confirm that is intended. */
        bits_80_minus(*tsn, tReqReceTs, &tMinusRet);
        MinusVal = tMinusRet.ns + tMinusRet.s * S_HOLD;
        memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE);
        cfNs = htonll(cfNs) >> 16;
        cfNs += MinusVal;
        cfNs = htonll(cfNs) >> 16;
        memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE);
    }

    tShhwtstamps.ts_5g_t = *t5g;
    tShhwtstamps.ts_tsn_t = *tsn;

    /* Queue the two 80-bit timestamps on the socket error queue */
    tHwtstamps5g.hwtstamp =
        tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD;
    tHwtstampsTsn.hwtstamp =
        tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD;
    skb_tstamp_tx(skb, &tHwtstamps5g);
#ifdef CGEL_TSTAMP_2_PATCH_EN
    skb_tstamp_tx_2(skb, &tHwtstampsTsn);
#endif /* CGEL_TSTAMP_2_PATCH_EN */
#endif
    return PTP_RET_SUCCESS;
}
+
/* RX-direction event-message timestamp handler: the 1588 driver processes
 * timestamps identically for all received event messages.  Corrects both
 * 80-bit hardware timestamps by the cpu->PTPM delay and attaches them to
 * the skb's shared info for delivery via the socket cmsg. */
int32_t pkt_rcv_type_event(struct zxdh_net_hdr_rcv *hdr, uint8_t *ptpHdr,
                           struct time_stamps *t5g, struct time_stamps *tsn,
                           uint32_t *thw,
                           struct skb_shared_info *ptSkbSharedInfo,
                           struct zxdh_en_device *en_dev)
{
#ifdef TIME_STAMP_1588
    struct SkbSharedHwtstamps_t tShhwtstamps;
    uint32_t tsRx = 0;
    uint32_t tsRx_ns = 0;
    uint32_t tsRx_frac_ns = 0;
    int32_t MinusRetThwCpu = 0;
    uint64_t temp = 0x20000000; /* 2^29, wrap modulus of the 29-bit ns field */
    uint32_t i = 0;

    memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t));
    /* rx_ts layout: high 29 bits ns, low 3 bits fractional ns */
    tsRx = htonl(hdr->rx_ts);

    tsRx_frac_ns = tsRx & 0x07;
    tsRx_ns = tsRx >> 3;
    // LOG_DEBUG("hdr->rx_ts = %d, tsRx = %d, tsRx_ns = %d\n", hdr->rx_ts, tsRx,
    // tsRx_ns);

    /* Round up when the fractional part exceeds half a nanosecond */
    if (tsRx_frac_ns > 4) {
        tsRx_ns += 1;
    }
    // LOG_DEBUG("thw = %d, tsRx_ns = %d\n", *thw, tsRx_ns);
    /* Delay between the PTPM timestamp and the packet RX stamp, both taken
     * modulo 2^29 ns */
    MinusRetThwCpu = (*thw & 0x1fffffff) - tsRx_ns;

    if (MinusRetThwCpu < 0) {
        /* Correct for the 29-bit counter wrapping between the two stamps */
        MinusRetThwCpu += temp;
    }

    LOG_DEBUG("MinusRetThwCpu = %d\n", MinusRetThwCpu);

    tShhwtstamps.ts_5g_t = *t5g;
    tShhwtstamps.ts_tsn_t = *tsn;

    /* Back-date both 80-bit timestamps by MinusRetThwCpu nanoseconds */
    if (tShhwtstamps.ts_5g_t.ns > MinusRetThwCpu) {
        tShhwtstamps.ts_5g_t.ns -= MinusRetThwCpu;
    } else {
        /* Borrow whole seconds until the subtraction stops underflowing */
        for (i = 1; i < tShhwtstamps.ts_5g_t.s + 1; i++) {
            temp = i * S_HOLD + tShhwtstamps.ts_5g_t.ns;

            if (temp > MinusRetThwCpu) {
                tShhwtstamps.ts_5g_t.ns = temp - MinusRetThwCpu;
                tShhwtstamps.ts_5g_t.s -= i;
                break;
            }
        }
        /* NOTE(review): if the loop exhausts without breaking, s/ns stay
         * unmodified and only the error is logged -- confirm intended. */
        if (temp < MinusRetThwCpu) {
            LOG_ERR("ts_5g_t < MinusRetThwCpu!!!\n");
        }
    }

    if (tShhwtstamps.ts_tsn_t.ns > MinusRetThwCpu) {
        tShhwtstamps.ts_tsn_t.ns -= MinusRetThwCpu;
    } else {
        /* Same borrow loop for the TSN-domain timestamp */
        for (i = 1; i < tShhwtstamps.ts_tsn_t.s + 1; i++) {
            temp = i * S_HOLD + tShhwtstamps.ts_tsn_t.ns;
            if (temp > MinusRetThwCpu) {
                tShhwtstamps.ts_tsn_t.ns = temp - MinusRetThwCpu;
                tShhwtstamps.ts_tsn_t.s -= i;
                break;
            }
        }
        if (temp < MinusRetThwCpu) {
            LOG_ERR("ts_tsn_t < MinusRetThwCpu!!!\n");
        }
    }

    LOG_DEBUG("enter in pkt_rcv_type_event!!!!\n");
    LOG_DEBUG("tShhwtstamps.ts_5g_t.s = %llu, tShhwtstamps.ts_5g_t.ns = %d\n",
              tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns);
    LOG_DEBUG("tShhwtstamps.ts_tsn_t.s = %llu, tShhwtstamps.ts_tsn_t.ns = %d\n",
              tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns);

    /* Attach both 80-bit timestamps to the skb so they travel back to the
     * application in the socket cmsg along with the packet */
    ptSkbSharedInfo->hwtstamps.hwtstamp =
        ktime_set(tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns);
#ifdef CGEL_TSTAMP_2_PATCH_EN
    ptSkbSharedInfo->hwtstamps2.hwtstamp =
        ktime_set(tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns);
#endif /* CGEL_TSTAMP_2_PATCH_EN */
#endif
    return PTP_RET_SUCCESS;
}
+
+/**
+ * @fn read_ts_match_info
+ * @brief 查询时间戳匹配信息,查询到匹配信息后更新cf字段和本地时间戳信息
+ * @param msgType ptp事件报文类型
+ * @return 返回值为0表示查询时间戳匹配信息成功
+ */
+int32_t read_ts_match_info(uint32_t msgType, uint8_t *ptpHdr)
+{
+ uint32_t mssageType = 0;
+ int32_t cfNum = 0;
+ uint32_t srcPortIdFifo = 0;
+ uint32_t sequeIdFifo = 0;
+ struct ptpHdr_t *ptPtpHdr = NULL;
+ uint32_t matchInfo = 0;
+ uint8_t srcPortId = 0;
+ uint64_t cfVal = 0;
+
+ ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
+
+ CHECK_EQUAL_ERR(ptPtpHdr, NULL, -EADDRNOTAVAIL, "tPtpBuff is NULL\n");
+
+ srcPortId = *(uint8_t *)(ptPtpHdr->srcPortIdentity + SRCPORTID_LEN -
+ 1); /* 只取srcPortIdentity最后一字节值 */
+
+ for (cfNum = 0; cfNum < tGlobalPtpBuff.cfCount; cfNum++) {
+ matchInfo = tGlobalPtpBuff.ptpRegInfo[cfNum].matchInfo;
+ mssageType = (matchInfo >> MSGTYPE_OFFSET) & 0xf;
+ srcPortIdFifo = (matchInfo >> SRCPORTID_OFFSET) & 0xf;
+ sequeIdFifo = htons(matchInfo & 0xffff);
+
+ if ((mssageType == msgType) && (srcPortIdFifo == (srcPortId & 0xf)) &&
+ (sequeIdFifo == ptPtpHdr->sequenceId)) {
+ LOG_DEBUG("read the match info successfully!!!\n");
+ LOG_DEBUG("mssageType: %u, srcPortIdFifo: %u, sequeIdFifo: %u\n",
+ mssageType, srcPortIdFifo, sequeIdFifo);
+ memcpy(&cfVal, &(tGlobalPtpBuff.ptpRegInfo[cfNum].cfVal[0]),
+ CF_SIZE);
+ cfVal = htonll(cfVal);
+ memcpy(&(ptPtpHdr->correctionField[0]), &cfVal, CF_SIZE);
+
+ /* 将匹配到的信息从本地buff去除 */
+ memcpy(&(tGlobalPtpBuff.ptpRegInfo[cfNum]),
+ &(tGlobalPtpBuff.ptpRegInfo[cfNum + 1]),
+ (MAX_PTP_REG_INFO_NUM - cfNum) *
+ sizeof(struct ptp_reg_info));
+ tGlobalPtpBuff.cfCount--;
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+#ifdef PTP_DRIVER_INTERFACE_EN
+extern int32_t get_event_ts_info(struct zxdh_en_device *en_dev,
+ struct ptp_buff *p_tsInfo, int32_t mac_number);
+#endif /* PTP_DRIVER_INTERFACE_EN */
+
+/**
+ * @fn general_encrypt_msg_proc
+ * @brief 使用两步法,获取、存储和处理不同的ptp加密事件报文的时间戳信息
+ * @param msgType ptp事件报文类型
+ */
+int32_t general_encrypt_msg_proc(uint32_t msgType, uint8_t *ptpHdr,
+ struct zxdh_en_device *en_dev)
+{
+ int32_t num = 0;
+ int32_t macNum = 0;
+ int32_t ret = 0;
+ struct ptpHdr_t *ptPtpHdr = NULL;
+ struct ptp_buff tempBuff;
+
+ memset(&tempBuff, 0, sizeof(struct ptp_buff));
+ ptPtpHdr = (struct ptpHdr_t *)ptpHdr;
+
+ /* 判断报文是否是加密报文 */
+ if (!(0x0080 == ((ptPtpHdr->flagField) & 0x0080))) {
+ return ret;
+ }
+
+ macNum = zxdh_pf_macpcs_num_get(en_dev);
+ if (macNum < 0) {
+ LOG_ERR("get mac num %d err, its value should is 0-2!\n", macNum);
+ return -1;
+ }
+
+ // LOG_INFO("ptp buff:\n ");
+ // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff));
+
+ /* 1、从本地buff查询和处理时间戳匹配信息,并更新本地buff */
+ ret = read_ts_match_info(msgType, ptpHdr);
+
+ /* 2、从本地没匹配到信息,则读取FIFO中信息,将读取到的信息更新到本地,重新匹配
+ */
+ if (ret != 0) {
+ // LOG_INFO("cannot read the matchInfo from the BUFF!---------------");
+
+#ifdef PTP_DRIVER_INTERFACE_EN
+ ret = get_event_ts_info(en_dev, &tempBuff, macNum);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT,
+ "read FIFO form ptpDriver failed!!!");
+#endif /* PTP_DRIVER_INTERFACE_EN */
+
+ /* 2.1 将读取到到的FIFO信息,添加到本地全局buff */
+ if (tempBuff.cfCount > 0) {
+ if (tempBuff.cfCount + tGlobalPtpBuff.cfCount <
+ MAX_PTP_REG_INFO_NUM) {
+ memcpy(&(tGlobalPtpBuff.ptpRegInfo[tGlobalPtpBuff.cfCount]),
+ tempBuff.ptpRegInfo,
+ sizeof(struct ptp_reg_info) * tempBuff.cfCount);
+
+ tGlobalPtpBuff.cfCount += tempBuff.cfCount;
+ // LOG_INFO("tGlobalPtpBuff.cfCount: %u\n", tGlobalPtpBuff.cfCount);
+ /* 当超过64组时间戳信息时 */
+ } else {
+ num = tempBuff.cfCount + tGlobalPtpBuff.cfCount -
+ MAX_PTP_REG_INFO_NUM;
+
+ /* 丢弃掉最先存在本地的信息(此信息更大的概率匹配不上) */
+ memcpy(&(tGlobalPtpBuff.ptpRegInfo[0]),
+ &(tGlobalPtpBuff.ptpRegInfo[num]),
+ sizeof(struct ptp_reg_info) *
+ (MAX_PTP_REG_INFO_NUM - num));
+ tGlobalPtpBuff.cfCount -= num;
+
+ /* 添加新的信息到本地 */
+ memcpy(&(tGlobalPtpBuff.ptpRegInfo[tGlobalPtpBuff.cfCount]),
+ tempBuff.ptpRegInfo,
+ sizeof(struct ptp_reg_info) * tempBuff.cfCount);
+ tGlobalPtpBuff.cfCount = MAX_PTP_REG_INFO_NUM;
+ }
+
+ /* 2.2 在更新后的本地全局buff查询和处理匹配信息,并更新本地buff*/
+ ret = read_ts_match_info(msgType, ptpHdr);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT,
+ "cannot read the matchInfo from the local BUFF!");
+ }
+ }
+
+ // LOG_INFO("ptp buff:\n ");
+ // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff));
+
+ return ret;
+}
+
+int32_t pkt_proc_type_follow_up(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ int32_t ret = 0;
+ ret = general_encrypt_msg_proc(PTP_MSG_TYPE_SYNC, ptpHdr, en_dev);
+
+ return ret;
+}
+
+int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ return 0;
+}
+
+int32_t pkt_rcv_type_delay_resp(struct zxdh_net_hdr_rcv *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct skb_shared_info *ptSkbSharedInfo,
+ struct zxdh_en_device *en_dev)
+{
+ int32_t ret = 0;
+ ret = general_encrypt_msg_proc(PTP_MSG_TYPE_DELAY_REQ, ptpHdr, en_dev);
+
+ return ret;
+}
+
+int32_t pkt_proc_type_pdelay_resp_follow_up(
+ struct sk_buff *skb, struct zxdh_net_hdr *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g, struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ int32_t ret = 0;
+ ret = general_encrypt_msg_proc(PTP_MSG_TYPE_PDELAY_RESP, ptpHdr, en_dev);
+
+ return ret;
+}
+
+int32_t pkt_proc_type_announce(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ /* 驱动不做处理 */
+ return 0;
+}
+
+int32_t pkt_proc_type_signaling(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ /* 驱动不做处理 */
+ return 0;
+}
+
+int32_t pkt_proc_type_management(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev)
+{
+ /* 驱动不做处理 */
+ return 0;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h
index 191a8877e804dd8fc54d4de6e87a6d92ed72098c..e2a444c7f9a36ee7cd82467394cad563a022138c 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h
@@ -1,233 +1,192 @@
-/*****************************************************************************
-(C) 2023 ZTE Corporation. 版权所有.
-
-文件名 : en_1588_pkt_proc_func.h
-内容摘要 : 不同数据类型包的处理接口实现
-作者/日期 : Limin / 2023.10.12
-版本 : 1.0
-*****************************************************************************/
-
-#ifndef _EN_1588_PKT_PROC_FUNC_H_
-#define _EN_1588_PKT_PROC_FUNC_H_
-
-#ifdef __cplusplus
- extern "C" {
-#endif /* __cplusplus */
-
-#include "../en_aux.h"
-#include "queue.h"
-
-#define PTP_REG_INFO_NUM 32
-#define MAX_PTP_REG_INFO_NUM 64
-
-/* MAC FIFO相关定义 */
-#define MSGTYPE_OFFSET 20
-#define SRCPORTID_OFFSET 16
-
-/* PTP报文时间戳处理函数返回值 */
-#define PTP_RET_SUCCESS 0
-#define PTP_RET_TIME_ERR (-1)
-
-/* CF字段ns位和s位长度 */
-#define CF_DECIMAL_NS_SIZE 2
-#define CF_NS_SIZE 6
-#define CF_SIZE 8
-
-/* PTP时间戳长度 */
-#define PTP_TS_5G_LEN 10
-#define PTP_TS_TSN_LEN 10
-#define PTP_REQRECE_TS_LEN 10
-
-/* PTP报文后缀相关字段偏移和长度 */
-#define PTPHDR_FREQUENCY_OFFSET 54
-#define PTPHDR_TSI_OFFSET 86
-#define PTPHDR_TSI_TLV_OFFSET 76
-#define PTPHDR_TSI_TLV_OFFSET_TWO 44
-#define PTPHDR_TSI_TLV_LEN 20
-#define ORIGINTIMESTAMP_LEN 10
-#define FOLLOWUP_TLV_LEN 32
-#define TSITLV_LEN 20
-#define SRCPORTID_LEN 10
-
-/* 80bit时间戳ns位和s位长度 */
-#define S_SIZE 6 /* 高48位 */
-#define NS_SIZE 4 /* 低32位 */
-#define S_HOLD 1000000000L /* 进位阈值,即低32位达到1e9 */
-
-/* pi头中cpu_tx字段,高29位为ns位,低3位为小数ns位 */
-#define CPU_TX_DECIMAL_NS 3
-#define CPU_TX_NS 29
-
-typedef struct
-{
- uint8_t data[S_SIZE + NS_SIZE];
-} Bits80_t;
-
-struct time_stamps
-{
- uint64_t s;
- uint32_t ns;
-};
-
-struct SkbSharedHwtstamps_t
-{
- struct time_stamps ts_5g_t;
- struct time_stamps ts_tsn_t;
-};
-
-struct ptpHdr_t
-{
- uint8_t majorType; /* 高4位为majorSdoId,低4位为msgType */
- uint8_t versionPTP;
- uint16_t msglen;
- uint8_t domainNumber;
- uint8_t minorSdoId;
- uint16_t flagField;
- uint8_t correctionField[CF_SIZE]; /* 高48位为ns位,低16字节为小数ns位 */
- uint32_t msgTypeSpecific; /* 大端 */
- uint8_t srcPortIdentity[SRCPORTID_LEN];
- uint16_t sequenceId;
- uint8_t controlField;
- uint8_t logMsgInterval;
-} __attribute__((packed));
-
-
-struct ptp_reg_info
-{
- uint32_t cfVal[2]; //High寄存器是ns位,Low寄存器的高16bit是ns位,低16bit是ns小数位
- uint32_t matchInfo; //保存的内容是[bit23:0]: {MessageType[23:20], sourcePortIdentity[19:16], sequenceId[15:0]}
-};
-
-struct ptp_buff
-{
- uint32_t cfCount;
- struct ptp_reg_info ptpRegInfo[PTP_REG_INFO_NUM];
-};
-
-struct ptp_update_buff
-{
- uint32_t cfCount;
- struct ptp_reg_info ptpRegInfo[MAX_PTP_REG_INFO_NUM];
-};
-
-/* PTP报文类型枚举 */
-enum
-{
- /* event message types */
- PTP_MSG_TYPE_SYNC = 0,
- PTP_MSG_TYPE_DELAY_REQ,
- PTP_MSG_TYPE_PDELAY_REQ,
- PTP_MSG_TYPE_PDELAY_RESP,
-
- /* general message types */
- PTP_MSG_TYPE_FOLLOW_UP = 8,
- PTP_MSG_TYPE_DELAY_RESP,
- PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP,
- PTP_MSG_TYPE_ANNOUNCE,
- PTP_MSG_TYPE_SIGNALING,
- PTP_MSG_TYPE_MANAGEMENT
-};
-
-uint64_t htonll(uint64_t u64_host);
-
-int32_t pkt_proc_type_sync(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_delay_req(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_follow_up(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_pdelay_resp_follow_up(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_announce(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_signaling(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_proc_type_management(struct sk_buff *skb, \
- struct zxdh_net_hdr *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_rcv_type_event(struct zxdh_net_hdr_rcv *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct skb_shared_info *ptSkbSharedInfo,
- struct zxdh_en_device *en_dev);
-
-int32_t pkt_rcv_type_delay_resp(struct zxdh_net_hdr_rcv *hdr, \
- uint8_t *ptpHdr, \
- struct time_stamps *t5g, \
- struct time_stamps *tsn, \
- uint32_t *thw, \
- struct skb_shared_info *ptSkbSharedInfo,
- struct zxdh_en_device *en_dev);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
+/*****************************************************************************
+(C) 2023 ZTE Corporation. 版权所有.
+
+文件名 : en_1588_pkt_proc_func.h
+内容摘要 : 不同数据类型包的处理接口实现
+作者/日期 : Limin / 2023.10.12
+版本 : 1.0
+*****************************************************************************/
+
+#ifndef _EN_1588_PKT_PROC_FUNC_H_
+#define _EN_1588_PKT_PROC_FUNC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "../en_aux.h"
+#include "queue.h"
+
+#define PTP_REG_INFO_NUM 32
+#define MAX_PTP_REG_INFO_NUM 64
+
+/* MAC FIFO相关定义 */
+#define MSGTYPE_OFFSET 20
+#define SRCPORTID_OFFSET 16
+
+/* PTP报文时间戳处理函数返回值 */
+#define PTP_RET_SUCCESS 0
+#define PTP_RET_TIME_ERR (-1)
+
+/* CF字段ns位和s位长度 */
+#define CF_DECIMAL_NS_SIZE 2
+#define CF_NS_SIZE 6
+#define CF_SIZE 8
+
+/* PTP时间戳长度 */
+#define PTP_TS_5G_LEN 10
+#define PTP_TS_TSN_LEN 10
+#define PTP_REQRECE_TS_LEN 10
+
+/* PTP报文后缀相关字段偏移和长度 */
+#define PTPHDR_FREQUENCY_OFFSET 54
+#define PTPHDR_TSI_OFFSET 86
+#define PTPHDR_TSI_TLV_OFFSET 76
+#define PTPHDR_TSI_TLV_OFFSET_TWO 44
+#define PTPHDR_TSI_TLV_LEN 20
+#define ORIGINTIMESTAMP_LEN 10
+#define FOLLOWUP_TLV_LEN 32
+#define TSITLV_LEN 20
+#define SRCPORTID_LEN 10
+
+/* 80bit时间戳ns位和s位长度 */
+#define S_SIZE 6 /* 高48位 */
+#define NS_SIZE 4 /* 低32位 */
+#define S_HOLD 1000000000L /* 进位阈值,即低32位达到1e9 */
+
+/* pi头中cpu_tx字段,高29位为ns位,低3位为小数ns位 */
+#define CPU_TX_DECIMAL_NS 3
+#define CPU_TX_NS 29
+
typedef struct {
    /* Raw 80-bit (10-byte) wire-format timestamp: 48-bit seconds followed
     * by 32-bit nanoseconds, big-endian. */
    uint8_t data[S_SIZE + NS_SIZE];
} Bits80_t;

struct time_stamps {
    uint64_t s;  /* seconds */
    uint32_t ns; /* nanoseconds */
};

struct SkbSharedHwtstamps_t {
    struct time_stamps ts_5g_t;  /* 5G-domain hardware timestamp */
    struct time_stamps ts_tsn_t; /* TSN-domain hardware timestamp */
};

struct ptpHdr_t {
    uint8_t majorType; /* high 4 bits majorSdoId, low 4 bits msgType */
    uint8_t versionPTP;
    uint16_t msglen;
    uint8_t domainNumber;
    uint8_t minorSdoId;
    uint16_t flagField;
    uint8_t correctionField[CF_SIZE]; /* high 48 bits ns, low 16 bits fractional ns */
    uint32_t msgTypeSpecific; /* big-endian */
    uint8_t srcPortIdentity[SRCPORTID_LEN];
    uint16_t sequenceId;
    uint8_t controlField;
    uint8_t logMsgInterval;
} __attribute__((packed));

struct ptp_reg_info {
    /* High register holds the ns part; in the Low register the upper
     * 16 bits are ns and the lower 16 bits are fractional ns. */
    uint32_t cfVal[2];
    /* Packed match key, [bit23:0]: {MessageType[23:20],
     * sourcePortIdentity[19:16], sequenceId[15:0]} */
    uint32_t matchInfo;
};

struct ptp_buff {
    uint32_t cfCount; /* number of valid entries in ptpRegInfo[] */
    struct ptp_reg_info ptpRegInfo[PTP_REG_INFO_NUM];
};

struct ptp_update_buff {
    uint32_t cfCount; /* number of valid entries in ptpRegInfo[] */
    struct ptp_reg_info ptpRegInfo[MAX_PTP_REG_INFO_NUM];
};

/* PTP message type enumeration */
enum {
    /* event message types */
    PTP_MSG_TYPE_SYNC = 0,
    PTP_MSG_TYPE_DELAY_REQ,
    PTP_MSG_TYPE_PDELAY_REQ,
    PTP_MSG_TYPE_PDELAY_RESP,

    /* general message types */
    PTP_MSG_TYPE_FOLLOW_UP = 8,
    PTP_MSG_TYPE_DELAY_RESP,
    PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP,
    PTP_MSG_TYPE_ANNOUNCE,
    PTP_MSG_TYPE_SIGNALING,
    PTP_MSG_TYPE_MANAGEMENT
};
+
+uint64_t htonll(uint64_t u64_host);
+
+int32_t pkt_proc_type_sync(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_delay_req(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_follow_up(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_pdelay_resp_follow_up(
+ struct sk_buff *skb, struct zxdh_net_hdr *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g, struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_announce(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_signaling(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_proc_type_management(struct sk_buff *skb, struct zxdh_net_hdr *hdr,
+ uint8_t *ptpHdr, struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_rcv_type_event(struct zxdh_net_hdr_rcv *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g, struct time_stamps *tsn,
+ uint32_t *thw,
+ struct skb_shared_info *ptSkbSharedInfo,
+ struct zxdh_en_device *en_dev);
+
+int32_t pkt_rcv_type_delay_resp(struct zxdh_net_hdr_rcv *hdr, uint8_t *ptpHdr,
+ struct time_stamps *t5g,
+ struct time_stamps *tsn, uint32_t *thw,
+ struct skb_shared_info *ptSkbSharedInfo,
+ struct zxdh_en_device *en_dev);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
#endif /* _EN_1588_PKT_PROC_FUNC_H_ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.c b/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.c
index c80e40206f86aaac42f05e2336954172505fdeae..7d1f1e4f99b7c71d3bef5ac563aaada42ca65ddc 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.c
@@ -5,1556 +5,1454 @@
#include
#include
#include
-
#include
#include "../en_aux.h"
#include "../en_np/table/include/dpp_tbl_api.h"
#include "../msg_common.h"
#include "en_cmd.h"
-static int32_t get_common_table_msg(struct zxdh_en_device *en_dev, uint16_t pcie_id, uint8_t field, void *ack);
-static int32_t write_queue_index_to_message(struct zxdh_en_device *en_dev, uint32_t queue_nums,
- uint32_t field, uint16_t *bytes, uint16_t *data)
+static int32_t get_common_table_msg(struct zxdh_en_device *en_dev,
+ uint16_t pcie_id, uint8_t field, void *ack);
+static int32_t write_queue_index_to_message(struct zxdh_en_device *en_dev,
+ uint32_t queue_nums, uint32_t field,
+ uint16_t *bytes, uint16_t *data)
{
- uint32_t ix = 0;
- uint16_t old_queue_nums = 0;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- if (OP_CODE_DATA_CHAN == field)
- {
- *bytes = (uint16_t)((queue_nums + 1) * ZXDH_QS_PAIRS);
- data[0] = (uint16_t)queue_nums;
-
- for (ix = 0; ix < queue_nums; ix = ix + ZXDH_QS_PAIRS)
- {
- data[ix + 1] = en_dev->rq[ix / ZXDH_QS_PAIRS].vq->phy_index;
- data[ix + 2] = en_dev->sq[ix / ZXDH_QS_PAIRS].vq->phy_index;
- }
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- ret = get_common_table_msg(en_dev, en_dev->pcie_id, OP_CODE_DATA_CHAN, &msg);
- if (ret != 0)
- {
- LOG_ERR("Failed to get data queue: %d\n", ret);
- return ret;
- }
-
- old_queue_nums = msg.reps.cmn_vq_msg.queue_nums;
- if (old_queue_nums == 0)
- {
- return 0;
- }
-
- if ((old_queue_nums + queue_nums) > 256)
- {
- LOG_ERR("Exceeded the maximum number of queues, old_queue_nums(%d)\n", old_queue_nums);
- return -1;
- }
-
- *bytes = (uint16_t)((queue_nums + old_queue_nums + 1) * ZXDH_QS_PAIRS);
- data[0] = (uint16_t)(queue_nums + old_queue_nums);
- memcpy(data + queue_nums + 1, msg.reps.cmn_vq_msg.phy_qidx, old_queue_nums * ZXDH_QS_PAIRS);
-
- for (ix = 1; ix <= (queue_nums + old_queue_nums); ix++)
- {
- LOG_INFO("vq phy_qid: %d ", data[ix]);
- }
- }
- }
+ uint32_t ix = 0;
+ zxdh_reps_info reps = { 0 };
+ uint16_t old_queue_nums = 0;
+ int32_t ret = 0;
+
+ if (OP_CODE_DATA_CHAN == field) {
+ *bytes = (uint16_t)((queue_nums + 1) * ZXDH_QS_PAIRS);
+ data[0] = (uint16_t)queue_nums;
+
+ for (ix = 0; ix < queue_nums; ix = ix + ZXDH_QS_PAIRS) {
+ data[ix + 1] = en_dev->rq[ix / ZXDH_QS_PAIRS].vq->phy_index;
+ data[ix + 2] = en_dev->sq[ix / ZXDH_QS_PAIRS].vq->phy_index;
+ }
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ ret = get_common_table_msg(en_dev, en_dev->pcie_id,
+ OP_CODE_DATA_CHAN, &reps);
+ if (ret != 0) {
+ LOG_ERR("Failed to get data queue: %d\n", ret);
+ return ret;
+ }
+
+ old_queue_nums = reps.cmn_vq_msg.queue_nums;
+ if (old_queue_nums == 0) {
+ return 0;
+ }
+
+ LOG_INFO("old queue nums %d: ", old_queue_nums);
+ for (ix = 0; ix < old_queue_nums; ix++) {
+ LOG_INFO("%d ", reps.cmn_vq_msg.phy_qidx[ix]);
+ }
+
+ if ((old_queue_nums + queue_nums) > 256) {
+ LOG_ERR("Exceeded the maximum number of queues, old_queue_nums(%d)\n",
+ old_queue_nums);
+ return -1;
+ }
+
+ *bytes = (uint16_t)((queue_nums + old_queue_nums + 1) *
+ ZXDH_QS_PAIRS);
+ data[0] = (uint16_t)(queue_nums + old_queue_nums);
+ memcpy(data + queue_nums + 1, reps.cmn_vq_msg.phy_qidx,
+ old_queue_nums * ZXDH_QS_PAIRS);
+
+ for (ix = 1; ix <= (queue_nums + old_queue_nums); ix++) {
+ LOG_INFO("%d ", data[ix]);
+ }
+ }
+ }
#ifdef ZXDH_MSGQ
- else if (OP_CODE_MSGQ_CHAN == field)
- {
- *bytes = (uint16_t)(queue_nums * ZXDH_QS_PAIRS);
- data[0] = en_dev->rq[en_dev->curr_queue_pairs].vq->phy_index;
- data[1] = en_dev->sq[en_dev->curr_queue_pairs].vq->phy_index;
- }
+ else if (OP_CODE_MSGQ_CHAN == field) {
+ *bytes = (uint16_t)(queue_nums * ZXDH_QS_PAIRS);
+ data[0] = en_dev->rq[en_dev->curr_queue_pairs].vq->phy_index;
+ data[1] = en_dev->sq[en_dev->curr_queue_pairs].vq->phy_index;
+ }
#endif
- return 0;
+ return 0;
}
-static int32_t cmd_tbl_messgae_to_riscv_send(struct zxdh_en_device *en_dev, void *payload, uint32_t pld_len)
+static int32_t cmd_tbl_messgae_to_riscv_send(struct zxdh_en_device *en_dev,
+ void *payload, uint32_t pld_len)
{
- int32_t ret = 0;
- struct cmd_hdr_recv *hdr_recv;
- struct cmd_tbl_ack cmd_tbl_ack = {0};
-
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_TBL, payload, &cmd_tbl_ack, true);
- if (0 != ret)
- {
- LOG_ERR("en_dev->ops->msg_send_cmd failed\n");
- goto out;
- }
-
- hdr_recv =(struct cmd_hdr_recv*)&cmd_tbl_ack;
- if (hdr_recv->check != OP_CODE_TBL_STAT)
- {
- LOG_ERR("tbl init message recv check failed\n");
- ret = -1;
- }
+ int32_t ret = 0;
+ struct cmd_hdr_recv *hdr_recv;
+ struct cmd_tbl_ack cmd_tbl_ack = { 0 };
+
+ ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_TBL, payload,
+ &cmd_tbl_ack, true);
+ if (0 != ret) {
+ LOG_ERR("en_dev->ops->msg_send_cmd failed\n");
+ goto out;
+ }
+
+ hdr_recv = (struct cmd_hdr_recv *)&cmd_tbl_ack;
+ if (hdr_recv->check != OP_CODE_TBL_STAT) {
+ LOG_ERR("tbl init message recv check failed\n");
+ ret = -1;
+ }
out:
- return ret;
+ return ret;
}
-static int32_t cmd_common_tbl_init(struct zxdh_en_device *en_dev, uint32_t queue_nums, uint32_t field)
+static int32_t cmd_common_tbl_init(struct zxdh_en_device *en_dev,
+ uint32_t queue_nums, uint32_t field)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- if ((2 * ZXDH_MAX_PAIRS_NUM) < queue_nums)
- {
- LOG_ERR("queue pairs %u out of range\n", queue_nums);
- return -ENOMEM;
- }
-
- msg.payload.hdr_to_cmn.field = field;
- msg.payload.hdr_to_cmn.type = OP_CODE_WRITE;
- msg.payload.hdr_to_cmn.pcie_id = en_dev->pcie_id;
- ret = write_queue_index_to_message(en_dev, queue_nums, field, \
- &msg.payload.hdr_to_cmn.write_bytes, msg.payload.cmn_tbl_msg);
- if (0 != ret)
- {
- LOG_ERR("write_queue_index_to_message failed, ret: %d\n", ret);
- return ret;
- }
-
- // 执行message send
- ret = cmd_tbl_messgae_to_riscv_send(en_dev, &msg, MSG_STRUCT_HD_LEN + msg.payload.hdr_to_cmn.write_bytes);
- if (0 != ret)
- {
- LOG_ERR("zxdh_bar_chan_sync_msg_send failed, ret: %d\n", ret);
- }
-
- return ret;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+
+ if ((2 * ZXDH_MAX_PAIRS_NUM) < queue_nums) {
+ LOG_ERR("queue pairs %u out of range\n", queue_nums);
+ return -ENOMEM;
+ }
+
+ msg.hdr_to_cmn.field = field;
+ msg.hdr_to_cmn.type = OP_CODE_WRITE;
+ msg.hdr_to_cmn.pcie_id = en_dev->pcie_id;
+ ret = write_queue_index_to_message(en_dev, queue_nums, field,
+ &msg.hdr_to_cmn.write_bytes,
+ msg.cmn_tbl_msg);
+ if (0 != ret) {
+ LOG_ERR("write_queue_index_to_message failed, ret: %d\n", ret);
+ return ret;
+ }
+
+    // Perform the message send
+ ret = cmd_tbl_messgae_to_riscv_send(
+ en_dev, &msg, MSG_STRUCT_HD_LEN + msg.hdr_to_cmn.write_bytes);
+ if (0 != ret) {
+ LOG_ERR("zxdh_bar_chan_sync_msg_send failed, ret: %d\n", ret);
+ }
+
+ return ret;
}
int32_t zxdh_common_tbl_init(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
#ifdef ZXDH_MSGQ
- NEED_MSGQ(en_dev)
- {
- ret = cmd_common_tbl_init(en_dev, ZXDH_QS_PAIRS, OP_CODE_MSGQ_CHAN);
- if (0 != ret)
- {
- LOG_ERR("field msgq message failed\n");
- return -1;
- }
- }
+ if (en_dev->need_msgq) {
+ ret = cmd_common_tbl_init(en_dev, ZXDH_QS_PAIRS, OP_CODE_MSGQ_CHAN);
+ if (0 != ret) {
+ LOG_ERR("field msgq message failed\n");
+ return -1;
+ }
+ }
#endif
- ret = cmd_common_tbl_init(en_dev, en_dev->curr_queue_pairs * ZXDH_QS_PAIRS, OP_CODE_DATA_CHAN);
- if (0 != ret)
- {
- LOG_ERR("field data message failed\n");
- return -1;
- }
+ ret = cmd_common_tbl_init(en_dev, en_dev->curr_queue_pairs * ZXDH_QS_PAIRS,
+ OP_CODE_DATA_CHAN);
+ if (0 != ret) {
+ LOG_ERR("field data message failed\n");
+ return -1;
+ }
- return 0;
+ return 0;
}
-int32_t zxdh_send_command_to_specify(struct zxdh_en_device *en_dev, uint16_t module_id, void *msg, void *ack)
+int32_t zxdh_send_command_to_specify(struct zxdh_en_device *en_dev,
+ uint16_t module_id, void *msg, void *ack)
{
- struct dh_core_dev *dh_dev = en_dev->parent;
+ struct dh_core_dev *dh_dev = en_dev->parent;
- return en_dev->ops->msg_send_cmd(dh_dev, module_id, msg, ack, true);
+ return en_dev->ops->msg_send_cmd(dh_dev, module_id, msg, ack, true);
}
-static int32_t get_common_table_msg(struct zxdh_en_device *en_dev, uint16_t pcie_id, \
- uint8_t field, void *ack)
+static int32_t get_common_table_msg(struct zxdh_en_device *en_dev,
+ uint16_t pcie_id, uint8_t field, void *ack)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
- msg.payload.hdr_to_cmn.type = RISC_TYPE_READ;
- msg.payload.hdr_to_cmn.field = field;
- msg.payload.hdr_to_cmn.pcie_id = pcie_id;
- msg.payload.hdr_to_cmn.write_bytes = 0;
+ msg.hdr_to_cmn.type = RISC_TYPE_READ;
+ msg.hdr_to_cmn.field = field;
+ msg.hdr_to_cmn.pcie_id = pcie_id;
+ msg.hdr_to_cmn.write_bytes = 0;
- return zxdh_send_command_to_specify(en_dev, MODULE_TBL, &msg, ack);
+ return zxdh_send_command_to_specify(en_dev, MODULE_TBL, &msg, ack);
}
int32_t zxdh_hash_id_get(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- ret = get_common_table_msg(en_dev, en_dev->pcie_id, RISC_FIELD_HASHID_CHANNEL, &msg);
- if(ret != 0)
- {
- LOG_ERR("get own hash_id failed: %d\n", ret);
- return ret;
- }
-
- en_dev->hash_search_idx = msg.reps.cmn_recv_msg.value;
- LOG_DEBUG("hash_id: %u\n", en_dev->hash_search_idx);
- if (en_dev->hash_search_idx > ZXDH_MAX_HASH_INDEX)
- {
- LOG_ERR("hash_id is invalid value: %u\n", en_dev->hash_search_idx);
- return -EINVAL;
- }
- if (en_dev->hash_search_idx == ZXDH_MAX_HASH_INDEX) //TODO:if should be delete
- {
- en_dev->hash_search_idx = 1;
- }
-
- return ret;
+ int32_t ret = 0;
+ zxdh_reps_info reps = { 0 };
+
+ ret = get_common_table_msg(en_dev, en_dev->pcie_id,
+ RISC_FIELD_HASHID_CHANNEL, &reps);
+ if (ret != 0) {
+ LOG_ERR("get own hash_id failed: %d\n", ret);
+ return ret;
+ }
+
+ en_dev->hash_search_idx = reps.cmn_recv_msg.value;
+ LOG_DEBUG("hash_id: %u\n", en_dev->hash_search_idx);
+ if (en_dev->hash_search_idx > ZXDH_MAX_HASH_INDEX) {
+ LOG_ERR("hash_id is invalid value: %u\n", en_dev->hash_search_idx);
+ return -EINVAL;
+ }
+ // TODO:if should be delete
+ if (en_dev->hash_search_idx == ZXDH_MAX_HASH_INDEX) {
+ en_dev->hash_search_idx = 1;
+ }
+
+ return ret;
}
int32_t zxdh_phyport_get(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- ret = get_common_table_msg(en_dev, en_dev->pcie_id, RISC_FIELD_PHYPORT_CHANNEL, &msg);
- if(ret != 0)
- {
- LOG_ERR("get own phyport failed: %d\n", ret);
- return ret;
- }
-
- en_dev->phy_port = msg.reps.cmn_recv_msg.value;
- if (en_dev->phy_port == INVALID_PHY_PORT)
- {
- LOG_ERR("get phy_port failed\n");
- return -EINVAL;
- }
- en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port);
- LOG_DEBUG("phy_port: %u\n", en_dev->phy_port);
-
- return ret;
+ int32_t ret = 0;
+ zxdh_reps_info reps = { 0 };
+
+ ret = get_common_table_msg(en_dev, en_dev->pcie_id,
+ RISC_FIELD_PHYPORT_CHANNEL, &reps);
+ if (ret != 0) {
+ LOG_ERR("get own phyport failed: %d\n", ret);
+ return ret;
+ }
+
+ en_dev->phy_port = reps.cmn_recv_msg.value;
+ if (en_dev->phy_port == INVALID_PHY_PORT) {
+ LOG_ERR("get phy_port failed\n");
+ return -EINVAL;
+ }
+ en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port);
+ LOG_DEBUG("phy_port: %u\n", en_dev->phy_port);
+
+ return ret;
}
int32_t zxdh_panel_id_get(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- ret = get_common_table_msg(en_dev, en_dev->pcie_id, RISC_FIELD_PANEL_ID, &msg);
- if(ret != 0)
- {
- LOG_ERR("get own phyport failed: %d\n", ret);
- return ret;
- }
-
- en_dev->panel_id = msg.reps.cmn_recv_msg.value;
- if (en_dev->panel_id > MAX_PANEL_ID)
- {
- LOG_ERR("get panel_id failed, panel_id: %u\n", en_dev->panel_id);
- return -EINVAL;
- }
- LOG_DEBUG("panel_id: %u\n", en_dev->panel_id);
-
- return ret;
+ int32_t ret = 0;
+ zxdh_reps_info reps = { 0 };
+
+ ret = get_common_table_msg(en_dev, en_dev->pcie_id, RISC_FIELD_PANEL_ID,
+ &reps);
+ if (ret != 0) {
+ LOG_ERR("get own phyport failed: %d\n", ret);
+ return ret;
+ }
+
+ en_dev->panel_id = reps.cmn_recv_msg.value;
+ if (en_dev->panel_id > MAX_PANEL_ID) {
+ LOG_ERR("get panel_id failed, panel_id: %u\n", en_dev->panel_id);
+ return -EINVAL;
+ }
+ LOG_DEBUG("panel_id: %u\n", en_dev->panel_id);
+
+ return ret;
}
int32_t zxdh_pf_macpcs_num_get(struct zxdh_en_device *en_dev)
{
- int32_t phy_port = 0;
- int32_t mac_num = 0; //0-2
-
- phy_port = en_dev->phy_port;
-
- if (phy_port < 4)
- {
- mac_num = 0;
- }
- else if (phy_port < 8)
- {
- mac_num = 1;
- }
- else if (phy_port < 10)
- {
- mac_num = 2;
- }
- else
- {
- LOG_ERR("phy_port(%d) err, not in 0-9!!\n", phy_port);
- mac_num = -1;
- return mac_num;
- }
-
- LOG_DEBUG("mac_num: %d\n", mac_num);
- return mac_num;
+ int32_t phy_port = 0;
+ int32_t mac_num = 0; // 0-2
+
+ phy_port = en_dev->phy_port;
+
+ if (phy_port < 4) {
+ mac_num = 0;
+ } else if (phy_port < 8) {
+ mac_num = 1;
+ } else if (phy_port < 10) {
+ mac_num = 2;
+ } else {
+ LOG_ERR("phy_port(%d) err, not in 0-9!!\n", phy_port);
+ mac_num = -1;
+ return mac_num;
+ }
+
+ LOG_DEBUG("mac_num: %d\n", mac_num);
+ return mac_num;
}
int32_t zxdh_pf_link_state_get(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_LINK_INFO_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_pf_link_state_get failed: %d\n", ret);
- return ret;
- }
- en_dev->ops->set_pf_link_up(en_dev->parent, msg.reps.mac_set_msg.link_state);
- LOG_INFO("link_up is %s\n", msg.reps.mac_set_msg.link_state ? "true" : "false");
- if(msg.reps.mac_set_msg.link_state)
- {
- netif_carrier_on(en_dev->netdev);
- }
- else
- {
- netif_carrier_off(en_dev->netdev);
- }
- return ret;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_LINK_INFO_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_link_state_get failed: %d\n", ret);
+ return ret;
+ }
+ en_dev->ops->set_pf_link_up(en_dev->parent, reps.mac_set_msg.link_state);
+ LOG_INFO("link_up is %s\n", reps.mac_set_msg.link_state ? "true" : "false");
+ if (reps.mac_set_msg.link_state) {
+ netif_carrier_on(en_dev->netdev);
+ } else {
+ netif_carrier_off(en_dev->netdev);
+ }
+ return ret;
}
-int32_t zxdh_lldp_enable_set(struct zxdh_en_device *en_dev,bool lldp_enable)
+int32_t zxdh_lldp_enable_set(struct zxdh_en_device *en_dev, bool lldp_enable)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_SET;
- msg.payload.hdr_to_agt.port_id = en_dev->panel_id;
+ msg.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_SET;
+ msg.hdr_to_agt.port_id = en_dev->panel_id;
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- msg.payload.hdr_to_agt.port_id = en_dev->pannel_id;
- }
- msg.payload.lldp_msg.lldp_enable = lldp_enable;
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ msg.hdr_to_agt.port_id = en_dev->pannel_id;
+ }
+ msg.lldp_msg.lldp_enable = lldp_enable;
- return zxdh_send_command_to_specify(en_dev, MODULE_DEBUG, &msg, &msg);
+ return zxdh_send_command_to_specify(en_dev, MODULE_DEBUG, &msg, &reps);
}
int32_t zxdh_sshd_enable_set(struct zxdh_en_device *en_dev, bool sshd_enable)
{
- union zxdh_msg msg = {0};
-
- if (sshd_enable)
- {
- msg.payload.hdr_to_agt.op_code = AGENT_SSHD_START;
- }
- else
- {
- msg.payload.hdr_to_agt.op_code = AGENT_SSHD_STOP;
- }
-
- LOG_INFO("event id is %d\n", MODULE_LOGIN_CTRL);
- return zxdh_send_command_to_specify(en_dev, MODULE_LOGIN_CTRL, &msg, &msg);
-}
-
-int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev, uint32_t *lldp_enable)
-{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_GET;
- msg.payload.hdr_to_agt.port_id = en_dev->panel_id;
+ if (sshd_enable) {
+ msg.hdr_to_agt.op_code = AGENT_SSHD_START;
+ } else {
+ msg.hdr_to_agt.op_code = AGENT_SSHD_STOP;
+ }
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- msg.payload.hdr_to_agt.port_id = en_dev->pannel_id;
- }
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_DEBUG, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_lldp_enable_get failed: %d\n", ret);
- return ret;
- }
-
- *lldp_enable = (uint32_t)(msg.reps.debug_lldp_msg.lldp_status);
-
- return ret;
+ LOG_INFO("event id is %d\n", MODULE_LOGIN_CTRL);
+ return zxdh_send_command_to_specify(en_dev, MODULE_LOGIN_CTRL, &msg, &reps);
}
-int32_t zxdh_riscv_os_type_get(struct zxdh_en_device *en_dev, uint8_t *is_zios)
+int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev,
+ uint32_t *lldp_enable)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- msg.payload.hdr_to_agt.op_code = AGENT_OS_TYPE_GET;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ msg.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_GET;
+ msg.hdr_to_agt.port_id = en_dev->panel_id;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_LOGIN_CTRL, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("get bus from riscv failed: %d\n", ret);
- return ret;
- }
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ msg.hdr_to_agt.port_id = en_dev->pannel_id;
+ }
- *is_zios = msg.reps.os_type_msg.is_zios;
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_DEBUG, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_lldp_enable_get failed: %d\n", ret);
+ return ret;
+ }
- return ret;
+ *lldp_enable = (uint32_t)(reps.debug_lldp_msg.lldp_status);
+
+ return ret;
}
int32_t zxdh_ep0_bus_get(struct zxdh_en_device *en_dev, uint8_t *bus_info)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_EP0_BUS_GET;
+ msg.hdr_to_agt.op_code = AGENT_EP0_BUS_GET;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_LOGIN_CTRL, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("get bus from riscv failed: %d\n", ret);
- return ret;
- }
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_LOGIN_CTRL, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("get bus from riscv failed: %d\n", ret);
+ return ret;
+ }
- *bus_info = msg.reps.ep0_bus_msg.bus_info;
+ *bus_info = reps.ep0_bus_msg.bus_info;
- return ret;
+ return ret;
}
int8_t zxdh_debug_ip_get(struct zxdh_en_device *en_dev, int8_t *ip)
{
- int32_t ret = 0;
- uint8_t bus_info = 0;
- int8_t ip_address[20] = {0};
- int8_t bus_str[5]= {0};
+ int32_t ret = 0;
+ uint8_t bus_info = 0;
+ int8_t ip_address[20] = { 0 };
+ int8_t bus_str[5] = { 0 };
- sprintf(ip_address, "26.20.5");
+ sprintf(ip_address, "26.20.5");
- ret = zxdh_ep0_bus_get(en_dev, &bus_info);
- if (ret != 0)
- {
- LOG_ERR("zxdh_ep0_bus_get failed: %d\n", ret);
- return ret;
- }
+ ret = zxdh_ep0_bus_get(en_dev, &bus_info);
+ if (ret != 0) {
+ LOG_ERR("zxdh_ep0_bus_get failed: %d\n", ret);
+ return ret;
+ }
- sprintf(bus_str, ".%d", bus_info);
- strcat(ip_address, bus_str); /* 将bus号添加到IP地址字符串后面 */
- strcpy(ip, ip_address);
+ sprintf(bus_str, ".%d", bus_info);
+    strcat(ip_address, bus_str); /* append the bus number to the IP address string */
+ strcpy(ip, ip_address);
- LOG_INFO("DEBUG IP is: %s\n", ip_address);
+ LOG_INFO("DEBUG IP is: %s\n", ip_address);
- return ret;
+ return ret;
}
#define FLASH_OPEN_FW
-int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev, uint8_t *fw_version, uint8_t *fw_version_len)
+int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev,
+ uint8_t *fw_version,
+ uint8_t *fw_version_len)
{
#ifdef FLASH_OPEN_FW
- int32_t ret = 0;
- union zxdh_msg msg = {0};
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_FLASH_FIR_VERSION_GET;
+ msg.hdr_to_agt.op_code = AGENT_FLASH_FIR_VERSION_GET;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_FLASH, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_send_command_to_specify failed: %d\n", ret);
- return ret;
- }
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_FLASH, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_send_command_to_specify failed: %d\n", ret);
+ return ret;
+ }
- memcpy(fw_version, msg.reps.flash_msg.firmware_version, FW_VERSION_LEN);
- *fw_version_len = FW_VERSION_LEN;
+ memcpy(fw_version, reps.flash_msg.firmware_version, FW_VERSION_LEN);
+ *fw_version_len = FW_VERSION_LEN;
#else
- uint8_t fw_version_test[] = "V2.24.10.01B4";
+ uint8_t fw_version_test[] = "V2.24.10.01B4";
- memcpy(fw_version, fw_version_test, sizeof(fw_version_test));
- *fw_version_len = sizeof(fw_version_test);
+ memcpy(fw_version, fw_version_test, sizeof(fw_version_test));
+ *fw_version_len = sizeof(fw_version_test);
#endif
- return 0;
+ return 0;
}
int32_t do_get_vport_stats(struct zxdh_en_device *en_dev, uint8_t np_mode,
- struct zxdh_en_vport_stats *vport_stats)
+ struct zxdh_en_vport_stats *vport_stats)
{
- union zxdh_msg msg = {0};
- uint64_t rx_broadcast_cnt = 0;
- uint64_t tx_broadcast_cnt = 0;
- uint64_t rx_mtu_drop_pkts_cnt = 0;
- uint64_t tx_mtu_drop_pkts_cnt = 0;
- uint64_t rx_mtu_drop_bytes_cnt = 0;
- uint64_t tx_mtu_drop_bytes_cnt = 0;
- uint64_t rx_plcr_drop_pkts_cnt = 0;
- uint64_t tx_plcr_drop_pkts_cnt = 0;
- uint64_t rx_plcr_drop_bytes_cnt = 0;
- uint64_t tx_plcr_drop_bytes_cnt = 0;
- uint32_t vf_id = GET_VFID(en_dev->vport);
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- msg.payload.hdr_to_agt.op_code = AGENT_VQM_DEVICE_STATS_GET;
- msg.payload.hdr_to_agt.vf_id = vf_id;
- err = zxdh_send_command_to_specify(en_dev, MODULE_VQM, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_vport_stats_get failed, err: %d\n", err);
- return err;
- }
- memcpy(&(vport_stats->vqm_stats), &(msg.reps.stats_msg), sizeof(vport_stats->vqm_stats));
-
- memset(&msg, 0, sizeof(union zxdh_msg));
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- msg.payload.hdr.op_code = ZXDH_GET_NP_STATS;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.vf_id = vf_id;
- msg.payload.np_stats_get_msg.clear_mode = np_mode;
- err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if(err != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf failed: %d\n", err);
- return err;
- }
- rx_broadcast_cnt = msg.reps.np_stats_msg.np_rx_broadcast;
- tx_broadcast_cnt = msg.reps.np_stats_msg.np_tx_broadcast;
- rx_mtu_drop_pkts_cnt = msg.reps.np_stats_msg.np_rx_mtu_drop_pkts;
- tx_mtu_drop_pkts_cnt = msg.reps.np_stats_msg.np_tx_mtu_drop_pkts;
- rx_mtu_drop_bytes_cnt = msg.reps.np_stats_msg.np_rx_mtu_drop_bytes;
- tx_mtu_drop_bytes_cnt = msg.reps.np_stats_msg.np_tx_mtu_drop_bytes;
- rx_plcr_drop_pkts_cnt = msg.reps.np_stats_msg.np_rx_plcr_drop_pkts;
- tx_plcr_drop_pkts_cnt = msg.reps.np_stats_msg.np_tx_plcr_drop_pkts;
- rx_plcr_drop_bytes_cnt = msg.reps.np_stats_msg.np_rx_plcr_drop_bytes;
- tx_plcr_drop_bytes_cnt = msg.reps.np_stats_msg.np_tx_plcr_drop_bytes;
- }
- else
- {
- dpp_stat_port_bc_packet_rx_cnt_get(&pf_info, vf_id, np_mode, &rx_broadcast_cnt);
- dpp_stat_port_bc_packet_tx_cnt_get(&pf_info, vf_id, np_mode, &tx_broadcast_cnt);
- dpp_stat_MTU_packet_msg_rx_cnt_get(&pf_info, vf_id, np_mode, &rx_mtu_drop_bytes_cnt, &rx_mtu_drop_pkts_cnt);
- dpp_stat_MTU_packet_msg_tx_cnt_get(&pf_info, vf_id, np_mode, &tx_mtu_drop_bytes_cnt, &tx_mtu_drop_pkts_cnt);
- dpp_stat_plcr_packet_drop_rx_cnt_get(&pf_info, vf_id, np_mode, &rx_plcr_drop_bytes_cnt, &rx_plcr_drop_pkts_cnt);
- dpp_stat_plcr_packet_drop_tx_cnt_get(&pf_info, vf_id, np_mode, &tx_plcr_drop_bytes_cnt, &tx_plcr_drop_pkts_cnt);
- }
- vport_stats->np_stats.rx_vport_broadcast_packets = rx_broadcast_cnt;
- vport_stats->np_stats.tx_vport_broadcast_packets = tx_broadcast_cnt;
- vport_stats->np_stats.rx_vport_mtu_drop_packets = rx_mtu_drop_pkts_cnt;
- vport_stats->np_stats.tx_vport_mtu_drop_packets = tx_mtu_drop_pkts_cnt;
- vport_stats->np_stats.rx_vport_mtu_drop_bytes = rx_mtu_drop_bytes_cnt;
- vport_stats->np_stats.tx_vport_mtu_drop_bytes = tx_mtu_drop_bytes_cnt;
- vport_stats->np_stats.rx_vport_plcr_drop_packets = rx_plcr_drop_pkts_cnt;
- vport_stats->np_stats.tx_vport_plcr_drop_packets = tx_plcr_drop_pkts_cnt;
- vport_stats->np_stats.rx_vport_plcr_drop_bytes = rx_plcr_drop_bytes_cnt;
- vport_stats->np_stats.tx_vport_plcr_drop_bytes = tx_plcr_drop_bytes_cnt;
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ uint64_t rx_broadcast_cnt = 0;
+ uint64_t tx_broadcast_cnt = 0;
+ uint64_t rx_mtu_drop_pkts_cnt = 0;
+ uint64_t tx_mtu_drop_pkts_cnt = 0;
+ uint64_t rx_mtu_drop_bytes_cnt = 0;
+ uint64_t tx_mtu_drop_bytes_cnt = 0;
+ uint64_t rx_plcr_drop_pkts_cnt = 0;
+ uint64_t tx_plcr_drop_pkts_cnt = 0;
+ uint64_t rx_plcr_drop_bytes_cnt = 0;
+ uint64_t tx_plcr_drop_bytes_cnt = 0;
+ uint32_t vf_id = GET_VFID(en_dev->vport);
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ msg.hdr_to_agt.op_code = AGENT_VQM_DEVICE_STATS_GET;
+ msg.hdr_to_agt.vf_id = vf_id;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_VQM, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_vport_stats_get failed, err: %d\n", err);
+ return err;
+ }
+ memcpy(&(vport_stats->vqm_stats), &(reps.stats_msg),
+ sizeof(vport_stats->vqm_stats));
+
+ memset(&msg, 0, sizeof(zxdh_msg_info));
+ memset(&reps, 0, sizeof(zxdh_reps_info));
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ msg.hdr.op_code = ZXDH_GET_NP_STATS;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.vf_id = vf_id;
+ msg.np_stats_get_msg.clear_mode = np_mode;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_pf failed: %d\n", err);
+ return err;
+ }
+ rx_broadcast_cnt = reps.np_stats_msg.np_rx_broadcast;
+ tx_broadcast_cnt = reps.np_stats_msg.np_tx_broadcast;
+ rx_mtu_drop_pkts_cnt = reps.np_stats_msg.np_rx_mtu_drop_pkts;
+ tx_mtu_drop_pkts_cnt = reps.np_stats_msg.np_tx_mtu_drop_pkts;
+ rx_mtu_drop_bytes_cnt = reps.np_stats_msg.np_rx_mtu_drop_bytes;
+ tx_mtu_drop_bytes_cnt = reps.np_stats_msg.np_tx_mtu_drop_bytes;
+ rx_plcr_drop_pkts_cnt = reps.np_stats_msg.np_rx_plcr_drop_pkts;
+ tx_plcr_drop_pkts_cnt = reps.np_stats_msg.np_tx_plcr_drop_pkts;
+ rx_plcr_drop_bytes_cnt = reps.np_stats_msg.np_rx_plcr_drop_bytes;
+ tx_plcr_drop_bytes_cnt = reps.np_stats_msg.np_tx_plcr_drop_bytes;
+ } else {
+ dpp_stat_port_bc_packet_rx_cnt_get(&pf_info, vf_id, np_mode,
+ &rx_broadcast_cnt);
+ dpp_stat_port_bc_packet_tx_cnt_get(&pf_info, vf_id, np_mode,
+ &tx_broadcast_cnt);
+ dpp_stat_MTU_packet_msg_rx_cnt_get(&pf_info, vf_id, np_mode,
+ &rx_mtu_drop_bytes_cnt,
+ &rx_mtu_drop_pkts_cnt);
+ dpp_stat_MTU_packet_msg_tx_cnt_get(&pf_info, vf_id, np_mode,
+ &tx_mtu_drop_bytes_cnt,
+ &tx_mtu_drop_pkts_cnt);
+ dpp_stat_plcr_packet_drop_rx_cnt_get(&pf_info, vf_id, np_mode,
+ &rx_plcr_drop_bytes_cnt,
+ &rx_plcr_drop_pkts_cnt);
+ dpp_stat_plcr_packet_drop_tx_cnt_get(&pf_info, vf_id, np_mode,
+ &tx_plcr_drop_bytes_cnt,
+ &tx_plcr_drop_pkts_cnt);
+ }
+ vport_stats->np_stats.rx_vport_broadcast_packets = rx_broadcast_cnt;
+ vport_stats->np_stats.tx_vport_broadcast_packets = tx_broadcast_cnt;
+ vport_stats->np_stats.rx_vport_mtu_drop_packets = rx_mtu_drop_pkts_cnt;
+ vport_stats->np_stats.tx_vport_mtu_drop_packets = tx_mtu_drop_pkts_cnt;
+ vport_stats->np_stats.rx_vport_mtu_drop_bytes = rx_mtu_drop_bytes_cnt;
+ vport_stats->np_stats.tx_vport_mtu_drop_bytes = tx_mtu_drop_bytes_cnt;
+ vport_stats->np_stats.rx_vport_plcr_drop_packets = rx_plcr_drop_pkts_cnt;
+ vport_stats->np_stats.tx_vport_plcr_drop_packets = tx_plcr_drop_pkts_cnt;
+ vport_stats->np_stats.rx_vport_plcr_drop_bytes = rx_plcr_drop_bytes_cnt;
+ vport_stats->np_stats.tx_vport_plcr_drop_bytes = tx_plcr_drop_bytes_cnt;
+
+ return err;
}
int32_t zxdh_en_vport_pre_stats_get(struct zxdh_en_device *en_dev)
{
- int32_t err = 0;
- struct zxdh_en_vport_stats *vport_stats = &en_dev->pre_stats;
-
- err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_vport_pre_stat_get failed\n");
- }
- return err;
+ int32_t err = 0;
+ struct zxdh_en_vport_stats *vport_stats = &en_dev->pre_stats;
+
+ err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_vport_pre_stat_get failed\n");
+ }
+ return err;
}
int32_t zxdh_vport_stats_get(struct zxdh_en_device *en_dev)
{
- int32_t err = 0;
- struct zxdh_en_vport_stats *vport_stats = &en_dev->hw_stats.vport_stats;
-
- err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats);
- if(err != 0)
- {
- LOG_ERR("zxdh_vport_stats_get failed\n");
- return err;
- }
-
- vport_stats->vqm_stats.rx_vport_packets -= en_dev->pre_stats.vqm_stats.rx_vport_packets;
- vport_stats->vqm_stats.tx_vport_packets -= en_dev->pre_stats.vqm_stats.tx_vport_packets;
- vport_stats->vqm_stats.rx_vport_bytes -= en_dev->pre_stats.vqm_stats.rx_vport_bytes;
- vport_stats->vqm_stats.tx_vport_bytes -= en_dev->pre_stats.vqm_stats.tx_vport_bytes;
- vport_stats->vqm_stats.rx_vport_dropped -= en_dev->pre_stats.vqm_stats.rx_vport_dropped;
- vport_stats->np_stats.rx_vport_broadcast_packets -= en_dev->pre_stats.np_stats.rx_vport_broadcast_packets;
- vport_stats->np_stats.tx_vport_broadcast_packets -= en_dev->pre_stats.np_stats.tx_vport_broadcast_packets;
- vport_stats->np_stats.rx_vport_mtu_drop_packets -= en_dev->pre_stats.np_stats.rx_vport_mtu_drop_packets;
- vport_stats->np_stats.tx_vport_mtu_drop_packets -= en_dev->pre_stats.np_stats.tx_vport_mtu_drop_packets;
- vport_stats->np_stats.rx_vport_mtu_drop_bytes -= en_dev->pre_stats.np_stats.rx_vport_mtu_drop_bytes;
- vport_stats->np_stats.tx_vport_mtu_drop_bytes -= en_dev->pre_stats.np_stats.tx_vport_mtu_drop_bytes;
- vport_stats->np_stats.rx_vport_plcr_drop_packets -= en_dev->pre_stats.np_stats.rx_vport_plcr_drop_packets;
- vport_stats->np_stats.tx_vport_plcr_drop_packets -= en_dev->pre_stats.np_stats.tx_vport_plcr_drop_packets;
- vport_stats->np_stats.rx_vport_plcr_drop_bytes -= en_dev->pre_stats.np_stats.rx_vport_plcr_drop_bytes;
- vport_stats->np_stats.tx_vport_plcr_drop_bytes -= en_dev->pre_stats.np_stats.tx_vport_plcr_drop_bytes;
- return err;
+ int32_t err = 0;
+ struct zxdh_en_vport_stats *vport_stats = &en_dev->hw_stats.vport_stats;
+
+ err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats);
+ if (err != 0) {
+ LOG_ERR("zxdh_vport_stats_get failed\n");
+ return err;
+ }
+
+ vport_stats->vqm_stats.rx_vport_packets -=
+ en_dev->pre_stats.vqm_stats.rx_vport_packets;
+ vport_stats->vqm_stats.tx_vport_packets -=
+ en_dev->pre_stats.vqm_stats.tx_vport_packets;
+ vport_stats->vqm_stats.rx_vport_bytes -=
+ en_dev->pre_stats.vqm_stats.rx_vport_bytes;
+ vport_stats->vqm_stats.tx_vport_bytes -=
+ en_dev->pre_stats.vqm_stats.tx_vport_bytes;
+ vport_stats->vqm_stats.rx_vport_dropped -=
+ en_dev->pre_stats.vqm_stats.rx_vport_dropped;
+ vport_stats->np_stats.rx_vport_broadcast_packets -=
+ en_dev->pre_stats.np_stats.rx_vport_broadcast_packets;
+ vport_stats->np_stats.tx_vport_broadcast_packets -=
+ en_dev->pre_stats.np_stats.tx_vport_broadcast_packets;
+ vport_stats->np_stats.rx_vport_mtu_drop_packets -=
+ en_dev->pre_stats.np_stats.rx_vport_mtu_drop_packets;
+ vport_stats->np_stats.tx_vport_mtu_drop_packets -=
+ en_dev->pre_stats.np_stats.tx_vport_mtu_drop_packets;
+ vport_stats->np_stats.rx_vport_mtu_drop_bytes -=
+ en_dev->pre_stats.np_stats.rx_vport_mtu_drop_bytes;
+ vport_stats->np_stats.tx_vport_mtu_drop_bytes -=
+ en_dev->pre_stats.np_stats.tx_vport_mtu_drop_bytes;
+ vport_stats->np_stats.rx_vport_plcr_drop_packets -=
+ en_dev->pre_stats.np_stats.rx_vport_plcr_drop_packets;
+ vport_stats->np_stats.tx_vport_plcr_drop_packets -=
+ en_dev->pre_stats.np_stats.tx_vport_plcr_drop_packets;
+ vport_stats->np_stats.rx_vport_plcr_drop_bytes -=
+ en_dev->pre_stats.np_stats.rx_vport_plcr_drop_bytes;
+ vport_stats->np_stats.tx_vport_plcr_drop_bytes -=
+ en_dev->pre_stats.np_stats.tx_vport_plcr_drop_bytes;
+ return err;
}
int32_t zxdh_mac_stats_get(struct zxdh_en_device *en_dev)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_STATS_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_mac_stats_get failed, err: %d\n", err);
- return err;
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- memset(&msg.reps.stats_msg, 0, sizeof(msg.reps.stats_msg));
- }
- memcpy(&(en_dev->hw_stats.phy_stats), &msg.reps.stats_msg, sizeof(en_dev->hw_stats.phy_stats));
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_STATS_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_mac_stats_get failed, err: %d\n", err);
+ return err;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ memset(&reps.stats_msg, 0, sizeof(reps.stats_msg));
+ }
+ memcpy(&(en_dev->hw_stats.phy_stats), &reps.stats_msg,
+ sizeof(en_dev->hw_stats.phy_stats));
+
+#if STATS_CLEAR_AFTER_READ
+ msg.hdr_to_agt.op_code = AGENT_MAC_STATS_CLEAR;
+ zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+#endif
+
+ return err;
}
int32_t zxdh_mac_stats_clear(struct zxdh_en_device *en_dev)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_STATS_CLEAR;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_mac_stats_clear failed, err: %d\n", err);
- return err;
- }
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_STATS_CLEAR;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_mac_stats_clear failed, err: %d\n", err);
+ return err;
+ }
+
+ return err;
}
int32_t zxdh_en_phyport_init(struct zxdh_en_device *en_dev)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
- struct link_info_struct link_info_val = {0};
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PHYPORT_INIT;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- if (en_dev->ops->is_upf(en_dev->parent))
- {
- msg.payload.hdr_to_agt.phyport = 0;
- msg.payload.hdr_to_agt.is_upf = 1;
- //TODO 确定upf设备link状态获取方案
- en_dev->link_up = true;
- en_dev->speed = SPEED_100000;
- en_dev->ops->set_pf_link_up(en_dev->parent, en_dev->link_up);
- netif_carrier_on(en_dev->netdev);
- }
-
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
- return err;
- }
-
- en_dev->supported_speed_modes = msg.reps.mac_set_msg.speed_modes;
- en_dev->advertising_speed_modes = msg.reps.mac_set_msg.speed_modes;
-
- link_info_val.speed = en_dev->speed;
- link_info_val.autoneg_enable = en_dev->autoneg_enable;
- link_info_val.supported_speed_modes = en_dev->supported_speed_modes;
- link_info_val.advertising_speed_modes = en_dev->advertising_speed_modes;
- link_info_val.duplex = en_dev->duplex;
- en_dev->ops->update_pf_link_info(en_dev->parent, &link_info_val);
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+ struct link_info_struct link_info_val = { 0 };
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PHYPORT_INIT;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
+ return err;
+ }
+
+ en_dev->supported_speed_modes = reps.mac_set_msg.speed_modes;
+ en_dev->advertising_speed_modes = reps.mac_set_msg.speed_modes;
+
+ link_info_val.speed = en_dev->speed;
+ link_info_val.autoneg_enable = en_dev->autoneg_enable;
+ link_info_val.supported_speed_modes = en_dev->supported_speed_modes;
+ link_info_val.advertising_speed_modes = en_dev->advertising_speed_modes;
+ link_info_val.duplex = en_dev->duplex;
+ en_dev->ops->update_pf_link_info(en_dev->parent, &link_info_val);
+
+ return err;
}
-int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable, uint32_t speed_modes)
+int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable,
+ uint32_t speed_modes)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_AUTONEG_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- msg.payload.mac_set_msg.autoneg = enable;
- msg.payload.mac_set_msg.speed_modes = speed_modes;
+ msg.hdr_to_agt.op_code = AGENT_MAC_AUTONEG_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.mac_set_msg.autoneg = enable;
+ msg.mac_set_msg.speed_modes = speed_modes;
- return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
+ return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
}
int32_t zxdh_en_fec_mode_set(struct zxdh_en_device *en_dev, uint32_t fec_cfg)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- msg.payload.mac_fec_mode_msg.fec_cfg = fec_cfg;
+ msg.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.mac_fec_mode_msg.fec_cfg = fec_cfg;
- return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
+ return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
}
-int32_t zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap, uint32_t *fec_cfg, uint32_t *fec_active)
+int32_t zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap,
+ uint32_t *fec_cfg, uint32_t *fec_active)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
-
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
- return err;
- }
-
- if(fec_cap)
- *fec_cap = msg.reps.mac_fec_mode_msg.fec_cap;
- if(fec_cfg)
- *fec_cfg = msg.reps.mac_fec_mode_msg.fec_cfg;
- if(fec_active)
- *fec_active = msg.reps.mac_fec_mode_msg.fec_link;
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
+ return err;
+ }
+
+ if (fec_cap)
+ *fec_cap = reps.mac_fec_mode_msg.fec_cap;
+ if (fec_cfg)
+ *fec_cfg = reps.mac_fec_mode_msg.fec_cfg;
+ if (fec_active)
+ *fec_active = reps.mac_fec_mode_msg.fec_link;
+
+ return err;
}
int32_t zxdh_en_fc_mode_set(struct zxdh_en_device *en_dev, uint32_t fc_mode)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- msg.payload.mac_fc_mode_msg.fc_mode = fc_mode;
+ msg.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.mac_fc_mode_msg.fc_mode = fc_mode;
- return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
+ return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
}
int32_t zxdh_en_fc_mode_get(struct zxdh_en_device *en_dev, uint32_t *fc_mode)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
- return err;
- }
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
+ return err;
+ }
- if(fc_mode)
- *fc_mode = msg.reps.mac_fc_mode_msg.fc_mode;
+ if (fc_mode)
+ *fc_mode = reps.mac_fc_mode_msg.fc_mode;
- return err;
+ return err;
}
-uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev, struct zxdh_en_module_eeprom_param *query, uint8_t *data)
+uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev,
+ struct zxdh_en_module_eeprom_param *query,
+ uint8_t *data)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_MODULE_EEPROM_READ;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- msg.payload.module_eeprom_msg.i2c_addr = query->i2c_addr;
- msg.payload.module_eeprom_msg.bank = query->bank;
- msg.payload.module_eeprom_msg.page = query->page;
- msg.payload.module_eeprom_msg.offset = query->offset;
- msg.payload.module_eeprom_msg.length = query->length;
-
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
- return 0;
- }
-
- if(data)
- memcpy(data, msg.reps.module_eeprom_msg.data, msg.reps.module_eeprom_msg.length);
-
- return msg.reps.module_eeprom_msg.length;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_MODULE_EEPROM_READ;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.module_eeprom_msg.i2c_addr = query->i2c_addr;
+ msg.module_eeprom_msg.bank = query->bank;
+ msg.module_eeprom_msg.page = query->page;
+ msg.module_eeprom_msg.offset = query->offset;
+ msg.module_eeprom_msg.length = query->length;
+
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", err);
+ return 0;
+ }
+
+ if (data)
+ memcpy(data, reps.module_eeprom_msg.data,
+ reps.module_eeprom_msg.length);
+
+ return reps.module_eeprom_msg.length;
}
int32_t zxdh_vf_1588_call_np_interface(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_VF_1588_CALL_NP;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.vf_1588_call_np.vfid = VQM_VFID(msg.payload.hdr.vport);
- msg.payload.vf_1588_call_np.call_np_interface_num = en_dev->vf_1588_call_np_num;
- msg.payload.vf_1588_call_np.ptp_tc_enable_opt = en_dev->ptp_tc_enable_opt;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if(ret != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
- return ret;
- }
-
- return ret;
+ int32_t ret = 0;
+	zxdh_msg_info msg = { 0 };
+	zxdh_reps_info ack = { 0 };
+
+ msg.hdr.op_code = ZXDH_VF_1588_CALL_NP;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.vf_1588_call_np.vfid = VQM_VFID(msg.hdr.vport);
+ msg.vf_1588_call_np.call_np_interface_num = en_dev->vf_1588_call_np_num;
+ msg.vf_1588_call_np.ptp_tc_enable_opt = en_dev->ptp_tc_enable_opt;
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
+ if (ret != 0) {
+ LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
}
int32_t zxdh_vf_port_create(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- uint8_t link_up = 0;
- bool is_upf = false;
-
- if (en_dev->ops->is_upf(en_dev->parent))
- {
- is_upf = true;
- }
-
- msg.payload.hdr.op_code = ZXDH_VF_PORT_INIT;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.vf_init_msg.base_qid = en_dev->phy_index[0];
- msg.payload.vf_init_msg.hash_search_idx = en_dev->hash_search_idx;
- msg.payload.vf_init_msg.rss_enable = 1;
- msg.payload.vf_init_msg.is_upf = is_upf;
-
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if(ret != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
- return ret;
- }
-
- if (is_upf)
- {
- en_dev->link_up = msg.reps.vf_init_msg.link_up;
- }
- else
- {
- en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up);
- en_dev->link_up = link_up;
- LOG_INFO("vf read link_up: %d from vqm\n", link_up);
- }
-
- ether_addr_copy(en_dev->netdev->dev_addr, msg.reps.vf_init_msg.mac_addr);
- en_dev->speed = msg.reps.vf_init_msg.speed;
- en_dev->autoneg_enable = msg.reps.vf_init_msg.autoneg_enable;
- en_dev->supported_speed_modes = msg.reps.vf_init_msg.sup_link_modes;
- en_dev->advertising_speed_modes = msg.reps.vf_init_msg.adv_link_modes;
- en_dev->duplex = msg.reps.vf_init_msg.duplex;
-
- if (!is_upf)
- {
- en_dev->phy_port = msg.reps.vf_init_msg.phy_port;
- en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port);
- }
-
- if (en_dev->link_up)
- {
- en_dev->ops->set_pf_link_up(en_dev->parent, TRUE);
- netif_carrier_on(en_dev->netdev);
- }
- else
- {
- en_dev->ops->set_pf_link_up(en_dev->parent, FALSE);
- netif_carrier_off(en_dev->netdev);
- }
- return ret;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ uint8_t link_up = 0;
+
+ // dpp_np_init
+ msg.hdr.op_code = ZXDH_VF_PORT_INIT;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.vf_init_msg.base_qid = en_dev->phy_index[0];
+ msg.vf_init_msg.hash_search_idx = en_dev->hash_search_idx;
+ msg.vf_init_msg.rss_enable = 1;
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
+ if (ret != 0) {
+ LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
+ return ret;
+ }
+ en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up);
+
+ LOG_INFO("vf read link_up: %d from vqm\n", link_up);
+ en_dev->link_up = link_up;
+ ether_addr_copy(en_dev->netdev->dev_addr, ack.vf_init_msg.mac_addr);
+ en_dev->speed = ack.vf_init_msg.speed;
+ en_dev->autoneg_enable = ack.vf_init_msg.autoneg_enable;
+ en_dev->supported_speed_modes = ack.vf_init_msg.sup_link_modes;
+ en_dev->advertising_speed_modes = ack.vf_init_msg.adv_link_modes;
+ en_dev->duplex = ack.vf_init_msg.duplex;
+ en_dev->phy_port = ack.vf_init_msg.phy_port;
+
+ en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port);
+
+ if (link_up == 0) {
+ en_dev->ops->set_pf_link_up(en_dev->parent, FALSE);
+ netif_carrier_off(en_dev->netdev);
+ } else {
+ en_dev->ops->set_pf_link_up(en_dev->parent, TRUE);
+ netif_carrier_on(en_dev->netdev);
+ }
+ return ret;
}
int32_t zxdh_vf_port_delete(struct zxdh_en_device *en_dev)
{
- union zxdh_msg msg = {0};
-
- //dpp_np_uninit
- msg.payload.hdr.op_code = ZXDH_VF_PORT_UNINIT;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+	zxdh_msg_info msg = { 0 };
+	zxdh_reps_info ack = { 0 };
+
+ // dpp_np_uninit
+ msg.hdr.op_code = ZXDH_VF_PORT_UNINIT;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
-int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev, const uint8_t *dev_addr, uint8_t filter_flag)
+int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr, uint8_t filter_flag)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr.op_code = ZXDH_MAC_ADD;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.mac_addr_set_msg.filter_flag = filter_flag;
- memcpy(msg.payload.mac_addr_set_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
-
- err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (err != 0)
- {
- if (msg.reps.flag == ZXDH_REPS_BEOND_MAC)
- {
- return DPP_RC_TABLE_RANGE_INVALID;
- }
- }
- return err;
-}
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
-int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev, const uint8_t *dev_addr, uint8_t filter_flag, bool mac_flag)
-{
- union zxdh_msg msg = {0};
+ msg.hdr.op_code = ZXDH_MAC_ADD;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.mac_addr_set_msg.filter_flag = filter_flag;
+ memcpy(msg.mac_addr_set_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
- msg.payload.hdr.op_code = ZXDH_MAC_DEL;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.mac_addr_set_msg.filter_flag = filter_flag;
- msg.payload.mac_addr_set_msg.mac_flag = mac_flag;
- memcpy(msg.payload.mac_addr_set_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
+}
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr, uint8_t filter_flag,
+ bool mac_flag)
+{
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+
+ msg.hdr.op_code = ZXDH_MAC_DEL;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.mac_addr_set_msg.filter_flag = filter_flag;
+ msg.mac_addr_set_msg.mac_flag = mac_flag;
+ memcpy(msg.mac_addr_set_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
+
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
int32_t zxdh_vf_rss_en_set(struct zxdh_en_device *en_dev, uint32_t enable)
{
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_RSS_EN_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.rss_enable_msg.rss_enable = enable;
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+
+ msg.hdr.op_code = ZXDH_RSS_EN_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.rss_enable_msg.rss_enable = enable;
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
-int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev, const uint8_t *mac_addr)
+int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr.op_code = ZXDH_IPV6_MAC_ADD;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- memcpy(msg.payload.mac_addr_set_msg.mac_addr, mac_addr, en_dev->netdev->addr_len);
- err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if ((err != 0) && (msg.reps.flag == ZXDH_REPS_BEOND_MAC))
- {
- LOG_ERR("Add Multicast MAC Address(%pM) Failed, Beyond Max MAC Num 32\n", mac_addr);
- return DPP_RC_TABLE_RANGE_INVALID;
- }
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ msg.hdr.op_code = ZXDH_IPV6_MAC_ADD;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ memcpy(msg.ipv6_mac_addr_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
+
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
-int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev, const uint8_t *mac_addr)
+int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr)
{
- union zxdh_msg msg = {0};
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
- msg.payload.hdr.op_code = ZXDH_IPV6_MAC_DEL;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- memcpy(msg.payload.mac_addr_set_msg.mac_addr, mac_addr, en_dev->netdev->addr_len);
+ msg.hdr.op_code = ZXDH_IPV6_MAC_DEL;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ memcpy(msg.ipv6_mac_addr_msg.mac_addr, dev_addr, en_dev->netdev->addr_len);
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
-int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode, uint32_t value, uint8_t fow)
+int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode,
+ uint32_t value, uint8_t fow)
{
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_PORT_ATTRS_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.port_attr_set_msg.mode = mode;
- msg.payload.port_attr_set_msg.value = value;
- msg.payload.port_attr_set_msg.allmulti_follow = fow;
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+
+ msg.hdr.op_code = ZXDH_PORT_ATTRS_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.port_attr_set_msg.mode = mode;
+ msg.port_attr_set_msg.value = value;
+ msg.port_attr_set_msg.allmulti_follow = fow;
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
-int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev, ZXDH_VPORT_T *port_attr_entry)
+int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev,
+ ZXDH_VPORT_T *port_attr_entry)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- msg.payload.hdr.op_code = ZXDH_PORT_ATTRS_GET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("vf_egr_port_attr_get failed, err = %d\n", err);
- return err;
- }
-
- memcpy(port_attr_entry, &msg.reps.port_attr_get_msg.port_attr_entry, sizeof(ZXDH_VPORT_T));
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ int32_t err = 0;
+
+ msg.hdr.op_code = ZXDH_PORT_ATTRS_GET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
+ if (err != 0) {
+ LOG_ERR("vf_egr_port_attr_get failed, err = %d\n", err);
+ return err;
+ }
+
+ memcpy(port_attr_entry, &ack.port_attr_get_msg.port_attr_entry,
+ sizeof(ZXDH_VPORT_T));
+ return err;
}
-int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode, uint8_t value, uint8_t fow)
+int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode,
+ uint8_t value, uint8_t fow)
{
- union zxdh_msg msg = {0};
-
- msg.payload.hdr.op_code = ZXDH_PROMISC_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.promisc_set_msg.mode = mode;
- msg.payload.promisc_set_msg.value = value;
- msg.payload.promisc_set_msg.mc_follow = fow;
- return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
+	zxdh_msg_info msg = { 0 };
+	zxdh_reps_info ack = { 0 };
+
+ msg.hdr.op_code = ZXDH_PROMISC_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.promisc_set_msg.mode = mode;
+ msg.promisc_set_msg.value = value;
+ msg.promisc_set_msg.mc_follow = fow;
+ return zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
}
int32_t zxdh_en_vport_create(struct zxdh_en_device *en_dev)
{
- DPP_PF_INFO_T pf_info = {0};
+ DPP_PF_INFO_T pf_info = { 0 };
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (!en_dev->ops->if_init(en_dev->parent))
- {
- return 0;
- }
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (!en_dev->ops->if_init(en_dev->parent)) {
+ return 0;
+ }
- return dpp_vport_create(&pf_info);
+ return dpp_vport_create(&pf_info);
}
int32_t zxdh_en_vport_delete(struct zxdh_en_device *en_dev)
{
- DPP_PF_INFO_T pf_info = {0};
+ DPP_PF_INFO_T pf_info = { 0 };
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- if (!en_dev->ops->if_init(en_dev->parent))
- {
- return 0;
- }
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ if (!en_dev->ops->if_init(en_dev->parent)) {
+ return 0;
+ }
- return dpp_vport_delete(&pf_info);
+ return dpp_vport_delete(&pf_info);
}
int32_t zxdh_pf_vport_create(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- ret = zxdh_en_vport_create(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_vport_create failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_vport_bond_pf(&pf_info);
- if (ret != 0)
- {
- LOG_ERR("dpp_vport_bond_pf failed: %d\n", ret);
- goto err_vport;
- }
-
- if (en_dev->ops->is_upf(en_dev->parent))
- {
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_LAG_ID, 0);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set lag_id 0 failed: %d\n", ret);
- goto err_vport;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_LAG_EN_OFF, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set bond_en 1 failed: %d\n", ret);
- goto err_vport;
- }
- }
- else
- {
- ret = dpp_panel_bond_vport(&pf_info, en_dev->phy_port);
- if (ret != 0)
- {
- LOG_ERR("dpp_panel_bond_vport failed: %d\n", ret);
- goto err_vport;
- }
- }
-
- return ret;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ ret = zxdh_en_vport_create(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_vport_create failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_vport_bond_pf(&pf_info);
+ if (ret != 0) {
+ LOG_ERR("dpp_vport_bond_pf failed: %d\n", ret);
+ goto err_vport;
+ }
+
+ ret = dpp_panel_bond_vport(&pf_info, en_dev->phy_port);
+ if (ret != 0) {
+ LOG_ERR("dpp_panel_bond_vport failed: %d\n", ret);
+ goto err_vport;
+ }
+
+ return ret;
err_vport:
- zxdh_en_vport_delete(en_dev);
- return ret;
+ zxdh_en_vport_delete(en_dev);
+ return ret;
}
int32_t zxdh_rxfh_set(struct zxdh_en_device *en_dev, uint32_t *queue_map)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (queue_map == NULL)
- {
- return -1;
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- err = dpp_rxfh_set(&pf_info, queue_map, ZXDH_INDIR_RQT_SIZE);
- if (err != 0)
- {
- LOG_ERR("dpp_rxfh_set failed: %d\n", err);
- }
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_RXFH_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- memcpy(msg.payload.rxfh_set_msg.queue_map, queue_map, ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t));
- err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- if(err != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err);
- }
- }
-
- return err;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (queue_map == NULL) {
+ return -1;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ err = dpp_rxfh_set(&pf_info, queue_map, ZXDH_INDIR_RQT_SIZE);
+ if (err != 0) {
+ LOG_ERR("dpp_rxfh_set failed: %d\n", err);
+ }
+ } else {
+ msg.hdr.op_code = ZXDH_RXFH_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ memcpy(msg.rxfh_set_msg.queue_map, queue_map,
+ ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t));
+ err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &ack, true);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err);
+ }
+ }
+
+ return err;
}
void zxdh_rxfh_del(struct zxdh_en_device *en_dev)
{
- union zxdh_msg msg = {0};
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- return;
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- dpp_rxfh_del(&pf_info);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_RXFH_DEL;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- if(err != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err);
- }
- }
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ return;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ dpp_rxfh_del(&pf_info);
+ } else {
+ msg.hdr.op_code = ZXDH_RXFH_DEL;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &ack, true);
+ if (err != 0) {
+ LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err);
+ }
+ }
}
int32_t zxdh_ethtool_init(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- ret = dpp_vport_rss_en_set(&pf_info, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_vport_rss_en_set failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_vport_hash_funcs_set(&pf_info, en_dev->hash_func);
- if (ret != 0)
- {
- LOG_ERR("dpp_vport_hash_funcs_set failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_rx_flow_hash_set(&pf_info, ZXDH_NET_RX_FLOW_HASH_SDFNT);
- if (ret != 0)
- {
- LOG_ERR("zxdh_rx_flow_hash_set failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_PORT_BASE_QID, (uint16_t)en_dev->phy_index[0]);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set %d failed: %d\n", en_dev->phy_index[0], ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV4_TCP_ASSEMBLE, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set tcp assemble failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV6_TCP_ASSEMBLE, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set tcp assemble failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IP_CHKSUM, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set rx ip checksum failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_TCP_UDP_CHKSUM, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set rx l4 checksum failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set accelerator offload failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD, 1);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set vxlan outer ip checksum failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_vport_vlan_filter_en_set(&pf_info, 0);
- if (ret != 0)
- {
- LOG_ERR("dpp_vport_vlan_filter_en_set failed: %d\n", ret);
- return ret;
- }
-
- ret = dpp_vlan_filter_init(&pf_info);
- if (ret != 0)
- {
- LOG_ERR("dpp_vlan_filter_init failed: %d\n", ret);
- return ret;
- }
-
- return ret;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ ret = dpp_vport_rss_en_set(&pf_info, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_vport_rss_en_set failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_vport_hash_funcs_set(&pf_info, en_dev->hash_func);
+ if (ret != 0) {
+ LOG_ERR("dpp_vport_hash_funcs_set failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_rx_flow_hash_set(&pf_info, ZXDH_NET_RX_FLOW_HASH_SDFNT);
+ if (ret != 0) {
+ LOG_ERR("zxdh_rx_flow_hash_set failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_PORT_BASE_QID,
+ (uint16_t)en_dev->phy_index[0]);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set %d failed: %d\n", en_dev->phy_index[0],
+ ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV4_TCP_ASSEMBLE, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set tcp assemble failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IPV6_TCP_ASSEMBLE, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set tcp assemble failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_IP_CHKSUM, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set rx ip checksum failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_TCP_UDP_CHKSUM, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set rx l4 checksum failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_ACCELERATOR_OFFLOAD_FLAG, 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set accelerator offload failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_OUTER_IP_CHECKSUM_OFFLOAD,
+ 1);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set vxlan outer ip checksum failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = dpp_vport_vlan_filter_en_set(&pf_info, 0);
+ if (ret != 0) {
+ LOG_ERR("dpp_vport_vlan_filter_en_set failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpp_vlan_filter_init(&pf_info);
+ if (ret != 0) {
+ LOG_ERR("dpp_vlan_filter_init failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
}
int32_t zxdh_pf_flush_mac(struct zxdh_en_device *en_dev)
{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /* 删除此转发域所有单播mac地址 */
- err = dpp_unicast_all_mac_delete(&pf_info);
- if (err != 0)
- {
- LOG_ERR("dpp_unicast_all_mac_delete failed\n");
- return err;
- }
- LOG_INFO("dpp_unicast_all_mac_delete succeed\n");
-
- /* 删除此转发域中所有组播mac地址 */
- err = dpp_multicast_all_mac_delete(&pf_info);
- if (err != 0)
- {
- LOG_ERR("dpp_multicast_all_mac_delete failed\n");
- return err;
- }
- LOG_INFO("dpp_multicast_all_mac_delete succeed\n");
-
- return err;
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 删除此转发域所有单播mac地址 */
+ err = dpp_unicast_all_mac_delete(&pf_info);
+ if (err != 0) {
+ LOG_ERR("dpp_unicast_all_mac_delete failed\n");
+ return err;
+ }
+ LOG_INFO("dpp_unicast_all_mac_delete succeed\n");
+
+ /* 删除此转发域中所有组播mac地址 */
+ err = dpp_multicast_all_mac_delete(&pf_info);
+ if (err != 0) {
+ LOG_ERR("dpp_multicast_all_mac_delete failed\n");
+ return err;
+ }
+ LOG_INFO("dpp_multicast_all_mac_delete succeed\n");
+
+ return err;
}
int32_t zxdh_pf_flush_mac_online(struct zxdh_en_device *en_dev)
{
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- /* 删除此转发域所有单播mac地址 */
- err = dpp_unicast_all_mac_online_delete(&pf_info);
- if (err != 0)
- {
- LOG_ERR("dpp_unicast_all_mac_online_delete failed:%d\n", err);
- return err;
- }
-
- /* 删除此转发域中所有组播mac地址 */
- err = dpp_multicast_all_mac_online_delete(&pf_info);
- if (err != 0)
- {
- LOG_ERR("dpp_multicast_all_mac_online_delete failed:%d\n", err);
- return err;
- }
-
- return err;
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ /* 删除此转发域所有单播mac地址 */
+ err = dpp_unicast_all_mac_online_delete(&pf_info);
+ if (err != 0) {
+ LOG_ERR("dpp_unicast_all_mac_online_delete failed:%d\n", err);
+ return err;
+ }
+
+ /* 删除此转发域中所有组播mac地址 */
+ err = dpp_multicast_all_mac_online_delete(&pf_info);
+ if (err != 0) {
+ LOG_ERR("dpp_multicast_all_mac_online_delete failed:%d\n", err);
+ return err;
+ }
+
+ return err;
}
int32_t zxdh_pf_port_delete(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- if (en_dev == NULL)
- {
- return -1;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- dpp_vport_uc_promisc_set(&pf_info, 0);
- dpp_vport_mc_promisc_set(&pf_info, 0);
-
- /* pf删除所有配置到np的mac地址 */
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- ret = zxdh_pf_flush_mac_online(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_pf_flush_mac_online failed: %d\n", ret);
- return ret;
- }
- }
-
- ret = zxdh_en_vport_delete(en_dev);
- if (ret != 0)
- {
- LOG_ERR("dpp_vport_delete failed: %d\n", ret);
- return ret;
- }
-
- return ret;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ if (en_dev == NULL) {
+ return -1;
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ dpp_vport_uc_promisc_set(&pf_info, 0);
+ dpp_vport_mc_promisc_set(&pf_info, 0);
+
+ /* pf删除所有配置到np的mac地址 */
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ ret = zxdh_pf_flush_mac_online(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_flush_mac_online failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = zxdh_en_vport_delete(en_dev);
+ if (ret != 0) {
+ LOG_ERR("dpp_vport_delete failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
}
int32_t zxdh_aux_alloc_pannel(struct zxdh_en_device *en_dev)
{
- int32_t ret = 0;
- struct zxdh_pannle_port port;
+ int32_t ret = 0;
+ struct zxdh_pannle_port port;
- ret = en_dev->ops->request_port(en_dev->parent, &port);
- if (ret != 0)
- {
- LOG_ERR("zxdh_aux_alloc_pannel failed \n");
- goto out;
- }
+ ret = en_dev->ops->request_port(en_dev->parent, &port);
+ if (ret != 0) {
+ LOG_ERR("zxdh_aux_alloc_pannel failed \n");
+ goto out;
+ }
- en_dev->phy_port = port.phyport;
- en_dev->pannel_id = port.pannel_id;
- en_dev->link_check_bit = port.link_check_bit;
+ en_dev->phy_port = port.phyport;
+ en_dev->pannel_id = port.pannel_id;
+ en_dev->link_check_bit = port.link_check_bit;
- LOG_INFO("bond pf: pannel %u, phyport %u check bit %u \n",
- en_dev->pannel_id, en_dev->phy_port, en_dev->link_check_bit);
+ LOG_INFO("zxdh_aux_alloc_pannel pannel %u, phyport %u check bit %u \n",
+ en_dev->pannel_id, en_dev->phy_port, en_dev->link_check_bit);
out:
- return ret;
+ return ret;
}
-#if 0
-int32_t zxdh_aux_query_phyport(struct zxdh_en_device *en_dev)
-{
- int32_t ret = 0;
- struct aux_phyport_message recv = {0};
- struct aux_phyport_message *recv_data = &recv;
- zxdh_aux_phyport_msg msg = {0};
- zxdh_aux_phyport_msg *payload = &msg;
-
- payload->pcie_id = en_dev->pcie_id;
- payload->pannel_id = en_dev->pannel_id;
- payload->rsv = en_dev->pannel_id;
-
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_PHYPORT_QUERY, payload, recv_data, true);
- if (ret != 0)
- {
- LOG_ERR("zxdh_aux_query_phyport send message failed \n");
- goto out;
- }
-
- en_dev->phy_port = recv_data->phyport;
-
-out:
- return ret;
-}
-#endif
int32_t zxdh_pf_port_init(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- bool vepa = false;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- if (en_dev == NULL)
- {
- return -1;
- }
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
-#if 0
- en_dev->ops->dpp_np_init(en_dev->parent, en_dev->vport);
-#endif
-
- ret = zxdh_pf_vport_create(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_pf_vport_create failed: %d\n", ret);
- return ret;
- }
-
- zxdh_mac_stats_clear(en_dev);
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- if (!en_dev->ops->if_init(en_dev->parent))
- {
- LOG_INFO("First net-device is init\n");
- return 0;
- }
-
- /* 只将第一个网络设备的队列配置到vport属性表中 */
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_PORT_BASE_QID, (uint16_t)en_dev->phy_index[0]);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set %d failed: %d\n", en_dev->phy_index[0], ret);
- goto err_vport;
- }
- return 0;
- }
-
- vepa = en_dev->ops->get_vepa(en_dev->parent);
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF, (uint32_t)vepa);
- if (ret != 0)
- {
- LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", en_dev->vport, vepa?"vepa":"veb", ret);
- goto err_vport;
- }
- LOG_INFO("Initialize vport(0x%x) to %s mode\n", en_dev->vport, vepa?"vepa":"veb");
-
- ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_HASH_SEARCH_INDEX, en_dev->hash_search_idx);
- if (ret != 0)
- {
- LOG_ERR("dpp_egr_port_attr_set hash_search_index %u failed: %d\n", en_dev->hash_search_idx, ret);
- goto err_vport;
- }
-
- ret = zxdh_ethtool_init(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_ethtool_init failed: %d\n", ret);
- return ret;
- }
-
- /* PF删除复位前配置到np的mac */
- ret = zxdh_pf_flush_mac(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_pf_flush_mac failed: %d\n", ret);
- goto err_vport;
- }
-
- ret = dpp_add_mac(&pf_info, netdev->dev_addr);
- if (ret != 0)
- {
- LOG_ERR("dpp_add_mac failed: %d\n", ret);
- goto err_vport;
- }
-
- dpp_vport_uc_promisc_set(&pf_info, 0);
- dpp_vport_mc_promisc_set(&pf_info, 0);
-
- return 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ bool vepa = false;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ if (en_dev == NULL) {
+ return -1;
+ }
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ en_dev->hash_func = ZXDH_FUNC_TOP;
+
+ ret = zxdh_pf_vport_create(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_vport_create failed: %d\n", ret);
+ return ret;
+ }
+
+ zxdh_mac_stats_clear(en_dev);
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ if (!en_dev->ops->if_init(en_dev->parent)) {
+ LOG_INFO("First net-device is init\n");
+ return 0;
+ }
+
+ /* 只将第一个网络设备的队列配置到vport属性表中 */
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_PORT_BASE_QID,
+ (uint16_t)en_dev->phy_index[0]);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set %d failed: %d\n",
+ en_dev->phy_index[0], ret);
+ goto err_vport;
+ }
+ return 0;
+ }
+
+ vepa = en_dev->ops->get_vepa(en_dev->parent);
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VEPA_EN_OFF, (uint32_t)vepa);
+ if (ret != 0) {
+ LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", en_dev->vport,
+ vepa ? "vepa" : "veb", ret);
+ goto err_vport;
+ }
+ LOG_INFO("Initialize vport(0x%x) to %s mode\n", en_dev->vport,
+ vepa ? "vepa" : "veb");
+
+ ret = dpp_egr_port_attr_set(&pf_info, EGR_FLAG_HASH_SEARCH_INDEX,
+ en_dev->hash_search_idx);
+ if (ret != 0) {
+ LOG_ERR("dpp_egr_port_attr_set hash_search_index %u failed: %d\n",
+ en_dev->hash_search_idx, ret);
+ goto err_vport;
+ }
+
+ ret = zxdh_ethtool_init(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_ethtool_init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* PF删除复位前配置到np的mac */
+ ret = zxdh_pf_flush_mac(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_flush_mac failed: %d\n", ret);
+ goto err_vport;
+ }
+
+ ret = dpp_add_mac(&pf_info, netdev->dev_addr);
+ if (ret != 0) {
+ LOG_ERR("dpp_add_mac failed: %d\n", ret);
+ goto err_vport;
+ }
+
+ dpp_vport_uc_promisc_set(&pf_info, 0);
+ dpp_vport_mc_promisc_set(&pf_info, 0);
+
+ return 0;
err_vport:
- zxdh_en_vport_delete(en_dev);
- return ret;
+ zxdh_en_vport_delete(en_dev);
+ return ret;
}
int32_t zxdh_vf_get_mac(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct netdev_hw_addr *ha = NULL;
- union zxdh_msg msg = {0};
- uint8_t mac[6] = {0};
- int32_t ret = 0;
- bool add_flag = true;
-
- msg.payload.hdr.op_code = ZXDH_MAC_GET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
- return ret;
- }
-
- ether_addr_copy(mac, msg.reps.vf_mac_addr_get_msg.mac_addr);
- LOG_INFO("zxdh_vf_get_mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- if (is_zero_ether_addr(mac))
- {
- get_random_bytes(mac, 6);
- mac[0] &= 0xfe;
- LOG_INFO("vf set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- }
-
- list_for_each_entry(ha, &netdev->uc.list, list)
- {
- if (!memcmp(ha->addr, mac, netdev->addr_len))
- {
- add_flag = false;
- }
- }
-
- if (add_flag)
- {
- ret = zxdh_vf_dpp_add_mac(en_dev, mac, UNFILTER_MAC);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_dpp_add_mac failed: %d\n", ret);
- goto free_vport;
- }
- }
-
- ether_addr_copy(netdev->dev_addr, mac);
-
- return ret;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct netdev_hw_addr *ha = NULL;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info ack = { 0 };
+ uint8_t mac[6] = { 0 };
+ int32_t ret = 0;
+ bool add_flag = true;
+
+ msg.hdr.op_code = ZXDH_MAC_GET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_VF_BAR_MSG_TO_PF, &msg,
+ &ack);
+ if (ret != 0) {
+ LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
+ return ret;
+ }
+
+ ether_addr_copy(mac, ack.vf_mac_addr_get_msg.mac_addr);
+ LOG_INFO("zxdh_vf_get_mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1],
+ mac[2], mac[3], mac[4], mac[5]);
+ if (is_zero_ether_addr(mac)) {
+ get_random_bytes(mac, 6);
+ mac[0] &= 0xfe;
+ LOG_INFO("vf set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0],
+ mac[1], mac[2], mac[3], mac[4], mac[5]);
+ }
+
+ list_for_each_entry(ha, &netdev->uc.list, list) {
+ if (!memcmp(ha->addr, mac, netdev->addr_len)) {
+ add_flag = false;
+ }
+ }
+
+ if (add_flag) {
+ ret = zxdh_vf_dpp_add_mac(en_dev, mac, UNFILTER_MAC);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_dpp_add_mac failed: %d\n", ret);
+ goto free_vport;
+ }
+ }
+
+ ether_addr_copy(netdev->dev_addr, mac);
+
+ return ret;
free_vport:
- zxdh_vf_port_delete(en_dev);
- return ret;
+ zxdh_vf_port_delete(en_dev);
+ return ret;
}
int32_t zxdh_vf_dpp_port_init(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
- ret = zxdh_vf_port_create(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_port_create failed: %d\n", ret);
- }
+ ret = zxdh_vf_port_create(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_port_create failed: %d\n", ret);
+ }
- return ret;
+ return ret;
}
void zxdh_vport_uninit(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = zxdh_pf_port_delete(netdev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_pf_port_delete failed: %d\n", ret);
- }
- }
- else
- {
- ret = zxdh_vf_port_delete(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_port_delete failed: %d\n", ret);
- }
- }
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = zxdh_pf_port_delete(netdev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_pf_port_delete failed: %d\n", ret);
+ }
+ } else {
+ ret = zxdh_vf_port_delete(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_port_delete failed: %d\n", ret);
+ }
+ }
}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.h b/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.h
index 578b8dcc1267e6d56e6ac61a7f247145db1cbaec..4a48ad99e4362096db913f1b9ed03ccdf4f7bbc9 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_cmd.h
@@ -4,223 +4,217 @@
#include
#include "../msg_common.h"
-#define ZXDH_QRES_TBL_LEN (300)
-#define ZXDH_QS_PAIRS (2)
+#define ZXDH_QRES_TBL_LEN (300)
+#define ZXDH_QS_PAIRS (2)
-#define INVALID_PHY_PORT 0xff
-#define ZXDH_MAX_HASH_INDEX 6//TODO:should is 5
+#define STATS_CLEAR_AFTER_READ 0
+
+#define INVALID_PHY_PORT 0xff
+#define ZXDH_MAX_HASH_INDEX 6 // TODO:should is 5
/* HASH_FUNC TYPE */
-#define ZXDH_FUNC_TOP 0x04
-#define ZXDH_FUNC_XOR 0x02
-#define ZXDH_FUNC_CRC32 0x01
+#define ZXDH_FUNC_TOP 0x04
+#define ZXDH_FUNC_XOR 0x02
+#define ZXDH_FUNC_CRC32 0x01
/* RX_NFC */
-#define ZXDH_NET_RX_FLOW_HASH_MV 4
-#define ZXDH_NET_RX_FLOW_HASH_SDT 2
-#define ZXDH_NET_RX_FLOW_HASH_SDFNT 1
+#define ZXDH_NET_RX_FLOW_HASH_MV 4
+#define ZXDH_NET_RX_FLOW_HASH_SDT 2
+#define ZXDH_NET_RX_FLOW_HASH_SDFNT 1
/* RISCV OPCODE */
-#define RISC_TYPE_READ 0
-#define RISC_FIELD_PANEL_ID 5
-#define RISC_FIELD_PHYPORT_CHANNEL 6
-#define RISC_FIELD_HASHID_CHANNEL 10
-#define RISC_SERVER_TIME 0xF0
-
-
-#define MAX_PANEL_ID 8//TODO:should is 5
-
-enum riscv_op_code
-{
- OP_CODE_WRITE = 1,
- OP_CODE_MSGQ_CHAN = 2,
- OP_CODE_DATA_CHAN = 3,
- OP_CODE_MAX,
+#define RISC_TYPE_READ 0
+#define RISC_FIELD_PANEL_ID 5
+#define RISC_FIELD_PHYPORT_CHANNEL 6
+#define RISC_FIELD_HASHID_CHANNEL 10
+#define RISC_SERVER_TIME 0xF0
+
+#define MAX_PANEL_ID 8 // TODO:should is 5
+
+enum riscv_op_code {
+ OP_CODE_WRITE = 1,
+ OP_CODE_MSGQ_CHAN = 2,
+ OP_CODE_DATA_CHAN = 3,
+ OP_CODE_MAX,
};
-#define OP_CODE_TBL_STAT (0xaa)
-#define MSG_STRUCT_HD_LEN 8
-
-struct queue_index_message
-{
- uint8_t type;
- uint8_t field;
- uint16_t ep_bdf;
- uint16_t write_bytes;
- uint16_t rsv;
- uint16_t write_data[0];
+#define OP_CODE_TBL_STAT (0xaa)
+#define MSG_STRUCT_HD_LEN 8
+
+struct queue_index_message {
+ uint8_t type;
+ uint8_t field;
+ uint16_t ep_bdf;
+ uint16_t write_bytes;
+ uint16_t rsv;
+ uint16_t write_data[0];
} __attribute__((packed));
-struct cmd_hdr_recv
-{
- uint8_t check;
- uint8_t rsv;
- uint16_t data_len_bytes;
+struct cmd_hdr_recv {
+ uint8_t check;
+ uint8_t rsv;
+ uint16_t data_len_bytes;
};
-struct cmd_tbl_ack
-{
- struct cmd_hdr_recv hdr;
- uint8_t phy_port;
- uint8_t rsv[3];
+struct cmd_tbl_ack {
+ struct cmd_hdr_recv hdr;
+ uint8_t phy_port;
+ uint8_t rsv[3];
} __attribute__((packed));
-enum zxdh_msg_chan_opc
-{
- ZXDH_VPORT_GET = 4,
- ZXDH_PHYPORT_GET = 6,
+enum zxdh_msg_chan_opc {
+ ZXDH_VPORT_GET = 4,
+ ZXDH_PHYPORT_GET = 6,
};
-struct zxdh_debug_msg
-{
- uint8_t opcode;
- uint8_t phyport;
- bool lldp_enable;
+struct zxdh_debug_msg {
+ uint8_t opcode;
+ uint8_t phyport;
+ bool lldp_enable;
} __attribute__((packed));
-struct zxdh_debug_rcv_msg
-{
- uint8_t reps_states;
- uint8_t lldp_enable;
+struct zxdh_debug_rcv_msg {
+ uint8_t reps_states;
+ uint8_t lldp_enable;
} __attribute__((packed));
-enum zxdh_en_link_speed_bit_indices
-{
- SPM_SPEED_1X_1G = 2,
- SPM_SPEED_1X_10G = 5,
- SPM_SPEED_1X_25G = 6,
- SPM_SPEED_1X_50G = 7,
- SPM_SPEED_2X_100G = 8,
- SPM_SPEED_4X_40G = 9,
- SPM_SPEED_4X_100G = 10,
+enum zxdh_en_link_speed_bit_indices {
+ SPM_SPEED_1X_1G = 2,
+ SPM_SPEED_1X_10G = 5,
+ SPM_SPEED_1X_25G = 6,
+ SPM_SPEED_1X_50G = 7,
+ SPM_SPEED_2X_100G = 8,
+ SPM_SPEED_4X_40G = 9,
+ SPM_SPEED_4X_100G = 10,
};
-enum zxdh_en_fec_mode_bit_indices
-{
- SPM_FEC_NONE = 0,
- SPM_FEC_BASER = 1,
- SPM_FEC_RS528 = 2,
- SPM_FEC_RS544 = 3,
+enum zxdh_en_fec_mode_bit_indices {
+ SPM_FEC_NONE = 0,
+ SPM_FEC_BASER = 1,
+ SPM_FEC_RS528 = 2,
+ SPM_FEC_RS544 = 3,
};
-enum zxdh_en_fc_mode_bit_indices
-{
- SPM_FC_NONE = 0,
- SPM_FC_PAUSE_RX = 1,
- SPM_FC_PAUSE_TX = 2,
- SPM_FC_PAUSE_FULL = 3,
- SPM_FC_PFC_FULL = 4,
+enum zxdh_en_fc_mode_bit_indices {
+ SPM_FC_NONE = 0,
+ SPM_FC_PAUSE_RX = 1,
+ SPM_FC_PAUSE_TX = 2,
+ SPM_FC_PAUSE_FULL = 3,
+ SPM_FC_PFC_FULL = 4,
};
-struct zxdh_en_module_eeprom_param
-{
- uint8_t i2c_addr;
- uint8_t bank;
- uint8_t page;
- uint8_t offset;
- uint8_t length;
+struct zxdh_en_module_eeprom_param {
+ uint8_t i2c_addr;
+ uint8_t bank;
+ uint8_t page;
+ uint8_t offset;
+ uint8_t length;
};
-#define SFF_I2C_ADDRESS_LOW (0x50)
-#define SFF_I2C_ADDRESS_HIGH (0x51)
+#define SFF_I2C_ADDRESS_LOW (0x50)
+#define SFF_I2C_ADDRESS_HIGH (0x51)
enum zxdh_module_id {
- ZXDH_MODULE_ID_SFP = 0x3,
- ZXDH_MODULE_ID_QSFP = 0xC,
- ZXDH_MODULE_ID_QSFP_PLUS = 0xD,
- ZXDH_MODULE_ID_QSFP28 = 0x11,
- ZXDH_MODULE_ID_QSFP_DD = 0x18,
- ZXDH_MODULE_ID_OSFP = 0x19,
- ZXDH_MODULE_ID_DSFP = 0x1B,
+ ZXDH_MODULE_ID_SFP = 0x3,
+ ZXDH_MODULE_ID_QSFP = 0xC,
+ ZXDH_MODULE_ID_QSFP_PLUS = 0xD,
+ ZXDH_MODULE_ID_QSFP28 = 0x11,
+ ZXDH_MODULE_ID_QSFP_DD = 0x18,
+ ZXDH_MODULE_ID_OSFP = 0x19,
+ ZXDH_MODULE_ID_DSFP = 0x1B,
};
-#define SPEED_MODES_TO_SPEED(speed_modes, speed) \
-do \
-{ \
- if (((speed_modes) & BIT(SPM_SPEED_1X_1G)) == BIT(SPM_SPEED_1X_1G)) \
- { \
- (speed) = SPEED_1000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_1X_10G)) == BIT(SPM_SPEED_1X_10G)) \
- { \
- (speed) = SPEED_10000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_1X_25G)) == BIT(SPM_SPEED_1X_25G)) \
- { \
- (speed) = SPEED_25000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_4X_40G)) == BIT(SPM_SPEED_4X_40G)) \
- { \
- (speed) = SPEED_40000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_1X_50G)) == BIT(SPM_SPEED_1X_50G)) \
- { \
- (speed) = SPEED_50000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_2X_100G)) == BIT(SPM_SPEED_2X_100G)) \
- { \
- (speed) = SPEED_100000; \
- } \
- else if (((speed_modes) & BIT(SPM_SPEED_4X_100G)) == BIT(SPM_SPEED_4X_100G)) \
- { \
- (speed) = SPEED_100000; \
- } \
- else \
- { \
- (speed) = SPEED_UNKNOWN; \
- } \
-} while (0)
+#define SPEED_MODES_TO_SPEED(speed_modes, speed) \
+ do { \
+ if (((speed_modes)&BIT(SPM_SPEED_1X_1G)) == BIT(SPM_SPEED_1X_1G)) { \
+ (speed) = SPEED_1000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_1X_10G)) == \
+ BIT(SPM_SPEED_1X_10G)) { \
+ (speed) = SPEED_10000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_1X_25G)) == \
+ BIT(SPM_SPEED_1X_25G)) { \
+ (speed) = SPEED_25000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_4X_40G)) == \
+ BIT(SPM_SPEED_4X_40G)) { \
+ (speed) = SPEED_40000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_1X_50G)) == \
+ BIT(SPM_SPEED_1X_50G)) { \
+ (speed) = SPEED_50000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_2X_100G)) == \
+ BIT(SPM_SPEED_2X_100G)) { \
+ (speed) = SPEED_100000; \
+ } else if (((speed_modes)&BIT(SPM_SPEED_4X_100G)) == \
+ BIT(SPM_SPEED_4X_100G)) { \
+ (speed) = SPEED_100000; \
+ } else { \
+ (speed) = SPEED_UNKNOWN; \
+ } \
+ } while (0)
#define GET_VFID(vport) \
- (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) ? \
- (PF_VQM_VFID_OFFSET + EPID(vport) * 8 + FUNC_NUM(vport)) : \
- (EPID(vport) * 256 + VFUNC_NUM(vport)) \
-
+ ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) ? \
+ (PF_VQM_VFID_OFFSET + EPID(vport) * 8 + FUNC_NUM(vport)) : \
+ (EPID(vport) * 256 + VFUNC_NUM(vport)))
-#define NP_GET_PKT_CNT 0
-#define NP_CLEAR_PKT_CNT 1
+#define NP_GET_PKT_CNT 0
+#define NP_CLEAR_PKT_CNT 1
struct zxdh_en_device;
int32_t zxdh_common_tbl_init(struct net_device *netdev);
int32_t zxdh_en_phyport_init(struct zxdh_en_device *en_dev);
-int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable, uint32_t speed_modes);
+int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable,
+ uint32_t speed_modes);
int32_t zxdh_vport_stats_get(struct zxdh_en_device *en_dev);
int32_t zxdh_en_vport_pre_stats_get(struct zxdh_en_device *en_dev);
int32_t zxdh_mac_stats_get(struct zxdh_en_device *en_dev);
int32_t zxdh_mac_stats_clear(struct zxdh_en_device *en_dev);
int32_t zxdh_hash_id_get(struct zxdh_en_device *en_dev);
int32_t zxdh_en_fec_mode_set(struct zxdh_en_device *en_dev, uint32_t fec_cfg);
-int32_t zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap, uint32_t *fec_cfg, uint32_t *fec_active);
+int32_t zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap,
+ uint32_t *fec_cfg, uint32_t *fec_active);
int32_t zxdh_en_fc_mode_set(struct zxdh_en_device *en_dev, uint32_t fc_mode);
int32_t zxdh_en_fc_mode_get(struct zxdh_en_device *en_dev, uint32_t *fc_mode);
-uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev, struct zxdh_en_module_eeprom_param *query, uint8_t *data);
+uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev,
+ struct zxdh_en_module_eeprom_param *query,
+ uint8_t *data);
int32_t zxdh_lldp_enable_set(struct zxdh_en_device *en_dev, bool lldp_enable);
int32_t zxdh_sshd_enable_set(struct zxdh_en_device *en_dev, bool sshd_enable);
-int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev, const uint8_t *dev_addr, uint8_t filter_flag);
-int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev, const uint8_t *dev_addr, uint8_t filter_flag, bool mac_flag);
+int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr, uint8_t filter_flag);
+int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr, uint8_t filter_flag,
+ bool mac_flag);
void zxdh_vport_uninit(struct net_device *netdev);
int32_t zxdh_pf_port_init(struct net_device *netdev);
int32_t zxdh_vf_dpp_port_init(struct net_device *netdev);
-int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode, uint32_t value, uint8_t fow);
-int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev, ZXDH_VPORT_T *port_attr_entry);
+int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode,
+ uint32_t value, uint8_t fow);
+int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev,
+ ZXDH_VPORT_T *port_attr_entry);
int32_t zxdh_vf_rss_en_set(struct zxdh_en_device *en_dev, uint32_t enable);
-int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev, uint16_t num_changed);
-int32_t zxdh_send_command_to_specify(struct zxdh_en_device *en_dev, uint16_t module_id, void *msg, void *ack);
+int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev,
+ uint16_t num_changed);
+int32_t zxdh_send_command_to_specify(struct zxdh_en_device *en_dev,
+ uint16_t module_id, void *msg, void *ack);
int32_t zxdh_pf_macpcs_num_get(struct zxdh_en_device *en_dev);
-int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev, uint32_t *lldp_enable);
+int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev,
+ uint32_t *lldp_enable);
int32_t zxdh_vf_get_mac(struct net_device *netdev);
int32_t zxdh_rxfh_set(struct zxdh_en_device *en_dev, uint32_t *queue_map);
void zxdh_rxfh_del(struct zxdh_en_device *en_dev);
void zxdh_u32_array_print(uint32_t *array, uint16_t size);
-int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev, uint8_t *fw_version, uint8_t *fw_version_len);
+int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev,
+ uint8_t *fw_version,
+ uint8_t *fw_version_len);
int32_t zxdh_panel_id_get(struct zxdh_en_device *en_dev);
-int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode, uint8_t value, uint8_t fow);
+int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode,
+ uint8_t value, uint8_t fow);
int32_t zxdh_phyport_get(struct zxdh_en_device *en_dev);
int32_t zxdh_vf_1588_call_np_interface(struct zxdh_en_device *en_dev);
int32_t zxdh_aux_alloc_pannel(struct zxdh_en_device *en_dev);
int8_t zxdh_debug_ip_get(struct zxdh_en_device *en_dev, int8_t *ip);
-int32_t zxdh_riscv_os_type_get(struct zxdh_en_device *en_dev, uint8_t *is_zios);
-int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev, const uint8_t *mac_addr);
-int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev, const uint8_t *mac_addr);
-
-#endif /* END __ZXDH_EN_COMMAND_H_ */
+int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr);
+int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev,
+ const uint8_t *dev_addr);
+#endif /* END __ZXDH_EN_COMMAND_H_ */
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.c b/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.c
index f8a48fbd412e0f8e1eb5bd8f8e1dd30a5c4f12ec..5596e6dbbc6eaf437594babdf0c0a8ab5478ffdc 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.c
@@ -7,1457 +7,1380 @@
#include "../en_np/table/include/dpp_tbl_api.h"
#include "../en_pf/msg_func.h"
#include "../en_pf/eq.h"
-#include "../en_tsn/zxdh_tsn_ioctl.h"
-extern int32_t tod_device_set_bar_virtual_addr(uint64_t virtaddr, uint16_t pcieid);
+extern int32_t tod_device_set_bar_virtual_addr(uint64_t virtaddr);
int32_t print_data(uint8_t *data, uint32_t len)
{
- int32_t i = 0;
- uint32_t loopcnt = 0;
- uint32_t last_line_len = 0;
- uint32_t line_len = PKT_PRINT_LINE_LEN;
- uint8_t last_line_data[PKT_PRINT_LINE_LEN] = {0};
-
- if (len == 0)
- {
- return 0;
- }
- loopcnt = len / line_len;
- last_line_len = len % line_len;
-
- LOG_DEBUG("***************packet data[len: %d]***************\n", len);
- for (i = 0; i < loopcnt; i++)
- {
- LOG_INFO("%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", \
- *(data + (line_len * i) + 0), *(data + (line_len * i) + 1), *(data + (line_len * i) + 2), *(data + (line_len * i) + 3), \
- *(data + (line_len * i) + 4), *(data + (line_len * i) + 5), *(data + (line_len * i) + 6), *(data + (line_len * i) + 7), \
- *(data + (line_len * i) + 8), *(data + (line_len * i) + 9), *(data + (line_len * i) + 10), *(data + (line_len * i) + 11), \
- *(data + (line_len * i) + 12), *(data + (line_len * i) + 13), *(data + (line_len * i) + 14), *(data + (line_len * i) + 15));
- }
- if (last_line_len != 0)
- {
- memcpy(last_line_data, (data + (line_len * i)), last_line_len);
- LOG_INFO("%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", \
- last_line_data[0], last_line_data[1], last_line_data[2], last_line_data[3], \
- last_line_data[4], last_line_data[5], last_line_data[6], last_line_data[7], \
- last_line_data[8], last_line_data[9], last_line_data[10], last_line_data[11], \
- last_line_data[12], last_line_data[13], last_line_data[14], last_line_data[15]);
- }
- LOG_INFO("****************end packet data**************\n");
-
- return 0;
+ int32_t i = 0;
+ uint32_t loopcnt = 0;
+ uint32_t last_line_len = 0;
+ uint32_t line_len = PKT_PRINT_LINE_LEN;
+ uint8_t last_line_data[PKT_PRINT_LINE_LEN] = { 0 };
+
+ if (len == 0) {
+ return 0;
+ }
+ loopcnt = len / line_len;
+ last_line_len = len % line_len;
+
+ LOG_DEBUG("***************packet data[len: %d]***************\n", len);
+ for (i = 0; i < loopcnt; i++) {
+ LOG_INFO(
+ "%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x "
+ "%.2x %.2x %.2x\n",
+ *(data + (line_len * i) + 0), *(data + (line_len * i) + 1),
+ *(data + (line_len * i) + 2), *(data + (line_len * i) + 3),
+ *(data + (line_len * i) + 4), *(data + (line_len * i) + 5),
+ *(data + (line_len * i) + 6), *(data + (line_len * i) + 7),
+ *(data + (line_len * i) + 8), *(data + (line_len * i) + 9),
+ *(data + (line_len * i) + 10), *(data + (line_len * i) + 11),
+ *(data + (line_len * i) + 12), *(data + (line_len * i) + 13),
+ *(data + (line_len * i) + 14), *(data + (line_len * i) + 15));
+ }
+ if (last_line_len != 0) {
+ memcpy(last_line_data, (data + (line_len * i)), last_line_len);
+ LOG_INFO(
+ "%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x "
+ "%.2x %.2x %.2x\n",
+ last_line_data[0], last_line_data[1], last_line_data[2],
+ last_line_data[3], last_line_data[4], last_line_data[5],
+ last_line_data[6], last_line_data[7], last_line_data[8],
+ last_line_data[9], last_line_data[10], last_line_data[11],
+ last_line_data[12], last_line_data[13], last_line_data[14],
+ last_line_data[15]);
+ }
+ LOG_INFO("****************end packet data**************\n");
+
+ return 0;
}
int32_t zxdh_read_reg_cmd(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_reg *reg = NULL;
- uint32_t size = sizeof(struct zxdh_en_reg);
- uint64_t base_addr = 0;
- uint32_t num = 0;
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- reg = kzalloc(size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))
- {
- LOG_ERR("copy_from_user failed\n");
- err = -EFAULT;
- goto err_ret;
- }
-
- if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM))
- {
- LOG_ERR("transmit failed, reg->num=%u\n", reg->num);
- err = -EFAULT;
- goto err_ret;
- }
-
- base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0);
-
- for (num = 0; num < reg->num; num++)
- {
- reg->data[num] = readl((const volatile void *)(base_addr + (reg->offset & 0xfffffffc) + num * 4));
- }
-
- if (copy_to_user(ifr->ifr_ifru.ifru_data, reg, size))
- {
- LOG_ERR("copy_to_user failed\n");
- err = -EFAULT;
- }
-
-err_ret:
- kfree(reg);
- return err;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_en_reg reg = { 0 };
+ uint64_t base_addr = 0;
+ uint32_t num = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (copy_from_user(®, ifr->ifr_ifru.ifru_data, sizeof(reg))) {
+ LOG_ERR("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if ((reg.num == 0) || (reg.num > MAX_ACCESS_NUM)) {
+ LOG_ERR("transmit failed, reg.num=%u\n", reg.num);
+ return -EFAULT;
+ }
+
+ base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0);
+
+ for (num = 0; num < reg.num; num++) {
+ reg.data[num] = readl(
+ (const volatile void *)(base_addr + (reg.offset & 0xfffffffc) +
+ num * 4));
+ }
+
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, ®, sizeof(reg))) {
+ LOG_ERR("copy_to_user failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
}
int32_t zxdh_write_reg_cmd(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_reg *reg = NULL;
- uint32_t size = sizeof(struct zxdh_en_reg);
- uint64_t base_addr = 0;
- uint32_t num = 0;
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- reg = kzalloc(size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))
- {
- LOG_ERR("copy_from_user failed\n");
- err = -EFAULT;
- goto err_ret;
- }
-
- if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM))
- {
- LOG_ERR("transmit failed, reg->num=%u\n", reg->num);
- err = -EFAULT;
- goto err_ret;
- }
-
- base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0);
-
- for (num = 0; num < reg->num; num++)
- {
- writel(reg->data[num], (volatile void *)(base_addr + (reg->offset & 0xfffffffc) + num * 4));
- }
-
-err_ret:
- kfree(reg);
- return err;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_en_reg reg = { 0 };
+ uint64_t base_addr = 0;
+ uint32_t num = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (copy_from_user(®, ifr->ifr_ifru.ifru_data, sizeof(reg))) {
+ LOG_ERR("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if ((reg.num == 0) || (reg.num > MAX_ACCESS_NUM)) {
+ LOG_ERR("transmit failed, reg.num=%u\n", reg.num);
+ return -EFAULT;
+ }
+
+ base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0);
+
+ for (num = 0; num < reg.num; num++) {
+ writel(reg.data[num],
+ (volatile void *)(base_addr + (reg.offset & 0xfffffffc) +
+ num * 4));
+ }
+
+ return 0;
}
int32_t print_vring_info(struct virtqueue *vq, struct zxdh_en_reg *reg)
{
- struct vring_virtqueue *vvq = to_vvq(vq);
+ struct vring_virtqueue *vvq = to_vvq(vq);
- if ((reg->num + reg->data[0]) > vvq->packed.vring.num)
- {
- LOG_ERR("the sum of desc_index %u and desc_num %u over desc depth %u, should be [0-%u]\n", \
- reg->num, reg->data[0], vvq->packed.vring.num, vvq->packed.vring.num - 1);
- return -EINVAL;
- }
+ if ((reg->num + reg->data[0]) > vvq->packed.vring.num) {
+ LOG_ERR("the sum of desc_index %u and desc_num %u over desc depth %u, "
+ "should be [0-%u]\n",
+ reg->num, reg->data[0], vvq->packed.vring.num,
+ vvq->packed.vring.num - 1);
+ return -EINVAL;
+ }
- zxdh_print_vring_info(vq, reg->num, reg->num + reg->data[0]);
+ zxdh_print_vring_info(vq, reg->num, reg->num + reg->data[0]);
- return 0;
+ return 0;
}
int32_t zxdh_get_vring_info(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- struct zxdh_en_reg *reg = NULL;
- uint32_t size = sizeof(struct zxdh_en_reg);
- struct virtqueue *vq = NULL;
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- reg = kzalloc(size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))
- {
- LOG_ERR("copy_from_user failed\n");
- ret = -EFAULT;
- goto err_ret;
- }
-
- if (reg->offset >= en_dev->max_queue_pairs)
- {
- LOG_ERR("the queue index %u over the curr_queue_pairs %u, should be [0-%u]\n", \
- reg->offset, en_dev->curr_queue_pairs, en_dev->curr_queue_pairs - 1);
- ret = -EINVAL;
- goto err_ret;
- }
-
- vq = en_dev->sq[reg->offset].vq;
- LOG_INFO("******************************tx vring info****************************\n");
- ret = print_vring_info(vq, reg);
- if (ret != 0)
- {
- LOG_ERR("print tx vring info failed!\n");
- ret = -EINVAL;
- goto err_ret;
- }
-
- vq = en_dev->rq[reg->offset].vq;
- LOG_INFO("******************************rx vring info****************************\n");
- ret = print_vring_info(vq, reg);
- if (ret != 0)
- {
- LOG_ERR("print rx vring info failed!\n");
- ret = -EINVAL;
- }
-
-err_ret:
- kfree(reg);
- return ret;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ struct zxdh_en_reg reg = { 0 };
+ struct virtqueue *vq = NULL;
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (copy_from_user(®, ifr->ifr_ifru.ifru_data, sizeof(reg))) {
+ LOG_ERR("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if (reg.offset >= en_dev->max_queue_pairs) {
+ LOG_ERR("the queue index %u over the curr_queue_pairs %u, should be [0-%u]\n",
+ reg.offset, en_dev->curr_queue_pairs,
+ en_dev->curr_queue_pairs - 1);
+ return -EINVAL;
+ }
+
+ vq = en_dev->sq[reg.offset].vq;
+ LOG_INFO("******************************tx vring "
+ "info****************************\n");
+ ret = print_vring_info(vq, ®);
+ CHECK_UNEQUAL_ERR(ret, 0, -EINVAL, "print tx vring info failed!\n");
+
+ vq = en_dev->rq[reg.offset].vq;
+ LOG_INFO("******************************rx vring "
+ "info****************************\n");
+ ret = print_vring_info(vq, ®);
+ CHECK_UNEQUAL_ERR(ret, 0, -EINVAL, "print rx vring info failed!\n");
+
+ return 0;
}
-int32_t zxdh_en_set_clock_no(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_clock_no(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!\n", reg->num);
- goto err_ret;
- }
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!\n", reg->num);
+ goto err_ret;
+ }
- en_dev->clock_no = reg->data[0];
- LOG_INFO("en_dev %s clock_no = %d\n", en_dev->netdev->name, en_dev->clock_no);
+ en_dev->clock_no = reg->data[0];
+ LOG_INFO("en_dev %s clock_no = %d\n", en_dev->netdev->name,
+ en_dev->clock_no);
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!\n");
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!\n");
+ goto err_ret;
+ }
- return 0;
+ return 0;
err_ret:
- return -1;
+ return -1;
}
void copy_u32_to_u8(uint8_t *data_pkt, uint32_t *data, uint32_t pktlen)
{
- uint32_t i = 0;
+ uint32_t i = 0;
- for (i = 0; i < pktlen; i++)
- {
- *data_pkt++ = data[i];
- }
+ for (i = 0; i < pktlen; i++) {
+ *data_pkt++ = data[i];
+ }
}
int32_t zxdh_tx_file_pkts(struct zxdh_en_priv *en_priv, struct zxdh_en_reg *reg)
{
- int32_t total_sg = 0;
- uint8_t *data_pkt = NULL;
- struct scatterlist *sg = NULL;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct send_queue *sq = en_dev->sq;
- struct page *page = NULL;
- struct data_packet pkt = {0};
- uint16_t i = 0;
- uint32_t len = 0;
- void *ptr = NULL;
- uint32_t last_buff_len = 0;
- uint32_t pktLen = reg->num;
- uint32_t buffLen = 4096;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL)
- {
- LOG_ERR("virtqueue_get_buf() != NULL, ptr=0x%llx, len=0x%x\n", (uint64_t)ptr, len);
- };
-
- sg = sq->sg;
- pkt.buf_size = 16 * PAGE_SIZE;
- page = alloc_pages(GFP_KERNEL, 4);
- if (unlikely(page == NULL))
- {
- LOG_ERR("page is null\n");
- goto err;
- }
-
- pkt.buf = page_address(page);
- if (unlikely(pkt.buf == NULL))
- {
- LOG_ERR("pkt.buf is null\n");
- goto err1;
- }
- memset(pkt.buf, 0, pkt.buf_size);
-
- data_pkt = (uint8_t*)pkt.buf;
- copy_u32_to_u8(data_pkt, reg->data, pktLen);
- print_data(data_pkt, (pktLen > PKT_PRINT_LEN_MAX) ? PKT_PRINT_LEN_MAX : pktLen);
-
- total_sg = pktLen / buffLen;
- last_buff_len = pktLen % buffLen;
- if (last_buff_len != 0)
- {
- total_sg += 1;
- }
-
- sg_init_table(sg, total_sg);
- for (i = 0; i < total_sg; i++)
- {
- if (i == (total_sg - 1))
- {
- sg_set_buf(&sg[i], data_pkt + (i * buffLen), ((last_buff_len != 0) ? last_buff_len : buffLen));
- }
- else
- {
- sg_set_buf(&sg[i], data_pkt + (i * buffLen), buffLen);
- }
- }
-
- if (unlikely(virtqueue_add_outbuf(sq->vq, sg, total_sg, data_pkt, GFP_ATOMIC) != 0))
- {
- LOG_ERR("virtqueue_add_outbuf failure!\n");
- goto err1;
- }
-
- if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq))
- {
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.kicks++;
- u64_stats_update_end(&sq->stats.syncp);
- }
-
- en_dev->netdev->stats.tx_packets++;
- en_dev->netdev->stats.tx_bytes += pktLen;
- LOG_INFO("en_dev->netdev->stats.tx_packets=%ld, tx pktLen=%d\n", en_dev->netdev->stats.tx_packets, pktLen);
-
- return 0;
+ int32_t total_sg = 0;
+ uint8_t *data_pkt = NULL;
+ struct scatterlist *sg = NULL;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct send_queue *sq = en_dev->sq;
+ struct page *page = NULL;
+ struct data_packet pkt = { 0 };
+ uint16_t i = 0;
+ uint32_t len = 0;
+ void *ptr = NULL;
+ uint32_t last_buff_len = 0;
+ uint32_t pktLen = reg->num;
+ uint32_t buffLen = 4096;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ LOG_ERR("virtqueue_get_buf() != NULL, ptr=0x%llx, len=0x%x\n",
+ (uint64_t)ptr, len);
+	}
+
+ sg = sq->sg;
+ pkt.buf_size = 16 * PAGE_SIZE;
+ page = alloc_pages(GFP_KERNEL, 4);
+ if (unlikely(page == NULL)) {
+ LOG_ERR("page is null\n");
+ goto err;
+ }
+
+ pkt.buf = page_address(page);
+ if (unlikely(pkt.buf == NULL)) {
+ LOG_ERR("pkt.buf is null\n");
+ goto err1;
+ }
+ memset(pkt.buf, 0, pkt.buf_size);
+
+ data_pkt = (uint8_t *)pkt.buf;
+ copy_u32_to_u8(data_pkt, reg->data, pktLen);
+ print_data(data_pkt,
+ (pktLen > PKT_PRINT_LEN_MAX) ? PKT_PRINT_LEN_MAX : pktLen);
+
+ total_sg = pktLen / buffLen;
+ last_buff_len = pktLen % buffLen;
+ if (last_buff_len != 0) {
+ total_sg += 1;
+ }
+
+ sg_init_table(sg, total_sg);
+ for (i = 0; i < total_sg; i++) {
+ if (i == (total_sg - 1)) {
+ sg_set_buf(&sg[i], data_pkt + (i * buffLen),
+ ((last_buff_len != 0) ? last_buff_len : buffLen));
+ } else {
+ sg_set_buf(&sg[i], data_pkt + (i * buffLen), buffLen);
+ }
+ }
+
+ if (unlikely(virtqueue_add_outbuf(sq->vq, sg, total_sg, data_pkt,
+ GFP_ATOMIC) != 0)) {
+ LOG_ERR("virtqueue_add_outbuf failure!\n");
+ goto err1;
+ }
+
+ if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+
+ en_dev->netdev->stats.tx_packets++;
+ en_dev->netdev->stats.tx_bytes += pktLen;
+ LOG_INFO("en_dev->netdev->stats.tx_packets=%ld, tx pktLen=%d\n",
+ en_dev->netdev->stats.tx_packets, pktLen);
+
+ return 0;
err1:
- free_pages((uint64_t)pkt.buf, 4);
+ free_pages((uint64_t)pkt.buf, 4);
err:
- return -1;
+ return -1;
}
int32_t zxdh_send_file_pkt(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_reg *reg = NULL;
- uint32_t size = sizeof(struct zxdh_en_reg);
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
-
- reg = kzalloc(size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))
- {
- LOG_ERR("copy_from_user failed\n");
- ret = -EFAULT;
- goto err_ret;
- }
-
- if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM))
- {
- LOG_ERR("transmit failed, reg->num=%d\n", reg->num);
- ret = -EFAULT;
- goto err_ret;
- }
-
- ret = zxdh_tx_file_pkts(en_priv, reg);
- if (unlikely(ret != 0))
- {
- LOG_ERR("transmit failed[ret = %d]!", ret);
- ret = -1;
- goto err_ret;
- }
-
- reg->num = 0;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, reg, size))
- {
- LOG_ERR("copy_to_user failed\n");
- ret = -EFAULT;
- }
-
-err_ret:
- kfree(reg);
- return ret;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_reg reg = { 0 };
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+
+ if (copy_from_user(®, ifr->ifr_ifru.ifru_data, sizeof(reg))) {
+ LOG_ERR("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if ((reg.num == 0) || (reg.num > MAX_ACCESS_NUM)) {
+ LOG_ERR("transmit failed, reg.num=%d\n", reg.num);
+ return -EFAULT;
+ }
+
+ ret = zxdh_tx_file_pkts(en_priv, ®);
+ if (unlikely(ret != 0)) {
+ LOG_ERR("transmit failed[ret = %d]!", ret);
+ return -1;
+ }
+
+ reg.num = 0;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, ®, sizeof(reg))) {
+ LOG_ERR("copy_to_user failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
}
-#ifdef PTP_DRIVER_INTERFACE_EN
+#ifdef PTP_DRIVER_INTERFACE_EN
/* ptp发送加密报文时,需要调用使能函数进行使能 */
-extern int32_t enable_write_ts_to_fifo(struct zxdh_en_device *en_dev, uint32_t enable, uint32_t mac_number);
-extern int32_t set_interrupt_capture_timer(struct zxdh_en_device *en_dev, uint32_t index);
-extern int32_t zxdh_set_pps_selection(struct zxdh_en_device *en_dev, uint32_t pps_type, uint32_t selection);
-extern int32_t zxdh_set_pd_detection(struct zxdh_en_device *en_dev, uint32_t pd_index, uint32_t pd_input1, uint32_t pd_input2);
-extern int32_t zxdh_get_pd_value(struct zxdh_en_device *en_dev, uint32_t pd_index, uint32_t *pd_result);
+extern int32_t enable_write_ts_to_fifo(struct zxdh_en_device *en_dev,
+ uint32_t enable, uint32_t mac_number);
+extern int32_t set_interrupt_capture_timer(struct zxdh_en_device *en_dev,
+ uint32_t index);
+extern int32_t zxdh_set_pps_selection(struct zxdh_en_device *en_dev,
+ uint32_t pps_type, uint32_t selection);
+extern int32_t zxdh_set_pd_detection(struct zxdh_en_device *en_dev,
+ uint32_t pd_index, uint32_t pd_input1,
+ uint32_t pd_input2);
+extern int32_t zxdh_get_pd_value(struct zxdh_en_device *en_dev,
+ uint32_t pd_index, uint32_t *pd_result);
#endif /* PTP_DRIVER_INTERFACE_EN */
-int32_t zxdh_en_enable_ptp_encrypted_msg(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_enable_ptp_encrypted_msg(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- int32_t mac_num = 0; //0-2
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- uint32_t enable = 0;
- int32_t ret = 0;
-
- LOG_INFO("enter in zxdh_en_enable_ptp_encrypted_msg\n");
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- mac_num = zxdh_pf_macpcs_num_get(en_dev);
- if (mac_num < 0)
- {
- LOG_ERR("get mac num %d err, its value should is 0-2!\n", mac_num);
- goto err_ret;
- }
-
- if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size)))
- {
- LOG_ERR("copy_from_user failed!\n");
- goto err_ret;
- }
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!\n", reg->num);
- goto err_ret;
- }
-
- enable = reg->data[0];
- if ((enable != 0) && (enable != 1))
- {
- LOG_ERR("Transmit failed[enable = %u]!\n", enable);
- goto err_ret;
- }
-
- LOG_INFO("enable = %u\n", enable);
-
-#ifdef PTP_DRIVER_INTERFACE_EN
- /* 使能ptp加密报文发送接口 */
- ret = enable_write_ts_to_fifo(en_dev, enable, mac_num);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "enable ptp encrypted msg failed!!\n");
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ int32_t mac_num = 0; // 0-2
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ uint32_t enable = 0;
+ int32_t ret = 0;
+
+ LOG_INFO("enter in zxdh_en_enable_ptp_encrypted_msg\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ mac_num = zxdh_pf_macpcs_num_get(en_dev);
+ if (mac_num < 0) {
+		LOG_ERR("get mac num %d err, its value should be 0-2!\n", mac_num);
+ goto err_ret;
+ }
+
+ if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) {
+ LOG_ERR("copy_from_user failed!\n");
+ goto err_ret;
+ }
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!\n", reg->num);
+ goto err_ret;
+ }
+
+ enable = reg->data[0];
+ if ((enable != 0) && (enable != 1)) {
+ LOG_ERR("Transmit failed[enable = %u]!\n", enable);
+ goto err_ret;
+ }
+
+ LOG_INFO("enable = %u\n", enable);
+
+#ifdef PTP_DRIVER_INTERFACE_EN
+	/* enable the PTP encrypted-packet transmit interface */
+ ret = enable_write_ts_to_fifo(en_dev, enable, mac_num);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "enable ptp encrypted msg failed!!\n");
#endif /* PTP_DRIVER_INTERFACE_EN */
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!\n");
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!\n");
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_intr_capture_timer(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_intr_capture_timer(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- u_int32_t index;
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- index = reg->data[0];
- LOG_INFO("index = %d\n", index);
- if (index > 4)
- {
- LOG_ERR("capture timer out of range!");
- goto err_ret;
- }
-#ifdef PTP_DRIVER_INTERFACE_EN
- ret = set_interrupt_capture_timer(en_dev, index);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set interrupt capture timer failed!!\n");
+ u_int32_t index;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ index = reg->data[0];
+ LOG_INFO("index = %d\n", index);
+ if (index > 4) {
+ LOG_ERR("capture timer out of range!");
+ goto err_ret;
+ }
+#ifdef PTP_DRIVER_INTERFACE_EN
+ ret = set_interrupt_capture_timer(en_dev, index);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT,
+ "set interrupt capture timer failed!!\n");
#endif /* PTP_DRIVER_INTERFACE_EN */
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!!!\n");
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!!!\n");
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
-
+ return -1;
}
-int32_t zxdh_en_set_pps_selection(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_pps_selection(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t pps_type;
- uint32_t selection;
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 2)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- pps_type = reg->data[0];
- selection = reg->data[1];
- LOG_INFO("pps_type = %u, selection = %u\n", pps_type, selection);
-#ifdef PTP_DRIVER_INTERFACE_EN
- ret = zxdh_set_pps_selection(en_dev, pps_type, selection);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pps selection failed!!\n");
+ uint32_t pps_type;
+ uint32_t selection;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 2) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ pps_type = reg->data[0];
+ selection = reg->data[1];
+ LOG_INFO("pps_type = %u, selection = %u\n", pps_type, selection);
+#ifdef PTP_DRIVER_INTERFACE_EN
+ ret = zxdh_set_pps_selection(en_dev, pps_type, selection);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pps selection failed!!\n");
#endif /* PTP_DRIVER_INTERFACE_EN */
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!!!\n");
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!!!\n");
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_phase_detection(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_phase_detection(struct net_device *netdev,
+ struct ifreq *ifr, struct zxdh_en_reg *reg)
{
- uint32_t pd_index;
- uint32_t pd_input1;
- uint32_t pd_input2;
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 3)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- pd_index = reg->data[0];
- pd_input1 = reg->data[1];
- pd_input2 = reg->data[2];
- LOG_INFO("pd_index = %u, pd_input1 = %u, pd_input2 = %u\n", pd_index, pd_input1, pd_input2);
-#ifdef PTP_DRIVER_INTERFACE_EN
- ret = zxdh_set_pd_detection(en_dev, pd_index, pd_input1, pd_input2);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pd detection failed!!\n");
+ uint32_t pd_index;
+ uint32_t pd_input1;
+ uint32_t pd_input2;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 3) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ pd_index = reg->data[0];
+ pd_input1 = reg->data[1];
+ pd_input2 = reg->data[2];
+ LOG_INFO("pd_index = %u, pd_input1 = %u, pd_input2 = %u\n", pd_index,
+ pd_input1, pd_input2);
+#ifdef PTP_DRIVER_INTERFACE_EN
+ ret = zxdh_set_pd_detection(en_dev, pd_index, pd_input1, pd_input2);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pd detection failed!!\n");
#endif /* PTP_DRIVER_INTERFACE_EN */
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!!!\n");
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!!!\n");
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_get_pd_value(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_get_pd_value(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t pd_index;
- uint32_t pd_result;
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- pd_index = reg->data[0];
- LOG_INFO("pd_index = %u\n", pd_index);
-#ifdef PTP_DRIVER_INTERFACE_EN
- ret = zxdh_get_pd_value(en_dev, pd_index, &pd_result);
- CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get pd value failed!!\n");
+ uint32_t pd_index;
+	uint32_t pd_result = 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ pd_index = reg->data[0];
+ LOG_INFO("pd_index = %u\n", pd_index);
+#ifdef PTP_DRIVER_INTERFACE_EN
+ ret = zxdh_get_pd_value(en_dev, pd_index, &pd_result);
+ CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get pd value failed!!\n");
#endif /* PTP_DRIVER_INTERFACE_EN */
- reg->num = 1;
- reg->data[0] = pd_result;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!!!\n");
- goto err_ret;
- }
+ reg->num = 1;
+ reg->data[0] = pd_result;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!!!\n");
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_l2_ptp_port(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_l2_ptp_port(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("reg->num: %d", reg->num);
- LOG_INFO("reg->offset: %d", reg->offset);
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- en_dev->vf_1588_call_np_num = PTP_PORT_VFID_SET;
- LOG_INFO("en_dev->vport: 0x%x, IS_PF(en_dev->vport): %d", en_dev->vport, IS_PF(en_dev->vport));
- if (IS_PF(en_dev->vport))
- {
- ret = dpp_ptp_port_vfid_set(&pf_info, VQM_VFID(en_dev->vport));
- if (ret != 0)
- {
- LOG_ERR("dpp_ptp_port_vfid_set failed!!!\n");
- goto err_ret;
- }
- }
- else
- {
- ret = zxdh_vf_1588_call_np_interface(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n");
- goto err_ret;
- }
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!!!\n");
- goto err_ret;
- }
- LOG_INFO("dpp_ptp_port_vfid_set success");
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ LOG_INFO("reg->num: %d", reg->num);
+ LOG_INFO("reg->offset: %d", reg->offset);
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ en_dev->vf_1588_call_np_num = PTP_PORT_VFID_SET;
+ LOG_INFO("en_dev->vport: 0x%x, IS_PF(en_dev->vport): %d", en_dev->vport,
+ IS_PF(en_dev->vport));
+ if (IS_PF(en_dev->vport)) {
+ ret = dpp_ptp_port_vfid_set(&pf_info, VQM_VFID(en_dev->vport));
+ if (ret != 0) {
+ LOG_ERR("dpp_ptp_port_vfid_set failed!!!\n");
+ goto err_ret;
+ }
+ } else {
+ ret = zxdh_vf_1588_call_np_interface(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n");
+ goto err_ret;
+ }
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!!!\n");
+ goto err_ret;
+ }
+ LOG_INFO("dpp_ptp_port_vfid_set success");
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_ptp_tc_enable(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_ptp_tc_enable(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- en_dev->ptp_tc_enable_opt = reg->data[0];
- LOG_INFO("en_dev->ptp_tc_enable_opt = %u\n", en_dev->ptp_tc_enable_opt);
-
- en_dev->vf_1588_call_np_num = PTP_TC_ENABLE_SET;
-
- if (IS_PF(en_dev->vport))
- {
- ret = dpp_ptp_tc_enable_set(&pf_info, en_dev->ptp_tc_enable_opt);
- if (ret != 0)
- {
- LOG_ERR("dpp_ptp_tc_enable_set failed!!!\n");
- goto err_ret;
- }
- }
- else
- {
- ret = zxdh_vf_1588_call_np_interface(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n");
- goto err_ret;
- }
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ en_dev->ptp_tc_enable_opt = reg->data[0];
+ LOG_INFO("en_dev->ptp_tc_enable_opt = %u\n", en_dev->ptp_tc_enable_opt);
+
+ en_dev->vf_1588_call_np_num = PTP_TC_ENABLE_SET;
+
+ if (IS_PF(en_dev->vport)) {
+ ret = dpp_ptp_tc_enable_set(&pf_info, en_dev->ptp_tc_enable_opt);
+ if (ret != 0) {
+ LOG_ERR("dpp_ptp_tc_enable_set failed!!!\n");
+ goto err_ret;
+ }
+ } else {
+ ret = zxdh_vf_1588_call_np_interface(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n");
+ goto err_ret;
+ }
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_synce_recovery_port(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_synce_recovery_port(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_RECOVERY_CLK_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- msg.payload.synce_clk_recovery_port.clk_speed = reg->data[0];
- LOG_INFO("phyport = %u, clk_speed = %u\n", msg.payload.hdr_to_agt.phyport, msg.payload.synce_clk_recovery_port.clk_speed);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_set_synce_recovery_port failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_RECOVERY_CLK_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ msg.synce_clk_recovery_port.clk_speed = reg->data[0];
+ LOG_INFO("phyport = %u, clk_speed = %u\n", msg.hdr_to_agt.phyport,
+ msg.synce_clk_recovery_port.clk_speed);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_set_synce_recovery_port failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_get_synce_clk_stats(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_get_synce_clk_stats(struct net_device *netdev,
+ struct ifreq *ifr, struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_SYNCE_CLK_STATS_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- LOG_INFO("phyport = %u\n", msg.payload.hdr_to_agt.phyport);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_get_synce_clk_stats failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 1;
- reg->data[0] = msg.reps.synce_clk_recovery_port.clk_stats;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
- LOG_INFO("num = %u, clk_stats: 0x%x\n", reg->num, reg->data[0]);
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_SYNCE_CLK_STATS_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ LOG_INFO("phyport = %u\n", msg.hdr_to_agt.phyport);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_get_synce_clk_stats failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 1;
+ reg->data[0] = reps.synce_clk_recovery_port.clk_stats;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+ LOG_INFO("num = %u, clk_stats: 0x%x\n", reg->num, reg->data[0]);
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_spm_port_tstamp_enable(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_spm_port_tstamp_enable(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 2)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
- msg.payload.mac_tstamp_msg.tx_enable = reg->data[0];
- msg.payload.mac_tstamp_msg.rx_enable = reg->data[1];
- LOG_INFO("phyport = %u, tx_enable: %u, rx_enable: %u\n", msg.payload.hdr_to_agt.phyport, msg.payload.mac_tstamp_msg.tx_enable, msg.payload.mac_tstamp_msg.rx_enable);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_set_spm_port_tstamp_enable failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 2) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ msg.mac_tstamp_msg.tx_enable = reg->data[0];
+ msg.mac_tstamp_msg.rx_enable = reg->data[1];
+ LOG_INFO("phyport = %u, tx_enable: %u, rx_enable: %u\n",
+ msg.hdr_to_agt.phyport, msg.mac_tstamp_msg.tx_enable,
+ msg.mac_tstamp_msg.rx_enable);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_set_spm_port_tstamp_enable failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_get_spm_port_tstamp_enable(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_get_spm_port_tstamp_enable(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
- LOG_INFO("phyport = %u\n", msg.payload.hdr_to_agt.phyport);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_get_spm_port_tstamp_enable failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 2;
- reg->data[0] = msg.reps.mac_tstamp_msg.tx_enable;
- reg->data[1] = msg.reps.mac_tstamp_msg.rx_enable;
- LOG_INFO("tx_enable: %u, rx_enable: %u\n", msg.reps.mac_tstamp_msg.tx_enable, msg.reps.mac_tstamp_msg.rx_enable);
-
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ LOG_INFO("phyport = %u\n", msg.hdr_to_agt.phyport);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_get_spm_port_tstamp_enable failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 2;
+ reg->data[0] = reps.mac_tstamp_msg.tx_enable;
+ reg->data[1] = reps.mac_tstamp_msg.rx_enable;
+ LOG_INFO("tx_enable: %u, rx_enable: %u\n", reps.mac_tstamp_msg.tx_enable,
+ reps.mac_tstamp_msg.rx_enable);
+
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_set_spm_port_tstamp_mode(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_spm_port_tstamp_mode(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- if (reg->num != 2)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_SET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;// 0~9
- msg.payload.mac_tstamp_msg.tx_mode = reg->data[0];
- msg.payload.mac_tstamp_msg.rx_mode = reg->data[1];
- LOG_INFO("phyport = %u, tx_mode: %u, rx_mode: %u\n", msg.payload.hdr_to_agt.phyport, msg.payload.mac_tstamp_msg.tx_mode, msg.payload.mac_tstamp_msg.rx_mode);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_set_spm_port_tstamp_mode failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return ret;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ if (reg->num != 2) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_SET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ msg.mac_tstamp_msg.tx_mode = reg->data[0];
+ msg.mac_tstamp_msg.rx_mode = reg->data[1];
+ LOG_INFO("phyport = %u, tx_mode: %u, rx_mode: %u\n", msg.hdr_to_agt.phyport,
+ msg.mac_tstamp_msg.tx_mode, msg.mac_tstamp_msg.rx_mode);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_set_spm_port_tstamp_mode failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_get_spm_port_tstamp_mode(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_get_spm_port_tstamp_mode(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;// 0~9
- LOG_INFO("phyport = %u\n", msg.payload.hdr_to_agt.phyport);
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_get_spm_port_tstamp_mode failed, err: %d\n", err);
- return err;
- }
-
- reg->num = 2;
- reg->data[0] = msg.reps.mac_tstamp_msg.tx_mode;
- reg->data[1] = msg.reps.mac_tstamp_msg.rx_mode;
- LOG_INFO("tx_mode: %u, rx_mode: %u\n", msg.reps.mac_tstamp_msg.tx_mode, msg.reps.mac_tstamp_msg.rx_mode);
-
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ LOG_INFO("phyport = %u\n", msg.hdr_to_agt.phyport);
+ err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_get_spm_port_tstamp_mode failed, err: %d\n", err);
+ return err;
+ }
+
+ reg->num = 2;
+ reg->data[0] = reps.mac_tstamp_msg.tx_mode;
+ reg->data[1] = reps.mac_tstamp_msg.rx_mode;
+ LOG_INFO("tx_mode: %u, rx_mode: %u\n", reps.mac_tstamp_msg.tx_mode,
+ reps.mac_tstamp_msg.rx_mode);
+
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return 0;
err_ret:
- return -1;
+ return -1;
}
/* 配置时延测量功能是否打开, 维测功能 */
-int32_t zxdh_en_set_delay_statistics_enable(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_set_delay_statistics_enable(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
- if (reg->num != 1)
- {
- LOG_ERR("Transmit failed[len = %d]!", reg->num);
- goto err_ret;
- }
+ if (reg->num != 1) {
+ LOG_ERR("Transmit failed[len = %d]!", reg->num);
+ goto err_ret;
+ }
- en_dev->delay_statistics_enable = reg->data[0];
- LOG_INFO("en_dev->delay_statistics_enable = %u\n", en_dev->delay_statistics_enable);
+ en_dev->delay_statistics_enable = reg->data[0];
+ LOG_INFO("en_dev->delay_statistics_enable = %u\n",
+ en_dev->delay_statistics_enable);
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
- return ret;
+ return ret;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_get_delay_statistics_value(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_get_delay_statistics_value(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;// 0~9
- LOG_INFO("phyport = %u\n", msg.payload.hdr_to_agt.phyport);
- ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_get_delay_statistics_value failed, ret: %d\n", ret);
- return ret;
- }
-
- reg->num = 4;
- reg->data[0] = (uint32_t)(msg.reps.delay_statistics_val.min_delay & 0xffffffff);
- reg->data[1] = (uint32_t)((msg.reps.delay_statistics_val.min_delay >> 32) & 0xffffffff);
- reg->data[2] = (uint32_t)(msg.reps.delay_statistics_val.max_delay & 0xffffffff);
- reg->data[3] = (uint32_t)((msg.reps.delay_statistics_val.max_delay >> 32) & 0xffffffff);
- LOG_INFO("delay val: min_delay: %llu, max_delay: %llu\n", msg.reps.delay_statistics_val.min_delay, \
- msg.reps.delay_statistics_val.max_delay);
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_GET;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ LOG_INFO("phyport = %u\n", msg.hdr_to_agt.phyport);
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_get_delay_statistics_value failed, ret: %d\n", ret);
+ return ret;
+ }
+
+ reg->num = 4;
+ reg->data[0] = (uint32_t)(reps.delay_statistics_val.min_delay & 0xffffffff);
+ reg->data[1] = (uint32_t)((reps.delay_statistics_val.min_delay >> 32) &
+ 0xffffffff);
+ reg->data[2] = (uint32_t)(reps.delay_statistics_val.max_delay & 0xffffffff);
+ reg->data[3] = (uint32_t)((reps.delay_statistics_val.max_delay >> 32) &
+ 0xffffffff);
+ LOG_INFO("delay val: min_delay: %llu, max_delay: %llu\n",
+ reps.delay_statistics_val.min_delay,
+ reps.delay_statistics_val.max_delay);
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return 0;
err_ret:
- return -1;
+ return -1;
}
-int32_t zxdh_en_clear_delay_statistics_value(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t zxdh_en_clear_delay_statistics_value(struct net_device *netdev,
+ struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- int32_t ret = 0;
- union zxdh_msg msg = {0};
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
- en_dev = &en_priv->edev;
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_CLR;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;// 0~9
- LOG_INFO("phyport = %u\n", msg.payload.hdr_to_agt.phyport);
- ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("zxdh_en_clear_delay_statistics_value failed, ret: %d\n", ret);
- return ret;
- }
-
- reg->num = 0;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- goto err_ret;
- }
-
- return 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ int32_t ret = 0;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, "netdev priv is null!\n");
+ en_dev = &en_priv->edev;
+
+ msg.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_CLR;
+ msg.hdr_to_agt.phyport = en_dev->phy_port; // 0~9
+ LOG_INFO("phyport = %u\n", msg.hdr_to_agt.phyport);
+ ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
+ if (ret != 0) {
+ LOG_ERR("zxdh_en_clear_delay_statistics_value failed, ret: %d\n", ret);
+ return ret;
+ }
+
+ reg->num = 0;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ goto err_ret;
+ }
+
+ return 0;
err_ret:
- return -1;
+ return -1;
}
-struct zxdh_en_ptp_ioctl_table ioctl_ptp_table[] =
-{
- {PTP_SET_CLOCK_NO, zxdh_en_set_clock_no},
- {PTP_ENABLE_PTP_ENCRYPTED_MSG, zxdh_en_enable_ptp_encrypted_msg},
- {PTP_SET_INTR_CAPTURE_TIMER, zxdh_en_set_intr_capture_timer},
- {PTP_SET_PP1S_SELECTION, zxdh_en_set_pps_selection},
- {PTP_SET_PHASE_DETECTION, zxdh_en_set_phase_detection},
- {PTP_GET_PD_VALUE, zxdh_en_get_pd_value},
- {PTP_SET_L2PTP_PORT, zxdh_en_set_l2_ptp_port},
- {PTP_SET_PTP_EC_ENABLE, zxdh_en_set_ptp_tc_enable},
- {PTP_SET_SYNCE_CLK_PORT, zxdh_en_set_synce_recovery_port},
- {PTP_GET_SYNCE_CLK_STATS, zxdh_en_get_synce_clk_stats},
- {PTP_SET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_set_spm_port_tstamp_enable},
- {PTP_GET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_get_spm_port_tstamp_enable},
- {PTP_SET_SPM_PORT_TSTAMP_MODE, zxdh_en_set_spm_port_tstamp_mode},
- {PTP_GET_SPM_PORT_TSTAMP_MODE, zxdh_en_get_spm_port_tstamp_mode},
- {PTP_SET_DELAY_STATISTICS_ENABLE, zxdh_en_set_delay_statistics_enable},
- {PTP_GET_DELAY_STATISTICS_VALUE, zxdh_en_get_delay_statistics_value},
- {PTP_CLR_DELAY_STATISTICS_VALUE, zxdh_en_clear_delay_statistics_value}
+struct zxdh_en_ptp_ioctl_table ioctl_ptp_table[] = {
+ { PTP_SET_CLOCK_NO, zxdh_en_set_clock_no },
+ { PTP_ENABLE_PTP_ENCRYPTED_MSG, zxdh_en_enable_ptp_encrypted_msg },
+ { PTP_SET_INTR_CAPTURE_TIMER, zxdh_en_set_intr_capture_timer },
+ { PTP_SET_PP1S_SELECTION, zxdh_en_set_pps_selection },
+ { PTP_SET_PHASE_DETECTION, zxdh_en_set_phase_detection },
+ { PTP_GET_PD_VALUE, zxdh_en_get_pd_value },
+ { PTP_SET_L2PTP_PORT, zxdh_en_set_l2_ptp_port },
+ { PTP_SET_PTP_EC_ENABLE, zxdh_en_set_ptp_tc_enable },
+ { PTP_SET_SYNCE_CLK_PORT, zxdh_en_set_synce_recovery_port },
+ { PTP_GET_SYNCE_CLK_STATS, zxdh_en_get_synce_clk_stats },
+ { PTP_SET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_set_spm_port_tstamp_enable },
+ { PTP_GET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_get_spm_port_tstamp_enable },
+ { PTP_SET_SPM_PORT_TSTAMP_MODE, zxdh_en_set_spm_port_tstamp_mode },
+ { PTP_GET_SPM_PORT_TSTAMP_MODE, zxdh_en_get_spm_port_tstamp_mode },
+ { PTP_SET_DELAY_STATISTICS_ENABLE, zxdh_en_set_delay_statistics_enable },
+ { PTP_GET_DELAY_STATISTICS_VALUE, zxdh_en_get_delay_statistics_value },
+ { PTP_CLR_DELAY_STATISTICS_VALUE, zxdh_en_clear_delay_statistics_value }
};
-int32_t ptp_table_match_func(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg)
+int32_t ptp_table_match_func(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg)
{
- uint32_t i = 0;
- uint32_t ret = 0;
- uint32_t table_size = sizeof(ioctl_ptp_table) / sizeof(struct zxdh_en_ioctl_table);
- for(i = 0; i < table_size; i++)
- {
- if((reg->offset == ioctl_ptp_table[i].cmd) && (ioctl_ptp_table[i].func != NULL))
- {
- ret = ioctl_ptp_table[i].func(netdev, ifr, reg);
- break;
- }
- }
- return ret;
+ uint32_t i = 0;
+ uint32_t ret = 0;
+	uint32_t table_size =
+		sizeof(ioctl_ptp_table) / sizeof(ioctl_ptp_table[0]);
+ for (i = 0; i < table_size; i++) {
+ if ((reg->offset == ioctl_ptp_table[i].cmd) &&
+ (ioctl_ptp_table[i].func != NULL)) {
+ ret = ioctl_ptp_table[i].func(netdev, ifr, reg);
+ break;
+ }
+ }
+ return ret;
}
int32_t zxdh_en_ptp_func(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_reg *reg = NULL;
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_reg *reg = NULL;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- reg = kzalloc(reg_size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
+ reg = kzalloc(reg_size, GFP_KERNEL);
+ CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
- if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size)))
- {
- LOG_ERR("copy_from_user failed!\n");
- goto err_ret;
- }
+ if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) {
+ LOG_ERR("copy_from_user failed!\n");
+ goto err_ret;
+ }
- if(-1 == ptp_table_match_func(netdev, ifr, reg))
- {
- LOG_ERR("ptp_table_match_func failed!\n");
- goto err_ret;
- }
+ if (-1 == ptp_table_match_func(netdev, ifr, reg)) {
+ LOG_ERR("ptp_table_match_func failed!\n");
+ goto err_ret;
+ }
- kfree(reg);
- return 0;
+ kfree(reg);
+ return 0;
err_ret:
- kfree(reg);
- return -1;
+ kfree(reg);
+ return -1;
}
int32_t zxdh_en_pps_func(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_priv *en_priv = NULL;
- struct zxdh_en_device *en_dev = NULL;
- struct dh_core_dev *dh_dev = NULL;
- struct zxdh_pf_device *pf_dev = NULL;
- struct dh_eq_table *table = NULL;
- struct dh_pf_eq_table *table_priv = NULL;
- uint64_t virtaddr = 0x0;
- struct dh_irq *expps = NULL;
- struct dh_irq *lopps = NULL;
- union zxdh_msg msg = {0};
- int32_t err = 0;
-
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
-
- en_priv = netdev_priv(netdev);
- en_dev = &en_priv->edev;
- dh_dev = en_dev->parent->parent;
- pf_dev = dh_core_priv(dh_dev);
-
- table = &dh_dev->eq_table;
- table_priv = table->priv;
-
- LOG_ERR("pf_dev->pci_ioremap_addr[0]: 0x%llx\n", pf_dev->pci_ioremap_addr[0]);
-
- virtaddr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET;
- tod_device_set_bar_virtual_addr(virtaddr, pf_dev->pcie_id);
-
- expps = table_priv->async_irq_tbl[3];
- lopps = table_priv->async_irq_tbl[4];
-
- msg.payload.msg_pps.pcieid = pf_dev->pcie_id;
- msg.payload.msg_pps.extern_pps_vector = expps->index;
- msg.payload.msg_pps.local_pps_vector = lopps->index;
- err = zxdh_send_command_to_specify(en_dev, MODULE_PPS, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_pps_func failed, err: %d\n", err);
- return err;
- }
-
- return 0;
+ struct zxdh_en_priv *en_priv = NULL;
+ struct zxdh_en_device *en_dev = NULL;
+ struct dh_core_dev *dh_dev = NULL;
+ struct zxdh_pf_device *pf_dev = NULL;
+ struct dh_eq_table *table = NULL;
+ struct dh_pf_eq_table *table_priv = NULL;
+ uint64_t virtaddr = 0x0;
+ struct dh_irq *expps = NULL;
+ struct dh_irq *lopps = NULL;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t err = 0;
+
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+
+ en_priv = netdev_priv(netdev);
+ en_dev = &en_priv->edev;
+ dh_dev = en_dev->parent->parent;
+ pf_dev = dh_core_priv(dh_dev);
+
+ table = &dh_dev->eq_table;
+ table_priv = table->priv;
+
+ LOG_ERR("pf_dev->pci_ioremap_addr[0]: 0x%llx\n",
+ pf_dev->pci_ioremap_addr[0]);
+
+ virtaddr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET;
+ tod_device_set_bar_virtual_addr(virtaddr);
+
+ expps = table_priv->async_irq_tbl[3];
+ lopps = table_priv->async_irq_tbl[4];
+
+ msg.msg_pps.pcieid = pf_dev->pcie_id;
+ msg.msg_pps.extern_pps_vector = expps->index;
+ msg.msg_pps.local_pps_vector = lopps->index;
+ err = zxdh_send_command_to_specify(en_dev, MODULE_PPS, &msg, &reps);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_pps_func failed, err: %d\n", err);
+ return err;
+ }
+
+ return 0;
}
#ifdef ZXDH_MSGQ
int32_t zxdh_msgq_msg_send(struct net_device *netdev, struct ifreq *ifr)
{
- struct zxdh_en_device *en_dev = NULL;
- struct msgq_dev *msgq_dev = NULL;
- struct zxdh_en_reg *reg = NULL;
- struct msgq_pkt_info pkt_info = {0};
- uint32_t size = sizeof(struct zxdh_en_reg);
- struct reps_info reps = {0};
- uint32_t loop_cnt = 0;
- uint32_t i = 0;
- int32_t err = -2;
- uint64_t start_us = 0;
- uint64_t end_us = 0;
-
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- en_dev = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n");
- msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n");
-
- reg = kzalloc(size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, size)))
- {
- LOG_ERR("copy_from_user failed!\n");
- goto err_ret;
- }
-
- pkt_info.event_id = MODULE_DEMO;
- pkt_info.timeout_us = 500000;
- pkt_info.len = reg->data[0] + PRIV_HEADER_LEN;
- pkt_info.no_reps = (reg->data[1] == 0) ? false : true;
- loop_cnt = reg->data[2];
- if (loop_cnt == 0)
- {
- goto err_ret;
- }
-
- if (loop_cnt > 100000000)
- {
- loop_cnt = 100000000;
- }
-
- reps.len = 14000;
- reps.addr = vmalloc(reps.len);
- if (reps.addr == NULL)
- {
- LOG_ERR("vmalloc failed!\n");
- goto err_ret;
- }
-
- LOG_DEBUG("len: %d, no_reps: %d, loop_cnt: %d\n", \
- pkt_info.len, pkt_info.no_reps, loop_cnt);
-
- start_us = jiffies_to_usecs(jiffies);
- for (i = 0; i < loop_cnt; ++i)
- {
- pkt_info.addr = kzalloc(pkt_info.len, GFP_KERNEL);
- if(pkt_info.addr == NULL)
- {
- err = -3;
- break;
- };
- err = zxdh_msgq_send_cmd(msgq_dev, &pkt_info, &reps);
- }
-
- end_us = jiffies_to_usecs(jiffies);
- if (i != 0)
- {
- LOG_DEBUG("exec_time: %lld us, single_time: %lld us\n", \
- end_us- start_us, (end_us- start_us) / i);
- }
-
- reg->num = -err;
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, size)))
- {
- LOG_ERR("copy_to_user failed!\n");
- }
-
- if (pkt_info.is_async && !pkt_info.no_reps)
- {
- usleep_range(pkt_info.timeout_us, pkt_info.timeout_us + 100);
- }
-
- vfree(reps.addr);
+ struct zxdh_en_device *en_dev = NULL;
+ struct msgq_dev *msgq_dev = NULL;
+ struct zxdh_en_reg *reg = NULL;
+ struct msgq_pkt_info pkt_info = { 0 };
+ uint32_t size = sizeof(struct zxdh_en_reg);
+ struct reps_info reps = { 0 };
+ uint32_t loop_cnt = 0;
+ uint32_t i = 0;
+ int32_t err = -2;
+ uint64_t start_us = 0;
+ uint64_t end_us = 0;
+
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ en_dev = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n");
+ msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n");
+
+ reg = kzalloc(size, GFP_KERNEL);
+ CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
+
+ if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))) {
+ LOG_ERR("copy_from_user failed!\n");
+ goto err_ret;
+ }
+
+ pkt_info.event_id = MODULE_DEMO;
+ pkt_info.timeout_us = 500000;
+ pkt_info.len = reg->data[0] + PRIV_HEADER_LEN;
+ pkt_info.no_reps = (reg->data[1] == 0) ? false : true;
+ loop_cnt = reg->data[2];
+ if (loop_cnt == 0) {
+ goto err_ret;
+ }
+
+ reps.len = 14000;
+ reps.addr = vmalloc(reps.len);
+ if (reps.addr == NULL) {
+ LOG_ERR("vmalloc failed!\n");
+ goto err_ret;
+ }
+
+ LOG_DEBUG("len: %d, no_reps: %d, loop_cnt: %d\n", pkt_info.len,
+ pkt_info.no_reps, loop_cnt);
+
+ start_us = jiffies_to_usecs(jiffies);
+ for (i = 0; i < loop_cnt; ++i) {
+		pkt_info.addr = kzalloc(pkt_info.len, GFP_KERNEL); /* NOTE(review): confirm zxdh_msgq_send_cmd takes ownership/frees this, otherwise it leaks every iteration */
+		if (pkt_info.addr == NULL) {
+ err = -3;
+ break;
+		}
+ err = zxdh_msgq_send_cmd(msgq_dev, &pkt_info, &reps);
+ }
+
+ end_us = jiffies_to_usecs(jiffies);
+ if (i != 0) {
+		LOG_DEBUG("exec_time: %llu us, single_time: %llu us\n",
+			  end_us - start_us, (end_us - start_us) / i);
+ }
+
+	reg->num = -err;
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, size))) {
+ LOG_ERR("copy_to_user failed!\n");
+ }
+
+ if (pkt_info.is_async && !pkt_info.no_reps) {
+ usleep_range(pkt_info.timeout_us, pkt_info.timeout_us + 100);
+ }
+
+ vfree(reps.addr);
err_ret:
- kfree(reg);
- return err;
+ kfree(reg);
+ return err;
}
int32_t zxdh_msgq_dev_config(struct net_device *netdev, struct ifreq *ifr)
{
- uint32_t reg_size = sizeof(struct zxdh_en_reg);
- struct zxdh_en_device *en_dev = NULL;
- struct msgq_dev *msgq_dev = NULL;
- struct zxdh_en_reg *reg = NULL;
-
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
- en_dev = netdev_priv(netdev);
- CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n");
- msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n");
-
- reg = kzalloc(reg_size, GFP_KERNEL);
- CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
-
- if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size)))
- {
- LOG_ERR("copy_from_user failed!\n");
- goto err_ret;
- }
-
- if (reg->data[1] == MSGQ_PRINT_STA)
- {
- LOG_DEBUG("msgq_rx_pkts: %lld\n", msgq_dev->rq_priv->stats.packets);
- LOG_DEBUG("msgq_rx_kicks: %lld\n", msgq_dev->rq_priv->stats.kicks);
- LOG_DEBUG("msgq_rx_bytes: %lld\n", msgq_dev->rq_priv->stats.bytes);
- LOG_DEBUG("msgq_rx_drops: %lld\n", msgq_dev->rq_priv->stats.drops);
- LOG_DEBUG("msgq_rx_errs: %lld\n", msgq_dev->rq_priv->stats.xdp_drops);
-
- LOG_DEBUG("msgq_tx_pkts: %lld\n", msgq_dev->sq_priv->stats.packets);
- LOG_DEBUG("msgq_tx_bytes: %lld\n", msgq_dev->sq_priv->stats.bytes);
- LOG_DEBUG("msgq_tx_kicks: %lld\n", msgq_dev->sq_priv->stats.kicks);
- LOG_DEBUG("msgq_tx_timeouts: %lld\n", msgq_dev->sq_priv->stats.tx_timeouts);
- LOG_DEBUG("msgq_tx_errs: %lld\n", msgq_dev->sq_priv->stats.xdp_tx_drops);
-
- kfree(reg);
- return 0;
- }
-
- msgq_dev->loopback = (reg->data[0] != 0 ? true : false);
- msgq_dev->print_flag = reg->data[1];
- LOG_INFO("msgq_dev->print_flag = %d\n", msgq_dev->print_flag);
- LOG_INFO("msgq_dev->loopback = %d\n", msgq_dev->loopback);
-
- if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size)))
- {
- LOG_ERR("copy_to_user failed!\n");
- goto err_ret;
- }
-
- kfree(reg);
- return 0;
+ uint32_t reg_size = sizeof(struct zxdh_en_reg);
+ struct zxdh_en_device *en_dev = NULL;
+ struct msgq_dev *msgq_dev = NULL;
+ struct zxdh_en_reg *reg = NULL;
+
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");
+ en_dev = netdev_priv(netdev);
+ CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n");
+ msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n");
+
+ reg = kzalloc(reg_size, GFP_KERNEL);
+ CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n");
+
+ if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) {
+ LOG_ERR("copy_from_user failed!\n");
+ goto err_ret;
+ }
+
+ msgq_dev->loopback = (reg->data[0] != 0 ? true : false);
+ msgq_dev->print_flag = reg->data[1];
+ LOG_INFO("msgq_dev->print_flag = %d\n", msgq_dev->print_flag);
+ LOG_INFO("msgq_dev->loopback = %d\n", msgq_dev->loopback);
+
+ if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) {
+ LOG_ERR("copy_to_user failed!\n");
+ goto err_ret;
+ }
+
+ kfree(reg);
+ return 0;
err_ret:
- kfree(reg);
- return -1;
+ kfree(reg);
+ return -1;
}
#endif
-struct zxdh_en_ioctl_table ioctl_table[] =
-{
- {SIOCGMIIREG, zxdh_read_reg_cmd},
- {SIOCSMIIREG, zxdh_write_reg_cmd},
- {SIOCDEVPRIVATE_VQ_INFO, zxdh_get_vring_info},
- {SIOCDEVPRIVATE_SEND_FILE_PKT, zxdh_send_file_pkt},
+struct zxdh_en_ioctl_table ioctl_table[] = {
+ { SIOCGMIIREG, zxdh_read_reg_cmd },
+ { SIOCSMIIREG, zxdh_write_reg_cmd },
+ { SIOCDEVPRIVATE_VQ_INFO, zxdh_get_vring_info },
+ { SIOCDEVPRIVATE_SEND_FILE_PKT, zxdh_send_file_pkt },
#ifdef ZXDH_MSGQ
- {SIOCDEVPRIVATE_MSGQ_SNED, zxdh_msgq_msg_send},
- {SIOCDEVPRIVATE_MSGQ_CONFIG, zxdh_msgq_dev_config},
+ { SIOCDEVPRIVATE_MSGQ_SNED, zxdh_msgq_msg_send },
+ { SIOCDEVPRIVATE_MSGQ_CONFIG, zxdh_msgq_dev_config },
#endif
- {SIOCDEVPRIVATE_PTP_FUNC, zxdh_en_ptp_func},
- {SIOCDEVPRIVATE_PPS_FUNC, zxdh_en_pps_func},
- {SIOCDEVPRIVATE_TSN_FUNC, zxdh_en_tsn_func},
- {SIOCDEVPRIVATE_DH_TOOLS, zxdh_tools_ioctl_dispatcher},
+ { SIOCDEVPRIVATE_PTP_FUNC, zxdh_en_ptp_func },
+ { SIOCDEVPRIVATE_PPS_FUNC, zxdh_en_pps_func },
+ { SIOCDEVPRIVATE_DH_TOOLS, zxdh_tools_ioctl_dispatcher },
};
-int32_t ioctl_table_match_func(struct net_device *netdev, struct ifreq *ifr, int32_t cmd,
- struct zxdh_en_ioctl_table *func_table, uint32_t table_size)
+int32_t ioctl_table_match_func(struct net_device *netdev, struct ifreq *ifr,
+ int32_t cmd,
+ struct zxdh_en_ioctl_table *func_table,
+ uint32_t table_size)
{
- int32_t ret = 0;
- uint32_t i = 0;
-
- CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
- for (i = 0; i < table_size; i++)
- {
- if ((func_table[i].cmd == cmd) && (func_table[i].func != NULL))
- {
- ret = func_table[i].func(netdev, ifr);
- break;
- }
- }
-
- return ret;
+ int32_t ret = 0;
+ uint32_t i = 0;
+
+ CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
+ for (i = 0; i < table_size; i++) {
+ if ((func_table[i].cmd == cmd) && (func_table[i].func != NULL)) {
+ ret = func_table[i].func(netdev, ifr);
+ break;
+ }
+ }
+
+ return ret;
}
int32_t zxdh_en_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- uint32_t table_size = sizeof(ioctl_table) / sizeof(struct zxdh_en_ioctl_table);
+ uint32_t table_size =
+ sizeof(ioctl_table) / sizeof(struct zxdh_en_ioctl_table);
- return ioctl_table_match_func(netdev, ifr, cmd, ioctl_table, table_size);
+ return ioctl_table_match_func(netdev, ifr, cmd, ioctl_table, table_size);
}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.h b/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.h
index 1b2e5c478750bf7212ea3700785af553303e5017..9ecf2d1b0a75b13fb88ee6090e78955e5112ef22 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/en_ioctl.h
@@ -7,33 +7,32 @@ extern "C" {
#include "../en_aux.h"
-#define SIOCDEVPRIVATE_WRITE_MAC (SIOCDEVPRIVATE + 1)
-#define SIOCDEVPRIVATE_VQ_INFO (SIOCDEVPRIVATE + 2)
-#define SIOCDEVPRIVATE_MSGQ_SNED (SIOCDEVPRIVATE + 3)
-#define SIOCDEVPRIVATE_MSGQ_CONFIG (SIOCDEVPRIVATE + 4)
+#define SIOCDEVPRIVATE_WRITE_MAC (SIOCDEVPRIVATE + 1)
+#define SIOCDEVPRIVATE_VQ_INFO (SIOCDEVPRIVATE + 2)
+#define SIOCDEVPRIVATE_MSGQ_SNED (SIOCDEVPRIVATE + 3)
+#define SIOCDEVPRIVATE_MSGQ_CONFIG (SIOCDEVPRIVATE + 4)
#define SIOCDEVPRIVATE_SEND_FILE_PKT (SIOCDEVPRIVATE + 6)
-#define SIOCDEVPRIVATE_PTP_FUNC (SIOCDEVPRIVATE + 9)
-#define SIOCDEVPRIVATE_PPS_FUNC (SIOCDEVPRIVATE + 10)
-#define SIOCDEVPRIVATE_TSN_FUNC (SIOCDEVPRIVATE + 11)
-#define SIOCDEVPRIVATE_DH_TOOLS (SIOCDEVPRIVATE + 13)
-
-#define PTP_SET_CLOCK_NO (0)
-#define PTP_ENABLE_PTP_ENCRYPTED_MSG (1)
-#define PTP_SET_INTR_CAPTURE_TIMER (2)
-#define PTP_SET_PP1S_SELECTION (3)
-#define PTP_SET_PHASE_DETECTION (4)
-#define PTP_GET_PD_VALUE (5)
-#define PTP_SET_L2PTP_PORT (6)
-#define PTP_SET_PTP_EC_ENABLE (7)
-#define PTP_SET_SYNCE_CLK_PORT (8)
-#define PTP_GET_SYNCE_CLK_STATS (9)
-#define PTP_SET_SPM_PORT_TSTAMP_ENABLE (10)
-#define PTP_GET_SPM_PORT_TSTAMP_ENABLE (11)
-#define PTP_SET_SPM_PORT_TSTAMP_MODE (12)
-#define PTP_GET_SPM_PORT_TSTAMP_MODE (13)
-#define PTP_SET_DELAY_STATISTICS_ENABLE (14)
-#define PTP_GET_DELAY_STATISTICS_VALUE (15)
-#define PTP_CLR_DELAY_STATISTICS_VALUE (16)
+#define SIOCDEVPRIVATE_PTP_FUNC (SIOCDEVPRIVATE + 9)
+#define SIOCDEVPRIVATE_PPS_FUNC (SIOCDEVPRIVATE + 10)
+#define SIOCDEVPRIVATE_DH_TOOLS (SIOCDEVPRIVATE + 13)
+
+#define PTP_SET_CLOCK_NO (0)
+#define PTP_ENABLE_PTP_ENCRYPTED_MSG (1)
+#define PTP_SET_INTR_CAPTURE_TIMER (2)
+#define PTP_SET_PP1S_SELECTION (3)
+#define PTP_SET_PHASE_DETECTION (4)
+#define PTP_GET_PD_VALUE (5)
+#define PTP_SET_L2PTP_PORT (6)
+#define PTP_SET_PTP_EC_ENABLE (7)
+#define PTP_SET_SYNCE_CLK_PORT (8)
+#define PTP_GET_SYNCE_CLK_STATS (9)
+#define PTP_SET_SPM_PORT_TSTAMP_ENABLE (10)
+#define PTP_GET_SPM_PORT_TSTAMP_ENABLE (11)
+#define PTP_SET_SPM_PORT_TSTAMP_MODE (12)
+#define PTP_GET_SPM_PORT_TSTAMP_MODE (13)
+#define PTP_SET_DELAY_STATISTICS_ENABLE (14)
+#define PTP_GET_DELAY_STATISTICS_VALUE (15)
+#define PTP_CLR_DELAY_STATISTICS_VALUE (16)
#define PI_HDR_MAX_NUM 128
#define GET_LOW32 0x00000000ffffffff
@@ -42,52 +41,47 @@ extern "C" {
#define PKT_PRINT_LINE_LEN 16
#define PKT_PRINT_LEN_MAX (16 * 1024)
-#define CONFIG_RISC_PCS_LOOPB_OPCODE 13
-#define CONFIG_RISC_PCS_NORMAL_OPCODE 14
+#define CONFIG_RISC_PCS_LOOPB_OPCODE 13
+#define CONFIG_RISC_PCS_NORMAL_OPCODE 14
-#define MSG_MODULE_DEBUG_RISC 20
+#define MSG_MODULE_DEBUG_RISC 20
#define MAX_ACCESS_NUM 500
-struct zxdh_en_reg
-{
- uint32_t offset;
- uint32_t num;
- uint32_t data[MAX_ACCESS_NUM];
+struct zxdh_en_reg {
+ uint32_t offset;
+ uint32_t num;
+ uint32_t data[MAX_ACCESS_NUM];
};
-struct risc_config_mac_msg
-{
- uint8_t op_code;
- uint8_t phyport;
- uint8_t spm_speed;
- uint8_t spm_fec;
- uint8_t loop_enable;
+struct risc_config_mac_msg {
+ uint8_t op_code;
+ uint8_t phyport;
+ uint8_t spm_speed;
+ uint8_t spm_fec;
+ uint8_t loop_enable;
};
-struct risc_config_userspace
-{
- uint8_t op_code;
- uint8_t arg_num;
- uint8_t filestr_size;
- uint8_t file[100];
+struct risc_config_userspace {
+ uint8_t op_code;
+ uint8_t arg_num;
+ uint8_t filestr_size;
+ uint8_t file[100];
};
-struct data_packet
-{
- void *buf;
- uint32_t buf_size;
+struct data_packet {
+ void *buf;
+ uint32_t buf_size;
};
-struct zxdh_en_ioctl_table
-{
- int32_t cmd;
- int32_t (*func)(struct net_device *netdev, struct ifreq *ifr);
+struct zxdh_en_ioctl_table {
+ int32_t cmd;
+ int32_t (*func)(struct net_device *netdev, struct ifreq *ifr);
};
-struct zxdh_en_ptp_ioctl_table
-{
- int32_t cmd;
- int32_t (*func)(struct net_device *netdev, struct ifreq *ifr, struct zxdh_en_reg *reg);
+struct zxdh_en_ptp_ioctl_table {
+ int32_t cmd;
+ int32_t (*func)(struct net_device *netdev, struct ifreq *ifr,
+ struct zxdh_en_reg *reg);
};
int32_t print_data(uint8_t *data, uint32_t len);
@@ -98,4 +92,3 @@ int32_t zxdh_en_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#endif
#endif
-
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/eq.c b/src/net/drivers/net/ethernet/dinghai/en_aux/eq.c
index e63fb8dfffac94d17bc4704c22e6384d083d5d63..36fbc26990cb604d23e6379c21dcefdbf73c129b 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/eq.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/eq.c
@@ -3,331 +3,326 @@
#include
#include "eq.h"
-static int32_t dh_eq_async_link_info_int_bond_pf(struct notifier_block *nb, unsigned long action, void *data)
+static int32_t dh_eq_async_link_info_int_bond_pf(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
{
- struct dh_eq_async *eq_link_info_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_link_info_async->priv;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t link_up = 0;
- uint8_t link_info = 0;
- uint8_t bit_value = 0;
-
- if (en_dev == NULL)
- {
- LOG_ERR("null ptr\n");
- return -1;
- }
-
- if(!en_dev->ops->is_bond(en_dev->parent))
- {
- LOG_INFO("isn't bond_pf exit\n");
- return 0;
- }
-
- //读取state寄存器中第en_dev->link_check_bit位的值
- en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_info);
- bit_value = (link_info >> en_dev->link_check_bit) & 0x01;
- LOG_INFO("[aux level] netdev:%s read VQM[0x%x]: link_check_bit[%d]-bit_value[%d]\n", en_dev->netdev->name, link_info, en_dev->link_check_bit, bit_value);
- link_up |= bit_value;
-
- en_dev->link_up = link_up;
- queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work);
-
- if(link_up == 0)
- {
- netif_carrier_off(en_dev->netdev);
- en_dev->speed = SPEED_UNKNOWN;
- en_dev->duplex = DUPLEX_UNKNOWN;
- }
- else
- {
- netif_carrier_on(en_dev->netdev);
- queue_work(en_priv->events->wq, &en_dev->link_info_irq_process_work);
- }
-
- return 0;
+ struct dh_eq_async *eq_link_info_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct zxdh_en_priv *en_priv =
+ (struct zxdh_en_priv *)eq_link_info_async->priv;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint8_t link_up = 0;
+ uint8_t link_info = 0;
+ uint8_t bit_value = 0;
+
+ if (en_dev == NULL) {
+ LOG_ERR("null ptr\n");
+ return -1;
+ }
+
+ if (!en_dev->ops->is_bond(en_dev->parent)) {
+ LOG_INFO("isn't bond_pf exit\n");
+ return 0;
+ }
+
+ //读取state寄存器中第en_dev->link_check_bit位的值
+ en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_info);
+ bit_value = (link_info >> en_dev->link_check_bit) & 0x01;
+ LOG_INFO("[aux level] netdev:%s read VQM[0x%x]: "
+ "link_check_bit[%d]-bit_value[%d]\n",
+ en_dev->netdev->name, link_info, en_dev->link_check_bit,
+ bit_value);
+ link_up |= bit_value;
+
+ en_dev->link_up = link_up;
+ queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work);
+
+ if (link_up == 0) {
+ netif_carrier_off(en_dev->netdev);
+ en_dev->speed = SPEED_UNKNOWN;
+ en_dev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ netif_carrier_on(en_dev->netdev);
+ queue_work(en_priv->events->wq, &en_dev->link_info_irq_process_work);
+ }
+
+ return 0;
}
-static int32_t dh_eq_async_link_info_int(struct notifier_block *nb, unsigned long action, void *data)
+static int32_t dh_eq_async_link_info_int(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct dh_eq_async *eq_link_info_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_link_info_async->priv;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t link_up = 0;
- uint8_t link_info = 0;
- uint8_t phyport_val = 0;
-
- //判断是否为bond_pf
- if (en_dev == NULL)
- {
- LOG_ERR("null ptr\n");
- return -1;
- }
-
- if(en_dev->ops->is_bond(en_dev->parent))
- {
- LOG_INFO("is bond_pf, exit\n");
- return 0;
- }
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF && (en_dev->device_id==0x8042))
- {
- //调用读取state后四位获取phoport、读取前四位,获取link_up信息
- en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_info);
- link_up = link_info & 0x0F;
- phyport_val = (link_info >> 4) & 0x0F;
- LOG_INFO("[bond_vf netdev %s] read VQM[0x%x]: phyport[0x%x] link_up[%d]\n", en_dev->netdev->name, link_info, phyport_val, link_up);
- en_dev->phy_port = phyport_val;
- }
- else
- {
- en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up);
- }
-
- en_dev->link_up = link_up;
- queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work);
- if(link_up == 0)
- {
- en_dev->ops->set_pf_link_up(en_dev->parent, FALSE);
- netif_carrier_off(en_dev->netdev);
- en_dev->speed = SPEED_UNKNOWN;
- en_dev->duplex = DUPLEX_UNKNOWN;
- }
- else
- {
- en_dev->ops->set_pf_link_up(en_dev->parent, TRUE);
- netif_carrier_on(en_dev->netdev);
- queue_work(en_priv->events->wq, &en_dev->link_info_irq_process_work);
- }
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- LOG_INFO("pf update all vf\n");
- queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_vf_work);
- }
- return 0;
+ struct dh_eq_async *eq_link_info_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct zxdh_en_priv *en_priv =
+ (struct zxdh_en_priv *)eq_link_info_async->priv;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint8_t link_up = 0;
+ uint8_t link_info = 0;
+ uint8_t phyport_val = 0;
+
+ //判断是否为bond_pf
+ if (en_dev == NULL) {
+ LOG_ERR("null ptr\n");
+ return -1;
+ }
+
+ if (en_dev->ops->is_bond(en_dev->parent)) {
+ LOG_INFO("is bond_pf, exit\n");
+ return 0;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF &&
+ (en_dev->device_id == 0x8042)) {
+ //调用读取state后四位获取phoport、读取前四位,获取link_up信息
+ en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_info);
+ link_up = link_info & 0x0F;
+ phyport_val = (link_info >> 4) & 0x0F;
+ LOG_INFO(
+ "[bond_vf netdev %s] read VQM[0x%x]: phyport[0x%x] link_up[%d]\n",
+ en_dev->netdev->name, link_info, phyport_val, link_up);
+ en_dev->phy_port = phyport_val;
+ } else {
+ en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up);
+ }
+
+ en_dev->link_up = link_up;
+ queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work);
+ if (link_up == 0) {
+ en_dev->ops->set_pf_link_up(en_dev->parent, FALSE);
+ netif_carrier_off(en_dev->netdev);
+ en_dev->speed = SPEED_UNKNOWN;
+ en_dev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ en_dev->ops->set_pf_link_up(en_dev->parent, TRUE);
+ netif_carrier_on(en_dev->netdev);
+ queue_work(en_priv->events->wq, &en_dev->link_info_irq_process_work);
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ LOG_INFO("pf update all vf\n");
+ queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_vf_work);
+ }
+ return 0;
}
-static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, unsigned long action, void *data)
+static int32_t dh_eq_async_riscv_int(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct dh_eq_async * eq_riscv_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_riscv_async->priv;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct dh_eq_table *eq_table = &en_priv->eq_table;
- struct dh_events *events = en_priv->events;
- struct dh_event_nb *event_nb = NULL;
- uint64_t virt_addr = 0;
- int32_t event_type = 0;
- uint16_t event_idx = 0;
- uint16_t i = 0;
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return 0;
- }
-
- virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + ZXDH_BAR_MSG_OFFSET;
- event_idx = zxdh_get_event_id(virt_addr, MSG_CHAN_END_RISC, MSG_CHAN_END_PF);
- event_type = dh_eq_event_type_get(event_idx);
- LOG_INFO("------------- event_idx: %d, event_type: %d------------\n", event_idx, event_type);
-
- if(events == NULL)
- {
- LOG_ERR("riscv_irq trigger, events is null\n");
- return 0;
- }
-
- for (i = 0; i < events->evt_num; i++)
- {
- event_nb = &events->notifiers[i];
-
- if (event_type == event_nb->nb.event_type)
- {
- LOG_INFO("en_aux async riscv irq_handler called\n");
- atomic_notifier_call_chain(&eq_table->nh[event_type], event_type, NULL);
- return 0;
- }
- }
-
- return 0;
+ struct dh_eq_async *eq_riscv_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_riscv_async->priv;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct dh_eq_table *eq_table = &en_priv->eq_table;
+ struct dh_events *events = en_priv->events;
+ struct dh_event_nb *event_nb = NULL;
+ uint64_t virt_addr = 0;
+ int32_t event_type = 0;
+ uint16_t event_idx = 0;
+ uint16_t i = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return 0;
+ }
+
+ virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) +
+ ZXDH_BAR_MSG_OFFSET;
+ event_idx =
+ zxdh_get_event_id(virt_addr, MSG_CHAN_END_RISC, MSG_CHAN_END_PF);
+ event_type = dh_eq_event_type_get(event_idx);
+ LOG_INFO("------------- event_idx: %d, event_type: %d------------\n",
+ event_idx, event_type);
+
+ if (events == NULL) {
+ LOG_ERR("riscv_irq trigger, events is null\n");
+ return 0;
+ }
+
+ for (i = 0; i < events->evt_num; i++) {
+ event_nb = &events->notifiers[i];
+
+ if (event_type == event_nb->nb.event_type) {
+ LOG_INFO("en_aux async riscv irq_handler called\n");
+ atomic_notifier_call_chain(&eq_table->nh[event_type], event_type,
+ NULL);
+ return 0;
+ }
+ }
+
+ return 0;
}
-static int32_t dh_eq_async_pf_int(struct notifier_block *nb, unsigned long action, void *data)
+static int32_t dh_eq_async_pf_int(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct dh_eq_async * eq_pf_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_pf_async->priv;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct dh_eq_table *eq_table = &en_priv->eq_table;
- struct dh_events *events = en_priv->events;
- struct dh_event_nb *event_nb = NULL;
- uint64_t virt_addr = 0;
- int32_t event_type = 0;
- uint16_t event_idx = 0;
- uint16_t i = 0;
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- return 0;
- }
-
- virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET;
- event_idx = zxdh_get_event_id(virt_addr, MSG_CHAN_END_PF, MSG_CHAN_END_VF);
- event_type = dh_eq_event_type_get(event_idx);
- LOG_INFO("------------- event_idx: %d, event_type: %d------------\n", event_idx, event_type);
-
- for (i = 0; i < events->evt_num; i++)
- {
- event_nb = &events->notifiers[i];
-
- if (event_type == event_nb->nb.event_type)
- {
- LOG_INFO("en_aux async pf irq_handler called\n");
- atomic_notifier_call_chain(&eq_table->nh[event_type], event_type, NULL);
- return 0;
- }
- }
-
- return 0;
+ struct dh_eq_async *eq_pf_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_pf_async->priv;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct dh_eq_table *eq_table = &en_priv->eq_table;
+ struct dh_events *events = en_priv->events;
+ struct dh_event_nb *event_nb = NULL;
+ uint64_t virt_addr = 0;
+ int32_t event_type = 0;
+ uint16_t event_idx = 0;
+ uint16_t i = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ return 0;
+ }
+
+ virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) +
+ ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET;
+ event_idx = zxdh_get_event_id(virt_addr, MSG_CHAN_END_PF, MSG_CHAN_END_VF);
+ event_type = dh_eq_event_type_get(event_idx);
+ LOG_INFO("------------- event_idx: %d, event_type: %d------------\n",
+ event_idx, event_type);
+
+ for (i = 0; i < events->evt_num; i++) {
+ event_nb = &events->notifiers[i];
+
+ if (event_type == event_nb->nb.event_type) {
+ LOG_INFO("en_aux async pf irq_handler called\n");
+ atomic_notifier_call_chain(&eq_table->nh[event_type], event_type,
+ NULL);
+ return 0;
+ }
+ }
+
+ return 0;
}
-struct dh_aux_async_eq_table
-{
- char name[64];
- notifier_fn_t async_int;
+struct dh_aux_async_eq_table {
+ char name[64];
+ notifier_fn_t async_int;
};
-static struct dh_aux_async_eq_table dh_aux_async_eq_tbl[] =
-{
- {"riscv", dh_eq_async_riscv_int},
- {"pf", dh_eq_async_pf_int},
- {"link_info", dh_eq_async_link_info_int},
- {"link_info", dh_eq_async_link_info_int_bond_pf},
+static struct dh_aux_async_eq_table dh_aux_async_eq_tbl[] = {
+ { "riscv", dh_eq_async_riscv_int },
+ { "pf", dh_eq_async_pf_int },
+ { "link_info", dh_eq_async_link_info_int },
+ { "link_info", dh_eq_async_link_info_int_bond_pf },
};
-static int32_t dh_aux_setup_async_eq(struct zxdh_en_priv *en_priv, \
- struct dh_eq_async *eq, const char *name, \
- notifier_fn_t call)
+static int32_t dh_aux_setup_async_eq(struct zxdh_en_priv *en_priv,
+ struct dh_eq_async *eq, const char *name,
+ notifier_fn_t call)
{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t err = 0;
-
- spin_lock_init(&eq->lock);//unused
- eq->priv = en_priv;
- eq->irq_nb.notifier_call = call;
- err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, true);
- if (err != 0)
- {
- LOG_ERR("failed to enable %s EQ %d\n", name, err);
- }
-
- return err;
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t err = 0;
+
+ spin_lock_init(&eq->lock); // unused
+ eq->priv = en_priv;
+ eq->irq_nb.notifier_call = call;
+ err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, true);
+ if (err != 0) {
+ LOG_ERR("failed to enable %s EQ %d\n", name, err);
+ }
+
+ return err;
}
-static void cleanup_async_eq(struct zxdh_en_priv *en_priv, struct dh_eq_async *eq, const char *name)
+static void cleanup_async_eq(struct zxdh_en_priv *en_priv,
+ struct dh_eq_async *eq, const char *name)
{
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t err = 0;
-
- err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, false);
- if (err != 0)
- {
- LOG_ERR("failed to disable %s EQ %d\n", name, err);
- }
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t err = 0;
+
+ err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, false);
+ if (err != 0) {
+ LOG_ERR("failed to disable %s EQ %d\n", name, err);
+ }
}
static void destroy_async_eqs(struct zxdh_en_priv *en_priv)
{
- struct dh_eq_table *table = &en_priv->eq_table;
- struct dh_aux_eq_table *table_priv = table->priv;
- int32_t i = 0;
-
- for (i = 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i)
- {
- cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[i], dh_aux_async_eq_tbl[i].name);
- }
+ struct dh_eq_table *table = &en_priv->eq_table;
+ struct dh_aux_eq_table *table_priv = table->priv;
+ int32_t i = 0;
+
+ for (i = 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i) {
+ cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[i],
+ dh_aux_async_eq_tbl[i].name);
+ }
}
void dh_aux_eq_table_destroy(struct zxdh_en_priv *en_priv)
{
- destroy_async_eqs(en_priv);
+ destroy_async_eqs(en_priv);
}
void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv)
{
- kvfree(en_priv->eq_table.priv);
+ kvfree(en_priv->eq_table.priv);
}
int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv)
{
- struct dh_eq_table *eq_table;
- struct dh_aux_eq_table *table_priv = NULL;
- int32_t err = 0;
- uint32_t i = 0;
+ struct dh_eq_table *eq_table;
+ struct dh_aux_eq_table *table_priv = NULL;
+ int32_t err = 0;
+ uint32_t i = 0;
- eq_table = &en_priv->eq_table;
+ eq_table = &en_priv->eq_table;
- table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL);
- if (unlikely(table_priv == NULL))
- {
- LOG_ERR("dh_aux_eq_table kvzalloc failed\n");
- err = -ENOMEM;
- goto err_table_priv;
- }
+ table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL);
+ if (unlikely(table_priv == NULL)) {
+ err = -ENOMEM;
+ goto err_table_priv;
+ }
- eq_table->priv = table_priv;
+ eq_table->priv = table_priv;
- mutex_init(&eq_table->lock);
- for (i = 0; i < DH_EVENT_TYPE_MAX; i++)
- {
- ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
- }
+ mutex_init(&eq_table->lock);
+ for (i = 0; i < DH_EVENT_TYPE_MAX; i++) {
+ ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
+ }
- eq_table->irq_table = NULL;
+ eq_table->irq_table = NULL;
- return 0;
+ return 0;
err_table_priv:
- return err;
+ return err;
}
static int32_t create_async_eqs(struct zxdh_en_priv *en_priv)
{
- struct dh_eq_table *eq_table = &en_priv->eq_table;
- struct dh_aux_eq_table *table_priv = eq_table->priv;
- int32_t err = 0;
- int32_t i = 0;
- int32_t j = 0;
-
- for (i = 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i)
- {
- err = dh_aux_setup_async_eq(en_priv, &table_priv->async_eq_tbl[i], \
- dh_aux_async_eq_tbl[i].name, dh_aux_async_eq_tbl[i].async_int);
- if (err != 0)
- {
- LOG_ERR("Failed to setup aux_async_eq_tbl[%d]\n", i);
- goto err_setup_async_eq;
- }
- }
-
- return err;
+ struct dh_eq_table *eq_table = &en_priv->eq_table;
+ struct dh_aux_eq_table *table_priv = eq_table->priv;
+ int32_t err = 0;
+ int32_t i = 0;
+ int32_t j = 0;
+
+ for (i = 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i) {
+ err = dh_aux_setup_async_eq(en_priv, &table_priv->async_eq_tbl[i],
+ dh_aux_async_eq_tbl[i].name,
+ dh_aux_async_eq_tbl[i].async_int);
+ if (err != 0) {
+ LOG_ERR("Failed to setup aux_async_eq_tbl[%d]\n", i);
+ goto err_setup_async_eq;
+ }
+ }
+
+ return err;
err_setup_async_eq:
- for (j = 0; j < i; ++j)
- {
- cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[j], dh_aux_async_eq_tbl[j].name);
- }
- return err;
+ for (j = 0; j < i; ++j) {
+ cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[j],
+ dh_aux_async_eq_tbl[j].name);
+ }
+ return err;
}
int32_t dh_aux_eq_table_create(struct zxdh_en_priv *en_priv)
{
- int32_t err = 0;
+ int32_t err = 0;
- err = create_async_eqs(en_priv);
- if (err != 0)
- {
- LOG_ERR("Failed to create async EQs\n");
- }
+ err = create_async_eqs(en_priv);
+ if (err != 0) {
+ LOG_ERR("Failed to create async EQs\n");
+ }
- return err;
+ return err;
}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/eq.h b/src/net/drivers/net/ethernet/dinghai/en_aux/eq.h
index c9d24443b379b440491b742fbc0ec95e6d8e62d1..9c9c53502c681327e7b97153a31ba889e8b3ba11 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/eq.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/eq.h
@@ -9,7 +9,7 @@ extern "C" {
#define ZXDH_AUX_ASYNC_EQ_NUM 4
struct dh_aux_eq_table {
- struct dh_eq_async async_eq_tbl[ZXDH_AUX_ASYNC_EQ_NUM];
+ struct dh_eq_async async_eq_tbl[ZXDH_AUX_ASYNC_EQ_NUM];
};
int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv);
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/events.c b/src/net/drivers/net/ethernet/dinghai/en_aux/events.c
index 1dd5fee9cb51babb5b7a8d8e5672536a049d8bcf..db231f375afa3af9c6cfa29396dfeee3f8a6429e 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/events.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/events.c
@@ -1,824 +1,790 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "events.h"
-#include "en_cmd.h"
-#include "../msg_common.h"
-#include "../en_np/table/include/dpp_tbl_api.h"
-#include "../zxdh_tools/zxdh_tools_netlink.h"
-#include "dcbnl/en_dcbnl_api.h"
-#include "zxic_common.h"
-#include
-#include
-#include
-#include
-#include
-#include // 对于VLAN设备
-#include // 对于bonding设备
-#include
-
-static int32_t pf2vf_notifier(struct notifier_block *, unsigned long, void *);
-static int32_t riscv2aux_notifier(struct notifier_block *, unsigned long, void *);
-
-void rx_mode_set_handler(struct work_struct *work);
-
-static struct dh_nb aux_events[] = {
- {.nb.notifier_call = pf2vf_notifier, .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_VF},
- {.nb.notifier_call = riscv2aux_notifier, .event_type = DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX},
-};
-
-static int32_t do_pf_vf_inet6_update_mac_to_np(struct zxdh_en_device *en_dev, const struct in6_addr *ipv6_addr, unsigned long action)
-{
- int32_t ret = 0;
- struct in6_addr sol_addr={0};
- uint8_t mcast_mac[ETH_ALEN];
-
- // 打印IPv6地址,使用%pI6c格式化IPv6地址,确保正确显示
- LOG_INFO("IPv6 address changed on interface %s, %s address: %pI6c\n",
- en_dev->netdev->name, (action == 1) ? "add" : (action == 2) ? "del" : "unknown action with", ipv6_addr);
- // Calculate the multicast MAC address from the IPv6 address
- addrconf_addr_solict_mult(ipv6_addr, &sol_addr);
- ipv6_eth_mc_map(&sol_addr, mcast_mac);
- LOG_INFO("Multicast MAC Address: %pM\n", mcast_mac);
-
- switch (action) {
- case NETDEV_UP:
- {
- ret = zxdh_ip6mac_add(en_dev, ipv6_addr->s6_addr32, mcast_mac);
- if (ret != 0)
- {
- LOG_ERR("zxdh_ip6mac_add failed");
- }
- break;
- }
- case NETDEV_DOWN:
- {
- ret = zxdh_ip6mac_del(en_dev, ipv6_addr->s6_addr32, mcast_mac);
- if (ret != 0)
- {
- LOG_ERR("zxdh_ip6mac_del failed");
- }
- break;
- }
- default:
- break;
- }
- return ret;
-}
-
-static int32_t do_bond_master_inet6_update_mac_to_np(struct net_device *notifier_dev, const struct in6_addr *ipv6_addr, struct zxdh_en_device *en_dev, unsigned long action)
-{
- int32_t ret = 0;
- struct list_head *iter = NULL;
- struct slave *slave_dev = NULL;
- struct bonding *bond = netdev_priv(notifier_dev);
-
- // 遍历所有slave设备
- if (!bond_has_slaves(bond))
- {
- LOG_INFO("Bond device %s don't have slave\n", notifier_dev->name);
- return 0;
- }
-
- bond_for_each_slave(bond, slave_dev, iter)
- {
- if (strcmp(en_dev->netdev->name, slave_dev->dev->name) != 0)
- {
- continue;
- }
- LOG_INFO("Bond device %s have slave device: %s\n", notifier_dev->name, slave_dev->dev->name);
- ret = do_pf_vf_inet6_update_mac_to_np(en_dev, ipv6_addr, action);
- if (ret != 0)
- {
- return ret;
- }
- }
- return 0;
-}
-
-static int32_t inet6_addr_change_notifier(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct inet6_ifaddr *ifa = NULL;
- struct net_device *notifier_dev = NULL; //触发事件的网络设备
- struct zxdh_en_device *en_dev = container_of(nb, struct zxdh_en_device, ipv6_notifier); //处理此回调函数的设备
-
- if(data == NULL)
- {
- LOG_ERR("data is NULL");
- return NOTIFY_OK;
- }
-
- ifa = (struct inet6_ifaddr *)data;
- notifier_dev = ifa->idev->dev;
-
- if (notifier_dev == NULL)
- {
- LOG_ERR("notifier_dev is NULL");
- return NOTIFY_OK;
- }
-
- // 检查是否为vlan设备
- if (is_vlan_dev(notifier_dev))
- notifier_dev = vlan_dev_real_dev(notifier_dev);
-
- // 检查是否为bond master设备
- if (netif_is_bond_master(notifier_dev))
- return do_bond_master_inet6_update_mac_to_np(notifier_dev, &ifa->addr, en_dev, action);
-
- // 检查是否为自定义设备
- if (strcmp(en_dev->netdev->name, notifier_dev->name) == 0)
- return do_pf_vf_inet6_update_mac_to_np(en_dev, &ifa->addr, action);
-
- return NOTIFY_OK;
-}
-
-static void vf_link_info_update_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, vf_link_info_update_work);
- union zxdh_msg msg = {0};
- struct zxdh_vf_item *vf_item = NULL;
- int32_t err = 0;
- uint16_t vf_idx = 0;
- struct pci_dev *pdev = NULL;
- uint16_t num_vfs = 0;
- bool pf_link_up = false;
-
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
- pdev = en_dev->ops->get_pdev(en_dev->parent);
- num_vfs = pci_num_vf(pdev);
-
- msg.payload.hdr_vf.op_code = ZXDH_SET_VF_LINK_STATE;
- msg.payload.link_state_msg.is_link_force_set = FALSE;
-
- msg.payload.link_state_msg.link_up = pf_link_up;
- msg.payload.link_state_msg.speed = en_dev->speed;
- msg.payload.link_state_msg.autoneg_enable = en_dev->autoneg_enable;
- msg.payload.link_state_msg.supported_speed_modes = en_dev->supported_speed_modes;
- msg.payload.link_state_msg.advertising_speed_modes = en_dev->advertising_speed_modes;
-
- for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
- {
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- msg.payload.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
- if(vf_item->is_probed)
- {
- msg.payload.link_state_msg.link_forced = vf_item->link_forced;
- err = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("failed to update VF[%d]\n", vf_idx);
- }
- }
- }
-}
-
-static void link_info_irq_update_vf_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, link_info_irq_update_vf_work);
- struct zxdh_vf_item *vf_item = NULL;
- int32_t err = 0;
- uint16_t vf_idx = 0;
- struct pci_dev *pdev = NULL;
- uint16_t num_vfs = 0;
- bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
- uint16_t func_no = 0;
- uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id);
- union zxdh_msg msg = {0};
-
- LOG_INFO("is called\n");
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- msg.payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY;
- msg.payload.hdr_to_agt.pcie_id = en_dev->pcie_id;
- pdev = en_dev->ops->get_pdev(en_dev->parent);
- num_vfs = pci_num_vf(pdev);
- for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
- {
- vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
- if(vf_item->link_forced == FALSE && vf_item->is_probed)
- {
- func_no = GET_FUNC_NO(pf_no, vf_idx);
- LOG_INFO("vf_idx:%d, func_no=0x%x\n",vf_idx,func_no);
- msg.payload.pcie_msix_msg.func_no[msg.payload.pcie_msix_msg.num++] = func_no;
- en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, pf_link_up ? 1 : 0);
- }
- }
- LOG_INFO("msg.payload.pcie_msix_msg.num:%d\n",msg.payload.pcie_msix_msg.num);
- if(msg.payload.pcie_msix_msg.num > 0)
- {
- err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (err != 0)
- {
- LOG_ERR("failed to update VF link info\n");
- }
- }
-}
-
-static void link_info_irq_process_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, link_info_irq_process_work);
- int32_t ret = 0;
- union zxdh_msg msg = {0};
- struct link_info_struct link_info_val = {0};
-
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
-
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_LINK_INFO_GET;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
- if (ret != 0)
- {
- LOG_ERR("get speed and duplex from agent failed: %d\n", ret);
- return;
- }
- en_dev->speed = msg.reps.mac_set_msg.speed;
- en_dev->duplex = msg.reps.mac_set_msg.duplex;
- LOG_DEBUG("netdev:%s, phy_port:0x%x, speed:%d, duplex:0x%x\n", en_dev->netdev->name, en_dev->phy_port, en_dev->speed, en_dev->duplex);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- link_info_val.speed = en_dev->speed;
- link_info_val.autoneg_enable = en_dev->autoneg_enable;
- link_info_val.supported_speed_modes = en_dev->supported_speed_modes;
- link_info_val.advertising_speed_modes = en_dev->advertising_speed_modes;
- link_info_val.duplex = en_dev->duplex;
-
- en_dev->ops->update_pf_link_info(en_dev->parent, &link_info_val);
-
- //zxdh_port_th_update(en_dev);
- }
-
- return;
-}
-
-static void link_info_irq_update_np_work_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, link_info_irq_update_np_work);
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- if (!en_dev->ops->is_bond(en_dev->parent))
- {
- if (!netif_running(en_dev->netdev))
- {
- return;
- }
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP, en_dev->link_up, 0);
- }
- else
- {
- dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP, en_dev->link_up);
- }
- return;
- }
-
- if (!en_dev->link_up)
- {
- dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 0);
- }
- else
- {
- if (en_dev->netdev->flags & IFF_UP)
- {
- dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 1);
- }
- }
-}
-
-static void en_aux_spoof_check(struct zxdh_en_device *en_dev)
-{
- uint64_t ssvpc = 0;
- uint16_t en_aux_pf_id = 0;
- uint32_t ret = 0;
- uint16_t num_vfs = 0;
- struct pci_dev *pdev = NULL;
- struct dh_core_dev *dh_dev = NULL;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- dh_dev = en_dev->parent;
- pdev = en_dev->ops->get_pdev(dh_dev);
- num_vfs = pci_num_vf(pdev);
-
- if (!IS_PF(en_dev->vport))
- {
- return;
- }
- if (num_vfs == 0)
- {
- return;
- }
-
- en_aux_pf_id = DH_AUX_PF_ID_OFFSET(en_dev->vport);
- // spoof static register clear to 0 after read
- ret = dpp_stat_spoof_packet_drop_cnt_get(&pf_info, en_aux_pf_id, 1, &ssvpc);
- if (ret != 0)
- {
- LOG_ERR("Failed to get spoof check dropped packets number.\n");
- return;
- }
- if (!ssvpc)
- {
- return;
- }
- LOG_INFO("%llu Spoofed packets detected in EP%d, PF%d\n", ssvpc, EPID(en_dev->vport), FUNC_NUM(en_dev->vport));
- return;
-}
-
-static void en_aux_service_task(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, service_task);
-
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- en_aux_spoof_check(en_dev);
-}
-
-static void en_aux_service_timer(struct timer_list *t)
-{
- unsigned long next_event_offset = HZ * 2;
- struct zxdh_en_device *en_dev = from_timer(en_dev, t, service_timer);
- struct zxdh_en_priv *en_priv = container_of(en_dev, struct zxdh_en_priv, edev);
-
- /* Reset the timer */
- mod_timer(&en_dev->service_timer, next_event_offset + jiffies);
- queue_work(en_priv->events->wq, &en_dev->service_task);
-}
-
-static void en_aux_service_riscv_task(struct work_struct *_work)
-{
- int32_t retval = 0;
- union zxdh_msg msg = {0};
-
- time64_t time64;
- struct rtc_time tm;
- unsigned long next_event_offset = HZ * 259200;
-
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, service_riscv_task);
-
- if (!IS_PF(en_dev->vport))
- {
- return;
- }
-
- msg.payload.hdr_to_cmn.pcie_id = en_dev->pcie_id;;
- msg.payload.hdr_to_cmn.write_bytes = 9;
- msg.payload.hdr_to_cmn.type = RISC_SERVER_TIME;
- msg.payload.hdr_to_cmn.field = 0;
-
- time64 = ktime_get_real_seconds();
- time64 += 28800;//CST比UST晚八个小时
- rtc_time64_to_tm(time64, &tm);
-
- msg.payload.time_cfg_msg.tmmng_type = 0xF0;
- msg.payload.time_cfg_msg.dir = 0x2;
- msg.payload.time_cfg_msg.year = tm.tm_year + 1900;
- msg.payload.time_cfg_msg.month = tm.tm_mon + 1;
- msg.payload.time_cfg_msg.day = tm.tm_mday;
- msg.payload.time_cfg_msg.hour = tm.tm_hour;
- msg.payload.time_cfg_msg.min = tm.tm_min;
- msg.payload.time_cfg_msg.sec = tm.tm_sec;
-
-
- LOG_INFO("send msg timer to riscv:%d-%d-%d %d:%d:%d\n", msg.payload.time_cfg_msg.year, msg.payload.time_cfg_msg.month, msg.payload.time_cfg_msg.day, msg.payload.time_cfg_msg.hour, msg.payload.time_cfg_msg.min, msg.payload.time_cfg_msg.sec);
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
-
- retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_TIMER_TO_RISC_MSG, &msg, &msg);
- if (retval != 0)
- {
- LOG_ERR("zxdh_send_command_to_riscv failed: %d\n", retval);
- }
-
- mod_timer(&en_dev->service_riscv_timer, next_event_offset + jiffies);
-}
-
-static void en_aux_service_riscv_timer(struct timer_list *t)
-{
- unsigned long next_event_offset = HZ * 60;
- struct zxdh_en_device *en_dev = from_timer(en_dev, t, service_riscv_timer);
- struct zxdh_en_priv *en_priv = container_of(en_dev, struct zxdh_en_priv, edev);
-
- /* Reset the timer */
- mod_timer(&en_dev->service_riscv_timer, next_event_offset + jiffies);
- queue_work(en_priv->events->wq, &en_dev->service_riscv_task);
-}
-
-static void pf2vf_msg_proc_work_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, pf2vf_msg_proc_work);
- uint64_t virt_addr = 0;
-
- LOG_INFO("is called\n");
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET;
- zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, en_dev);
-}
-
-static int32_t pf2vf_notifier(struct notifier_block *nb, unsigned long type, void *data)
-{
- struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx;
-
- LOG_INFO("is called\n");
- queue_work(en_priv->events->wq, &en_priv->edev.pf2vf_msg_proc_work);
-
- return NOTIFY_OK;
-}
-
-static void riscv2aux_msg_proc_work_handler(struct work_struct *_work)
-{
- struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device, riscv2aux_msg_proc_work);
- uint64_t virt_addr = 0;
- uint16_t src = MSG_CHAN_END_RISC;
- uint16_t dst = MSG_CHAN_END_PF;
-
- LOG_INFO("is called\n");
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + ZXDH_BAR_MSG_OFFSET;
- zxdh_bar_irq_recv(src, dst, virt_addr, en_dev);
-}
-
-static int32_t riscv2aux_notifier(struct notifier_block *nb, unsigned long type, void *data)
-{
- struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
- struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx;
- LOG_INFO("is called\n");
- queue_work(en_priv->events->wq, &en_priv->edev.riscv2aux_msg_proc_work);
-
- return NOTIFY_OK;
-}
-
-void pf_notify_vf_reset_handler(struct work_struct *work)
-{
- int32_t ret = 0;
- struct zxdh_en_device *en_dev = container_of(work, struct zxdh_en_device, pf_notify_vf_reset_work);
- struct net_device *netdev = en_dev->netdev;
-
- LOG_INFO("pf_notify_vf_reset_handler is called\n");
- ZXDH_AUX_INIT_COMP_CHECK(en_dev);
- ret = zxdh_vf_get_mac(netdev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_vf_get_mac failed: %d\n", ret);
- }
-}
-
-typedef uint32_t (*zxdh_pf_msg_func)(zxdh_msg_info *msg, zxdh_reps_info *reps, struct zxdh_en_device *en_dev);
-
-typedef struct
-{
- zxdh_msg_op_code op_code;
- uint8_t proc_name[ZXDH_MSG_TYPE_CNT_MAX];
- zxdh_pf_msg_func msg_proc;
-} zxdh_pf_msg_proc;
-
-static uint32_t zxdh_set_vf_link_state(zxdh_msg_info *msg, zxdh_reps_info *reps, struct zxdh_en_device *en_dev)
-{
- uint32_t ret = 0;
- uint16_t vf_idx = msg->hdr_vf.dst_pcie_id & (0xff);
-
- if(!msg->link_state_msg.is_link_force_set)
- {
- en_dev->speed = msg->link_state_msg.speed;
- en_dev->autoneg_enable = msg->link_state_msg.autoneg_enable;
- en_dev->supported_speed_modes = msg->link_state_msg.supported_speed_modes;
- en_dev->advertising_speed_modes = msg->link_state_msg.advertising_speed_modes;
- if(msg->link_state_msg.link_forced)
- {
- return 0;
- }
- }
-
- en_dev->ops->set_pf_link_up(en_dev->parent, msg->link_state_msg.link_up);
- if(en_dev->ops->get_pf_link_up(en_dev->parent))
- {
- netif_carrier_on(en_dev->netdev);
- }
- else
- {
- netif_carrier_off(en_dev->netdev);
- }
- LOG_INFO("[VF GET MSG FROM PF]--VF[%d] link_state[%s] update success!\n", vf_idx, en_dev->ops->get_pf_link_up(en_dev->parent)?"TRUE":"FALSE");
- return ret;
-}
-
-static uint32_t zxdh_set_vf_reset(zxdh_msg_info *msg, zxdh_reps_info *reps, struct zxdh_en_device *en_dev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(en_dev->netdev);
- queue_work(en_priv->events->wq, &en_priv->edev.pf_notify_vf_reset_work);
- return 0;
-}
-
-static uint32_t zxdh_set_vf_vlan(zxdh_msg_info *msg, zxdh_reps_info *reps, struct zxdh_en_device *edev)
-{
- uint32_t ret = 0;
- /* update local var*/
- edev->vlan_dev.vlan_id = msg->vf_vlan_msg.vlan_id;
- edev->vlan_dev.qos = msg->vf_vlan_msg.qos;
- edev->vlan_dev.protcol = msg->vf_vlan_msg.protocl;
-
- return ret;
-}
-
-static uint32_t zxdh_pf_get_vf_queue(zxdh_msg_info *msg, zxdh_reps_info *reps, struct zxdh_en_device *edev)
-{
- uint32_t ret = 0;
- uint32_t vir_queue_start;
- uint32_t vir_queue_num;
- uint32_t queue_index;
- uint32_t queue_num;
- uint32_t max_queue_num = edev->curr_queue_pairs;
-
- PLCR_LOG_INFO("vf's edev->vport = 0x%x\n", edev->vport);
- PLCR_LOG_INFO("vf's max_queue_num(pairs) = 0x%x\n", max_queue_num);
- PLCR_LOG_INFO("edev->device_id = %x\n", edev->device_id);
- PLCR_LOG_INFO("edev->rq[0].vq->phy_index = %x\n", edev->rq[0].vq->phy_index);
- PLCR_LOG_INFO("edev->sq[0].vq->phy_index = %x\n", edev->sq[0].vq->phy_index);
-
- vir_queue_start = msg->plcr_pf_get_vf_queue_info_msg.vir_queue_start;
- vir_queue_num = msg->plcr_pf_get_vf_queue_info_msg.vir_queue_num;
-
- PLCR_LOG_INFO("vir_queue_start = 0x%x\n", vir_queue_start);
- PLCR_LOG_INFO("vir_queue_num = 0x%x\n", vir_queue_num);
-
- if(max_queue_num > (vir_queue_num + vir_queue_num))
- {
- max_queue_num = vir_queue_num + vir_queue_num;
- }
-
- for(queue_index=vir_queue_start, queue_num = 0; queue_indexplcr_pf_get_vf_queue_info_rsp.phy_rxq[queue_num] = edev->rq[queue_num].vq->phy_index;
- reps->plcr_pf_get_vf_queue_info_rsp.phy_txq[queue_num] = edev->sq[queue_num].vq->phy_index;
- }
-
- reps->plcr_pf_get_vf_queue_info_rsp.phy_queue_num = queue_num;
-
- PLCR_LOG_INFO("queue_num = 0x%x\n", queue_num);
-
- return ret;
-}
-
-zxdh_pf_msg_proc pf_msg_proc[] =
-{
- {ZXDH_SET_VF_LINK_STATE, "set_vf_link_state", zxdh_set_vf_link_state},
- {ZXDH_SET_VF_RESET, "set_vf_reset", zxdh_set_vf_reset},
- {ZXDH_PF_SET_VF_VLAN, "pf_set_vf_vlan", zxdh_set_vf_vlan},
- {ZXDH_PF_GET_VF_QUEUE_INFO, "pf_get_vf_queue_info", zxdh_pf_get_vf_queue},
-};
-
-int32_t zxdh_vf_msg_recv_func(void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev)
-{
- zxdh_msg_info *msg = (zxdh_msg_info *)pay_load;
- zxdh_reps_info *reps = (zxdh_reps_info *)reps_buffer;
- struct zxdh_en_device *en_dev = (struct zxdh_en_device *)dev;
- int32_t ret = 0;
- int32_t i = 0;
- int32_t num = 0;
-
- LOG_INFO("is called\n");
- if (len != sizeof(union zxdh_msg))
- {
- LOG_ERR("invalid data_len\n");
- return -1;
- }
-
- if (en_dev == NULL)
- {
- LOG_ERR("dev is NULL\n");
- return -1;
- }
-
- num = sizeof(pf_msg_proc)/sizeof(zxdh_pf_msg_proc);
-
- for (i = 0; i < num; i++)
- {
- *reps_len = sizeof(union zxdh_msg);
- if (pf_msg_proc[i].op_code == msg->hdr_vf.op_code)
- {
- LOG_INFO("%s is called", pf_msg_proc[i].proc_name);
- ret = pf_msg_proc[i].msg_proc(msg, reps, en_dev);
- if (ret != 0)
- {
- reps->flag = ZXDH_REPS_FAIL;
- LOG_ERR("%s failed, ret: %d\n", pf_msg_proc[i].proc_name, ret);
- return -1;
- }
- reps->flag = ZXDH_REPS_SUCC;
- return 0;
- }
- }
-
- LOG_ERR("invalid op_code: [%u]\n", msg->hdr_vf.op_code);
- return -2;
-}
-
-int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv)
-{
- int32_t ret = 0;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- en_dev->ipv6_notifier.notifier_call = inet6_addr_change_notifier;
- en_dev->ipv6_notifier.priority = 0;
- ret = dh_inet6_addr_change_notifier_register(&(en_dev->ipv6_notifier));
- if (ret)
- {
- LOG_ERR("Failed to register inet6addr_notifier, ret:%d\n",ret);
- return ret;
- }
- LOG_INFO("netdev:%s ipv6_notifier_init success\n", en_dev->netdev->name);
- return ret;
-}
-
-int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv)
-{
- struct dh_events *events = NULL;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
- int32_t ret = 0;
- uint32_t evt_num = ARRAY_SIZE(aux_events);
-
- if (!en_dev->ops->if_init(en_dev->parent))
- {
- evt_num -= 1;
- }
-
- events = kzalloc((sizeof(*events) + evt_num * sizeof(struct dh_event_nb)), GFP_KERNEL);
- if (unlikely(events == NULL))
- {
- LOG_ERR("events kzalloc failed: %p\n", events);
- ret = -ENOMEM;
- goto err_events_kzalloc;
- }
-
- events->evt_num = evt_num;
- events->dev = NULL;
- en_priv->events = events;
- events->wq = create_singlethread_workqueue("dh_aux_events");
- if (!events->wq)
- {
- LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n", events->wq);
- ret = -ENOMEM;
- goto err_create_wq;
- }
-
- INIT_WORK(&en_dev->vf_link_info_update_work, vf_link_info_update_handler);
- INIT_WORK(&en_dev->link_info_irq_update_vf_work, link_info_irq_update_vf_handler);
- INIT_WORK(&en_dev->link_info_irq_process_work, link_info_irq_process_handler);
- INIT_WORK(&en_dev->link_info_irq_update_np_work, link_info_irq_update_np_work_handler);
- INIT_WORK(&en_dev->rx_mode_set_work, rx_mode_set_handler);
- INIT_WORK(&en_dev->pf2vf_msg_proc_work, pf2vf_msg_proc_work_handler);
- INIT_WORK(&en_dev->pf_notify_vf_reset_work, pf_notify_vf_reset_handler);
- INIT_WORK(&en_dev->service_task, en_aux_service_task);
- INIT_WORK(&en_dev->service_riscv_task, en_aux_service_riscv_task);
- INIT_WORK(&en_dev->riscv2aux_msg_proc_work, riscv2aux_msg_proc_work_handler);
-
- timer_setup(&en_dev->service_timer, en_aux_service_timer, 0);
- ret = mod_timer(&en_dev->service_timer, jiffies);
- if (ret)
- {
- LOG_ERR("timer add failed\n");
- goto err_mod_timer;
- }
-
- timer_setup(&en_dev->service_riscv_timer, en_aux_service_riscv_timer, 0);
- ret = mod_timer(&en_dev->service_riscv_timer, jiffies);
- if (ret)
- {
- LOG_ERR("timer add failed\n");
- goto err_riscv_timer;
- }
-
- for (i = 0; i < evt_num; i++)
- {
- events->notifiers[i].nb = aux_events[i];
- events->notifiers[i].ctx = en_priv;
- dh_eq_notifier_register(&en_priv->eq_table, &events->notifiers[i].nb);
- }
-
- return ret;
-
-err_riscv_timer:
- del_timer(&en_dev->service_riscv_timer);
-err_mod_timer:
- del_timer(&en_dev->service_timer);
- destroy_workqueue(events->wq);
-err_create_wq:
- kfree(events);
-err_events_kzalloc:
- return ret;
-}
-
-void dh_aux_events_uninit(struct zxdh_en_priv *en_priv)
-{
- struct dh_events *events = en_priv->events;
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
-
- for (i = events->evt_num - 1; i >= 0 ; i--)
- {
- dh_eq_notifier_unregister(&en_priv->eq_table, &events->notifiers[i].nb);
- }
-
- del_timer(&en_dev->service_timer);
- del_timer(&en_dev->service_riscv_timer);
- destroy_workqueue(en_priv->events->wq);
- kfree(en_priv->events);
-
- return;
-}
-
-static int32_t mgr_test_cnt(void *data, uint16_t len, void *reps, uint16_t *reps_len, void *dev)
-{
- uint8_t *pay_load = (uint8_t *)data;
- uint8_t *reps_buffer = (uint8_t *)reps;
- uint16_t idx = 0;
- uint16_t sum = 0;
-
- if (reps_buffer == NULL)
- {
- return 0;
- }
-
- for (idx = 0; idx < len; idx++)
- {
- sum += pay_load[idx];
- }
-
- reps_buffer[0] = (uint8_t)sum;
- reps_buffer[1] = (uint8_t)(sum >> 8);
- *reps_len = 2;
- return 0;
-}
-
-static int32_t msgq_test_func(void *data, uint16_t len, void *reps, uint16_t *reps_len, void *dev)
-{
- if (reps == NULL)
- {
- return 0;
- }
-
- *reps_len = len;
- return 0;
-}
-
-int32_t dh_aux_msg_recv_func_register(void)
-{
- int32_t ret = 0;
-
- ret = zxdh_bar_chan_msg_recv_register(MODULE_PF_BAR_MSG_TO_VF, zxdh_vf_msg_recv_func);
- if (0 != ret)
- {
- LOG_ERR("event_id[%d] register failed: %d\n", MODULE_PF_BAR_MSG_TO_VF, ret);
- return ret;
- }
-
- ret = zxdh_bar_chan_msg_recv_register(MODULE_DHTOOL, zxdh_tools_sendto_user_netlink);
- if (0 != ret)
- {
- LOG_ERR("event_id[%d] register failed: %d\n", MODULE_DHTOOL, ret);
- goto unregister_pf_to_vf;
- }
-
- ret = zxdh_bar_chan_msg_recv_register(MODULE_DEMO, mgr_test_cnt);
- if (0 != ret)
- {
- LOG_ERR("event_id[%d] register failed: %d\n", MODULE_MSGQ, ret);
- goto unregister_dhtool;
- }
-
- ret = zxdh_bar_chan_msg_recv_register(MODULE_MSGQ, msgq_test_func);
- if (0 != ret)
- {
- LOG_ERR("event_id[%d] register failed: %d\n", MODULE_MSGQ, ret);
- goto unregister_demo;
- }
-
- return ret;
-unregister_demo:
- zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO);
-unregister_dhtool:
- zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL);
-unregister_pf_to_vf:
- zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF);
- return ret;
-}
-
-void dh_aux_msg_recv_func_unregister(void)
-{
- zxdh_bar_chan_msg_recv_unregister(MODULE_MSGQ);
- zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO);
- zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL);
- zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF);
- return;
-}
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "events.h"
+#include "en_cmd.h"
+#include "../msg_common.h"
+#include "../en_np/table/include/dpp_tbl_api.h"
+#include "../zxdh_tools/zxdh_tools_netlink.h"
+#include "dcbnl/en_dcbnl_api.h"
+#include "zxic_common.h"
+#include
+#include
+#include
+#include
+#include
+#include // 对于VLAN设备
+#include // 对于bonding设备
+#include
+
+static int32_t pf2vf_notifier(struct notifier_block *, unsigned long, void *);
+static int32_t riscv2aux_notifier(struct notifier_block *, unsigned long,
+ void *);
+
+void rx_mode_set_handler(struct work_struct *work);
+
+static struct dh_nb aux_events[] = {
+ { .nb.notifier_call = pf2vf_notifier,
+ .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_VF },
+ { .nb.notifier_call = riscv2aux_notifier,
+ .event_type = DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX },
+};
+
+static int32_t do_pf_vf_inet6_update_mac_to_np(struct zxdh_en_device *en_dev,
+ const struct in6_addr *ipv6_addr,
+ unsigned long action)
+{
+ int32_t ret = 0;
+ struct in6_addr sol_addr = { 0 };
+ uint8_t mcast_mac[ETH_ALEN];
+
+ // 打印IPv6地址,使用%pI6c格式化IPv6地址,确保正确显示
+ LOG_INFO("IPv6 address changed on interface %s, %s address: %pI6c\n",
+ en_dev->netdev->name,
+ (action == 1) ? "add" :
+ (action == 2) ? "del" :
+ "unknown action with",
+ ipv6_addr);
+ // Calculate the multicast MAC address from the IPv6 address
+ addrconf_addr_solict_mult(ipv6_addr, &sol_addr);
+ LOG_INFO("Solicited-Node multicast address: %pI6\n", &sol_addr);
+ ipv6_eth_mc_map(&sol_addr, mcast_mac);
+ LOG_INFO("Multicast MAC Address: %pM\n", mcast_mac);
+
+ switch (action) {
+ case NETDEV_UP: {
+ ret = ipv6_multicast_mac_add(en_dev, en_dev->netdev, mcast_mac);
+ if (ret != 0) {
+ LOG_ERR("ipv6_multicast_mac_add failed");
+ }
+ break;
+ }
+ case NETDEV_DOWN: {
+ ret = ipv6_multicast_mac_del(en_dev, en_dev->netdev, mcast_mac);
+ if (ret != 0) {
+ LOG_ERR("ipv6_multicast_mac_del failed");
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int32_t do_bond_master_inet6_update_mac_to_np(
+ struct net_device *notifier_dev, const struct in6_addr *ipv6_addr,
+ struct zxdh_en_device *en_dev, unsigned long action)
+{
+ int32_t ret = 0;
+ struct list_head *iter = NULL;
+ struct slave *slave_dev = NULL;
+ struct bonding *bond = netdev_priv(notifier_dev);
+
+ // 遍历所有slave设备
+ if (bond_has_slaves(bond)) {
+ bond_for_each_slave(bond, slave_dev, iter) {
+ if (strcmp(en_dev->netdev->name, slave_dev->dev->name) != 0) {
+ continue;
+ }
+ LOG_INFO("Bond device %s has slave device: %s\n",
+ notifier_dev->name, slave_dev->dev->name);
+ ret = do_pf_vf_inet6_update_mac_to_np(en_dev, ipv6_addr, action);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * inet6 address-change notifier callback.
+ *
+ * Resolves the device that raised the event (unwrapping VLAN sub-interfaces
+ * to their real device), then updates the NP multicast MAC tables either via
+ * the bond-master path or directly when the event device is our own netdev.
+ *
+ * Fix: notifier callbacks must return NOTIFY_* codes, never raw -1/-errno —
+ * the notifier chain core interprets the return value (see notifier_call in
+ * <linux/notifier.h>).  Errors from the update helpers are mapped with
+ * notifier_from_errno().  Also drops the unused local `vlan_real_dev`.
+ */
+static int32_t inet6_addr_change_notifier(struct notifier_block *nb,
+                                          unsigned long action, void *data)
+{
+    struct inet6_ifaddr *ifa = NULL;
+    struct net_device *notifier_dev = NULL; /* device that raised the event */
+    struct zxdh_en_device *en_dev = container_of(
+        nb, struct zxdh_en_device, ipv6_notifier); /* owner of this callback */
+    int32_t ret = 0;
+
+    if (data == NULL) {
+        LOG_ERR("data is NULL");
+        return NOTIFY_DONE;
+    }
+
+    ifa = (struct inet6_ifaddr *)data;
+    notifier_dev = ifa->idev->dev;
+
+    if (notifier_dev == NULL) {
+        LOG_ERR("notifier_dev is NULL");
+        return NOTIFY_DONE;
+    }
+
+    /* VLAN sub-interface: operate on the underlying real device. */
+    if (is_vlan_dev(notifier_dev))
+        notifier_dev = vlan_dev_real_dev(notifier_dev);
+
+    /* Bond master: locate our slave inside the bond and update through it. */
+    if (netif_is_bond_master(notifier_dev)) {
+        ret = do_bond_master_inet6_update_mac_to_np(notifier_dev, &ifa->addr,
+                                                    en_dev, action);
+        return notifier_from_errno(ret);
+    }
+
+    /* Event raised directly on our own netdev. */
+    if (strcmp(en_dev->netdev->name, notifier_dev->name) == 0) {
+        ret = do_pf_vf_inet6_update_mac_to_np(en_dev, &ifa->addr, action);
+        return notifier_from_errno(ret);
+    }
+
+    return NOTIFY_OK;
+}
+
+/*
+ * Work handler (PF side): push the PF's current link state to every probed VF.
+ *
+ * Builds a ZXDH_SET_VF_LINK_STATE message carrying link_up, speed, autoneg
+ * and the supported/advertised speed-mode masks, then sends it over the
+ * PF->VF bar message channel to each VF reported by pci_num_vf().  VFs that
+ * have not probed yet are skipped; per-VF link_forced is forwarded so a VF
+ * with an administratively forced link can ignore the update.
+ */
+static void vf_link_info_update_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device,
+                                                 vf_link_info_update_work);
+    zxdh_msg_info msg = { 0 };
+    zxdh_reps_info ack = { 0 };
+    struct zxdh_vf_item *vf_item = NULL;
+    int32_t err = 0;
+    uint16_t vf_idx = 0;
+    struct pci_dev *pdev = NULL;
+    uint16_t num_vfs = 0;
+    bool pf_link_up = false;
+
+    /* Bail out early if aux device init has not completed yet. */
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
+    pdev = en_dev->ops->get_pdev(en_dev->parent);
+    num_vfs = pci_num_vf(pdev);
+
+    msg.hdr_vf.op_code = ZXDH_SET_VF_LINK_STATE;
+    msg.link_state_msg.is_link_force_set = FALSE;
+
+    msg.link_state_msg.link_up = pf_link_up;
+    msg.link_state_msg.speed = en_dev->speed;
+    msg.link_state_msg.autoneg_enable = en_dev->autoneg_enable;
+    msg.link_state_msg.supported_speed_modes = en_dev->supported_speed_modes;
+    msg.link_state_msg.advertising_speed_modes =
+        en_dev->advertising_speed_modes;
+
+    for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
+        vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+        msg.hdr_vf.dst_pcie_id = FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx);
+        if (vf_item->is_probed) {
+            msg.link_state_msg.link_forced = vf_item->link_forced;
+            err = zxdh_send_command_to_specify(en_dev, MODULE_PF_BAR_MSG_TO_VF,
+                                               &msg, &ack);
+            if (err != 0) {
+                /* Log and keep going: one VF failing must not block others. */
+                LOG_ERR("failed to update VF[%d]\n", vf_idx);
+            }
+        }
+    }
+}
+
+/*
+ * Work handler: after a PF link-state interrupt, notify the agent of every
+ * probed VF whose link is not administratively forced.
+ *
+ * Collects the func_no of each eligible VF into one AGENT_DEV_STATUS_NOTIFY
+ * message, mirrors the PF link state into each VF's cached link info via
+ * set_vf_link_info(), and finally sends the aggregated message to MODULE_MAC
+ * (only when at least one VF qualified).
+ */
+static void link_info_irq_update_vf_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device,
+                                                 link_info_irq_update_vf_work);
+    struct zxdh_vf_item *vf_item = NULL;
+    int32_t err = 0;
+    uint16_t vf_idx = 0;
+    struct pci_dev *pdev = NULL;
+    uint16_t num_vfs = 0;
+    bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent);
+    uint16_t func_no = 0;
+    uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id);
+    zxdh_msg_info msg = { 0 };
+    zxdh_reps_info ack = { 0 };
+
+    LOG_INFO("is called\n");
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    msg.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY;
+    msg.hdr_to_agt.pcie_id = en_dev->pcie_id;
+    pdev = en_dev->ops->get_pdev(en_dev->parent);
+    num_vfs = pci_num_vf(pdev);
+    for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
+        vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx);
+        /* Skip VFs with a forced link and VFs that never probed. */
+        if (vf_item->link_forced == FALSE && vf_item->is_probed) {
+            func_no = GET_FUNC_NO(pf_no, vf_idx);
+            LOG_INFO("vf_idx:%d, func_no=0x%x\n", vf_idx, func_no);
+            msg.pcie_msix_msg.func_no[msg.pcie_msix_msg.num++] = func_no;
+            en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx,
+                                          pf_link_up ? 1 : 0);
+        }
+    }
+    LOG_INFO("msg.pcie_msix_msg.num:%d\n", msg.pcie_msix_msg.num);
+    if (msg.pcie_msix_msg.num > 0) {
+        err = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &ack);
+        if (err != 0) {
+            LOG_ERR("failed to update VF link info\n");
+        }
+    }
+}
+
+/*
+ * Work handler: refresh cached speed/duplex from the MAC agent after a link
+ * interrupt.
+ *
+ * Queries AGENT_MAC_LINK_INFO_GET for this phy_port, caches the reply into
+ * en_dev->speed/duplex, and — on a PF only — propagates the full link info
+ * to the parent core device and updates the port thresholds.
+ */
+static void link_info_irq_process_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device,
+                                                 link_info_irq_process_work);
+    int32_t ret = 0;
+    zxdh_msg_info msg = { 0 };
+    zxdh_reps_info ack = { 0 };
+    struct link_info_struct link_info_val = { 0 };
+
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+
+    msg.hdr_to_agt.op_code = AGENT_MAC_LINK_INFO_GET;
+    msg.hdr_to_agt.phyport = en_dev->phy_port;
+    ret = zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &ack);
+    if (ret != 0) {
+        LOG_ERR("get speed and duplex from agent failed: %d\n", ret);
+        return;
+    }
+    en_dev->speed = ack.mac_set_msg.speed;
+    en_dev->duplex = ack.mac_set_msg.duplex;
+    LOG_DEBUG("netdev:%s, phy_port:0x%x, speed:%d, duplex:0x%x\n",
+              en_dev->netdev->name, en_dev->phy_port, en_dev->speed,
+              en_dev->duplex);
+
+    /* Only a PF owns the shared link info / port-threshold state. */
+    if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+        link_info_val.speed = en_dev->speed;
+        link_info_val.autoneg_enable = en_dev->autoneg_enable;
+        link_info_val.supported_speed_modes = en_dev->supported_speed_modes;
+        link_info_val.advertising_speed_modes = en_dev->advertising_speed_modes;
+        link_info_val.duplex = en_dev->duplex;
+
+        en_dev->ops->update_pf_link_info(en_dev->parent, &link_info_val);
+
+        zxdh_port_th_update(en_dev);
+    }
+
+    return;
+}
+
+/*
+ * Work handler: mirror the current link state into the NP tables.
+ *
+ * Non-bond case: if the netdev is running, set the egress-port IS_UP attr
+ * (via the VF message path for VFs, directly via dpp for PFs).
+ * Bond case: set/clear the panel IS_UP attr keyed by phy_port; a link-up is
+ * only programmed when the interface itself is administratively up (IFF_UP).
+ */
+static void link_info_irq_update_np_work_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev = container_of(_work, struct zxdh_en_device,
+                                                 link_info_irq_update_np_work);
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    if (!en_dev->ops->is_bond(en_dev->parent)) {
+        if (!netif_running(en_dev->netdev)) {
+            return;
+        }
+        if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+            zxdh_vf_egr_port_attr_set(en_dev, EGR_FLAG_VPORT_IS_UP,
+                                      en_dev->link_up, 0);
+        } else {
+            dpp_egr_port_attr_set(&pf_info, EGR_FLAG_VPORT_IS_UP,
+                                  en_dev->link_up);
+        }
+        return;
+    }
+
+    if (!en_dev->link_up) {
+        dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 0);
+    } else {
+        if (en_dev->netdev->flags & IFF_UP) {
+            dpp_panel_attr_set(&pf_info, en_dev->phy_port, PANEL_FLAG_IS_UP, 1);
+        }
+    }
+}
+
+/*
+ * Periodic spoof-check: report packets dropped by the anti-spoof filter.
+ *
+ * Runs only on a PF that currently has VFs enabled.  Reads the per-PF
+ * spoofed-packet drop counter (the hardware register is clear-on-read, so
+ * each call reports drops since the previous call) and logs a warning when
+ * any were seen.  Purely diagnostic — no state is changed.
+ */
+static void en_aux_spoof_check(struct zxdh_en_device *en_dev)
+{
+    uint64_t ssvpc = 0;
+    uint16_t en_aux_pf_id = 0;
+    uint32_t ret = 0;
+    uint16_t num_vfs = 0;
+    struct pci_dev *pdev = NULL;
+    struct dh_core_dev *dh_dev = NULL;
+    DPP_PF_INFO_T pf_info = { 0 };
+
+    pf_info.slot = en_dev->slot_id;
+    pf_info.vport = en_dev->vport;
+
+    dh_dev = en_dev->parent;
+    pdev = en_dev->ops->get_pdev(dh_dev);
+    num_vfs = pci_num_vf(pdev);
+
+    if (!IS_PF(en_dev->vport)) {
+        return;
+    }
+    if (num_vfs == 0) {
+        return;
+    }
+
+    en_aux_pf_id = DH_AUX_PF_ID_OFFSET(en_dev->vport);
+    /* spoof statistic register clears to 0 after read */
+    ret = dpp_stat_spoof_packet_drop_cnt_get(&pf_info, en_aux_pf_id, 1, &ssvpc);
+    if (ret != 0) {
+        LOG_ERR("Failed to get spoof check dropped packets number.\n");
+        return;
+    }
+    if (!ssvpc) {
+        return;
+    }
+    LOG_INFO("%llu Spoofed packets detected in EP%d, PF%d\n", ssvpc,
+             EPID(en_dev->vport), FUNC_NUM(en_dev->vport));
+    return;
+}
+
+/*
+ * Periodic service work queued by en_aux_service_timer(): currently only
+ * runs the spoof-check.  Skipped until aux device init has completed.
+ */
+static void en_aux_service_task(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev =
+        container_of(_work, struct zxdh_en_device, service_task);
+
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    en_aux_spoof_check(en_dev);
+}
+
+/*
+ * Timer callback driving en_aux_service_task.  Re-arms itself and queues
+ * the service work on the events workqueue.
+ *
+ * NOTE(review): HZ * 360000 is an extremely long period (~100 hours) —
+ * confirm this interval is intentional.
+ */
+static void en_aux_service_timer(struct timer_list *t)
+{
+    unsigned long next_event_offset = HZ * 360000;
+    struct zxdh_en_device *en_dev = from_timer(en_dev, t, service_timer);
+    struct zxdh_en_priv *en_priv =
+        container_of(en_dev, struct zxdh_en_priv, edev);
+
+    /* Reset the timer */
+    mod_timer(&en_dev->service_timer, next_event_offset + jiffies);
+    queue_work(en_priv->events->wq, &en_dev->service_task);
+}
+
+/*
+ * Periodic work (PF only): push the current wall-clock time to the RISC-V
+ * management core via a RISC_SERVER_TIME message.
+ *
+ * Converts ktime_get_real_seconds() to CST (UTC+8, hence the +28800s) and
+ * sends year/month/day/hour/min/sec, then re-arms service_riscv_timer.
+ *
+ * Fix: removed a stray empty statement (a lone `;` line) that followed the
+ * pcie_id assignment.
+ *
+ * NOTE(review): this handler re-arms the timer with HZ * 259200 while the
+ * timer callback en_aux_service_riscv_timer() re-arms with HZ * 60 — the
+ * two intervals race; confirm which cadence is intended.
+ */
+static void en_aux_service_riscv_task(struct work_struct *_work)
+{
+    int32_t retval = 0;
+    zxdh_msg_info msg = { 0 };
+    zxdh_reps_info ack;
+
+    time64_t time64;
+    struct rtc_time tm;
+    unsigned long next_event_offset = HZ * 259200;
+
+    struct zxdh_en_device *en_dev =
+        container_of(_work, struct zxdh_en_device, service_riscv_task);
+    LOG_INFO("is called en_aux_service_riscv_task\n");
+
+    if (!IS_PF(en_dev->vport)) {
+        return;
+    }
+
+    msg.hdr_to_cmn.pcie_id = en_dev->pcie_id;
+    msg.hdr_to_cmn.write_bytes = 9;
+    msg.hdr_to_cmn.type = RISC_SERVER_TIME;
+    msg.hdr_to_cmn.field = 0;
+
+    time64 = ktime_get_real_seconds();
+    time64 += 28800; /* CST is UTC+8 */
+    rtc_time64_to_tm(time64, &tm);
+
+    msg.time_cfg_msg.tmmng_type = 0xF0;
+    msg.time_cfg_msg.dir = 0x2;
+    msg.time_cfg_msg.year = tm.tm_year + 1900;
+    msg.time_cfg_msg.month = tm.tm_mon + 1;
+    msg.time_cfg_msg.day = tm.tm_mday;
+    msg.time_cfg_msg.hour = tm.tm_hour;
+    msg.time_cfg_msg.min = tm.tm_min;
+    msg.time_cfg_msg.sec = tm.tm_sec;
+
+    LOG_INFO("send msg timer to riscv:%d-%d-%d %d:%d:%d\n",
+             msg.time_cfg_msg.year, msg.time_cfg_msg.month,
+             msg.time_cfg_msg.day, msg.time_cfg_msg.hour, msg.time_cfg_msg.min,
+             msg.time_cfg_msg.sec);
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+
+    retval = zxdh_send_command_to_specify(en_dev, MODULE_PF_TIMER_TO_RISC_MSG,
+                                          &msg, &ack);
+    if (retval != 0) {
+        LOG_ERR("zxdh_send_command_to_riscv failed: %d\n", retval);
+    }
+
+    mod_timer(&en_dev->service_riscv_timer, next_event_offset + jiffies);
+}
+
+/*
+ * Timer callback driving en_aux_service_riscv_task: re-arms itself
+ * (HZ * 60) and queues the RISC-V time-sync work on the events workqueue.
+ */
+static void en_aux_service_riscv_timer(struct timer_list *t)
+{
+    unsigned long next_event_offset = HZ * 60;
+    struct zxdh_en_device *en_dev = from_timer(en_dev, t, service_riscv_timer);
+    struct zxdh_en_priv *en_priv =
+        container_of(en_dev, struct zxdh_en_priv, edev);
+    LOG_INFO("is called en_aux_service_riscv_timer\n");
+
+    /* Reset the timer */
+    mod_timer(&en_dev->service_riscv_timer, next_event_offset + jiffies);
+    queue_work(en_priv->events->wq, &en_dev->service_riscv_task);
+}
+
+/*
+ * Work handler: drain the PF->VF bar message mailbox.
+ *
+ * Computes the mailbox virtual address (BAR0 + ZXDH_BAR_MSG_OFFSET +
+ * ZXDH_BAR_PFVF_MSG_OFFSET) and hands it to zxdh_bar_irq_recv() for the
+ * PF->VF channel.
+ */
+static void pf2vf_msg_proc_work_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev =
+        container_of(_work, struct zxdh_en_device, pf2vf_msg_proc_work);
+    uint64_t virt_addr = 0;
+
+    LOG_INFO("is called\n");
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) +
+                ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET;
+    zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, en_dev);
+}
+
+/*
+ * EQ notifier for PF->VF message interrupts: defers the actual mailbox
+ * processing to pf2vf_msg_proc_work on the events workqueue (the notifier
+ * itself runs in a context where the bar channel must not be drained).
+ */
+static int32_t pf2vf_notifier(struct notifier_block *nb, unsigned long type,
+                              void *data)
+{
+    struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
+    struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx;
+
+    LOG_INFO("is called\n");
+    queue_work(en_priv->events->wq, &en_priv->edev.pf2vf_msg_proc_work);
+
+    return NOTIFY_OK;
+}
+
+/*
+ * Work handler: drain the RISC-V -> PF bar message mailbox
+ * (BAR0 + ZXDH_BAR_MSG_OFFSET) via zxdh_bar_irq_recv().
+ */
+static void riscv2aux_msg_proc_work_handler(struct work_struct *_work)
+{
+    struct zxdh_en_device *en_dev =
+        container_of(_work, struct zxdh_en_device, riscv2aux_msg_proc_work);
+    uint64_t virt_addr = 0;
+    uint16_t src = MSG_CHAN_END_RISC;
+    uint16_t dst = MSG_CHAN_END_PF;
+
+    LOG_INFO("is called\n");
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) +
+                ZXDH_BAR_MSG_OFFSET;
+    zxdh_bar_irq_recv(src, dst, virt_addr, en_dev);
+}
+
+/*
+ * EQ notifier for RISC-V -> aux message interrupts: defers mailbox draining
+ * to riscv2aux_msg_proc_work on the events workqueue.
+ */
+static int32_t riscv2aux_notifier(struct notifier_block *nb, unsigned long type,
+                                  void *data)
+{
+    struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
+    struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx;
+    LOG_INFO("is called\n");
+    queue_work(en_priv->events->wq, &en_priv->edev.riscv2aux_msg_proc_work);
+
+    return NOTIFY_OK;
+}
+
+/*
+ * Work handler (VF side): after the PF signals a reset, re-fetch this VF's
+ * MAC address via zxdh_vf_get_mac().  Failure is only logged — there is no
+ * recovery path here.
+ */
+void pf_notify_vf_reset_handler(struct work_struct *work)
+{
+    int32_t ret = 0;
+    struct zxdh_en_device *en_dev =
+        container_of(work, struct zxdh_en_device, pf_notify_vf_reset_work);
+    struct net_device *netdev = en_dev->netdev;
+
+    LOG_INFO("pf_notify_vf_reset_handler is called\n");
+    ZXDH_AUX_INIT_COMP_CHECK(en_dev);
+    ret = zxdh_vf_get_mac(netdev);
+    if (ret != 0) {
+        LOG_ERR("zxdh_vf_get_mac failed: %d\n", ret);
+    }
+}
+
+/* Handler signature for messages received from the PF on a VF. */
+typedef uint32_t (*zxdh_pf_msg_func)(zxdh_msg_info *msg, zxdh_reps_info *reps,
+                                     struct zxdh_en_device *en_dev);
+
+/* Dispatch-table entry: op_code -> named handler (see pf_msg_proc[]). */
+typedef struct {
+    zxdh_msg_op_code op_code;
+    uint8_t proc_name[ZXDH_MSG_TYPE_CNT_MAX]; /* printable handler name */
+    zxdh_pf_msg_func msg_proc;
+} zxdh_pf_msg_proc;
+
+/*
+ * VF-side handler for ZXDH_SET_VF_LINK_STATE from the PF.
+ *
+ * For a non-forced update, caches the PF-reported speed/autoneg/speed-mode
+ * masks; if the VF's own link is forced, stops there (the PF state must not
+ * override it).  Otherwise mirrors the PF link_up flag into the parent and
+ * toggles the netdev carrier accordingly.  Always returns 0.
+ */
+static uint32_t zxdh_set_vf_link_state(zxdh_msg_info *msg, zxdh_reps_info *reps,
+                                       struct zxdh_en_device *en_dev)
+{
+    uint32_t ret = 0;
+    /* Low byte of dst_pcie_id encodes the VF index (logging only). */
+    uint16_t vf_idx = msg->hdr_vf.dst_pcie_id & (0xff);
+
+    if (!msg->link_state_msg.is_link_force_set) {
+        en_dev->speed = msg->link_state_msg.speed;
+        en_dev->autoneg_enable = msg->link_state_msg.autoneg_enable;
+        en_dev->supported_speed_modes =
+            msg->link_state_msg.supported_speed_modes;
+        en_dev->advertising_speed_modes =
+            msg->link_state_msg.advertising_speed_modes;
+        if (msg->link_state_msg.link_forced) {
+            return 0;
+        }
+    }
+
+    en_dev->ops->set_pf_link_up(en_dev->parent, msg->link_state_msg.link_up);
+    if (en_dev->ops->get_pf_link_up(en_dev->parent)) {
+        netif_carrier_on(en_dev->netdev);
+    } else {
+        netif_carrier_off(en_dev->netdev);
+    }
+    LOG_INFO("[VF GET MSG FROM PF]--VF[%d] link_state[%s] update success!\n",
+             vf_idx,
+             en_dev->ops->get_pf_link_up(en_dev->parent) ? "TRUE" : "FALSE");
+    return ret;
+}
+
+/*
+ * VF-side handler for ZXDH_SET_VF_RESET: defers the actual reset handling
+ * (MAC re-fetch) to pf_notify_vf_reset_work, since this runs in message-
+ * receive context.  Always returns 0.
+ */
+static uint32_t zxdh_set_vf_reset(zxdh_msg_info *msg, zxdh_reps_info *reps,
+                                  struct zxdh_en_device *en_dev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(en_dev->netdev);
+    queue_work(en_priv->events->wq, &en_priv->edev.pf_notify_vf_reset_work);
+    return 0;
+}
+
+/*
+ * VF-side handler for ZXDH_PF_SET_VF_VLAN: caches the PF-assigned VLAN id,
+ * QoS and protocol into the local vlan_dev state.  Always returns 0.
+ * (The misspelled field names `protcol`/`protocl` are part of the shared
+ * message/device structs and cannot be fixed here.)
+ */
+static uint32_t zxdh_set_vf_vlan(zxdh_msg_info *msg, zxdh_reps_info *reps,
+                                 struct zxdh_en_device *edev)
+{
+    uint32_t ret = 0;
+    /* update local var*/
+    edev->vlan_dev.vlan_id = msg->vf_vlan_msg.vlan_id;
+    edev->vlan_dev.qos = msg->vf_vlan_msg.qos;
+    edev->vlan_dev.protcol = msg->vf_vlan_msg.protocl;
+
+    return ret;
+}
+
+/* Dispatch table consumed by zxdh_vf_msg_recv_func(): op_code -> handler. */
+zxdh_pf_msg_proc pf_msg_proc[] = {
+    { ZXDH_SET_VF_LINK_STATE, "set_vf_link_state", zxdh_set_vf_link_state },
+    { ZXDH_SET_VF_RESET, "set_vf_reset", zxdh_set_vf_reset },
+    { ZXDH_PF_SET_VF_VLAN, "pf_set_vf_vlan", zxdh_set_vf_vlan },
+};
+
+/*
+ * Entry point for PF->VF bar-channel messages (registered for
+ * MODULE_PF_BAR_MSG_TO_VF).
+ *
+ * Validates the payload length and device pointer, then dispatches on
+ * hdr_vf.op_code through pf_msg_proc[].  On a handler failure the reply
+ * flag is set to ZXDH_REPS_FAIL and -1 is returned; on success
+ * ZXDH_REPS_SUCC and 0; an unknown op_code returns -2.
+ *
+ * NOTE(review): the `*reps_len = sizeof(zxdh_reps_info);` assignment is
+ * loop-invariant and could be hoisted above the loop.
+ */
+int32_t zxdh_vf_msg_recv_func(void *pay_load, uint16_t len, void *reps_buffer,
+                              uint16_t *reps_len, void *dev)
+{
+    zxdh_msg_info *msg = (zxdh_msg_info *)pay_load;
+    zxdh_reps_info *reps = (zxdh_reps_info *)reps_buffer;
+    struct zxdh_en_device *en_dev = (struct zxdh_en_device *)dev;
+    int32_t ret = 0;
+    int32_t i = 0;
+    int32_t num = 0;
+
+    LOG_INFO("is called\n");
+    if (len != sizeof(zxdh_msg_info)) {
+        LOG_ERR("invalid data_len\n");
+        return -1;
+    }
+
+    if (en_dev == NULL) {
+        LOG_ERR("dev is NULL\n");
+        return -1;
+    }
+
+    num = sizeof(pf_msg_proc) / sizeof(zxdh_pf_msg_proc);
+
+    for (i = 0; i < num; i++) {
+        *reps_len = sizeof(zxdh_reps_info);
+        if (pf_msg_proc[i].op_code == msg->hdr_vf.op_code) {
+            LOG_INFO("%s is called", pf_msg_proc[i].proc_name);
+            ret = pf_msg_proc[i].msg_proc(msg, reps, en_dev);
+            if (ret != 0) {
+                reps->flag = ZXDH_REPS_FAIL;
+                LOG_ERR("%s failed, ret: %d\n", pf_msg_proc[i].proc_name, ret);
+                return -1;
+            }
+            reps->flag = ZXDH_REPS_SUCC;
+            return 0;
+        }
+    }
+
+    LOG_ERR("invalid op_code: [%u]\n", msg->hdr_vf.op_code);
+    return -2;
+}
+
+/*
+ * Register this device's inet6 address-change notifier
+ * (inet6_addr_change_notifier) at default priority.
+ *
+ * Returns 0 on success or the registration error.
+ */
+int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv)
+{
+    int32_t ret = 0;
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    en_dev->ipv6_notifier.notifier_call = inet6_addr_change_notifier;
+    en_dev->ipv6_notifier.priority = 0;
+    ret = dh_inet6_addr_change_notifier_register(&(en_dev->ipv6_notifier));
+    if (ret) {
+        LOG_ERR("Failed to register inet6addr_notifier, ret:%d\n", ret);
+        return ret;
+    }
+    LOG_INFO("netdev:%s ipv6_notifier_init success\n", en_dev->netdev->name);
+    return ret;
+}
+
+/*
+ * Allocate and wire up the aux event machinery:
+ *  - dh_events container with one dh_event_nb per entry of aux_events[]
+ *    (one fewer when the interface is not initialized),
+ *  - a single-threaded workqueue,
+ *  - all work items, the service timer and the RISC-V service timer,
+ *  - registration of every notifier on the device's EQ table.
+ *
+ * Returns 0 on success or a negative errno; on failure unwinds via goto
+ * labels in reverse order of acquisition.
+ *
+ * NOTE(review): mod_timer() returns 1 when the timer was already pending
+ * and 0 otherwise — it is not an error code.  For these freshly set-up
+ * timers it returns 0, so the error branches below can never trigger;
+ * confirm the intended check.
+ */
+int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv)
+{
+    struct dh_events *events = NULL;
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t i = 0;
+    int32_t ret = 0;
+    uint32_t evt_num = ARRAY_SIZE(aux_events);
+
+    /* Without interface init the last aux event is not used. */
+    if (!en_dev->ops->if_init(en_dev->parent)) {
+        evt_num -= 1;
+    }
+
+    /* dh_events has a trailing flexible array of dh_event_nb entries. */
+    events = kzalloc((sizeof(*events) + evt_num * sizeof(struct dh_event_nb)),
+                     GFP_KERNEL);
+    if (unlikely(events == NULL)) {
+        LOG_ERR("events kzalloc failed: %p\n", events);
+        ret = -ENOMEM;
+        goto err_events_kzalloc;
+    }
+
+    events->evt_num = evt_num;
+    events->dev = NULL;
+    en_priv->events = events;
+    events->wq = create_singlethread_workqueue("dh_aux_events");
+    if (!events->wq) {
+        LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n",
+                events->wq);
+        ret = -ENOMEM;
+        goto err_create_wq;
+    }
+
+    INIT_WORK(&en_dev->vf_link_info_update_work, vf_link_info_update_handler);
+    INIT_WORK(&en_dev->link_info_irq_update_vf_work,
+              link_info_irq_update_vf_handler);
+    INIT_WORK(&en_dev->link_info_irq_process_work,
+              link_info_irq_process_handler);
+    INIT_WORK(&en_dev->link_info_irq_update_np_work,
+              link_info_irq_update_np_work_handler);
+    INIT_WORK(&en_dev->rx_mode_set_work, rx_mode_set_handler);
+    INIT_WORK(&en_dev->pf2vf_msg_proc_work, pf2vf_msg_proc_work_handler);
+    INIT_WORK(&en_dev->pf_notify_vf_reset_work, pf_notify_vf_reset_handler);
+    INIT_WORK(&en_dev->service_task, en_aux_service_task);
+    INIT_WORK(&en_dev->service_riscv_task, en_aux_service_riscv_task);
+    INIT_WORK(&en_dev->riscv2aux_msg_proc_work,
+              riscv2aux_msg_proc_work_handler);
+
+    /* Fire the service timer immediately; it re-arms itself afterwards. */
+    timer_setup(&en_dev->service_timer, en_aux_service_timer, 0);
+    ret = mod_timer(&en_dev->service_timer, jiffies);
+    if (ret) {
+        LOG_ERR("timer add failed\n");
+        goto err_mod_timer;
+    }
+
+    timer_setup(&en_dev->service_riscv_timer, en_aux_service_riscv_timer, 0);
+    ret = mod_timer(&en_dev->service_riscv_timer, jiffies);
+    if (ret) {
+        LOG_ERR("timer add failed\n");
+        goto err_riscv_timer;
+    }
+
+    /* Hook every aux event notifier into the device's EQ table. */
+    for (i = 0; i < evt_num; i++) {
+        events->notifiers[i].nb = aux_events[i];
+        events->notifiers[i].ctx = en_priv;
+        dh_eq_notifier_register(&en_priv->eq_table, &events->notifiers[i].nb);
+    }
+
+    return ret;
+
+err_riscv_timer:
+    del_timer(&en_dev->service_riscv_timer);
+err_mod_timer:
+    del_timer(&en_dev->service_timer);
+    destroy_workqueue(events->wq);
+err_create_wq:
+    kfree(events);
+err_events_kzalloc:
+    return ret;
+}
+
+/*
+ * Tear down everything dh_aux_events_init() set up, in reverse order:
+ * unregister EQ notifiers (last first), delete both service timers,
+ * destroy the workqueue and free the events container.
+ */
+void dh_aux_events_uninit(struct zxdh_en_priv *en_priv)
+{
+    struct dh_events *events = en_priv->events;
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t i = 0;
+
+    for (i = events->evt_num - 1; i >= 0; i--) {
+        dh_eq_notifier_unregister(&en_priv->eq_table, &events->notifiers[i].nb);
+    }
+
+    del_timer(&en_dev->service_timer);
+    del_timer(&en_dev->service_riscv_timer);
+    destroy_workqueue(en_priv->events->wq);
+    kfree(en_priv->events);
+
+    return;
+}
+
+/*
+ * Test handler for MODULE_DEMO messages: replies with the 16-bit byte sum
+ * of the payload (little-endian: low byte first).  A NULL reply buffer is
+ * treated as "no reply requested" and succeeds silently.
+ */
+static int32_t mgr_test_cnt(void *data, uint16_t len, void *reps,
+                            uint16_t *reps_len, void *dev)
+{
+    uint8_t *pay_load = (uint8_t *)data;
+    uint8_t *reps_buffer = (uint8_t *)reps;
+    uint16_t idx = 0;
+    uint16_t sum = 0;
+
+    if (reps_buffer == NULL) {
+        return 0;
+    }
+
+    for (idx = 0; idx < len; idx++) {
+        sum += pay_load[idx];
+    }
+
+    reps_buffer[0] = (uint8_t)sum;
+    reps_buffer[1] = (uint8_t)(sum >> 8);
+    *reps_len = 2;
+    return 0;
+}
+
+/*
+ * Test handler for MODULE_MSGQ messages: echoes the request length as the
+ * reply length without touching the reply payload.  NULL reply buffer means
+ * no reply requested.
+ */
+static int32_t msgq_test_func(void *data, uint16_t len, void *reps,
+                              uint16_t *reps_len, void *dev)
+{
+    if (reps == NULL) {
+        return 0;
+    }
+
+    *reps_len = len;
+    return 0;
+}
+
+/*
+ * Register all bar-channel receive handlers used by the aux driver:
+ * PF->VF messages, dhtool netlink forwarding, and the DEMO/MSGQ test hooks.
+ * On any failure, unregisters everything registered so far (reverse order)
+ * and returns the error.
+ *
+ * Fix: the MODULE_DEMO failure path previously logged MODULE_MSGQ as the
+ * failing event id (copy-paste from the MSGQ branch).
+ */
+int32_t dh_aux_msg_recv_func_register(void)
+{
+    int32_t ret = 0;
+
+    ret = zxdh_bar_chan_msg_recv_register(MODULE_PF_BAR_MSG_TO_VF,
+                                          zxdh_vf_msg_recv_func);
+    if (0 != ret) {
+        LOG_ERR("event_id[%d] register failed: %d\n", MODULE_PF_BAR_MSG_TO_VF,
+                ret);
+        return ret;
+    }
+
+    ret = zxdh_bar_chan_msg_recv_register(MODULE_DHTOOL,
+                                          zxdh_tools_sendto_user_netlink);
+    if (0 != ret) {
+        LOG_ERR("event_id[%d] register failed: %d\n", MODULE_DHTOOL, ret);
+        goto unregister_pf_to_vf;
+    }
+
+    ret = zxdh_bar_chan_msg_recv_register(MODULE_DEMO, mgr_test_cnt);
+    if (0 != ret) {
+        LOG_ERR("event_id[%d] register failed: %d\n", MODULE_DEMO, ret);
+        goto unregister_dhtool;
+    }
+
+    ret = zxdh_bar_chan_msg_recv_register(MODULE_MSGQ, msgq_test_func);
+    if (0 != ret) {
+        LOG_ERR("event_id[%d] register failed: %d\n", MODULE_MSGQ, ret);
+        goto unregister_demo;
+    }
+
+    return ret;
+unregister_demo:
+    zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO);
+unregister_dhtool:
+    zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL);
+unregister_pf_to_vf:
+    zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF);
+    return ret;
+}
+
+/*
+ * Unregister all bar-channel receive handlers, in reverse order of
+ * registration (mirror of dh_aux_msg_recv_func_register()).
+ */
+void dh_aux_msg_recv_func_unregister(void)
+{
+    zxdh_bar_chan_msg_recv_unregister(MODULE_MSGQ);
+    zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO);
+    zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL);
+    zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF);
+    return;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/events.h b/src/net/drivers/net/ethernet/dinghai/en_aux/events.h
index 57348563fc7fd104080c77901c5de53e6cddb0fc..58c44e83961d501b0e1b36c038879d6140436f05 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/events.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/events.h
@@ -1,24 +1,24 @@
-#ifndef __ZXDH_PF_EVENTS_H__
-#define __ZXDH_PF_EVENTS_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include "en_aux.h"
-#include "../en_np/table/include/dpp_tbl_comm.h"
-
-#define DH_AUX_PF_ID_OFFSET(vport) (EPID(vport) * 8 + FUNC_NUM(vport))
-
-int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv);
-void dh_aux_events_uninit(struct zxdh_en_priv *en_priv);
-int32_t dh_aux_msg_recv_func_register(void);
-void dh_aux_msg_recv_func_unregister(void);
-int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+#ifndef __ZXDH_PF_EVENTS_H__
+#define __ZXDH_PF_EVENTS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include "en_aux.h"
+#include "../en_np/table/include/dpp_tbl_comm.h"
+
+#define DH_AUX_PF_ID_OFFSET(vport) (EPID(vport) * 8 + FUNC_NUM(vport))
+
+int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv);
+void dh_aux_events_uninit(struct zxdh_en_priv *en_priv);
+int32_t dh_aux_msg_recv_func_register(void);
+void dh_aux_msg_recv_func_unregister(void);
+int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.c b/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.c
index e29058eedaff85426607fbce9190ec20fc08f919..76208ad3c8b234608bedb6548fafb49cedf2fa31 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.c
@@ -1,4 +1,3 @@
-
#include
#include
#include
@@ -7,806 +6,739 @@
static void poll_timer_callback(struct timer_list *this_timer)
{
- struct msgq_dev *msgq_dev = from_timer(msgq_dev, this_timer, poll_timer);
- struct msg_buff *this_msg_buff = NULL;
- uint16_t i = 0;
- uint32_t tx_timeouts = 0;
-
- if (msgq_dev == NULL)
- {
- LOG_ERR("msgq_dev is NULL\n");
- return;
- }
-
- for (i = 0; i < MSGQ_MAX_MSG_BUFF_NUM; ++i)
- {
- if (msgq_dev->free_cnt == 0)
- {
- msgq_dev->timer_in_use = false;
- return;
- }
- this_msg_buff = &msgq_dev->msg_buff_ring[i];
-
- if (!this_msg_buff->using || !this_msg_buff->need_free)
- {
- continue;
- }
-
- if (this_msg_buff->timeout_cnt == 0)
- {
- *(this_msg_buff->data_len) = 0;
- this_msg_buff->data = NULL;
- msgq_dev->free_cnt--;
- tx_timeouts++;
- LOG_ERR("msg[%d] get callback out of time\n", i);
- this_msg_buff->using = false;
- continue;
- }
- this_msg_buff->timeout_cnt--;
- }
-
- u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp);
- msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts;
- u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp);
-
- mod_timer(this_timer, jiffies + msecs_to_jiffies(TIMER_DELAY_US));
+ struct msgq_dev *msgq_dev = from_timer(msgq_dev, this_timer, poll_timer);
+ struct msg_buff *this_msg_buff = NULL;
+ uint16_t i = 0;
+ uint32_t tx_timeouts = 0;
+
+ if (msgq_dev == NULL) {
+ LOG_ERR("msgq_dev is NULL\n");
+ return;
+ }
+
+ for (i = 0; i < MSGQ_MAX_MSG_BUFF_NUM; ++i) {
+ if (msgq_dev->free_cnt == 0) {
+ msgq_dev->timer_in_use = false;
+ return;
+ }
+ this_msg_buff = &msgq_dev->msg_buff_ring[i];
+
+ if (!this_msg_buff->using || !this_msg_buff->need_free) {
+ continue;
+ }
+
+ if (this_msg_buff->timeout_cnt == 0) {
+ *(this_msg_buff->data_len) = 0;
+ this_msg_buff->data = NULL;
+ msgq_dev->free_cnt--;
+ tx_timeouts++;
+ LOG_ERR("msg[%d] get callback out of time\n", i);
+ this_msg_buff->using = false;
+ continue;
+ }
+ this_msg_buff->timeout_cnt--;
+ }
+
+ u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp);
+ msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts;
+ u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp);
+
+ mod_timer(this_timer, jiffies + msecs_to_jiffies(TIMER_DELAY_US));
}
-static uint32_t msgq_get_mergeable_buf_len(struct receive_queue *rq, struct ewma_pkt_len *avg_pkt_len)
+static uint32_t msgq_get_mergeable_buf_len(struct receive_queue *rq,
+ struct ewma_pkt_len *avg_pkt_len)
{
- const size_t hdr_len = PRIV_HEADER_LEN;
- uint32_t len = 0;
+ const size_t hdr_len = PRIV_HEADER_LEN;
+ uint32_t len = 0;
- len = hdr_len + clamp_t(uint32_t, ewma_pkt_len_read(avg_pkt_len), rq->min_buf_len, PAGE_SIZE - hdr_len);
+ len = hdr_len + clamp_t(uint32_t, ewma_pkt_len_read(avg_pkt_len),
+ rq->min_buf_len, PAGE_SIZE - hdr_len);
- return ALIGN(len, L1_CACHE_BYTES);
+ return ALIGN(len, L1_CACHE_BYTES);
}
static int32_t msgq_add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
- struct page_frag *alloc_frag = &rq->alloc_frag;
- char *buf = NULL;
- void *ctx = NULL;
- int32_t err = 0;
- uint32_t len = 0;
- uint32_t hole = 0;
-
- len = msgq_get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
- if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
- {
- return -ENOMEM;
- }
-
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- get_page(alloc_frag->page);
- alloc_frag->offset += len;
- hole = alloc_frag->size - alloc_frag->offset;
- if (hole < len)
- {
- len += hole;
- alloc_frag->offset += hole;
- }
-
- sg_init_one(rq->sg, buf, len);
- ctx = (void *)(unsigned long)len;
- err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
- {
- put_page(virt_to_head_page(buf));
- }
-
- return err;
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ char *buf = NULL;
+ void *ctx = NULL;
+ int32_t err = 0;
+ uint32_t len = 0;
+ uint32_t hole = 0;
+
+ len = msgq_get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
+ if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) {
+ return -ENOMEM;
+ }
+
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ get_page(alloc_frag->page);
+ alloc_frag->offset += len;
+ hole = alloc_frag->size - alloc_frag->offset;
+ if (hole < len) {
+ len += hole;
+ alloc_frag->offset += hole;
+ }
+
+ sg_init_one(rq->sg, buf, len);
+ ctx = (void *)(unsigned long)len;
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ if (err < 0) {
+ put_page(virt_to_head_page(buf));
+ }
+
+ return err;
}
static bool msgq_try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
- int32_t err = 0;
- bool oom = 0;
- unsigned long flags = 0;
-
- do
- {
- err = msgq_add_recvbuf_mergeable(rq, gfp);
- oom = err == -ENOMEM;
- if (err)
- {
- break;
- }
- } while (rq->vq->num_free);
-
- if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq))
- {
- flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
- rq->stats.kicks++;
- u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
- }
-
- return !oom;
+ int32_t err = 0;
+ bool oom = 0;
+ unsigned long flags = 0;
+
+ do {
+ err = msgq_add_recvbuf_mergeable(rq, gfp);
+ oom = err == -ENOMEM;
+ if (err) {
+ break;
+ }
+ } while (rq->vq->num_free);
+
+ if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) {
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
+ rq->stats.kicks++;
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
+ }
+
+ return !oom;
}
uint32_t msgq_mergeable_min_buf_len(struct virtqueue *vq)
{
- const uint32_t hdr_len = PRIV_HEADER_LEN;
- uint32_t rq_size = virtqueue_get_vring_size(vq);
- uint32_t min_buf_len = DIV_ROUND_UP(BUFF_LEN, rq_size);
+ const uint32_t hdr_len = PRIV_HEADER_LEN;
+ uint32_t rq_size = virtqueue_get_vring_size(vq);
+ uint32_t min_buf_len = DIV_ROUND_UP(BUFF_LEN, rq_size);
- return max(max(min_buf_len, hdr_len) - hdr_len, (uint32_t)GOOD_PACKET_LEN);
+ return max(max(min_buf_len, hdr_len) - hdr_len, (uint32_t)GOOD_PACKET_LEN);
}
-static int32_t msgq_privq_init(struct msgq_dev *msgq_dev, struct net_device *netdev)
+static int32_t msgq_privq_init(struct msgq_dev *msgq_dev,
+ struct net_device *netdev)
{
- struct receive_queue *rq = msgq_dev->rq_priv;
- struct send_queue *sq = msgq_dev->sq_priv;
-
- rq->pages = NULL;
- rq->min_buf_len = msgq_mergeable_min_buf_len(rq->vq);
- netif_napi_add(netdev, &rq->napi, zxdh_msgq_poll, NAPI_POLL_WEIGHT);
- netif_tx_napi_add(netdev, &sq->napi, NULL, NAPI_POLL_WEIGHT);
-
- sg_init_table(rq->sg, ARRAY_SIZE(rq->sg));
- ewma_pkt_len_init(&rq->mrg_avg_pkt_len);
- sg_init_table(sq->sg, ARRAY_SIZE(sq->sg));
-
- u64_stats_init(&rq->stats.syncp);
- u64_stats_init(&sq->stats.syncp);
-
- if (!msgq_try_fill_recv(rq, GFP_KERNEL))
- {
- LOG_ERR("msgq_try_fill_recv failed\n");
- ZXDH_FREE_PTR(msgq_dev);
- return MSGQ_RET_ERR_CHANNEL_NOT_READY;
- }
-
- msgq_dev->msgq_enable = true;
- virtnet_napi_enable(rq->vq, &rq->napi);
- LOG_INFO("zxdh_msgq_init success\n");
- return MSGQ_RET_OK;
+ struct receive_queue *rq = msgq_dev->rq_priv;
+ struct send_queue *sq = msgq_dev->sq_priv;
+
+ rq->pages = NULL;
+ rq->min_buf_len = msgq_mergeable_min_buf_len(rq->vq);
+ netif_napi_add(netdev, &rq->napi, zxdh_msgq_poll, NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(netdev, &sq->napi, NULL, NAPI_POLL_WEIGHT);
+
+ sg_init_table(rq->sg, ARRAY_SIZE(rq->sg));
+ ewma_pkt_len_init(&rq->mrg_avg_pkt_len); //
+ sg_init_table(sq->sg, ARRAY_SIZE(sq->sg));
+
+ u64_stats_init(&rq->stats.syncp);
+ u64_stats_init(&sq->stats.syncp);
+
+ if (!msgq_try_fill_recv(rq, GFP_KERNEL)) {
+ LOG_ERR("msgq_try_fill_recv failed\n");
+ ZXDH_FREE_PTR(msgq_dev);
+ return MSGQ_RET_ERR_CHANNEL_NOT_READY;
+ }
+
+ msgq_dev->msgq_enable = true;
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ LOG_INFO("zxdh_msgq_init success\n");
+ return MSGQ_RET_OK;
}
int32_t zxdh_msgq_init(struct zxdh_en_device *en_dev)
{
- struct msgq_dev *msgq_dev = NULL;
- int32_t idx = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
- en_dev->msgq_dev = kzalloc(sizeof(struct msgq_dev), GFP_KERNEL);
- ZXDH_CHECK_PTR_RETURN(en_dev->msgq_dev);
-
- idx = en_dev->max_queue_pairs - ZXDH_PQ_PAIRS_NUM;
- msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- msgq_dev->sq_priv = &en_dev->sq[idx];
- msgq_dev->rq_priv = &en_dev->rq[idx];
- msgq_dev->msgq_vfid = (uint16_t)VQM_VFID(en_dev->vport);
- msgq_dev->msgq_rqid = (uint16_t)msgq_dev->rq_priv->vq->phy_index;
-
- dpp_vport_create_by_vqm_vfid(&pf_info, RISCV_COMMON_VFID);
- spin_lock_init(&msgq_dev->sn_lock);
- spin_lock_init(&msgq_dev->tx_lock);
- mutex_init(&msgq_dev->mlock);
- timer_setup(&msgq_dev->poll_timer, poll_timer_callback, 0);
-
- return msgq_privq_init(msgq_dev, en_dev->netdev);
+ struct msgq_dev *msgq_dev = NULL;
+ int32_t idx = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+ en_dev->msgq_dev = kzalloc(sizeof(struct msgq_dev), GFP_KERNEL);
+ ZXDH_CHECK_PTR_RETURN(en_dev->msgq_dev);
+
+ idx = en_dev->max_queue_pairs - ZXDH_PQ_PAIRS_NUM;
+ msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ msgq_dev->sq_priv = &en_dev->sq[idx];
+ msgq_dev->rq_priv = &en_dev->rq[idx];
+ msgq_dev->msgq_vfid = (uint16_t)VQM_VFID(en_dev->vport);
+ msgq_dev->msgq_rqid = (uint16_t)msgq_dev->rq_priv->vq->phy_index;
+
+ dpp_vport_create_by_vqm_vfid(&pf_info, RISCV_COMMON_VFID);
+ spin_lock_init(&msgq_dev->sn_lock);
+ spin_lock_init(&msgq_dev->tx_lock);
+ mutex_init(&msgq_dev->mlock);
+ timer_setup(&msgq_dev->poll_timer, poll_timer_callback, 0);
+
+ return msgq_privq_init(msgq_dev, en_dev->netdev);
}
void zxdh_msgq_exit(struct zxdh_en_device *en_dev)
{
- struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- if (msgq_dev == NULL)
- {
- LOG_ERR("msgq_dev is null!\n");
- return;
- }
+ if (msgq_dev == NULL) {
+ LOG_ERR("msgq_dev is null!\n");
+ return;
+ }
- msgq_dev->msgq_enable = false;
- napi_disable(&msgq_dev->rq_priv->napi);
- del_timer(&msgq_dev->poll_timer);
+ msgq_dev->msgq_enable = false;
+ napi_disable(&msgq_dev->rq_priv->napi);
+ del_timer(&msgq_dev->poll_timer);
- ZXDH_FREE_PTR(msgq_dev);
- LOG_INFO("zxdh_msg_chan_pkt remove success\n");
+ ZXDH_FREE_PTR(msgq_dev);
+ LOG_INFO("zxdh_msg_chan_pkt remove success\n");
}
void msgq_print_data(uint8_t *buf, uint32_t len, uint8_t flag)
{
- uint32_t print_len = 0;
-
- if (flag == MSGQ_PRINT_HDR)
- {
- print_len = PRIV_HEADER_LEN;
- }
- else if (flag == MSGQ_PRINT_128B)
- {
- print_len = len > 128 ? 128 : len;
- }
- else if (flag == MSGQ_PRINT_ALL)
- {
- print_len = len;
- }
- print_data(buf, print_len);
+ uint32_t print_len = 0;
+
+ if (flag == MSGQ_PRINT_HDR) {
+ print_len = PRIV_HEADER_LEN;
+ } else if (flag == MSGQ_PRINT_128B) {
+ print_len = len > 128 ? 128 : len;
+ } else if (flag == MSGQ_PRINT_ALL) {
+ print_len = len;
+ }
+ print_data(buf, print_len);
}
-static int32_t zxdh_msg_para_check(struct msgq_pkt_info *msg, struct reps_info *reps)
+static int32_t zxdh_msg_para_check(struct msgq_pkt_info *msg,
+ struct reps_info *reps)
{
- ZXDH_CHECK_PTR_RETURN(msg);
- ZXDH_CHECK_PTR_RETURN(msg->addr);
-
- if ((msg->len == 0) || (msg->len > MSGQ_MAX_ADDR_LEN))
- {
- LOG_ERR("invalid data_len: %d\n", msg->len);
- goto free_addr;
- }
-
- if (msg->event_id >= MSG_MODULE_NUM)
- {
- LOG_ERR("invalid event_id\n");
- goto free_addr;
- }
-
- if (msg->no_reps)
- {
- return MSGQ_RET_OK;
- }
-
- ZXDH_CHECK_PTR_GOTO_ERR(reps, free_addr);
- ZXDH_CHECK_PTR_GOTO_ERR(reps->addr, free_addr);
- if (reps->len == 0)
- {
- LOG_ERR("invalid reps_len: %d\n", reps->len);
- goto free_addr;
- }
-
- return MSGQ_RET_OK;
+ ZXDH_CHECK_PTR_RETURN(msg);
+ ZXDH_CHECK_PTR_RETURN(msg->addr);
+
+ if ((msg->len == 0) || (msg->len > MSGQ_MAX_ADDR_LEN)) {
+ LOG_ERR("invalid data_len: %d\n", msg->len);
+ goto free_addr;
+ }
+
+ if (msg->event_id >= MSG_MODULE_NUM) {
+ LOG_ERR("invalid event_id\n");
+ goto free_addr;
+ }
+
+ if (msg->no_reps) {
+ return MSGQ_RET_OK;
+ }
+
+ ZXDH_CHECK_PTR_GOTO_ERR(reps, free_addr);
+ ZXDH_CHECK_PTR_GOTO_ERR(reps->addr, free_addr);
+ if (reps->len == 0) {
+ LOG_ERR("invalid reps_len: %d\n", reps->len);
+ goto free_addr;
+ }
+
+ return MSGQ_RET_OK;
free_addr:
- ZXDH_FREE_PTR(msg->addr);
- return MSGQ_RET_ERR_INVALID_PARA;
+ ZXDH_FREE_PTR(msg->addr);
+ return MSGQ_RET_ERR_INVALID_PARA;
}
-static int32_t zxdh_sequence_num_get(struct msgq_dev *msgq_dev, uint16_t *sequence_num)
+static int32_t zxdh_sequence_num_get(struct msgq_dev *msgq_dev,
+ uint16_t *sequence_num)
{
- uint16_t sn = 0;
- uint16_t loop = 0;
-
- spin_lock(&msgq_dev->sn_lock);
- sn = msgq_dev->sequence_num;
-
- for (loop = 0; loop < MSGQ_MAX_MSG_BUFF_NUM; loop++)
- {
- if (!msgq_dev->msg_buff_ring[sn].using)
- {
- *sequence_num = sn;
- msgq_dev->msg_buff_ring[sn].using = true;
- msgq_dev->msg_buff_ring[sn].valid = false;
- msgq_dev->free_cnt++;
- SEQUENCE_NUM_ADD(sn);
- break;
- }
- SEQUENCE_NUM_ADD(sn);
- }
-
- msgq_dev->sequence_num = sn;
- spin_unlock(&msgq_dev->sn_lock);
-
- if (loop == MSGQ_MAX_MSG_BUFF_NUM)
- {
- return MSGQ_RET_ERR_CHAN_BUSY;
- }
-
- return MSGQ_RET_OK;
+ uint16_t sn = 0;
+ uint16_t loop = 0;
+
+ spin_lock(&msgq_dev->sn_lock);
+ sn = msgq_dev->sequence_num;
+
+ for (loop = 0; loop < MSGQ_MAX_MSG_BUFF_NUM; loop++) {
+ if (!msgq_dev->msg_buff_ring[sn].using) {
+ *sequence_num = sn;
+ msgq_dev->msg_buff_ring[sn].using = true;
+ msgq_dev->msg_buff_ring[sn].valid = false;
+ msgq_dev->free_cnt++;
+ SEQUENCE_NUM_ADD(sn);
+ break;
+ }
+ SEQUENCE_NUM_ADD(sn);
+ }
+
+ msgq_dev->sequence_num = sn;
+ spin_unlock(&msgq_dev->sn_lock);
+
+ if (loop == MSGQ_MAX_MSG_BUFF_NUM) {
+ return MSGQ_RET_ERR_CHAN_BUSY;
+ }
+
+ return MSGQ_RET_OK;
}
-static int32_t page_send_cmd(struct send_queue *sq, uint8_t *buf, uint16_t buf_len, uint8_t print)
+static int32_t page_send_cmd(struct send_queue *sq, uint8_t *buf,
+ uint16_t buf_len, uint8_t print)
{
- uint16_t i = 0;
- int32_t err = 0;
- uint16_t total_sg = 0;
- uint16_t last_buff_len = 0;
-
- if (print != 0)
- {
- LOG_DEBUG("send pkt start\n");
- msgq_print_data(buf, buf_len, print);
- }
-
- total_sg = buf_len / BUFF_LEN;
- last_buff_len = buf_len % BUFF_LEN;
- if (last_buff_len != 0)
- {
- total_sg += 1;
- }
-
- sg_init_table(sq->sg, total_sg);
- for (i = 0; i < total_sg; ++i)
- {
- if (i == (total_sg - 1))
- {
- sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN), ((last_buff_len != 0) ? (last_buff_len) : (BUFF_LEN)));
- }
- else
- {
- sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN), BUFF_LEN);
- }
- }
-
- err = virtqueue_add_outbuf(sq->vq, sq->sg, total_sg, buf, GFP_ATOMIC);
- ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, "virtqueue_add_outbuf failed: %d\n", err);
-
- if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq))
- {
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.kicks++;
- u64_stats_update_end(&sq->stats.syncp);
- }
- return err;
+ uint16_t i = 0;
+ int32_t err = 0;
+ uint16_t total_sg = 0;
+ uint16_t last_buff_len = 0;
+
+ if (print != 0) {
+ LOG_DEBUG("send pkt start\n");
+ msgq_print_data(buf, buf_len, print);
+ }
+
+ total_sg = buf_len / BUFF_LEN;
+ last_buff_len = buf_len % BUFF_LEN;
+ if (last_buff_len != 0) {
+ total_sg += 1;
+ }
+
+ sg_init_table(sq->sg, total_sg);
+ for (i = 0; i < total_sg; ++i) {
+ if (i == (total_sg - 1)) {
+ sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN),
+ ((last_buff_len != 0) ? (last_buff_len) : (BUFF_LEN)));
+ } else {
+ sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN), BUFF_LEN);
+ }
+ }
+
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, total_sg, buf, GFP_ATOMIC);
+ ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, "virtqueue_add_outbuf failed: %d\n",
+ err);
+
+ if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ return err;
free_addr:
- return MSGQ_RET_ERR_VQ_BROKEN;
+ return MSGQ_RET_ERR_VQ_BROKEN;
}
-static int32_t zxdh_msgq_pkt_send(struct msgq_dev *msgq_dev, \
- struct msgq_pkt_info *pkt_info, uint16_t sn)
+static int32_t zxdh_msgq_pkt_send(struct msgq_dev *msgq_dev,
+ struct msgq_pkt_info *pkt_info, uint16_t sn)
{
- struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)pkt_info->addr;
- void *buf = NULL;
- uint32_t len = 0;
-
- if (spin_trylock(&msgq_dev->tx_lock))
- {
- while ((buf = virtqueue_get_buf(msgq_dev->sq_priv->vq, &len)) != NULL)
- {
- ZXDH_FREE_PTR(buf);
- };
- spin_unlock(&msgq_dev->tx_lock);
- }
-
- memset(hdr, 0, PRIV_HEADER_LEN);
- hdr->tx_port = TX_PORT_NP;
- hdr->pd_len = PRIV_HEADER_LEN / 2;
- hdr->pi_hdr.pi_type = DEFAULT_PI_TYPE;
- hdr->pi_hdr.pkt_type = CONTROL_MSG_TYPE;
- hdr->pi_hdr.vfid_dst = htons(RISCV_COMMON_VFID);
- hdr->pi_hdr.qid_dst = htons(RISCV_COMMON_QID);
- hdr->pi_hdr.vfid_src = htons(msgq_dev->msgq_vfid);
- hdr->pi_hdr.qid_src = htons(msgq_dev->msgq_rqid);
- hdr->pi_hdr.event_id = pkt_info->event_id;
- hdr->pi_hdr.sequence_num = sn;
- if (sn == NO_REPS_SEQUENCE_NUM)
- {
- hdr->pi_hdr.msg_type = NO_REPS_MSG;
- }
- if (msgq_dev->loopback)
- {
- hdr->pi_hdr.event_id = MODULE_MSGQ;
- hdr->pi_hdr.vfid_dst = hdr->pi_hdr.vfid_src;
- hdr->pi_hdr.qid_dst = hdr->pi_hdr.qid_src;
- }
-
- return page_send_cmd(msgq_dev->sq_priv, pkt_info->addr, \
- pkt_info->len, msgq_dev->print_flag);
+ struct priv_queues_net_hdr *hdr =
+ (struct priv_queues_net_hdr *)pkt_info->addr;
+ void *buf = NULL;
+ uint32_t len = 0;
+
+ if (spin_trylock(&msgq_dev->tx_lock)) {
+ while ((buf = virtqueue_get_buf(msgq_dev->sq_priv->vq, &len)) != NULL) {
+ ZXDH_FREE_PTR(buf);
+        }
+ spin_unlock(&msgq_dev->tx_lock);
+ }
+
+ memset(hdr, 0, PRIV_HEADER_LEN);
+ hdr->tx_port = TX_PORT_NP;
+ hdr->pd_len = PRIV_HEADER_LEN / 2;
+ hdr->pi_hdr.pi_type = DEFAULT_PI_TYPE;
+ hdr->pi_hdr.pkt_type = CONTROL_MSG_TYPE;
+ hdr->pi_hdr.vfid_dst = htons(RISCV_COMMON_VFID);
+ hdr->pi_hdr.qid_dst = htons(RISCV_COMMON_QID);
+ hdr->pi_hdr.vfid_src = htons(msgq_dev->msgq_vfid);
+ hdr->pi_hdr.qid_src = htons(msgq_dev->msgq_rqid);
+ hdr->pi_hdr.event_id = pkt_info->event_id;
+ hdr->pi_hdr.sequence_num = sn;
+ if (sn == NO_REPS_SEQUENCE_NUM) {
+ hdr->pi_hdr.msg_type = NO_REPS_MSG;
+ }
+ if (msgq_dev->loopback) {
+ hdr->pi_hdr.event_id = MODULE_MSGQ;
+ hdr->pi_hdr.vfid_dst = hdr->pi_hdr.vfid_src;
+ hdr->pi_hdr.qid_dst = hdr->pi_hdr.qid_src;
+ }
+
+ return page_send_cmd(msgq_dev->sq_priv, pkt_info->addr, pkt_info->len,
+ msgq_dev->print_flag);
}
-int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev, \
- struct msgq_pkt_info *pkt_info, struct reps_info *reps)
+int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev,
+ struct msgq_pkt_info *pkt_info,
+ struct reps_info *reps)
{
- uint16_t sn = NO_REPS_SEQUENCE_NUM;
- uint16_t sync_poll_cnt = 0;
- int32_t err = 0;
- int32_t i = 0;
- uint32_t tx_timeouts = 0;
- uint32_t tx_errs = 0;
-
- err = zxdh_msg_para_check(pkt_info, reps);
- ZXDH_CHECK_RET_GOTO_ERR(err, tx_err, "zxdh_msg_para_check failed: %d\n", err);
-
- ZXDH_CHECK_PTR_GOTO_ERR(msgq_dev, free_addr);
- CHECK_CHANNEL_USABLE(msgq_dev, err, free_addr);
-
- if (!pkt_info->no_reps)
- {
- err = zxdh_sequence_num_get(msgq_dev, &sn);
- ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, \
- "zxdh_sequence_num_get failed: %d\n", err);
- }
-
- mutex_lock(&msgq_dev->mlock);
- err = zxdh_msgq_pkt_send(msgq_dev, pkt_info, sn);
- mutex_unlock(&msgq_dev->mlock);
- ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, "zxdh_msgq_pkt_send failed: %d\n", err);
-
- if (pkt_info->no_reps)
- {
- return MSGQ_RET_OK;
- }
-
- msgq_dev->msg_buff_ring[sn].data = &reps->addr;
- msgq_dev->msg_buff_ring[sn].data_len = &reps->len;
- msgq_dev->msg_buff_ring[sn].timeout_cnt = pkt_info->timeout_us / TIMER_DELAY_US;
- if (!pkt_info->is_async)
- {
- sync_poll_cnt = pkt_info->timeout_us / 10;
- for (i = 0; i < sync_poll_cnt; ++i)
- {
- usleep_range(5, 10);
- if (!msgq_dev->msg_buff_ring[sn].using &&
- msgq_dev->msg_buff_ring[sn].valid)
- {
- return MSGQ_RET_OK;
- }
- }
- err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME;
- goto free_sn;
- }
- else
- {
- msgq_dev->msg_buff_ring[sn].need_free = true;
- if (!msgq_dev->timer_in_use)
- {
- mod_timer(&msgq_dev->poll_timer, jiffies + usecs_to_jiffies(TIMER_DELAY_US));
- msgq_dev->timer_in_use = true;
- }
- }
- return MSGQ_RET_OK;
+ uint16_t sn = NO_REPS_SEQUENCE_NUM;
+ uint16_t sync_poll_cnt = 0;
+ int32_t err = 0;
+ int32_t i = 0;
+ uint32_t tx_timeouts = 0;
+ uint32_t tx_errs = 0;
+
+ err = zxdh_msg_para_check(pkt_info, reps);
+ ZXDH_CHECK_RET_GOTO_ERR(err, tx_err, "zxdh_msg_para_check failed: %d\n",
+ err);
+
+ ZXDH_CHECK_PTR_GOTO_ERR(msgq_dev, free_addr);
+ CHECK_CHANNEL_USABLE(msgq_dev, err, free_addr);
+
+ if (!pkt_info->no_reps) {
+ err = zxdh_sequence_num_get(msgq_dev, &sn);
+ ZXDH_CHECK_RET_GOTO_ERR(err, free_addr,
+ "zxdh_sequence_num_get failed: %d\n", err);
+ }
+
+ mutex_lock(&msgq_dev->mlock);
+ err = zxdh_msgq_pkt_send(msgq_dev, pkt_info, sn);
+ mutex_unlock(&msgq_dev->mlock);
+ ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, "zxdh_msgq_pkt_send failed: %d\n",
+ err);
+
+ if (pkt_info->no_reps) {
+ return MSGQ_RET_OK;
+ }
+
+ msgq_dev->msg_buff_ring[sn].data = &reps->addr;
+ msgq_dev->msg_buff_ring[sn].data_len = &reps->len;
+ msgq_dev->msg_buff_ring[sn].timeout_cnt =
+ pkt_info->timeout_us / TIMER_DELAY_US;
+ if (!pkt_info->is_async) {
+ sync_poll_cnt = pkt_info->timeout_us / 10;
+ for (i = 0; i < sync_poll_cnt; ++i) {
+ usleep_range(5, 10);
+ if (!msgq_dev->msg_buff_ring[sn].using &&
+ msgq_dev->msg_buff_ring[sn].valid) {
+ return MSGQ_RET_OK;
+ }
+ }
+ err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME;
+ goto free_sn;
+ } else {
+ msgq_dev->msg_buff_ring[sn].need_free = true;
+ if (!msgq_dev->timer_in_use) {
+ mod_timer(&msgq_dev->poll_timer,
+ jiffies + usecs_to_jiffies(TIMER_DELAY_US));
+ msgq_dev->timer_in_use = true;
+ }
+ }
+ return MSGQ_RET_OK;
free_addr:
- ZXDH_FREE_PTR(pkt_info->addr);
+ ZXDH_FREE_PTR(pkt_info->addr);
tx_err:
- tx_errs++;
+ tx_errs++;
free_sn:
- if ((sn != NO_REPS_SEQUENCE_NUM) && (sn < MSGQ_MAX_MSG_BUFF_NUM))
- {
- LOG_ERR("timeout, sn[%d] is free\n", sn);
- msgq_dev->msg_buff_ring[sn].using = false;
- tx_timeouts++;
- msgq_dev->free_cnt--;
- }
- u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp);
- msgq_dev->sq_priv->stats.xdp_tx_drops += tx_errs;
- msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts;
- u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp);
- return err;
+    if ((sn != NO_REPS_SEQUENCE_NUM) && (sn < MSGQ_MAX_MSG_BUFF_NUM)) {
+ LOG_ERR("timeout, sn[%d] is free\n", sn);
+ msgq_dev->msg_buff_ring[sn].using = false;
+ tx_timeouts++;
+ msgq_dev->free_cnt--;
+ }
+ u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp);
+ msgq_dev->sq_priv->stats.xdp_tx_drops += tx_errs;
+ msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts;
+ u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp);
+ return err;
}
static void zxdh_swap_dst_and_src(uint16_t *dst, uint16_t *src)
{
- uint16_t temp = 0;
+ uint16_t temp = 0;
- temp = *dst;
- *dst = *src;
- *src = temp;
+ temp = *dst;
+ *dst = *src;
+ *src = temp;
}
static int32_t zxdh_pi_header_check(struct pi_header *hdr)
{
- if (hdr->pi_type != DEFAULT_PI_TYPE)
- {
- LOG_ERR("INVALID_PI_TYPE: %d\n", hdr->pi_type);
- return MSGQ_RET_ERR_CALLBACK_FAIL;
- }
-
- if (hdr->pkt_type != CONTROL_MSG_TYPE)
- {
- LOG_ERR("INVALID_PKT_TYPE: %d\n", hdr->pkt_type);
- return MSGQ_RET_ERR_CALLBACK_FAIL;
- }
-
- if (hdr->msg_type > NO_REPS_MSG)
- {
- LOG_ERR("INVALID_MSG_TYPE: %d\n", hdr->msg_type);
- return MSGQ_RET_ERR_CALLBACK_FAIL;
- }
-
- if (hdr->event_id >= MSG_MODULE_NUM)
- {
- LOG_ERR("INVALID_MSG_MODULE_ID: %d\n", hdr->event_id);
- return MSGQ_RET_ERR_CALLBACK_FAIL;
- }
-
- if (hdr->err_code != MSGQ_RET_OK)
- {
- LOG_ERR("MSG_ERR_CODE: %d\n", hdr->err_code);
- return MSGQ_RET_ERR_CALLBACK_FAIL;
- }
-
- return MSGQ_RET_OK;
+ if (hdr->pi_type != DEFAULT_PI_TYPE) {
+ LOG_ERR("INVALID_PI_TYPE: %d\n", hdr->pi_type);
+ return MSGQ_RET_ERR_CALLBACK_FAIL;
+ }
+
+ if (hdr->pkt_type != CONTROL_MSG_TYPE) {
+ LOG_ERR("INVALID_PKT_TYPE: %d\n", hdr->pkt_type);
+ return MSGQ_RET_ERR_CALLBACK_FAIL;
+ }
+
+ if (hdr->msg_type > NO_REPS_MSG) {
+ LOG_ERR("INVALID_MSG_TYPE: %d\n", hdr->msg_type);
+ return MSGQ_RET_ERR_CALLBACK_FAIL;
+ }
+
+ if (hdr->event_id >= MSG_MODULE_NUM) {
+ LOG_ERR("INVALID_MSG_MODULE_ID: %d\n", hdr->event_id);
+ return MSGQ_RET_ERR_CALLBACK_FAIL;
+ }
+
+ if (hdr->err_code != MSGQ_RET_OK) {
+ LOG_ERR("MSG_ERR_CODE: %d\n", hdr->err_code);
+ return MSGQ_RET_ERR_CALLBACK_FAIL;
+ }
+
+ return MSGQ_RET_OK;
}
static void rx_free_pages(struct msgq_dev *msgq_dev, void *buf, uint32_t len)
{
- if (msgq_dev->print_flag == MSGQ_PRINT_ALL)
- {
- print_data((uint8_t *)buf, len);
- LOG_DEBUG("buf: 0x%llx refcnt: %d\n", (uint64_t)buf, \
- page_ref_count(virt_to_head_page(buf)));
- }
- put_page(virt_to_head_page(buf));
+ if (msgq_dev->print_flag == MSGQ_PRINT_ALL) {
+ print_data((uint8_t *)buf, len);
+ LOG_DEBUG("buf: 0x%llx refcnt: %d\n", (uint64_t)buf,
+ page_ref_count(virt_to_head_page(buf)));
+ }
+ put_page(virt_to_head_page(buf));
}
-static int32_t zxdh_response_msg_handle(struct msgq_dev *msgq_dev, \
- struct virtnet_rq_stats *stats, uint16_t num_buf, void *buf, uint32_t len)
+static int32_t zxdh_response_msg_handle(struct msgq_dev *msgq_dev,
+ struct virtnet_rq_stats *stats,
+ uint16_t num_buf, void *buf,
+ uint32_t len)
{
- struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf;
- uint16_t sn = hdr->pi_hdr.sequence_num;
- int32_t err = MSGQ_RET_OK;
- struct msg_buff *tmp_buff = NULL;
- uint32_t max_len = 0;
- uint32_t pkt_len = 0;
-
- if (sn >= MSGQ_MAX_MSG_BUFF_NUM)
- {
- LOG_ERR("INVALID_SEQUENCE_NUM: %d\n", sn);
- err = MSGQ_RET_ERR;
- goto put_page;
- }
-
- tmp_buff = &msgq_dev->msg_buff_ring[sn];
- if (!tmp_buff->using)
- {
- LOG_ERR("buff[%d] is free\n", sn);
- err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME;
- goto put_page;
- }
- ZXDH_CHECK_PTR_GOTO_ERR(*tmp_buff->data, put_page);
-
- max_len = *(tmp_buff->data_len);
- pkt_len = len - PRIV_HEADER_LEN;
- if (pkt_len > max_len)
- {
- LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", pkt_len, max_len);
- err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH;
- goto put_page;
- }
-
- memcpy(*tmp_buff->data, (uint8_t *)buf + PRIV_HEADER_LEN, pkt_len);
- while (--num_buf != 0)
- {
- rx_free_pages(msgq_dev, buf, len);
- buf = virtqueue_get_buf(msgq_dev->rq_priv->vq, &len);
- if (unlikely(buf == NULL))
- {
- LOG_ERR("msgq rx error: %dth buffers missing\n", num_buf);
- stats->drops++;
- err = MSGQ_RET_ERR_RX_INVALID_NUM_BUF;
- goto out;
- }
-
- if ((len + pkt_len) > max_len)
- {
- LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", len + pkt_len, max_len);
- err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH;
- goto put_page;
- }
-
- stats->bytes += len;
- memcpy((*tmp_buff->data) + pkt_len, buf, len);
- pkt_len += len;
- }
- *(tmp_buff->data_len) = pkt_len;
- tmp_buff->valid = true;
- stats->xdp_drops--;
+ struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf;
+ uint16_t sn = hdr->pi_hdr.sequence_num;
+ int32_t err = MSGQ_RET_OK;
+ struct msg_buff *tmp_buff = NULL;
+ uint32_t max_len = 0;
+ uint32_t pkt_len = 0;
+
+ if (sn >= MSGQ_MAX_MSG_BUFF_NUM) {
+ LOG_ERR("INVALID_SEQUENCE_NUM: %d\n", sn);
+ err = MSGQ_RET_ERR;
+ goto put_page;
+ }
+
+ tmp_buff = &msgq_dev->msg_buff_ring[sn];
+ if (!tmp_buff->using) {
+ LOG_ERR("buff[%d] is free\n", sn);
+ err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME;
+ goto put_page;
+ }
+ ZXDH_CHECK_PTR_GOTO_ERR(*tmp_buff->data, put_page);
+
+ max_len = *(tmp_buff->data_len);
+ pkt_len = len - PRIV_HEADER_LEN;
+ if (pkt_len > max_len) {
+ LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", pkt_len, max_len);
+ err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH;
+ goto put_page;
+ }
+
+ memcpy(*tmp_buff->data, (uint8_t *)buf + PRIV_HEADER_LEN, pkt_len);
+ while (--num_buf != 0) {
+ rx_free_pages(msgq_dev, buf, len);
+ buf = virtqueue_get_buf(msgq_dev->rq_priv->vq, &len);
+ if (unlikely(buf == NULL)) {
+ LOG_ERR("msgq rx error: %dth buffers missing\n", num_buf);
+ stats->drops++;
+ err = MSGQ_RET_ERR_RX_INVALID_NUM_BUF;
+ goto out;
+ }
+
+ if ((len + pkt_len) > max_len) {
+ LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", len + pkt_len,
+ max_len);
+ err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH;
+ goto put_page;
+ }
+
+ stats->bytes += len;
+ memcpy((*tmp_buff->data) + pkt_len, buf, len);
+ pkt_len += len;
+ }
+ *(tmp_buff->data_len) = pkt_len;
+ tmp_buff->valid = true;
+ stats->xdp_drops--;
put_page:
- put_page(virt_to_head_page(buf));
+ put_page(virt_to_head_page(buf));
out:
- if (tmp_buff != NULL)
- {
- tmp_buff->using = false;
- tmp_buff->data = NULL;
- msgq_dev->free_cnt--;
- }
- return err;
+    if (tmp_buff != NULL) {
+        tmp_buff->using = false; tmp_buff->data = NULL; msgq_dev->free_cnt--;
+    }
+ return err;
}
-static int32_t zxdh_callback_msg_handle(struct zxdh_en_device *en_dev, \
- uint8_t *buf_addr, uint32_t buf_len)
+static int32_t zxdh_callback_msg_handle(struct zxdh_en_device *en_dev,
+ uint8_t *buf_addr, uint32_t buf_len)
{
- struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- int32_t err = BAR_MSG_ERR_MODULE_NOEXIST;
- uint8_t *reps_addr = NULL;
- uint16_t reps_len = MAX_PACKET_LEN;
- uint16_t hdr_len = PRIV_HEADER_LEN;
- struct priv_queues_net_hdr *hdr = NULL;
-
- hdr = (struct priv_queues_net_hdr *)buf_addr;
- if (hdr->pi_hdr.msg_type == NO_REPS_MSG)
- {
- return call_msg_recv_func_tbl(hdr->pi_hdr.event_id, \
- buf_addr + hdr_len, buf_len - hdr_len, NULL, 0, en_dev);
- }
-
- reps_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC);
- ZXDH_CHECK_PTR_RETURN(reps_addr);
- memcpy(reps_addr, buf_addr, hdr_len);
- hdr = (struct priv_queues_net_hdr *)reps_addr;
-
- if (hdr->pi_hdr.event_id < MSG_MODULE_NUM)
- {
- err = call_msg_recv_func_tbl(hdr->pi_hdr.event_id, \
- buf_addr + hdr_len, buf_len - hdr_len, \
- reps_addr + hdr_len, &reps_len, en_dev);
- hdr->pi_hdr.msg_type = ACK_MSG;
- }
-
- if (err == BAR_MSG_ERR_MODULE_NOEXIST)
- {
- hdr->pi_hdr.err_code = ERR_CODE_EVENT_UNREGIST;
- }
- else if ((err != MSGQ_RET_OK) || (reps_len > MAX_PACKET_LEN))
- {
- LOG_ERR("get reps failed, reps_len:%d\n", reps_len);
- hdr->pi_hdr.err_code = ERR_CODE_EVENT_FAIL;
- }
-
- zxdh_swap_dst_and_src(&hdr->pi_hdr.vfid_dst, &hdr->pi_hdr.vfid_src);
- zxdh_swap_dst_and_src(&hdr->pi_hdr.qid_dst, &hdr->pi_hdr.qid_src);
-
- return page_send_cmd(msgq_dev->sq_priv, \
- reps_addr, reps_len + hdr_len, msgq_dev->print_flag);
+ struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ int32_t err = MSGQ_RET_OK;
+ uint8_t *reps_addr = NULL;
+ uint16_t reps_len = MAX_PACKET_LEN;
+ uint16_t hdr_len = PRIV_HEADER_LEN;
+ struct priv_queues_net_hdr *hdr = NULL;
+
+    hdr = (struct priv_queues_net_hdr *)buf_addr;
+    if (hdr->pi_hdr.msg_type == NO_REPS_MSG) {
+        return call_msg_recv_func_tbl(hdr->pi_hdr.event_id, buf_addr + hdr_len,
+                                      buf_len - hdr_len, NULL, 0, en_dev);
+    }
+    reps_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC);
+    ZXDH_CHECK_PTR_RETURN(reps_addr);
+    memcpy(reps_addr, buf_addr, hdr_len);
+    hdr = (struct priv_queues_net_hdr *)reps_addr;
+
+ err = call_msg_recv_func_tbl(hdr->pi_hdr.event_id, buf_addr + hdr_len,
+ buf_len - hdr_len, reps_addr + hdr_len,
+ &reps_len, en_dev);
+ hdr->pi_hdr.msg_type = ACK_MSG;
+
+ if (err == BAR_MSG_ERR_MODULE_NOEXIST) {
+ hdr->pi_hdr.err_code = ERR_CODE_EVENT_UNREGIST;
+ } else if ((err != MSGQ_RET_OK) || (reps_len > MAX_PACKET_LEN)) {
+ LOG_ERR("get reps failed, reps_len:%d\n", reps_len);
+ hdr->pi_hdr.err_code = ERR_CODE_EVENT_FAIL;
+ }
+
+ zxdh_swap_dst_and_src(&hdr->pi_hdr.vfid_dst, &hdr->pi_hdr.vfid_src);
+ zxdh_swap_dst_and_src(&hdr->pi_hdr.qid_dst, &hdr->pi_hdr.qid_src);
+
+ return page_send_cmd(msgq_dev->sq_priv, reps_addr, reps_len + hdr_len,
+ msgq_dev->print_flag);
}
-static void msgq_receive_buf(struct zxdh_en_device *en_dev, struct receive_queue *rq,
- void *buf, uint32_t len, void **ctx, struct virtnet_rq_stats *stats)
+static void msgq_receive_buf(struct zxdh_en_device *en_dev,
+ struct receive_queue *rq, void *buf, uint32_t len,
+ void **ctx, struct virtnet_rq_stats *stats)
{
- struct net_device *netdev = en_dev->netdev;
- struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf;
- uint16_t num_buf = vqm16_to_cpu(netdev, hdr->num_buffers);
- struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- int32_t err = MSGQ_RET_OK;
- uint8_t *tmp_addr = NULL;
- uint32_t tmp_addr_len = len;
- bool free_tmp_addr = false;
-
- if (msgq_dev->print_flag != 0)
- {
- LOG_DEBUG("receive pkt start, num_buf: %d\n", num_buf);
- msgq_print_data((uint8_t *)buf, len, msgq_dev->print_flag);
- }
-
- stats->xdp_drops++;
- err = zxdh_pi_header_check(&hdr->pi_hdr);
- ZXDH_CHECK_RET_GOTO_ERR(err, free_pages, "invalid pi_header\n");
-
- if (hdr->pi_hdr.msg_type == ACK_MSG)
- {
- err = zxdh_response_msg_handle(msgq_dev, stats, num_buf, buf, len);
- goto free_addr;
- }
- else if (num_buf == 1)
- {
- err = zxdh_callback_msg_handle(en_dev, (uint8_t *)buf, len);
- }
- else
- {
- tmp_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC);
- ZXDH_CHECK_PTR_GOTO_ERR(tmp_addr, free_pages);
- memcpy(tmp_addr, buf, tmp_addr_len);
- free_tmp_addr = true;
- while (--num_buf != 0)
- {
- rx_free_pages(msgq_dev, buf, len);
- buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, ctx);
- if (unlikely(buf == NULL))
- {
- LOG_ERR("msgq rx error: %dth buffers missing\n", num_buf);
- stats->drops++;
- goto free_addr;
- }
-
- memcpy(tmp_addr + tmp_addr_len, buf, len);
- tmp_addr_len += len;
- }
- err = zxdh_callback_msg_handle(en_dev, tmp_addr, tmp_addr_len);
- }
- stats->xdp_drops--;
+ struct net_device *netdev = en_dev->netdev;
+ struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf;
+ uint16_t num_buf = vqm16_to_cpu(netdev, hdr->num_buffers);
+ struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ int32_t err = MSGQ_RET_OK;
+ uint8_t *tmp_addr = NULL;
+ uint32_t tmp_addr_len = len;
+ bool free_tmp_addr = false;
+
+ if (msgq_dev->print_flag != 0) {
+ LOG_DEBUG("receive pkt start, num_buf: %d\n", num_buf);
+ msgq_print_data((uint8_t *)buf, len, msgq_dev->print_flag);
+ }
+
+ stats->xdp_drops++;
+ err = zxdh_pi_header_check(&hdr->pi_hdr);
+ ZXDH_CHECK_RET_GOTO_ERR(err, free_pages, "invalid pi_header\n");
+
+ if (hdr->pi_hdr.msg_type == ACK_MSG) {
+ err = zxdh_response_msg_handle(msgq_dev, stats, num_buf, buf, len);
+ goto free_addr;
+ } else if (num_buf == 1) {
+ err = zxdh_callback_msg_handle(en_dev, (uint8_t *)buf, len);
+ } else {
+ tmp_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC);
+ ZXDH_CHECK_PTR_GOTO_ERR(tmp_addr, free_pages);
+ memcpy(tmp_addr, buf, tmp_addr_len);
+ free_tmp_addr = true;
+ while (--num_buf != 0) {
+ rx_free_pages(msgq_dev, buf, len);
+ buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, ctx);
+ if (unlikely(buf == NULL)) {
+ LOG_ERR("msgq rx error: %dth buffers missing\n", num_buf);
+ stats->drops++;
+ goto free_addr;
+ }
+
+ memcpy(tmp_addr + tmp_addr_len, buf, len);
+ tmp_addr_len += len;
+ }
+ err = zxdh_callback_msg_handle(en_dev, tmp_addr, tmp_addr_len);
+ }
+ stats->xdp_drops--;
free_pages:
- put_page(virt_to_head_page(buf));
+ put_page(virt_to_head_page(buf));
free_addr:
- if (free_tmp_addr)
- {
- ZXDH_FREE_PTR(tmp_addr);
- }
- stats->bytes += tmp_addr_len;
- return;
+ if (free_tmp_addr) {
+ ZXDH_FREE_PTR(tmp_addr);
+ }
+ stats->bytes += tmp_addr_len;
+ return;
}
static int32_t zxdh_msgq_receive(struct receive_queue *rq, int32_t budget)
{
- struct zxdh_en_device *en_dev = netdev_priv(rq->vq->vdev);
- struct virtnet_rq_stats stats = {};
- uint32_t len = 0;
- void *buf = NULL;
- int32_t i = 0;
- void *ctx = NULL;
- uint64_t *item = NULL;
-
- while (stats.packets < budget && (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx)))
- {
- msgq_receive_buf(en_dev, rq, buf, len, &ctx, &stats);
- stats.packets++;
- }
-
- if (rq->vq->num_free > min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2)
- {
- if (!msgq_try_fill_recv(rq, GFP_ATOMIC))
- {
- LOG_ERR("msgq_try_fill_recv failed\n");
- }
- }
-
- u64_stats_update_begin(&rq->stats.syncp);
- for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++)
- {
- size_t offset = virtnet_rq_stats_desc[i].offset;
- item = (uint64_t *)((uint8_t *)&rq->stats + offset);
- *item += *(uint64_t *)((uint8_t *)&stats + offset);
- }
- u64_stats_update_end(&rq->stats.syncp);
-
- return stats.packets;
+ struct zxdh_en_device *en_dev = netdev_priv(rq->vq->vdev);
+ struct virtnet_rq_stats stats = {};
+ uint32_t len = 0;
+ void *buf = NULL;
+ int32_t i = 0;
+ void *ctx = NULL;
+ uint64_t *item = NULL;
+
+ while (stats.packets < budget &&
+ (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx))) {
+ msgq_receive_buf(en_dev, rq, buf, len, &ctx, &stats);
+ stats.packets++;
+ }
+
+ if (rq->vq->num_free >
+ min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
+ if (!msgq_try_fill_recv(rq, GFP_ATOMIC)) {
+ LOG_ERR("msgq_try_fill_recv failed\n");
+ }
+ }
+
+ u64_stats_update_begin(&rq->stats.syncp);
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+ item = (uint64_t *)((uint8_t *)&rq->stats + offset);
+ *item += *(uint64_t *)((uint8_t *)&stats + offset);
+ }
+ u64_stats_update_end(&rq->stats.syncp);
+
+ return stats.packets;
}
static void free_old_xmit_bufs(struct net_device *netdev, struct send_queue *sq)
{
- uint32_t len = 0;
- uint32_t packets = 0;
- uint32_t bytes = 0;
- void *buf = NULL;
-
- while ((buf = virtqueue_get_buf(sq->vq, &len)) != NULL)
- {
- bytes += len;
- packets++;
- ZXDH_FREE_PTR(buf);
- }
-
- if (!packets)
- {
- return;
- }
-
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
- u64_stats_update_end(&sq->stats.syncp);
+ uint32_t len = 0;
+ uint32_t packets = 0;
+ uint32_t bytes = 0;
+ void *buf = NULL;
+
+ while ((buf = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ bytes += len;
+ packets++;
+ ZXDH_FREE_PTR(buf);
+ }
+
+ if (!packets) {
+ return;
+ }
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
+ u64_stats_update_end(&sq->stats.syncp);
}
static void msgq_poll_cleantx(struct receive_queue *rq)
{
- struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- struct send_queue *sq = msgq_dev->sq_priv;
-
- if (!sq->napi.weight)
- {
- return;
- }
-
- if (spin_trylock(&msgq_dev->tx_lock))
- {
- free_old_xmit_bufs(en_dev->netdev, sq);
- spin_unlock(&msgq_dev->tx_lock);
- }
+ struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ struct send_queue *sq = msgq_dev->sq_priv;
+
+ if (!sq->napi.weight) {
+ return;
+ }
+
+ if (spin_trylock(&msgq_dev->tx_lock)) {
+ free_old_xmit_bufs(en_dev->netdev, sq);
+ spin_unlock(&msgq_dev->tx_lock);
+ }
}
int zxdh_msgq_poll(struct napi_struct *napi, int budget)
{
- struct receive_queue *rq = container_of(napi, struct receive_queue, napi);
- struct zxdh_en_device *en_dev = netdev_priv(rq->vq->vdev);
- struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
- uint32_t received = 0;
-
- if (msgq_dev->msgq_enable)
- {
- msgq_poll_cleantx(rq);
- received = zxdh_msgq_receive(rq, budget);
- }
-
- if (received < budget)
- {
- virtqueue_napi_complete(napi, rq->vq, received);
- }
-
- return received;
+ struct receive_queue *rq = container_of(napi, struct receive_queue, napi);
+ struct zxdh_en_device *en_dev = netdev_priv(rq->vq->vdev);
+ struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
+ uint32_t received = 0;
+
+ if (msgq_dev->msgq_enable) {
+ msgq_poll_cleantx(rq);
+ received = zxdh_msgq_receive(rq, budget);
+ }
+
+ if (received < budget) {
+ virtqueue_napi_complete(napi, rq->vq, received);
+ }
+
+ return received;
}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.h b/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.h
index 3ad57da3ae4cd953303a1a4c7aa12e23fa4e9797..e7d21748b323af69fe749d3cedf0067a6b7dfe37 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/priv_queue.h
@@ -12,210 +12,202 @@ extern "C" {
#include
#include "queue.h"
#include "../en_aux.h"
-#include "../../dinghai/en_np/table/include/dpp_tbl_api.h"
#define MSGQ_TEST 1
-#define MSGQ_RET_OK 0
-#define MSGQ_RET_ERR (-1)
-#define MSGQ_RET_ERR_NULL_PTR (-2)
-#define MSGQ_RET_ERR_INVALID_PARA (-3)
-#define MSGQ_RET_ERR_CHANNEL_NOT_READY (-5)
-#define MSGQ_RET_ERR_CHAN_BUSY (-6)
-#define MSGQ_RET_ERR_VQ_BROKEN (-7)
-#define MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME (-8)
-#define MSGQ_RET_ERR_CALLBACK_FAIL (-9)
-#define MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH (-10)
-#define MSGQ_RET_ERR_RX_INVALID_NUM_BUF (-11)
-
-struct reps_info
-{
- uint32_t len;
- uint8_t *addr;
+#define MSGQ_RET_OK 0
+#define MSGQ_RET_ERR (-1)
+#define MSGQ_RET_ERR_NULL_PTR (-2)
+#define MSGQ_RET_ERR_INVALID_PARA (-3)
+#define MSGQ_RET_ERR_CHANNEL_NOT_READY (-5)
+#define MSGQ_RET_ERR_CHAN_BUSY (-6)
+#define MSGQ_RET_ERR_VQ_BROKEN (-7)
+#define MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME (-8)
+#define MSGQ_RET_ERR_CALLBACK_FAIL (-9)
+#define MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH (-10)
+#define MSGQ_RET_ERR_RX_INVALID_NUM_BUF (-11)
+
+struct reps_info {
+ uint32_t len;
+ uint8_t *addr;
};
-struct msgq_pkt_info
-{
- uint32_t timeout_us;
- uint16_t event_id;
- bool is_async;
- bool no_reps;
- uint8_t msg_priority;
- uint8_t rsv;
- uint32_t len;
- uint8_t *addr;
+struct msgq_pkt_info {
+ uint32_t timeout_us;
+ uint16_t event_id;
+ bool is_async;
+ bool no_reps;
+ uint8_t msg_priority;
+ uint8_t rsv;
+ uint32_t len;
+ uint8_t *addr;
} __attribute__((packed));
/* msg_chan_pkt Definitions */
-#define MAX_PACKET_LEN (MSGQ_MAX_ADDR_LEN - PRIV_HEADER_LEN)
-#define MSGQ_MAX_ADDR_LEN 14000
-#define NO_REPS_SEQUENCE_NUM 0x8000
-
-#define TIMER_DELAY_US 100
-#define MSGQ_MAX_MSG_BUFF_NUM 1024
-#define BUFF_LEN 4096
-
-#define PRIV_HEADER_LEN sizeof(struct priv_queues_net_hdr)
-#define DEFAULT_PI_TYPE 0x00 /*NP*/
-#define CONTROL_MSG_TYPE 0x1f
-#define NEED_REPS_MSG 0x00
-#define ACK_MSG 0x01
-#define NO_REPS_MSG 0x02
-
-#define RISCV_COMMON_VFID (1192)
-#define RISCV_COMMON_QID (4092)
-
-enum msgq_err_code
-{
- ERR_CODE_INVALID_EVENTID = 1,
- ERR_CODE_EVENT_UNREGIST,
- ERR_CODE_INVALID_ACK,
- ERR_CODE_EVENT_FAIL,
- ERR_CODE_INVALID_REPS_LEN,
- ERR_CODE_PEER_BROKEN,
+#define MAX_PACKET_LEN (MSGQ_MAX_ADDR_LEN - PRIV_HEADER_LEN)
+#define MSGQ_MAX_ADDR_LEN 14000
+#define NO_REPS_SEQUENCE_NUM 0x8000
+
+#define TIMER_DELAY_US 100
+#define MSGQ_MAX_MSG_BUFF_NUM 1024
+#define BUFF_LEN 4096
+
+#define PRIV_HEADER_LEN sizeof(struct priv_queues_net_hdr)
+#define DEFAULT_PI_TYPE 0x00 /*NP*/
+#define CONTROL_MSG_TYPE 0x1f
+#define NEED_REPS_MSG 0x00
+#define ACK_MSG 0x01
+#define NO_REPS_MSG 0x02
+
+#define RISCV_COMMON_VFID (1192)
+#define RISCV_COMMON_QID (4092)
+
+#define VF_ACTIVE(VPORT) ((VPORT & 0x0800) >> 11)
+#define EPID(VPORT) ((VPORT & 0x7000) >> 12)
+#define FUNC_NUM(VPORT) ((VPORT & 0x0700) >> 8)
+#define VFUNC_NUM(VPORT) ((VPORT & 0x00FF))
+
+#define PF_VQM_VFID_OFFSET (1152)
+#define IS_PF(VPORT) (!VF_ACTIVE(VPORT))
+#define VQM_VFID(VPORT) \
+ (IS_PF(VPORT) ? (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT)) : \
+ (EPID(VPORT) * 256 + VFUNC_NUM(VPORT)))
+#define OWNER_PF_VQM_VFID(VPORT) \
+ (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT))
+
+enum msgq_err_code {
+ ERR_CODE_INVALID_EVENTID = 1,
+ ERR_CODE_EVENT_UNREGIST,
+ ERR_CODE_INVALID_ACK,
+ ERR_CODE_EVENT_FAIL,
+ ERR_CODE_INVALID_REPS_LEN,
+ ERR_CODE_PEER_BROKEN,
};
-struct pi_header
-{
- uint8_t pi_type;
- uint8_t pkt_type;
- uint16_t event_id;
- uint16_t vfid_dst;
- uint16_t qid_dst;
- uint16_t vfid_src;
- uint16_t qid_src;
- uint16_t sequence_num;
- uint8_t msg_priority;
- uint8_t msg_type;
- uint8_t err_code;
- uint8_t rsv[3];
+struct pi_header {
+ uint8_t pi_type;
+ uint8_t pkt_type;
+ uint16_t event_id;
+ uint16_t vfid_dst;
+ uint16_t qid_dst;
+ uint16_t vfid_src;
+ uint16_t qid_src;
+ uint16_t sequence_num;
+ uint8_t msg_priority;
+ uint8_t msg_type;
+ uint8_t err_code;
+ uint8_t rsv[3];
};
-struct msgq_pi_info
-{
- uint16_t event_id;
- uint16_t vfid_dst;
- uint16_t qid_dst;
- uint16_t vfid_src;
- uint16_t qid_src;
- uint16_t sequence_num;
+struct msgq_pi_info {
+ uint16_t event_id;
+ uint16_t vfid_dst;
+ uint16_t qid_dst;
+ uint16_t vfid_src;
+ uint16_t qid_src;
+ uint16_t sequence_num;
} __attribute__((packed));
-struct priv_queues_net_hdr
-{
- uint8_t tx_port;
- uint8_t pd_len;
- uint8_t num_buffers;
- uint8_t rsv;
- struct pi_header pi_hdr;
+struct priv_queues_net_hdr {
+ uint8_t tx_port;
+ uint8_t pd_len;
+ uint8_t num_buffers;
+ uint8_t rsv;
+ struct pi_header pi_hdr;
};
-struct msg_buff
-{
- bool using;
- bool valid;
- bool need_free;
- uint32_t timeout_cnt;
- uint8_t **data;
- uint32_t *data_len;
+struct msg_buff {
+ bool using;
+ bool valid;
+ bool need_free;
+ uint32_t timeout_cnt;
+ uint8_t **data;
+ uint32_t *data_len;
} __attribute__((packed));
-#define MSGQ_PRINT_HDR 1
-#define MSGQ_PRINT_128B 2
-#define MSGQ_PRINT_ALL 3
-#define MSGQ_PRINT_STA 4
-
-struct msgq_dev
-{
- bool msgq_enable;
- bool timer_in_use;
- bool loopback;
- uint8_t print_flag;
- uint16_t sequence_num;
- uint16_t free_cnt;
- uint16_t msgq_vfid;
- uint16_t msgq_rqid;
- struct send_queue *sq_priv;
- struct receive_queue *rq_priv;
- struct mutex mlock;
- struct spinlock sn_lock;
- struct spinlock tx_lock;
- struct msg_buff msg_buff_ring[MSGQ_MAX_MSG_BUFF_NUM];
- struct timer_list poll_timer;
+#define MSGQ_PRINT_HDR 1
+#define MSGQ_PRINT_128B 2
+#define MSGQ_PRINT_ALL 3
+
+struct msgq_dev {
+ bool msgq_enable;
+ bool timer_in_use;
+ bool loopback;
+ uint8_t print_flag;
+ uint16_t sequence_num;
+ uint16_t free_cnt;
+ uint16_t msgq_vfid;
+ uint16_t msgq_rqid;
+ struct send_queue *sq_priv;
+ struct receive_queue *rq_priv;
+ struct mutex mlock;
+ struct spinlock sn_lock;
+ struct spinlock tx_lock;
+ struct msg_buff msg_buff_ring[MSGQ_MAX_MSG_BUFF_NUM];
+ struct timer_list poll_timer;
} __attribute__((packed));
-#define CHECK_CHANNEL_USABLE(msgq, ret, err) \
-do \
-{ \
- if (!(msgq)->msgq_enable) \
- { \
- LOG_ERR("msgq unable\n"); \
- ret = MSGQ_RET_ERR_CHANNEL_NOT_READY; \
- goto err; \
- } \
-} while (0)
-
-#define ZXDH_CHECK_RET_RETURN(ret, fmt, arg...) \
-do \
-{ \
- if ((ret) != MSGQ_RET_OK) \
- { \
- LOG_ERR(fmt, ##arg); \
- return (ret); \
- } \
-} while (0)
-
-#define ZXDH_CHECK_RET_GOTO_ERR(ret, err, fmt, arg...) \
-do \
-{ \
- if ((ret) != MSGQ_RET_OK) \
- { \
- LOG_ERR(fmt, ##arg); \
- goto err; \
- } \
-} while (0)
-
-#define ZXDH_CHECK_PTR_RETURN(ptr) \
-do \
-{ \
- if (unlikely((ptr) == NULL)) \
- { \
- LOG_ERR("null pointer\n"); \
- return MSGQ_RET_ERR_NULL_PTR; \
- } \
-} while (0)
-
-#define ZXDH_CHECK_PTR_GOTO_ERR(ptr, err) \
-do \
-{ \
- if (unlikely((ptr) == NULL)) \
- { \
- LOG_ERR("null pointer\n"); \
- goto err; \
- } \
-} while (0)
-
-#define ZXDH_FREE_PTR(ptr) \
-do \
-{ \
- if ((ptr) != NULL) \
- { \
- kfree(ptr); \
- (ptr) = NULL; \
- } \
-} while (0)
-
-#define SEQUENCE_NUM_ADD(id) \
-do \
-{ \
- (id)++; \
- (id) %= MSGQ_MAX_MSG_BUFF_NUM; \
-} while (0)
+#define CHECK_CHANNEL_USABLE(msgq, ret, err) \
+ do { \
+ if (!(msgq)->msgq_enable) { \
+ LOG_ERR("msgq unable\n"); \
+ ret = MSGQ_RET_ERR_CHANNEL_NOT_READY; \
+ goto err; \
+ } \
+ } while (0)
+
+#define ZXDH_CHECK_RET_RETURN(ret, fmt, arg...) \
+ do { \
+ if ((ret) != MSGQ_RET_OK) { \
+ LOG_ERR(fmt, ##arg); \
+ return ret; \
+ } \
+ } while (0)
+
+#define ZXDH_CHECK_RET_GOTO_ERR(ret, err, fmt, arg...) \
+ do { \
+ if ((ret) != MSGQ_RET_OK) { \
+ LOG_ERR(fmt, ##arg); \
+ goto err; \
+ } \
+ } while (0)
+
+#define ZXDH_CHECK_PTR_RETURN(ptr) \
+ do { \
+ if (unlikely((ptr) == NULL)) { \
+ LOG_ERR("null pointer\n"); \
+ return MSGQ_RET_ERR_NULL_PTR; \
+ } \
+ } while (0)
+
+#define ZXDH_CHECK_PTR_GOTO_ERR(ptr, err) \
+ do { \
+ if (unlikely((ptr) == NULL)) { \
+ LOG_ERR("null pointer\n"); \
+ goto err; \
+ } \
+ } while (0)
+
+#define ZXDH_FREE_PTR(ptr) \
+ do { \
+ if ((ptr) != NULL) { \
+ kfree(ptr); \
+ (ptr) = NULL; \
+ } \
+ } while (0)
+
+#define SEQUENCE_NUM_ADD(id) \
+ do { \
+ (id)++; \
+ (id) %= MSGQ_MAX_MSG_BUFF_NUM; \
+ } while (0)
int32_t zxdh_msgq_init(struct zxdh_en_device *en_dev);
void zxdh_msgq_exit(struct zxdh_en_device *en_dev);
int32_t print_data(uint8_t *data, uint32_t len);
-int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev, struct msgq_pkt_info *pkt_info, struct reps_info *reps);
+int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev,
+ struct msgq_pkt_info *pkt_info,
+ struct reps_info *reps);
int zxdh_msgq_poll(struct napi_struct *napi, int budget);
#ifdef __cplusplus
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/queue.c b/src/net/drivers/net/ethernet/dinghai/en_aux/queue.c
index 1493e6de85d5306d08e7fd50b3c34c2c7b9fcc7f..cfced6924ace79125b1e962079bdf78fbb3b5e9c 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/queue.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/queue.c
@@ -1,2913 +1,2714 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "../en_aux.h"
-#include "queue.h"
-#ifdef TIME_STAMP_1588
-#include "en_1588_pkt_proc.h"
-#endif
-#ifdef ZXDH_MSGQ
-#include "priv_queue.h"
-#endif
-
-static uint32_t features_table[] =
-{
- ZXDH_NET_F_MRG_RXBUF, ZXDH_NET_F_STATUS, ZXDH_NET_F_CTRL_VQ, ZXDH_NET_F_MQ, \
- ZXDH_RING_F_INDIRECT_DESC, ZXDH_RING_F_EVENT_IDX, ZXDH_F_VERSION_1, ZXDH_F_RING_PACKED
-};
-
-void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index, uint32_t desc_num)
-{
- struct vring_virtqueue *vvq = to_vvq(vq);
- struct vring_packed_desc *desc = NULL;
- uint32_t i = 0;
-
- LOG_INFO("phy_index : %d\n", vq->phy_index);
- LOG_INFO("num free : %d\n", vq->num_free);
- LOG_INFO("vring address : 0x%llx\n", (uint64_t)&vvq->packed.vring);
- LOG_INFO("vring size : %d\n", vvq->packed.vring.num);
- LOG_INFO("last_used_idx : %d\n", vvq->last_used_idx);
- LOG_INFO("avail_wrap_counter: %d\n", vvq->packed.avail_wrap_counter);
- LOG_INFO("used_wrap_counter : %d\n", vvq->packed.used_wrap_counter);
- LOG_INFO("next_avail_idx : %d\n", vvq->packed.next_avail_idx);
- LOG_INFO("free head : %d\n", vvq->free_head);
- LOG_INFO("driver->flags : 0x%x\n", vvq->packed.vring.driver->flags);
- LOG_INFO("driver->off_wrap : %d\n", vvq->packed.vring.driver->off_wrap);
- LOG_INFO("device->flags : 0x%x\n", vvq->packed.vring.device->flags);
- LOG_INFO("device->off_wrap : %d\n", vvq->packed.vring.device->off_wrap);
- LOG_INFO("DESC[x]:\tDESC_ADDR\t[BUFFER_ADDR]\t\t[LEN]\t\t[ID]\t[FLAG]\n");
-
- desc = vvq->packed.vring.desc;
- for (i = desc_index; i < desc_num; i++)
- {
- LOG_INFO("DESC[%d] 0x%llx:\t0x%016llx\t0x%08x\t%8d\t0x%x\n", \
- i, (uint64_t)desc, desc->addr, desc->len, desc->id, desc->flags);
- desc++;
- }
-
- return;
-}
-
-/* enable irq handlers */
-void zxdh_vp_enable_cbs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
-
- for (i = 0; i< en_dev->channels_num; i++)
- {
- en_dev->ops->switch_vqs_channel(en_dev->parent, i, 1);
- }
-}
-
-/* disable irq handlers */
-void zxdh_vp_disable_cbs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
-
- for (i = 0; i< en_dev->channels_num; i++)
- {
- en_dev->ops->switch_vqs_channel(en_dev->parent, i, 0);
- }
-}
-
-void zxdh_vp_reset(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- /* 0 status means a reset. */
- en_dev->ops->set_status(en_dev->parent, 0);
-
- /* After writing 0 to device_status, the driver MUST wait for a read of
- * device_status to return 0 before reinitializing the device.
- * This will flush out the status write, and flush in device writes,
- * including MSI-X interrupts, if any.
- */
- LOG_INFO("vp reset: get_status start\n");
- while (en_dev->ops->get_status(en_dev->parent) != 0)
- {
- msleep(1);
- }
- LOG_INFO("vp reset: get_status stop\n");
-
- return;
-}
-
-void zxdh_add_status(struct net_device *netdev, uint32_t status)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t dev_status = 0;
-
- might_sleep();
-
- dev_status = en_dev->ops->get_status(en_dev->parent);
-
- en_dev->ops->set_status(en_dev->parent, (dev_status | status));
-
- return;
-}
-
-bool zxdh_has_status(struct net_device *netdev, uint32_t sbit)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint8_t dev_status = 0;
-
- dev_status = en_dev->ops->get_status(en_dev->parent);
-
- return (dev_status & sbit);
-}
-
-void zxdh_pf_features_init(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t i = 0;
- uint64_t features = 0;
-
- en_dev->device_feature = en_dev->ops->get_features(en_dev->parent);
- en_dev->device_feature |= BIT(34);
- en_dev->driver_feature = 0;
-
- for (i = 0; i < ARRAY_SIZE(features_table); i++)
- {
- features = features_table[i];
- en_dev->driver_feature |= (1ULL << features);
- }
- en_dev->guest_feature = en_dev->device_feature & 0xfffffff5dfffffff;
- LOG_INFO("device_feature: 0x%llx, guest_feature: 0x%llx\n", en_dev->device_feature, en_dev->guest_feature);
- en_dev->ops->set_features(en_dev->parent, en_dev->guest_feature);
-
- return;
-}
-
-bool zxdh_has_feature(struct net_device *netdev, uint32_t fbit)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- return en_dev->guest_feature & BIT_ULL(fbit);
-}
-
-int32_t vq2txq(struct virtqueue *vq)
-{
- return (vq->index - 1) / 2;
-}
-
-int32_t txq2vq(int32_t txq)
-{
- return txq * 2 + 1;
-}
-int32_t vq2rxq(struct virtqueue *vq)
-{
- return vq->index / 2;
-}
-
-int32_t rxq2vq(int32_t rxq)
-{
- return rxq * 2;
-}
-
-inline void vqm_mb(bool weak_barriers)
-{
- if (weak_barriers)
- {
- virt_mb();
- }
- else
- {
- mb();
- }
-}
-
-inline void vqm_rmb(bool weak_barriers)
-{
- if (weak_barriers)
- {
- virt_rmb();
- }
- else
- {
- dma_rmb();
- }
-}
-
-inline void vqm_wmb(bool weak_barriers)
-{
- if (weak_barriers)
- {
- virt_wmb();
- }
- else
- {
- dma_wmb();
- }
-}
-
-void vring_del_virtqueue(struct virtqueue *_vq)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(_vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- spin_lock(&en_dev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&en_dev->vqs_list_lock);
-
- if (vq->we_own_ring)
- {
- vring_free_queue(vq->vq.vdev,
- vq->packed.ring_size_in_bytes,
- vq->packed.vring.desc,
- vq->packed.ring_dma_addr);
-
- vring_free_queue(vq->vq.vdev,
- vq->packed.event_size_in_bytes,
- vq->packed.vring.driver,
- vq->packed.driver_event_dma_addr);
-
- vring_free_queue(vq->vq.vdev,
- vq->packed.event_size_in_bytes,
- vq->packed.vring.device,
- vq->packed.device_event_dma_addr);
-
- kfree(vq->packed.desc_state);
- vq->packed.desc_state = NULL;
- kfree(vq->packed.desc_extra);
- vq->packed.desc_extra = NULL;
- }
-
- kfree(vq);
- vq = NULL;
-}
-
-void del_vq(struct zxdh_pci_vq_info *info)
-{
- struct virtqueue *vq = info->vq;
- struct zxdh_en_priv *en_priv = netdev_priv(vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- en_dev->ops->vq_unbind_channel(en_dev->parent, vq->phy_index);
-
- en_dev->ops->vp_modern_unmap_vq_notify(en_dev->parent, vq->priv);
-
- vring_del_virtqueue(vq);
-}
-
-void vp_del_vq(struct virtqueue *vq)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_pci_vq_info *info = en_dev->vqs[vq->index];
- unsigned long flags;
-
- spin_lock_irqsave(&en_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&en_dev->lock, flags);
-
- del_vq(info);
- kfree(info);
- en_dev->vqs[vq->index] = NULL;
-}
-
-void vp_detach_vqs(void *para)
-{
- struct net_device *netdev = para;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct virtqueue *vq;
- struct virtqueue *n;
-
- list_for_each_entry_safe(vq, n, &en_dev->vqs_list, list)
- {
- vp_del_vq(vq);
- }
-}
-
-void vp_del_vqs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- vp_detach_vqs(netdev);
-
- kfree(en_dev->vqs);
- en_dev->vqs = NULL;
-}
-
-/**
- * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @_vq: the struct virtqueue containing the vring of interest.
- *
- * Returns the size of the vring. This is mainly used for boasting to
- * userspace. Unlike other operations, this need not be serialized.
- */
-uint32_t virtqueue_get_vring_size(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- return vq->packed.vring.num;
-}
-
-dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- BUG_ON(!vq->we_own_ring);
-
- return vq->packed.ring_dma_addr;
-}
-
-dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- BUG_ON(!vq->we_own_ring);
-
- return vq->packed.driver_event_dma_addr;
-}
-
-dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- BUG_ON(!vq->we_own_ring);
-
- return vq->packed.device_event_dma_addr;
-}
-
-bool vqm_has_dma_quirk(struct net_device *netdev)
-{
- /*
- * Note the reverse polarity of the quirk feature (compared to most
- * other features), this is for compatibility with legacy systems.
- */
- return !zxdh_has_feature(netdev, ZXDH_F_ACCESS_PLATFORM);
-}
-
-bool vring_use_dma_api(struct net_device *netdev)
-{
- if (!vqm_has_dma_quirk(netdev))
- {
- return true;
- }
-
- /* Otherwise, we are left to guess. */
- /*
- * In theory, it's possible to have a buggy QEMU-supposed
- * emulated Q35 IOMMU and Xen enabled at the same time. On
- * such a configuration, zxdh has never worked and will
- * not work without an even larger kludge. Instead, enable
- * the DMA API if we're a Xen guest, which at least allows
- * all of the sensible Xen configurations to work correctly.
- */
- if (xen_domain())
- {
- return true;
- }
-
- return false;
-}
-
-void vring_free_queue(struct net_device *netdev, size_t size, void *queue, dma_addr_t dma_handle)
-{
- if (vring_use_dma_api(netdev))
- {
- dma_free_coherent(netdev->dev.parent, size, queue, dma_handle);
- }
- else
- {
- free_pages_exact(queue, PAGE_ALIGN(size));
- }
-}
-
-void *vring_alloc_queue(struct net_device *netdev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
-{
- if (vring_use_dma_api(netdev))
- {
- return dma_alloc_coherent(netdev->dev.parent, size, dma_handle, flag);
- }
- else
- {
- void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
-
- if (queue)
- {
- phys_addr_t phys_addr = virt_to_phys(queue);
- *dma_handle = (dma_addr_t)phys_addr;
-
- /*
- * Sanity check: make sure we dind't truncate
- * the address. The only arches I can find that
- * have 64-bit phys_addr_t but 32-bit dma_addr_t
- * are certain non-highmem MIPS and x86
- * configurations, but these configurations
- * should never allocate physical pages above 32
- * bits, so this is fine. Just in case, throw a
- * warning and abort if we end up with an
- * unrepresentable address.
- */
- if (WARN_ON_ONCE(*dma_handle != phys_addr))
- {
- free_pages_exact(queue, PAGE_ALIGN(size));
- return NULL;
- }
- }
- return queue;
- }
-}
-
-struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq, uint32_t num)
-{
- struct vring_desc_extra *desc_extra = NULL;
- uint32_t i = 0;
-
- desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra), GFP_KERNEL);
- if (unlikely(desc_extra == NULL))
- {
- LOG_ERR("desc_extra kmalloc_array failed\n");
- return NULL;
- }
-
- memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
-
- for (i = 0; i < num - 1; i++)
- {
- desc_extra[i].next = i + 1;
- }
-
- return desc_extra;
-}
-
-struct virtqueue *vring_create_virtqueue_packed(uint32_t index,
- uint32_t num,
- uint32_t vring_align,
- struct net_device *netdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct vring_virtqueue *vq = NULL;
- struct vring_packed_desc *ring = NULL;
- struct vring_packed_desc_event *driver = NULL;
- struct vring_packed_desc_event *device = NULL;
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
-
- ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
-
- ring = vring_alloc_queue(netdev, ring_size_in_bytes, &ring_dma_addr, GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
- if (unlikely(ring == NULL))
- {
- LOG_ERR("ring vring_alloc_queue failed\n");
- goto err_ring;
- }
-
- event_size_in_bytes = sizeof(struct vring_packed_desc_event);
-
- driver = vring_alloc_queue(netdev, event_size_in_bytes, &driver_event_dma_addr, GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
- if (unlikely(driver == NULL))
- {
- LOG_ERR("driver vring_alloc_queue failed\n");
- goto err_driver;
- }
-
- device = vring_alloc_queue(netdev, event_size_in_bytes, &device_event_dma_addr, GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
- if (unlikely(device == NULL))
- {
- LOG_ERR("device vring_alloc_queue failed\n");
- goto err_device;
- }
-
- vq = kmalloc(sizeof(*vq), GFP_KERNEL);
- if (unlikely(vq == NULL))
- {
- LOG_ERR("vq kmalloc failed\n");
- goto err_vq;
- }
-
- vq->vq.callback = callback;
- vq->vq.vdev = netdev;
- vq->vq.name = name;
- vq->vq.num_free = num;
- vq->vq.index = index;
- vq->we_own_ring = true;
- vq->notify = notify;
- vq->weak_barriers = weak_barriers;
- vq->broken = false;
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
- vq->packed_ring = true;
- vq->use_dma_api = vring_use_dma_api(netdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
-
- vq->indirect = zxdh_has_feature(netdev, ZXDH_RING_F_INDIRECT_DESC) && !context;
- vq->event = zxdh_has_feature(netdev, ZXDH_RING_F_EVENT_IDX);
-
- if (zxdh_has_feature(netdev, ZXDH_F_ORDER_PLATFORM))
- {
- vq->weak_barriers = false;
- }
-
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
-
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
-
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.used_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
- if (unlikely(vq->packed.desc_state == NULL))
- {
- LOG_ERR("vq->packed.desc_state kmalloc_array failed\n");
- goto err_desc_state;
- }
-
- memset(vq->packed.desc_state, 0, num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (unlikely(vq->packed.desc_extra == NULL))
- {
- LOG_ERR("vq->packed.desc_extra vring_alloc_desc_extra failed\n");
- goto err_desc_extra;
- }
-
- /* No callback? Tell other side not to bother us. */
- if (!callback)
- {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow);
- }
-
- spin_lock(&en_dev->vqs_list_lock);
- list_add_tail(&vq->vq.list, &en_dev->vqs_list);
- spin_unlock(&en_dev->vqs_list_lock);
-
- return &vq->vq;
-
-err_desc_extra:
- kfree(vq->packed.desc_state);
- vq->packed.desc_state = NULL;
-err_desc_state:
- kfree(vq);
- vq = NULL;
-err_vq:
- vring_free_queue(netdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(netdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(netdev, ring_size_in_bytes, ring, ring_dma_addr);
-err_ring:
- return NULL;
-}
-
-/* the notify function used when creating a virt queue */
-bool vp_notify(struct virtqueue *vq)
-{
- /* we write the queue's selector into the notification register to
- * signal the other end */
- iowrite16(vq->phy_index, (void __iomem *)vq->priv);
-
- return true;
-}
-
-struct virtqueue *vp_setup_vq(struct net_device *netdev, unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx, uint16_t channel_num)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct zxdh_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
- struct virtqueue *vq = NULL;
- struct virtqueue *n = NULL;
- unsigned long flags;
- uint16_t num = 0;
- int32_t err = 0;
- struct dh_vq_handler vq_handler;
-
- /* fill out our structure that represents an active queue */
- if (unlikely(info == NULL))
- {
- LOG_ERR("info kmalloc failed\n");
- return ERR_PTR(-ENOMEM);
- }
-
- /*if (index >= en_dev->ops->get_queue_num(en_dev->parent))
- {
- LOG_ERR("index over queue nums\n");
- return ERR_PTR(-ENOENT);
- }*/
-
- /* Check if queue is either not available or already active. */
- num = en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[index]);
- // if (!num || zxdh_get_queue_enable(en_dev, en_dev->phy_index[index]))
- // {
- // return ERR_PTR(-ENOENT);
- // }
- num = ZXDH_PF_MIN_DESC_NUM;
-
- if (num & (num - 1))
- {
- LOG_ERR("bad queue size %u\n", num);
- err = -ENOMEM;
- goto out_info;
- }
-
- /* create the vring */
- vq = vring_create_virtqueue_packed(index, num, SMP_CACHE_BYTES, en_dev->netdev,
- true, true, ctx, vp_notify, callback, name);
- if (vq == NULL)
- {
- LOG_ERR("create the vring failed\n");
- err = -ENOMEM;
- goto out_info;
- }
-
- /* activate the queue */
- en_dev->ops->activate_phy_vq(en_dev->parent, en_dev->phy_index[index], virtqueue_get_vring_size(vq), virtqueue_get_desc_addr(vq), virtqueue_get_avail_addr(vq), virtqueue_get_used_addr(vq));
-
- vq->priv = (void __force *)en_dev->ops->vp_modern_map_vq_notify(en_dev->parent, en_dev->phy_index[index], NULL);
- if (!vq->priv)
- {
- LOG_ERR("vp_modern_map_vq_notify failed\n");
- err = -ENOMEM;
- goto err_map_notify;
- }
-
- vq->phy_index = en_dev->phy_index[index];
- vq->index = index;
- info->channel_num = channel_num;
-
- memset(&vq_handler, 0, sizeof(struct dh_vq_handler));
- vq_handler.callback = dh_eq_vqs_vring_int;
- if (channel_num < (en_dev->ops->get_channels_num(en_dev->parent)))
- {
- err = en_dev->ops->vqs_channel_bind_handler(en_dev->parent, channel_num, &vq_handler);
- if (err < 0)
- {
- LOG_ERR("vqs_channel_bind_handler failed: %d\n", err);
- goto err_vqs_channel_bind_handler;
- }
- }
-
- if (channel_num >= (en_dev->ops->get_channels_num(en_dev->parent)))
- {
- channel_num = en_dev->ops->get_channels_num(en_dev->parent) - 1;
- }
- err = en_dev->ops->vq_bind_channel(en_dev->parent, channel_num, en_dev->phy_index[index]);
- if (err < 0)
- {
- LOG_ERR("vq_bind_channel failed: %d\n", err);
- goto err_vq_bind_channel;
- }
-
- if (callback)
- {
- spin_lock_irqsave(&en_dev->lock, flags);
- err = en_dev->ops->vqs_bind_eqs(en_dev->parent, channel_num, &info->node);
- spin_unlock_irqrestore(&en_dev->lock, flags);
- if (err < 0)
- {
- LOG_ERR("vqs_bind_eqs failed: %d\n", err);
- goto err_vqs_bind_eqs;
- }
- }
- else
- {
- INIT_LIST_HEAD(&info->node);
- }
-
- info->vq = vq;
- en_dev->vqs[index] = info;
- return vq;
-
-err_vqs_bind_eqs:
- list_for_each_entry_safe(vq, n, &en_dev->vqs_list, list)
- {
- en_dev->ops->vq_unbind_channel(en_dev->parent, vq->phy_index);
- }
-err_vq_bind_channel:
- if (channel_num < (en_dev->ops->get_channels_num(en_dev->parent)))
- {
- en_dev->ops->vqs_channel_unbind_handler(en_dev->parent, channel_num);
- }
-err_vqs_channel_bind_handler:
- en_dev->ops->vp_modern_unmap_vq_notify(en_dev->parent, (void __iomem __force *)vq->priv);
-err_map_notify:
- vring_del_virtqueue(vq);
-out_info:
- kfree(info);
- en_dev->vqs[index] = NULL;
- return ERR_PTR(err);
-}
-
-uint32_t get_mergeable_buf_len(struct receive_queue *rq, struct ewma_pkt_len *avg_pkt_len, uint32_t room)
-{
- const size_t hdr_len = sizeof(struct zxdh_net_hdr);
- uint32_t len = 0;
-
- if (room)
- {
- return PAGE_SIZE - room;
- }
-
- len = hdr_len + clamp_t(uint32_t, ewma_pkt_len_read(avg_pkt_len), rq->min_buf_len, PAGE_SIZE - hdr_len);
-
- return ALIGN(len, L1_CACHE_BYTES);
-}
-
-/*
- * The DMA ops on various arches are rather gnarly right now, and
- * making all of the arch DMA ops work on the vring device itself
- * is a mess. For now, we use the parent device for DMA ops.
- */
-static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
-{
- return vq->vq.vdev->dev.parent; //todo
-}
-
-/* Map one sg entry. */
-dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, enum dma_data_direction direction)
-{
- if (!vq->use_dma_api)
- {
- return (dma_addr_t)sg_phys(sg);
- }
-
- /*
- * We can't use dma_map_sg, because we don't use scatterlists in
- * the way it expects (we don't guarantee that the scatterlist
- * will exist for the lifetime of the mapping).
- */
- return dma_map_page(vring_dma_dev(vq), sg_page(sg), sg->offset, sg->length, direction);
-}
-
-dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- if (!vq->use_dma_api)
- {
- return (dma_addr_t)virt_to_phys(cpu_addr);
- }
-
- return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction);
-}
-
-int32_t vring_mapping_error(const struct vring_virtqueue *vq, dma_addr_t addr)
-{
- if (!vq->use_dma_api)
- {
- return 0;
- }
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
-}
-
-/*
- * Packed ring specific functions - *_packed().
- */
-void vring_unmap_state_packed(const struct vring_virtqueue *vq, struct vring_desc_extra *state)
-{
- uint16_t flags = 0;
-
- if (!vq->use_dma_api)
- {
- return;
- }
-
- flags = state->flags;
- if (flags & VRING_DESC_F_INDIRECT)
- {
- dma_unmap_single(vring_dma_dev(vq),
- state->addr, state->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
- else
- {
- dma_unmap_page(vring_dma_dev(vq),
- state->addr, state->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
-}
-
-void vring_unmap_desc_packed(const struct vring_virtqueue *vq, struct vring_packed_desc *desc)
-{
- uint16_t flags = 0;
-
- if (!vq->use_dma_api)
- {
- return;
- }
-
- flags = le16_to_cpu(desc->flags);
-
- if (flags & VRING_DESC_F_INDIRECT)
- {
- dma_unmap_single(vring_dma_dev(vq),
- le64_to_cpu(desc->addr),
- le32_to_cpu(desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
- else
- {
- dma_unmap_page(vring_dma_dev(vq),
- le64_to_cpu(desc->addr),
- le32_to_cpu(desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
-}
-
-void *mergeable_len_to_ctx(uint32_t truesize, uint32_t headroom)
-{
- return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
-}
-
-inline bool virtqueue_use_indirect(struct virtqueue *_vq, unsigned int total_sg)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- /*
- * If the host supports indirect descriptor tables, and we have multiple
- * buffers, then go indirect. FIXME: tune this threshold
- */
- return (vq->indirect && total_sg > 1 && vq->vq.num_free);
-}
-
-struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, gfp_t gfp)
-{
- struct vring_packed_desc *desc = NULL;
-
- /*
- * We require lowmem mappings for the descriptors because
- * otherwise virt_to_phys will give us bogus addresses in the
- * virtqueue.
- */
- gfp &= ~__GFP_HIGHMEM;
-
- desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
-
- return desc;
-}
-
-int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
- struct scatterlist *sgs[],
- unsigned int total_sg,
- unsigned int out_sgs,
- unsigned int in_sgs,
- void *data,
- gfp_t gfp)
-{
- struct vring_packed_desc *desc = NULL;
- struct scatterlist *sg = NULL;
- uint32_t i = 0;
- uint32_t n = 0;
- uint32_t err_idx = 0;
- uint16_t head = 0;
- uint16_t id = 0;
- dma_addr_t addr;
-
- head = vq->packed.next_avail_idx;
- desc = alloc_indirect_packed(total_sg, gfp);
- if (desc == NULL)
- {
- LOG_ERR("desc alloc_indirect_packed failed\n");
- return -ENOMEM;
- }
-
- if (unlikely(vq->vq.num_free < 1))
- {
- LOG_DEBUG("can't add buf len 1 - avail = 0\n");
- kfree(desc);
- END_USE(vq);
- return -ENOSPC;
- }
-
- i = 0;
- id = vq->free_head;
- BUG_ON(id == vq->packed.vring.num);
-
- for (n = 0; n < out_sgs + in_sgs; n++)
- {
- for (sg = sgs[n]; sg; sg = sg_next(sg))
- {
- addr = vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
- {
- LOG_ERR("vring_map_one_sg error\n");
- goto unmap_release;
- }
-
- desc[i].flags = cpu_to_le16(n < out_sgs ? 0 : VRING_DESC_F_WRITE);
- desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
- i++;
- }
- }
-
- /* Now that the indirect table is filled in, map it. */
- addr = vring_map_single(vq, desc, total_sg * sizeof(struct vring_packed_desc), DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
- {
- LOG_ERR("vring_map_single error\n");
- goto unmap_release;
- }
-
- vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
- vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * sizeof(struct vring_packed_desc));
- vq->packed.vring.desc[head].id = cpu_to_le16(id);
-
- if (vq->use_dma_api)
- {
- vq->packed.desc_extra[id].addr = addr;
- vq->packed.desc_extra[id].len = total_sg * sizeof(struct vring_packed_desc);
- vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags;
- }
-
- /*
- * A driver MUST NOT make the first descriptor in the list
- * available before all subsequent descriptors comprising
- * the list are made available.
- */
- vqm_wmb(vq->weak_barriers);
- vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags);
-
- /* We're using some buffers from the free list. */
- vq->vq.num_free -= 1;
-
- /* Update free pointer */
- n = head + 1;
- if (n >= vq->packed.vring.num)
- {
- n = 0;
- vq->packed.avail_wrap_counter ^= 1;
- vq->packed.avail_used_flags ^=
- 1 << VRING_PACKED_DESC_F_AVAIL |
- 1 << VRING_PACKED_DESC_F_USED;
- }
- vq->packed.next_avail_idx = n;
- vq->free_head = vq->packed.desc_extra[id].next;
-
- /* Store token and indirect buffer state. */
- vq->packed.desc_state[id].num = 1;
- vq->packed.desc_state[id].data = data;
- vq->packed.desc_state[id].indir_desc = desc;
- vq->packed.desc_state[id].last = id;
-
- vq->num_added += 1;
-
- //LOG_DEBUG("added buffer head %i to %p\n", head, vq);
- END_USE(vq);
-
- return 0;
-
-unmap_release:
- err_idx = i;
-
- for (i = 0; i < err_idx; i++)
- {
- vring_unmap_desc_packed(vq, &desc[i]);
- }
-
- kfree(desc);
-
- END_USE(vq);
- return -ENOMEM;
-}
-
-int32_t virtqueue_add_packed(struct virtqueue *_vq,
- struct scatterlist *sgs[],
- uint32_t total_sg,
- uint32_t out_sgs,
- uint32_t in_sgs,
- void *data,
- void *ctx,
- gfp_t gfp)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- struct vring_packed_desc *desc = NULL;
- struct scatterlist *sg = NULL;
- uint32_t i = 0;
- uint32_t n = 0;
- uint32_t c = 0;
- uint32_t descs_used = 0;
- uint32_t err_idx = 0;
- __le16 head_flags = 0;
- __le16 flags = 0;
- uint16_t head = 0;
- uint16_t id = 0;
- uint16_t prev = 0;
- uint16_t curr = 0;
- uint16_t avail_used_flags = 0;
- int32_t err = 0;
-
- START_USE(vq);
-
- BUG_ON(data == NULL);
- BUG_ON(ctx && vq->indirect);
-
- if (unlikely(vq->broken))
- {
- LOG_ERR("vq->broken\n");
- END_USE(vq);
- return -EIO;
- }
-
- LAST_ADD_TIME_UPDATE(vq);
-
- BUG_ON(total_sg == 0);
-
- if (virtqueue_use_indirect(_vq, total_sg))
- {
- err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
- if (err != -ENOMEM)
- {
- END_USE(vq);
- return err;
- }
- /* fall back on direct */
- }
-
- head = vq->packed.next_avail_idx;
- avail_used_flags = vq->packed.avail_used_flags;
-
- WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
-
- desc = vq->packed.vring.desc;
- i = head;
- descs_used = total_sg;
-
- if (unlikely(vq->vq.num_free < descs_used))
- {
- LOG_ERR("can't add buf len %i - avail = %i\n", descs_used, vq->vq.num_free);
- END_USE(vq);
- return -ENOSPC;
- }
-
- id = vq->free_head;
- BUG_ON(id == vq->packed.vring.num);
-
- curr = id;
- c = 0;
- for (n = 0; n < out_sgs + in_sgs; n++)
- {
- for (sg = sgs[n]; sg; sg = sg_next(sg))
- {
- dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
- {
- LOG_ERR("vring_map_one_sg error\n");
- goto unmap_release;
- }
-
- flags = cpu_to_le16(vq->packed.avail_used_flags |
- (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
- (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
-
- desc[i].addr = cpu_to_le64(addr);
- desc[i].len = cpu_to_le32(sg->length);
- desc[i].id = cpu_to_le16(id);
-
- if (i == head)
- {
- head_flags = flags;
- }
- else
- {
- desc[i].flags = flags;
- }
-
- if (unlikely(vq->use_dma_api))
- {
- vq->packed.desc_extra[curr].addr = addr;
- vq->packed.desc_extra[curr].len = sg->length;
- vq->packed.desc_extra[curr].flags = le16_to_cpu(flags);
- }
- prev = curr;
- curr = vq->packed.desc_extra[curr].next;
-
- if ((unlikely(++i >= vq->packed.vring.num)))
- {
- i = 0;
- vq->packed.avail_used_flags ^=
- 1 << VRING_PACKED_DESC_F_AVAIL |
- 1 << VRING_PACKED_DESC_F_USED;
- }
- }
- }
-
- if (i < head)
- {
- vq->packed.avail_wrap_counter ^= 1;
- }
-
- /* We're using some buffers from the free list. */
- vq->vq.num_free -= descs_used;
-
- /* Update free pointer */
- vq->packed.next_avail_idx = i;
- vq->free_head = curr;
-
- /* Store token. */
- vq->packed.desc_state[id].num = descs_used;
- vq->packed.desc_state[id].data = data;
- vq->packed.desc_state[id].indir_desc = ctx;
- vq->packed.desc_state[id].last = prev;
-
- /*
- * A driver MUST NOT make the first descriptor in the list
- * available before all subsequent descriptors comprising
- * the list are made available.
- */
- vqm_wmb(vq->weak_barriers);
- vq->packed.vring.desc[head].flags = head_flags;
- vq->num_added += descs_used;
-
- //LOG_INFO("added buffer head %i to %p\n", head, vq);
- END_USE(vq);
-
- return 0;
-
-unmap_release:
- err_idx = i;
- i = head;
- curr = vq->free_head;
-
- vq->packed.avail_used_flags = avail_used_flags;
-
- for (n = 0; n < total_sg; n++)
- {
- if (i == err_idx)
- {
- break;
- }
-
- vring_unmap_state_packed(vq, &vq->packed.desc_extra[curr]);
- curr = vq->packed.desc_extra[curr].next;
- i++;
- if (i >= vq->packed.vring.num)
- {
- i = 0;
- }
- }
-
- END_USE(vq);
- return -EIO;
-}
-
-/**
- * virtqueue_add_inbuf_ctx - expose input buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg writable by other side
- * @data: the token identifying the buffer.
- * @ctx: extra context for the token
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq,
- struct scatterlist *sg, uint32_t num,
- void *data,
- void *ctx,
- gfp_t gfp)
-{
- return virtqueue_add_packed(vq, &sg, num, 0, 1, data, ctx, gfp);
-}
-
-bool is_used_desc_packed(struct vring_virtqueue *vq, uint16_t idx, bool used_wrap_counter)
-{
- bool avail = false;
- bool used = false;
- uint16_t flags = 0;
-
- flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
- avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
- used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
-
- return avail == used && used == used_wrap_counter;
-}
-
-bool virtqueue_poll_packed(struct virtqueue *_vq, uint16_t off_wrap)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- bool wrap_counter = false;
- uint16_t used_idx = 0;
-
- wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
- used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
-
- return is_used_desc_packed(vq, used_idx, wrap_counter);
-}
-
-/**
- * virtqueue_poll - query pending used buffers
- * @_vq: the struct virtqueue we're talking about.
- * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
- *
- * Returns "true" if there are pending used buffers in the queue.
- *
- * This does not need to be serialized.
- */
-bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (unlikely(vq->broken))
- {
- LOG_ERR("vq->broken\n");
- return false;
- }
-
- vqm_mb(vq->weak_barriers);
- return virtqueue_poll_packed(_vq, last_used_idx);
-}
-
-unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- START_USE(vq);
-
- /*
- * We optimistically turn back on interrupts, then check if there was
- * more to do.
- */
- if (vq->event)
- {
- vq->packed.vring.driver->off_wrap =
- cpu_to_le16(vq->last_used_idx | (vq->packed.used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
- /*
- * We need to update event offset and event wrap
- * counter first before updating event flags.
- */
- vqm_wmb(vq->weak_barriers);
- }
-
- if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE)
- {
- vq->packed.event_flags_shadow = vq->event ?
- VRING_PACKED_EVENT_FLAG_DESC :
- VRING_PACKED_EVENT_FLAG_ENABLE;
- vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow);
- }
-
- END_USE(vq);
- return vq->last_used_idx | ((uint16_t)vq->packed.used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
-}
-
-int32_t virtqueue_enable_cb_prepare(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (vq->event_triggered)
- {
- vq->event_triggered = false;
- }
-
- return virtqueue_enable_cb_prepare_packed(_vq);
-}
-
-bool more_used_packed(struct vring_virtqueue *vq)
-{
- return is_used_desc_packed(vq, vq->last_used_idx, vq->packed.used_wrap_counter);
-}
-
-void detach_buf_packed(struct vring_virtqueue *vq, uint32_t id, void **ctx)
-{
- struct vring_desc_state_packed *state = NULL;
- struct vring_packed_desc *desc = NULL;
- uint32_t i = 0;
- uint32_t curr = 0;
-
- state = &vq->packed.desc_state[id];
-
- /* Clear data ptr. */
- state->data = NULL;
-
- vq->packed.desc_extra[state->last].next = vq->free_head;
- vq->free_head = id;
- vq->vq.num_free += state->num;
-
- if (unlikely(vq->use_dma_api))
- {
- curr = id;
- for (i = 0; i < state->num; i++)
- {
- vring_unmap_state_packed(vq, &vq->packed.desc_extra[curr]);
- curr = vq->packed.desc_extra[curr].next;
- }
- }
-
- if (vq->indirect)
- {
- uint32_t len;
-
- /* Free the indirect table, if any, now that it's unmapped. */
- desc = state->indir_desc;
- if (!desc)
- {
- return;
- }
-
- if (vq->use_dma_api)
- {
- len = vq->packed.desc_extra[id].len;
- for (i = 0; i < len / sizeof(struct vring_packed_desc); i++)
- {
- vring_unmap_desc_packed(vq, &desc[i]);
- }
- }
- kfree(desc);
- state->indir_desc = NULL;
- }
- else if (ctx)
- {
- *ctx = state->indir_desc;
- }
-}
-
-void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len, void **ctx)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- uint16_t last_used = 0;
- uint16_t id = 0;
- void *ret = NULL;
-
- START_USE(vq);
-
- if (unlikely(vq->broken))
- {
- END_USE(vq);
- return NULL;
- }
-
- if (!more_used_packed(vq))
- {
- //LOG_ERR("no more buffers in queue\n");
- END_USE(vq);
- return NULL;
- }
-
- /* Only get used elements after they have been exposed by host. */
- vqm_rmb(vq->weak_barriers);
-
- last_used = vq->last_used_idx;
- id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
- *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
-
- if (unlikely(id >= vq->packed.vring.num))
- {
- zxdh_print_vring_info(_vq, 0, vq->packed.vring.num);
- BAD_RING(vq, "id %u out of range\n", id);
- return NULL;
- }
- if (unlikely(!vq->packed.desc_state[id].data))
- {
- zxdh_print_vring_info(_vq, 0, vq->packed.vring.num);
- BAD_RING(vq, "id %u is not a head!\n", id);
- return NULL;
- }
-
- /* detach_buf_packed clears data, so grab it now. */
- ret = vq->packed.desc_state[id].data;
- detach_buf_packed(vq, id, ctx);
-
- vq->last_used_idx += vq->packed.desc_state[id].num;
- if (unlikely(vq->last_used_idx >= vq->packed.vring.num))
- {
- vq->last_used_idx -= vq->packed.vring.num;
- vq->packed.used_wrap_counter ^= 1;
- }
-
- /*
- * If we expect an interrupt for the next entry, tell host
- * by writing event index and flush out the write before
- * the read in the next get_buf call.
- */
- if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
- vqm_store_mb(vq->weak_barriers,
- &vq->packed.vring.driver->off_wrap,
- cpu_to_le16(vq->last_used_idx |
- (vq->packed.used_wrap_counter <<
- VRING_PACKED_EVENT_F_WRAP_CTR)));
-
- LAST_ADD_TIME_INVALID(vq);
-
- END_USE(vq);
- return ret;
-}
-
-void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len)
-{
- return virtqueue_get_buf_ctx_packed(_vq, len, NULL);
-}
-
-/*
- * private is used to chain pages for big packets, put the whole
- * most recent used list in the beginning for reuse
- */
-void give_pages(struct receive_queue *rq, struct page *page)
-{
- struct page *end = NULL;
-
- /* Find end of list, sew whole thing into vi->rq.pages. */
- for (end = page; end->private; end = (struct page *)end->private);
- end->private = (unsigned long)rq->pages;
- rq->pages = page;
-}
-
-void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, bool in_napi)
-{
- uint32_t len = 0;
- uint32_t packets = 0;
- uint32_t bytes = 0;
- void *ptr = NULL;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL)
- {
- struct sk_buff *skb = ptr;
-
- //LOG_DEBUG("sent skb %p\n", skb);
-
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- packets++;
- }
-
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- {
- return;
- }
-
- u64_stats_update_begin(&sq->stats.syncp);
- sq->stats.bytes += bytes;
- sq->stats.packets += packets;
- netdev->stats.tx_bytes += bytes;
- netdev->stats.tx_packets += packets;
- u64_stats_update_end(&sq->stats.syncp);
-}
-
-void virtqueue_disable_cb_packed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE)
- {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow);
- }
-}
-
-/**
- * virtqueue_disable_cb - disable callbacks
- * @_vq: the struct virtqueue we're talking about.
- *
- * Note that this is not necessarily synchronous, hence unreliable and only
- * useful as an optimization.
- *
- * Unlike other operations, this need not be serialized.
- */
-void virtqueue_disable_cb(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- /* If device triggered an event already it won't trigger one again:
- * no need to disable.
- */
- if (vq->event_triggered)
- {
- return;
- }
-
- virtqueue_disable_cb_packed(_vq);
-}
-
-void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq)
-{
- if (napi_schedule_prep(napi))
- {
- virtqueue_disable_cb(vq);
- __napi_schedule(napi);
- }
-}
-
-void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
-{
- napi_enable(napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets now.
- * Call local_bh_enable after to trigger softIRQ processing.
- */
- local_bh_disable();
- virtqueue_napi_schedule(napi, vq);
- local_bh_enable();
-}
-
-void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq, struct napi_struct *napi)
-{
- if (!napi->weight)
- {
- return;
- }
-
- virtnet_napi_enable(vq, napi);
-
- return;
-}
-
-void virtnet_napi_tx_disable(struct napi_struct *napi)
-{
- if (napi->weight)
- {
- napi_disable(napi);
- }
-}
-
-int virtnet_poll_tx(struct napi_struct *napi, int budget)
-{
- struct send_queue *sq = container_of(napi, struct send_queue, napi);
- struct zxdh_en_priv *en_priv = netdev_priv(sq->vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t index = vq2txq(sq->vq);
- struct netdev_queue *txq = NULL;
- int32_t opaque = 0;
- bool done = false;
-
- txq = netdev_get_tx_queue(en_dev->netdev, index);
- __netif_tx_lock(txq, raw_smp_processor_id());
- virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(en_dev->netdev, sq, true);
-
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
- {
- netif_tx_wake_queue(txq);
- }
-
- opaque = virtqueue_enable_cb_prepare(sq->vq);
-
- done = napi_complete_done(napi, 0);
-
- if (!done)
- {
- virtqueue_disable_cb(sq->vq);
- }
-
- __netif_tx_unlock(txq);
-
- if (done)
- {
- if (unlikely(virtqueue_poll(sq->vq, opaque)))
- {
- if (napi_schedule_prep(napi))
- {
- __netif_tx_lock(txq, raw_smp_processor_id());
- virtqueue_disable_cb(sq->vq);
- __netif_tx_unlock(txq);
- __napi_schedule(napi);
- }
- }
- }
-
- return 0;
-}
-
-bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- uint16_t used_idx = 0;
- uint16_t wrap_counter = 0;
- uint16_t bufs = 0;
-
- START_USE(vq);
-
- /*
- * We optimistically turn back on interrupts, then check if there was
- * more to do.
- */
-
- if (vq->event)
- {
- /* TODO: tune this threshold */
- bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
- wrap_counter = vq->packed.used_wrap_counter;
-
- used_idx = vq->last_used_idx + bufs;
- if (used_idx >= vq->packed.vring.num)
- {
- used_idx -= vq->packed.vring.num;
- wrap_counter ^= 1;
- }
-
- vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
- (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
-
- /*
- * We need to update event offset and event wrap
- * counter first before updating event flags.
- */
- vqm_wmb(vq->weak_barriers);
- }
-
- if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE)
- {
- vq->packed.event_flags_shadow = vq->event ?
- VRING_PACKED_EVENT_FLAG_DESC :
- VRING_PACKED_EVENT_FLAG_ENABLE;
- vq->packed.vring.driver->flags = cpu_to_le16(vq->packed.event_flags_shadow);
- }
-
- /*
- * We need to update event suppression structure first
- * before re-checking for more used buffers.
- */
- vqm_mb(vq->weak_barriers);
-
- if (is_used_desc_packed(vq, vq->last_used_idx, vq->packed.used_wrap_counter))
- {
- END_USE(vq);
- return false;
- }
-
- END_USE(vq);
- return true;
-}
-uint16_t __vqm16_to_cpu(bool little_endian, __vqm16 val)
-{
- if (little_endian)
- {
- return le16_to_cpu((__force __le16)val);
- }
- else
- {
- return be16_to_cpu((__force __be16)val);
- }
-}
-
-static inline bool zxdh_legacy_is_little_endian(void)
-{
-#ifdef __LITTLE_ENDIAN
- return true;
-#else
- return false;
-#endif
-}
-
-bool zxdh_is_little_endian(struct net_device *dev)
-{
- return zxdh_has_feature(dev, ZXDH_F_VERSION_1) || zxdh_legacy_is_little_endian();
-}
-
-/* Memory accessors */
-uint16_t vqm16_to_cpu(struct net_device *netdev, __vqm16 val)
-{
- return __vqm16_to_cpu(zxdh_is_little_endian(netdev), val);
-}
-
-uint32_t mergeable_ctx_to_headroom(void *mrg_ctx)
-{
- return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
-}
-
-uint32_t mergeable_ctx_to_truesize(void *mrg_ctx)
-{
- return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
-}
-
-/**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @_vq: the struct virtqueue we're talking about.
- *
- * This re-enables callbacks but hints to the other side to delay
- * interrupts until most of the available buffers have been processed;
- * it returns "false" if there are many pending buffers in the queue,
- * to detect a possible race between the driver checking for more work,
- * and enabling callbacks.
- *
- * Caller must ensure we don't call this with other virtqueue
- * operations at the same time (except where noted).
- */
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (vq->event_triggered)
- {
- vq->event_triggered = false;
- }
-
- return virtqueue_enable_cb_delayed_packed(_vq);
-}
-
-void virtnet_poll_cleantx(struct receive_queue *rq)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t index = vq2rxq(rq->vq);
- struct send_queue *sq = &en_dev->sq[index];
- struct netdev_queue *txq = netdev_get_tx_queue(en_dev->netdev, index);
-
- if (!sq->napi.weight)
- {
- return;
- }
-
- if (__netif_tx_trylock(txq))
- {
- do
- {
- virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(en_dev->netdev, sq, true);
- } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
-
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
- {
- netif_tx_wake_queue(txq);
- }
-
- __netif_tx_unlock(txq);
- }
-}
-
-inline struct zxdh_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
-{
- return (struct zxdh_net_hdr *)skb->cb;
-}
-
-/* Called from bottom half context */
-struct sk_buff *page_to_skb(struct zxdh_en_device *en_dev,
- struct receive_queue *rq,
- struct page *page, uint32_t offset,
- uint32_t len, uint32_t truesize,
- uint32_t metasize, uint32_t headroom)
-{
- struct sk_buff *skb = NULL;
- struct zxdh_net_hdr *hdr = NULL;
- uint32_t copy = 0;
- uint32_t hdr_len = 0;
- struct page *page_to_free = NULL;
- int32_t tailroom = 0;
- int32_t shinfo_size = 0;
- char *p = NULL;
- char *hdr_p = NULL;
- char *buf = NULL;
- uint32_t hdr_len_tmp = 0;
-
- p = page_address(page) + offset;
- hdr_p = p;
-
- hdr_len = (((struct zxdh_net_hdr *)hdr_p)->pd_len) * HDR_2B_UNIT;
-
- /* If headroom is not 0, there is an offset between the beginning of the
- * data and the allocated space, otherwise the data and the allocated
- * space are aligned.
- *
- * Buffers with headroom use PAGE_SIZE as alloc size, see
- * add_recvbuf_mergeable() + get_mergeable_buf_len()
- */
- truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - headroom;
- buf = p - headroom;
-
- len -= hdr_len;
- offset += hdr_len;
- p += hdr_len;
- tailroom -= hdr_len + len;
-
- shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- /* copy small packet so we can reuse these pages */
- if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size)
- {
- skb = build_skb(buf, truesize);
- if (unlikely(!skb))
- {
- LOG_ERR("build_skb is null\n");
- return NULL;
- }
-
- skb_reserve(skb, p - buf);
- skb_put(skb, len);
-
- page = (struct page *)page->private;
- if (page)
- {
- give_pages(rq, page);
- }
- goto ok;
- }
-
- /* copy small packet so we can reuse these pages for small data */
- skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
- if (unlikely(!skb))
- {
- LOG_ERR("napi_alloc_skb is null\n");
- return NULL;
- }
-
- /* Copy all frame if it fits skb->head */
- if (len <= skb_tailroom(skb))
- {
- copy = len;
- }
- else
- {
- copy = ETH_HLEN + metasize;
- }
- skb_put_data(skb, p, copy);
-
- len -= copy;
- offset += copy;
-
- if (len)
- {
- skb_add_rx_frag(skb, 0, page, offset, len, truesize);
- }
- else
- {
- page_to_free = page;
- }
-
-ok:
- hdr = skb_vnet_hdr(skb);
- hdr_len_tmp = hdr_len > 48 ? 48 : hdr_len; //todo
- memcpy(hdr, hdr_p, hdr_len_tmp);
- //memcpy(hdr, hdr_p, hdr_len);
-
- if (page_to_free)
- {
- put_page(page_to_free);
- }
-
- if (metasize)
- {
- __skb_pull(skb, metasize);
- skb_metadata_set(skb, metasize);
- }
-
- return skb;
-}
-/**
- * virtqueue_add_outbuf - expose output buffers to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: scatterlist (must be well-formed and terminated!)
- * @num: the number of entries in @sg readable by other side
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
- */
-int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, gfp_t gfp)
-{
- return virtqueue_add_packed(vq, &sg, num, 1, 0, data, NULL, gfp);
-}
-
-struct sk_buff *receive_mergeable(struct net_device *netdev,
- struct zxdh_en_device *en_dev,
- struct receive_queue *rq,
- void *buf,
- void *ctx,
- uint32_t len,
- struct virtnet_rq_stats *stats)
-{
- struct zxdh_net_hdr *hdr = buf;
- uint16_t num_buf = vqm16_to_cpu(netdev, hdr->num_buffers);
- struct page *page = virt_to_head_page(buf);
- int32_t offset = buf - page_address(page);
- struct sk_buff *head_skb = NULL;
- struct sk_buff *curr_skb = NULL;
- uint32_t truesize = mergeable_ctx_to_truesize(ctx);
- uint32_t headroom = mergeable_ctx_to_headroom(ctx);
- uint32_t metasize = 0;
-
- stats->bytes += (len - (hdr->pd_len * HDR_2B_UNIT));
- netdev->stats.rx_bytes += (len - (hdr->pd_len * HDR_2B_UNIT));
-
- if (unlikely(len > truesize))
- {
- LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", netdev->name, len, (unsigned long)ctx);
- netdev->stats.rx_length_errors++;
- netdev->stats.rx_errors++;
- goto err_skb;
- }
-
- head_skb = page_to_skb(en_dev, rq, page, offset, len, truesize, metasize, headroom);
- curr_skb = head_skb;
-
- if (unlikely(!curr_skb))
- {
- LOG_ERR("page_to_skb is null\n");
- goto err_skb;
- }
- while (--num_buf)
- {
- int32_t num_skb_frags;
-
- buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx);
- if (unlikely(!buf))
- {
- LOG_ERR("%s: rx error: %d buffers out of %d missing\n", netdev->name, num_buf, vqm16_to_cpu(netdev, hdr->num_buffers));
- netdev->stats.rx_length_errors++;
- netdev->stats.rx_errors++;
- goto err_buf;
- }
-
- stats->bytes += len;
- netdev->stats.rx_bytes += len;
- page = virt_to_head_page(buf);
-
- truesize = mergeable_ctx_to_truesize(ctx);
- if (unlikely(len > truesize))
- {
- LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", netdev->name, len, (unsigned long)ctx);
- netdev->stats.rx_length_errors++;
- netdev->stats.rx_errors++;
- goto err_skb;
- }
-
- num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
- if (unlikely(num_skb_frags == MAX_SKB_FRAGS))
- {
- struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
- if (unlikely(!nskb))
- {
- LOG_ERR("alloc_skb is null\n");
- goto err_skb;
- }
- if (curr_skb == head_skb)
- {
- skb_shinfo(curr_skb)->frag_list = nskb;
- }
- else
- {
- curr_skb->next = nskb;
- }
- curr_skb = nskb;
- head_skb->truesize += nskb->truesize;
- num_skb_frags = 0;
- }
-
- if (curr_skb != head_skb)
- {
- head_skb->data_len += len;
- head_skb->len += len;
- head_skb->truesize += truesize;
- }
- offset = buf - page_address(page);
-
- if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset))
- {
- put_page(page);
- skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, len, truesize);
- }
- else
- {
- skb_add_rx_frag(curr_skb, num_skb_frags, page, offset, len, truesize);
- }
- }
-
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
- return head_skb;
-
-err_skb:
- put_page(page);
- while (num_buf-- > 1)
- {
- buf = virtqueue_get_buf(rq->vq, &len);
- if (unlikely(!buf))
- {
- LOG_ERR("%s: rx error: %d buffers missing\n", netdev->name, num_buf);
- netdev->stats.rx_length_errors++;
- netdev->stats.rx_errors++;
- break;
- }
- stats->bytes += len;
- page = virt_to_head_page(buf);
- put_page(page);
- }
-err_buf:
- stats->drops++;
- netdev->stats.rx_dropped++;
- dev_kfree_skb(head_skb);
- return NULL;
-}
-
-static bool is_dev_configed_vlan(struct zxdh_en_device *en_dev)
-{
- return (en_dev->vlan_dev.vlan_id != 0);
-}
-
-void receive_buf(struct zxdh_en_device *en_dev, struct receive_queue *rq,
- void *buf, uint32_t len, void **ctx,
- struct virtnet_rq_stats *stats)
-{
- bool vlan_striped, qinq_striped;
- struct net_device *netdev = en_dev->netdev;
- struct sk_buff *skb = NULL;
- struct zxdh_net_hdr_rcv *hdr_rcv = (struct zxdh_net_hdr_rcv *)buf;
-
-#ifdef TIME_STAMP_1588
- int32_t ret = 0;
-#endif
-
- if (unlikely(len < (hdr_rcv->pd_len * HDR_2B_UNIT) + ETH_HLEN))
- {
- LOG_ERR("%s: short packet %i\n", netdev->name, len);
- netdev->stats.rx_length_errors++;
- netdev->stats.rx_errors++;
-
- put_page(virt_to_head_page(buf));
- return;
- }
-
- skb = receive_mergeable(netdev, en_dev, rq, buf, ctx, len, stats);
-
- if (unlikely(!skb))
- {
- LOG_ERR("skb receive_mergeable null\n");
- return;
- }
-
- /* rx packet contain the strip label & open rxvlan offloading*/
- vlan_striped = hdr_rcv->pd_hdr.flags & RX_VLAN_STRIPED_MASK && !is_dev_configed_vlan(en_dev);
- qinq_striped = hdr_rcv->pd_hdr.flags & RX_QINQ_STRIPED_MASK;
- if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_striped || qinq_striped))
- {
- u16 vid = htons(hdr_rcv->pd_hdr.striped_ctci) & RX_TPID_VLAN_ID_MASK;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-
- if ((netdev->features & NETIF_F_RXCSUM) && !(hdr_rcv->pi_hdr.error_code[1] & PI_HDR_L4_CHKSUM_ERROR_CODE)
- && !(hdr_rcv->pi_hdr.error_code[0] & PI_HDR_L3_CHKSUM_ERROR_CODE) && !(hdr_rcv->pd_hdr.flags & OUTER_IP_CHKSUM_ERROT_CODE))
- {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- else
- {
- skb->ip_summed = CHECKSUM_NONE;
- }
-
-#ifdef TIME_STAMP_1588
- ret = pkt_1588_proc_rcv(skb, hdr_rcv, en_dev->clock_no, en_dev);
- if ((ret != PTP_SUCCESS) && (ret != IS_NOT_PTP_MSG))
- {
- LOG_ERR("pkt_1588_proc_rcv err!!!\n");
- return;
- }
-#endif
-
- skb_record_rx_queue(skb, vq2rxq(rq->vq));
- skb->protocol = eth_type_trans(skb, netdev);
-
- //LOG_INFO("receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type);
-
- napi_gro_receive(&rq->napi, skb);
- return;
-}
-
-/**
- * virtqueue_notify - second half of split virtqueue_kick call.
- * @_vq: the struct virtqueue
- *
- * This does not need to be serialized.
- *
- * Returns false if host notify failed or queue is broken, otherwise true.
- */
-bool virtqueue_notify(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (unlikely(vq->broken))
- {
- LOG_ERR("vq->broken\n");
- return false;
- }
-
- /* Prod other side to tell it about changes. */
- if (!vq->notify(_vq))
- {
- LOG_ERR("vq->notify(_vq) failed\n");
- vq->broken = true;
- return false;
- }
-
- return true;
-}
-
-int32_t add_recvbuf_mergeable(struct zxdh_en_device *en_dev, struct receive_queue *rq, gfp_t gfp)
-{
- struct page_frag *alloc_frag = &rq->alloc_frag;
- uint32_t headroom = 0;
- uint32_t tailroom = 0;
- uint32_t room = SKB_DATA_ALIGN(headroom + tailroom);
- char *buf = NULL;
- void *ctx = NULL;
- int32_t err = 0;
- uint32_t len = 0;
- uint32_t hole = 0;
-
- /* Extra tailroom is needed to satisfy XDP's assumption. This
- * means rx frags coalescing won't work, but consider we've
- * disabled GSO for XDP, it won't be a big issue.
- */
- len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
- if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
- {
- LOG_ERR("skb_page_frag_refill failed\n");
- return -ENOMEM;
- }
-
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- buf += headroom; /* advance address leaving hole at front of pkt */
- get_page(alloc_frag->page);
- alloc_frag->offset += len + room;
- hole = alloc_frag->size - alloc_frag->offset;
- if (hole < len + room)
- {
- /* To avoid internal fragmentation, if there is very likely not
- * enough space for another buffer, add the remaining space to
- * the current buffer.
- */
- len += hole;
- alloc_frag->offset += hole;
- }
-
- sg_init_one(rq->sg, buf, len);
- ctx = mergeable_len_to_ctx(len, headroom);
- err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
- {
- put_page(virt_to_head_page(buf));
- }
-
- return err;
-}
-
-/* Assuming a given event_idx value from the other side, if
- * we have just incremented index from old to new_idx,
- * should we trigger an event? */
-int32_t vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
-{
- /* Note: Xen has similar logic for notification hold-off
- * in include/xen/interface/io/ring.h with req_event and req_prod
- * corresponding to event_idx + 1 and new_idx respectively.
- * Note also that req_event and req_prod in Xen start at 1,
- * event indexes in custom queue start at 0. */
- return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
-}
-
-bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- uint16_t new = 0;
- uint16_t old = 0;
- uint16_t off_wrap = 0;
- uint16_t flags = 0;
- uint16_t wrap_counter = 0;
- uint16_t event_idx = 0;
- bool needs_kick = false;
- union
- {
- struct
- {
- __le16 off_wrap;
- __le16 flags;
- };
- uint32_t u32;
- } snapshot;
-
- START_USE(vq);
-
- /*
- * We need to expose the new flags value before checking notification
- * suppressions.
- */
- vqm_mb(vq->weak_barriers);
-
- old = vq->packed.next_avail_idx - vq->num_added;
- new = vq->packed.next_avail_idx;
- vq->num_added = 0;
-
- snapshot.u32 = *(uint32_t *)vq->packed.vring.device;
- flags = le16_to_cpu(snapshot.flags);
-
- LAST_ADD_TIME_CHECK(vq);
- LAST_ADD_TIME_INVALID(vq);
-
- if (flags != VRING_PACKED_EVENT_FLAG_DESC)
- {
- needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
- goto out;
- }
-
- off_wrap = le16_to_cpu(snapshot.off_wrap);
-
- wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
- event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- if (wrap_counter != vq->packed.avail_wrap_counter)
- {
- event_idx -= vq->packed.vring.num;
- }
-
- needs_kick = vring_need_event(event_idx, new, old);
-out:
- END_USE(vq);
- return needs_kick;
-}
-
-/*
- * Returns false if we couldn't fill entirely (OOM).
- *
- * Normally run in the receive path, but can also be run from ndo_open
- * before we're receiving packets, or from refill_work which is
- * careful to disable receiving (using napi_disable).
- */
-bool try_fill_recv(struct net_device *netdev, struct receive_queue *rq, gfp_t gfp)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t err = 0;
- bool oom = 0;
- unsigned long flags = 0;
-
- do
- {
- err = add_recvbuf_mergeable(en_dev, rq, gfp);
- oom = err == -ENOMEM;
- if (err)
- {
- break;
- }
- } while (rq->vq->num_free);
-
- if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq))
- {
- flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
- rq->stats.kicks++;
- u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
- }
-
- return !oom;
-}
-
-int32_t virtnet_receive(struct receive_queue *rq, int32_t budget)
-{
- struct net_device *netdev = rq->vq->vdev;
- struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct virtnet_rq_stats stats = {};
- uint32_t len = 0;
- void *buf = NULL;
- int32_t i = 0;
- void *ctx = NULL;
-
- while (stats.packets < budget && (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx)))
- {
- receive_buf(en_dev, rq, buf, len, ctx, &stats);
- stats.packets++;
- netdev->stats.rx_packets++;
- }
-
- if (rq->vq->num_free > min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2)
- {
- if (!try_fill_recv(rq->vq->vdev, rq, GFP_ATOMIC))
- {
- schedule_delayed_work(&en_dev->refill, 0);
- }
- }
-
- u64_stats_update_begin(&rq->stats.syncp);
- for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++)
- {
- size_t offset = virtnet_rq_stats_desc[i].offset;
- uint64_t *item;
-
- item = (uint64_t *)((uint8_t *)&rq->stats + offset);
- *item += *(uint64_t *)((uint8_t *)&stats + offset);
- }
- u64_stats_update_end(&rq->stats.syncp);
-
- return stats.packets;
-}
-
-void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, int32_t processed)
-{
- int32_t opaque = 0;
-
- opaque = virtqueue_enable_cb_prepare(vq);
- if (napi_complete_done(napi, processed))
- {
- if (unlikely(virtqueue_poll(vq, opaque)))
- {
- virtqueue_napi_schedule(napi, vq);
- }
- }
- else
- {
- virtqueue_disable_cb(vq);
- }
-}
-
-int virtnet_poll(struct napi_struct *napi, int budget)
-{
- struct receive_queue *rq = container_of(napi, struct receive_queue, napi);
- uint32_t received = 0;
-
- virtnet_poll_cleantx(rq);
-
- received = virtnet_receive(rq, budget);
-
- /* Out of packets? */
- if (received < budget)
- {
- virtqueue_napi_complete(napi, rq->vq, received);
- }
-
- return received;
-}
-
-int32_t virtnet_alloc_queues(struct zxdh_en_device *en_dev)
-{
- int32_t i = 0;
-
- en_dev->sq = kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->sq), GFP_KERNEL);
- if (unlikely(en_dev->sq == NULL))
- {
- LOG_ERR("en_dev->sq kcalloc failed\n");
- goto err_sq;
- }
-
- en_dev->rq = kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->rq), GFP_KERNEL);
- if (unlikely(en_dev->rq == NULL))
- {
- LOG_ERR("en_dev->rq kcalloc failed\n");
- goto err_rq;
- }
-
- INIT_DELAYED_WORK(&en_dev->refill, refill_work);
-
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- en_dev->rq[i].pages = NULL;
- netif_napi_add(en_dev->netdev, &en_dev->rq[i].napi, virtnet_poll, NAPI_POLL_WEIGHT);
- netif_tx_napi_add(en_dev->netdev, &en_dev->sq[i].napi, virtnet_poll_tx, NAPI_POLL_WEIGHT);
-
- sg_init_table(en_dev->rq[i].sg, ARRAY_SIZE(en_dev->rq[i].sg));
- ewma_pkt_len_init(&en_dev->rq[i].mrg_avg_pkt_len);
- sg_init_table(en_dev->sq[i].sg, ARRAY_SIZE(en_dev->sq[i].sg));
-
- u64_stats_init(&en_dev->rq[i].stats.syncp);
- u64_stats_init(&en_dev->sq[i].stats.syncp);
- }
-
- return 0;
-
-err_rq:
- kfree(en_dev->sq);
- en_dev->sq = NULL;
-err_sq:
- return -ENOMEM;
-}
-
-/**
- * virtqueue_set_affinity - setting affinity for a virtqueue
- * @vq: the virtqueue
- * @cpu_mask: the cpu no.
- *
- * Pay attention the function are best-effort: the affinity hint may not be set
- * due to config support, irq type and sharing.
- *
- */
-int32_t virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
-{
- if (!vq->callback)
- {
- LOG_ERR("vq->callback is null\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-void refill_work(struct work_struct *work)
-{
- int32_t i = 0;
- bool still_empty = false;
- struct zxdh_en_device *en_dev = container_of(work, struct zxdh_en_device, refill.work);
-
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- struct receive_queue *rq = &en_dev->rq[i];
-
- napi_disable(&rq->napi);
- still_empty = !try_fill_recv(en_dev->netdev, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
-
- /* In theory, this can happen: if we don't get any buffers in
- * we will *never* try to fill again.
- */
- if (still_empty)
- {
- schedule_delayed_work(&en_dev->refill, HZ/2);
- }
- }
-}
-
-int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct dh_eq_vq *eq_vq = container_of(nb, struct dh_eq_vq, irq_nb);
- struct dh_eq_vqs *eq_vqs = container_of(eq_vq, struct dh_eq_vqs, vq_s);
- struct list_head *item = NULL;
- struct zxdh_pci_vq_info *info = NULL;
- struct vring_virtqueue *vq = NULL;
-
- list_for_each(item, &eq_vqs->vqs)
- {
- info = list_entry(item, struct zxdh_pci_vq_info, node);
-
- vq = to_vvq(info->vq);
- if (!more_used_packed(vq))
- {
- continue;
- }
-
- if (unlikely(vq->broken))
- {
- LOG_ERR("vq:%d is broken\n", info->vq->phy_index);
- continue;
- }
-
- /* Just a hint for performance: so it's ok that this can be racy! */
- if (vq->event)
- {
- vq->event_triggered = true;
- }
-
- if (vq->vq.callback)
- {
- vq->vq.callback(&vq->vq);
- }
- }
-
- return 0;
-}
-
-static DEFINE_SPINLOCK(vp_find_lock);
-
-int32_t vp_find_vqs_msix(struct net_device *netdev, unsigned nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t err = 0;
- uint16_t qidx = 0;
- int32_t phy_index = -1;
-
- en_dev->vqs = kcalloc(nvqs, sizeof(*en_dev->vqs), GFP_KERNEL);
- if (unlikely(en_dev->vqs == NULL))
- {
- LOG_ERR("en_dev->vqs kcalloc failed\n");
- return -ENOMEM;
- }
-
- spin_lock(&vp_find_lock);
- for (qidx = 0; qidx < nvqs; ++qidx)
- {
- phy_index = en_dev->ops->get_phy_vq(en_dev->parent, qidx);
- if(phy_index < 0)
- {
- LOG_ERR("get_phy_vq failed: %d\n", phy_index);
- err = phy_index;
- goto err;
- }
- en_dev->phy_index[qidx] = phy_index;
-
- vqs[qidx] = vp_setup_vq(netdev, qidx, callbacks[qidx], names[qidx], ctx ? ctx[qidx] : false, qidx);
- if (IS_ERR_OR_NULL(vqs[qidx]))
- {
- err = PTR_ERR(vqs[qidx]);
- LOG_ERR("vp_setup_vq failed: %d\n", err);
- goto err;
- }
-
- en_dev->ops->set_queue_enable(en_dev->parent, phy_index, true);
- }
- spin_unlock(&vp_find_lock);
- return 0;
-
-err:
- en_dev->ops->release_phy_vq(en_dev->parent, en_dev->phy_index, qidx+1);
- spin_unlock(&vp_find_lock);
- vp_del_vqs(netdev);
- return err;
-}
-
-/* How large should a single buffer be so a queue full of these can fit at
- * least one full packet?
- * Logic below assumes the mergeable buffer header is used.
- */
-uint32_t mergeable_min_buf_len(struct zxdh_en_device *en_dev, struct virtqueue *vq)
-{
- const uint32_t hdr_len = sizeof(struct zxdh_net_hdr);
- uint32_t rq_size = virtqueue_get_vring_size(vq);
- uint32_t packet_len = en_dev->netdev->max_mtu;
- uint32_t buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
- uint32_t min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
-
- return max(max(min_buf_len, hdr_len) - hdr_len, (uint32_t)GOOD_PACKET_LEN);
-}
-
-void zxdh_en_recv_pkts(struct virtqueue *rvq)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(rvq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct receive_queue *rq = &en_dev->rq[vq2rxq(rvq)];
-
- virtqueue_napi_schedule(&rq->napi, rvq);
-}
-
-void zxdh_en_xmit_pkts(struct virtqueue *tvq)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(tvq->vdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct napi_struct *napi = &en_dev->sq[vq2txq(tvq)].napi;
-
- /* Suppress further interrupts. */
- virtqueue_disable_cb(tvq);
-
- if (napi->weight)
- {
- virtqueue_napi_schedule(napi, tvq);
- }
- else
- {
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(en_dev->netdev, vq2txq(tvq));
- en_dev->hw_stats.q_stats[vq2txq(tvq)].q_tx_wake++;
- }
-}
-
-int32_t virtnet_find_vqs(struct zxdh_en_device *en_dev)
-{
- vq_callback_t **callbacks = NULL;
- struct virtqueue **vqs = NULL;
- int32_t ret = -ENOMEM;
- int32_t i = 0;
- int32_t total_vqs = 0;
- const char **names = NULL;
- bool *ctx = NULL;
-
- total_vqs = en_dev->max_queue_pairs * 2;
-
- /* Allocate space for find_vqs parameters */
- vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
- if (unlikely(vqs == NULL))
- {
- LOG_ERR("vqs kcalloc failed\n");
- goto err_vq;
- }
-
- callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
- if (unlikely(callbacks == NULL))
- {
- LOG_ERR("callbacks kmalloc_array failed\n");
- goto err_callback;
- }
-
- names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
- if (unlikely(names == NULL))
- {
- LOG_ERR("names kmalloc_array failed\n");
- goto err_names;
- }
-
- ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL))
- {
- LOG_ERR("ctx kmalloc failed\n");
- goto err_ctx;
- }
-
- /* Allocate/initialize parameters for services send/receive virtqueues */
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- callbacks[rxq2vq(i)] = zxdh_en_recv_pkts;
- callbacks[txq2vq(i)] = zxdh_en_xmit_pkts;
- sprintf(en_dev->rq[i].name, "input.%d", i);
- sprintf(en_dev->sq[i].name, "output.%d", i);
- names[rxq2vq(i)] = en_dev->rq[i].name;
- names[txq2vq(i)] = en_dev->sq[i].name;
- if (ctx)
- {
- ctx[rxq2vq(i)] = true;
- }
- }
-
- ret = vp_find_vqs_msix(en_dev->netdev, total_vqs, vqs, callbacks, names, ctx);
- if (ret)
- {
- LOG_ERR("vp_find_vqs_msix failed: %d\n", ret);
- goto err_find;
- }
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- en_dev->rq[i].vq = vqs[rxq2vq(i)];
- en_dev->rq[i].min_buf_len = mergeable_min_buf_len(en_dev, en_dev->rq[i].vq);
- en_dev->sq[i].vq = vqs[txq2vq(i)];
- }
-
-err_find:
- kfree(ctx);
- ctx = NULL;
-err_ctx:
- kfree(names);
- names = NULL;
-err_names:
- kfree(callbacks);
- callbacks = NULL;
-err_callback:
- kfree(vqs);
- vqs = NULL;
-err_vq:
- return ret;
-}
-
-void virtnet_free_queues(struct zxdh_en_device *en_dev)
-{
- int32_t i = 0;
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- netif_napi_del(&en_dev->rq[i].napi);
- netif_napi_del(&en_dev->sq[i].napi);
- }
-
- /* We called __netif_napi_del(),
- * we need to respect an RCU grace period before freeing zxdev->rq
- */
- synchronize_net();
-
- kfree(en_dev->rq);
- kfree(en_dev->sq);
-}
-
-void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- uint32_t i = 0;
- void *buf = NULL;
-
- START_USE(vq);
-
- for (i = 0; i < vq->packed.vring.num; i++)
- {
- if (!vq->packed.desc_state[i].data)
- {
- continue;
- }
-
- /* detach_buf clears data, so grab it now. */
- buf = vq->packed.desc_state[i].data;
- detach_buf_packed(vq, i, NULL);
- END_USE(vq);
- return buf;
- }
-
- /* That should have freed everything. */
- BUG_ON(vq->vq.num_free != vq->packed.vring.num);
-
- END_USE(vq);
- return NULL;
-}
-
-void zxdh_free_unused_bufs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct virtqueue *vq = NULL;
- void *buf = NULL;
- int32_t i = 0;
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- vq = en_dev->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL)
- {
-#ifdef ZXDH_MSGQ
- if (i == (en_dev->max_queue_pairs - 1))
- {
- NEED_MSGQ(en_dev)
- {
- ZXDH_FREE_PTR(buf);
- continue;
- }
- }
-#endif
- dev_kfree_skb(buf);
- }
- }
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- vq = en_dev->rq[i].vq;
- while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL)
- {
- put_page(virt_to_head_page(buf));
- }
- }
-}
-
-struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
-{
- struct page *p = rq->pages;
-
- if (p)
- {
- rq->pages = (struct page *)p->private;
- /* clear private here, it is used to chain pages */
- p->private = 0;
- }
- else
- {
- p = alloc_page(gfp_mask);
- }
- return p;
-}
-
-void _free_receive_bufs(struct zxdh_en_device *en_dev)
-{
- int32_t i = 0;
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- while (en_dev->rq[i].pages)
- {
- __free_pages(get_a_page(&en_dev->rq[i], GFP_KERNEL), 0);
- }
- }
-}
-
-void zxdh_free_receive_bufs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- rtnl_lock();
- _free_receive_bufs(en_dev);
- rtnl_unlock();
-}
-
-void zxdh_free_receive_page_frags(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t i = 0;
-
- for (i = 0; i < en_dev->max_queue_pairs; i++)
- {
- if (en_dev->rq[i].alloc_frag.page)
- {
- put_page(en_dev->rq[i].alloc_frag.page);
- }
- }
-}
-
-void zxdh_virtnet_del_vqs(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- vp_del_vqs(netdev);
- en_dev->ops->vqs_unbind_eqs(en_dev->parent, (en_dev->max_queue_pairs * 2 - 1));
- en_dev->ops->vqs_channel_unbind_handler(en_dev->parent, (en_dev->max_queue_pairs * 2 - 1));
- virtnet_free_queues(en_dev);
-}
-
-void zxdh_vqs_uninit(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- zxdh_vp_reset(netdev);
-
- cancel_delayed_work_sync(&en_dev->refill);
- zxdh_free_unused_bufs(netdev);
- zxdh_free_receive_bufs(netdev);
- zxdh_free_receive_page_frags(netdev);
- zxdh_virtnet_del_vqs(netdev);
- en_dev->ops->release_phy_vq(en_dev->parent, en_dev->phy_index, en_dev->max_queue_pairs*2);
-}
-
-int32_t zxdh_vqs_init(struct net_device *netdev)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
-
- en_dev->hdr_len = sizeof(struct zxdh_net_hdr);
- en_dev->any_header_sg = zxdh_has_feature(netdev, ZXDH_F_ANY_LAYOUT);
- en_dev->netdev->needed_headroom = en_dev->hdr_len;
- en_dev->max_queue_pairs = max_pairs;
- memset(en_dev->phy_index, 0xFF, sizeof(en_dev->phy_index));
-
- if (en_dev->ops->is_bond(en_dev->parent))
- {
- en_dev->max_queue_pairs = ZXDH_BOND_ETH_MQ_PAIRS_NUM;
- }
- en_dev->curr_queue_pairs = en_dev->max_queue_pairs;
-
-#ifdef ZXDH_MSGQ
- IS_MSGQ_DEV(en_dev)
- {
- en_dev->need_msgq = true;
- en_dev->max_queue_pairs += ZXDH_PQ_PAIRS_NUM;
- LOG_INFO("max_queue_pairs: %d\n", en_dev->max_queue_pairs);
- }
-#endif
-
- INIT_LIST_HEAD(&en_dev->vqs_list);
- spin_lock_init(&en_dev->vqs_list_lock);
-
- INIT_LIST_HEAD(&en_dev->virtqueues);
- spin_lock_init(&en_dev->lock);
-
- /* Allocate services send & receive queues */
- ret = virtnet_alloc_queues(en_dev);
- if (ret)
- {
- LOG_ERR("virtnet_alloc_queues failed: %d\n", ret);
- return ret;
- }
-
- ret = virtnet_find_vqs(en_dev);
- if (ret)
- {
- LOG_ERR("virtnet_find_vqs failed: %d\n", ret);
- goto err_find_vqs;
- }
-
- rtnl_lock();
- netif_set_real_num_tx_queues(en_dev->netdev, en_dev->curr_queue_pairs);
- rtnl_unlock();
- rtnl_lock();
- netif_set_real_num_rx_queues(en_dev->netdev, en_dev->curr_queue_pairs);
- rtnl_unlock();
-
- return 0;
-
-err_find_vqs:
- virtnet_free_queues(en_dev);
- return ret;
-}
+#include "queue.h"
+#include "../en_aux.h"
+/* NOTE(review): the original <...> include targets were stripped by a
+ * markup/extraction step; the headers below are reconstructed from the
+ * symbols used in this file (netdev_priv, kmalloc/kcalloc, msleep,
+ * dma_alloc_coherent, iowrite16, spinlocks, xen_domain) — confirm
+ * against the source tree before merging. */
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <xen/xen.h>
+#ifdef TIME_STAMP_1588
+#include "en_1588_pkt_proc.h"
+#endif
+#ifdef ZXDH_MSGQ
+#include "priv_queue.h"
+#endif
+
+/* Feature-bit positions this driver wants to negotiate; each entry is
+ * OR-ed into driver_feature as (1ULL << bit) by zxdh_pf_features_init(). */
+static uint32_t features_table[] = {
+    ZXDH_NET_F_MRG_RXBUF, ZXDH_NET_F_STATUS, ZXDH_NET_F_CTRL_VQ,
+    ZXDH_NET_F_MQ, ZXDH_RING_F_INDIRECT_DESC, ZXDH_RING_F_EVENT_IDX,
+    ZXDH_F_VERSION_1, ZXDH_F_RING_PACKED
+};
+
+/* Dump a packed virtqueue's bookkeeping state and a range of its
+ * descriptors to the log, for debugging.
+ * @desc_index: index of the first descriptor to print.
+ * @desc_num: NOTE(review): used directly as the loop's upper bound, so it
+ * is an end index, not a count relative to @desc_index — confirm callers
+ * pass it that way. */
+void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index,
+                           uint32_t desc_num)
+{
+    struct vring_virtqueue *vvq = to_vvq(vq);
+    struct vring_packed_desc *desc = NULL;
+    uint32_t i = 0;
+
+    LOG_INFO("phy_index : %d\n", vq->phy_index);
+    LOG_INFO("num free : %d\n", vq->num_free);
+    LOG_INFO("vring address : 0x%llx\n", (uint64_t)&vvq->packed.vring);
+    LOG_INFO("vring size : %d\n", vvq->packed.vring.num);
+    LOG_INFO("last_used_idx : %d\n", vvq->last_used_idx);
+    LOG_INFO("avail_wrap_counter: %d\n", vvq->packed.avail_wrap_counter);
+    LOG_INFO("used_wrap_counter : %d\n", vvq->packed.used_wrap_counter);
+    LOG_INFO("next_avail_idx : %d\n", vvq->packed.next_avail_idx);
+    LOG_INFO("free head : %d\n", vvq->free_head);
+    LOG_INFO("driver->flags : 0x%x\n", vvq->packed.vring.driver->flags);
+    LOG_INFO("driver->off_wrap : %d\n", vvq->packed.vring.driver->off_wrap);
+    LOG_INFO("device->flags : 0x%x\n", vvq->packed.vring.device->flags);
+    LOG_INFO("device->off_wrap : %d\n", vvq->packed.vring.device->off_wrap);
+    LOG_INFO("DESC[x]:\tDESC_ADDR\t[BUFFER_ADDR]\t\t[LEN]\t\t[ID]\t[FLAG]\n");
+
+    /* Walk the descriptor ring from desc_index up to (but excluding) desc_num. */
+    desc = vvq->packed.vring.desc;
+    for (i = desc_index; i < desc_num; i++) {
+        LOG_INFO("DESC[%d] 0x%llx:\t0x%016llx\t0x%08x\t%8d\t0x%x\n", i,
+                 (uint64_t)desc, desc->addr, desc->len, desc->id, desc->flags);
+        desc++;
+    }
+
+    return;
+}
+
+/* Enable irq handlers: switch interrupt delivery on for every vq channel
+ * owned by this device. */
+void zxdh_vp_enable_cbs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t i = 0;
+
+    for (i = 0; i < en_dev->channels_num; i++) {
+        en_dev->ops->switch_vqs_channel(en_dev->parent, i, 1);
+    }
+}
+
+/* Disable irq handlers: switch interrupt delivery off for every vq channel
+ * owned by this device (mirror of zxdh_vp_enable_cbs). */
+void zxdh_vp_disable_cbs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t i = 0;
+
+    for (i = 0; i < en_dev->channels_num; i++) {
+        en_dev->ops->switch_vqs_channel(en_dev->parent, i, 0);
+    }
+}
+
+/* Reset the device: write 0 to the status register, then poll (sleeping
+ * 1ms per iteration) until the device reads back 0, which flushes the
+ * status write and any in-flight device writes.
+ * NOTE(review): the poll loop has no timeout — a hung device stalls this
+ * thread forever; confirm that is acceptable in all call paths. */
+void zxdh_vp_reset(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+
+    /* 0 status means a reset. */
+    en_dev->ops->set_status(en_dev->parent, 0);
+
+    /* After writing 0 to device_status, the driver MUST wait for a read of
+     * device_status to return 0 before reinitializing the device.
+     * This will flush out the status write, and flush in device writes,
+     * including MSI-X interrupts, if any.
+     */
+    while (en_dev->ops->get_status(en_dev->parent) != 0) {
+        msleep(1);
+    }
+
+    return;
+}
+
+/* OR @status into the device's status register (read-modify-write).
+ * May sleep (the accessor ops can block), hence might_sleep().
+ * NOTE(review): the read-modify-write is not serialized here — confirm
+ * callers hold whatever lock protects status updates. */
+void zxdh_add_status(struct net_device *netdev, uint32_t status)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint8_t dev_status = 0;
+
+    might_sleep();
+
+    dev_status = en_dev->ops->get_status(en_dev->parent);
+
+    en_dev->ops->set_status(en_dev->parent, (dev_status | status));
+
+    return;
+}
+
+/* Test whether any bit of @sbit is currently set in the device's status
+ * register (non-zero AND result converts to true). */
+bool zxdh_has_status(struct net_device *netdev, uint32_t sbit)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint8_t dev_status = 0;
+
+    dev_status = en_dev->ops->get_status(en_dev->parent);
+
+    return (dev_status & sbit);
+}
+
+/* Negotiate features with the device: read the device-offered feature
+ * bits, build the driver's wanted set from features_table, mask the
+ * negotiated (guest) set, and program it back into the device. */
+void zxdh_pf_features_init(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t i = 0;
+    uint64_t features = 0;
+
+    en_dev->device_feature = en_dev->ops->get_features(en_dev->parent);
+    /* BIT_ULL, not BIT: the feature word is 64-bit and bit 34 does not
+     * exist in a 32-bit unsigned long (undefined shift on 32-bit builds).
+     * This also matches zxdh_has_feature(), which tests with BIT_ULL. */
+    en_dev->device_feature |= BIT_ULL(34);
+    en_dev->driver_feature = 0;
+
+    for (i = 0; i < ARRAY_SIZE(features_table); i++) {
+        features = features_table[i];
+        en_dev->driver_feature |= (1ULL << features);
+    }
+    /* NOTE(review): magic mask drops a handful of device-offered feature
+     * bits — document which features are being refused and why. */
+    en_dev->guest_feature = en_dev->device_feature & 0xfffffff5dfffffff;
+    en_dev->ops->set_features(en_dev->parent, en_dev->guest_feature);
+
+    return;
+}
+
+/* Test a negotiated (guest) feature bit; @fbit is a bit position in the
+ * 64-bit feature word, hence BIT_ULL. */
+bool zxdh_has_feature(struct net_device *netdev, uint32_t fbit)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+
+    return en_dev->guest_feature & BIT_ULL(fbit);
+}
+
+/* Virtqueues are interleaved rx0,tx0,rx1,tx1,...: RX vqs sit at even
+ * indices (2*q), TX vqs at odd indices (2*q + 1). The four helpers below
+ * convert between a vq index and its queue-pair number. */
+int32_t vq2txq(struct virtqueue *vq)
+{
+    return (vq->index - 1) / 2;
+}
+
+/* TX queue number -> odd vq index. */
+int32_t txq2vq(int32_t txq)
+{
+    return txq * 2 + 1;
+}
+/* Even vq index -> RX queue number. */
+int32_t vq2rxq(struct virtqueue *vq)
+{
+    return vq->index / 2;
+}
+
+/* RX queue number -> even vq index. */
+int32_t rxq2vq(int32_t rxq)
+{
+    return rxq * 2;
+}
+
+/* Memory-barrier helpers: weak_barriers selects the cheaper SMP/virt
+ * barriers, otherwise a mandatory barrier is used (device-visible DMA
+ * ordering). Mirrors the virtio_mb/rmb/wmb convention.
+ * NOTE(review): non-static `inline` at file scope needs a matching extern
+ * declaration to guarantee an emitted definition — confirm queue.h has one. */
+inline void vqm_mb(bool weak_barriers)
+{
+    if (weak_barriers) {
+        virt_mb();
+    } else {
+        mb();
+    }
+}
+
+/* Read barrier variant; dma_rmb() in the strong case orders device writes. */
+inline void vqm_rmb(bool weak_barriers)
+{
+    if (weak_barriers) {
+        virt_rmb();
+    } else {
+        dma_rmb();
+    }
+}
+
+/* Write barrier variant; dma_wmb() in the strong case orders driver writes. */
+inline void vqm_wmb(bool weak_barriers)
+{
+    if (weak_barriers) {
+        virt_wmb();
+    } else {
+        dma_wmb();
+    }
+}
+
+/* Unlink @_vq from the device's vq list and free it. When the driver
+ * owns the ring memory (we_own_ring), also free the descriptor ring, the
+ * driver/device event areas, and the per-descriptor state arrays. */
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(_vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    /* Remove from vqs_list under the list lock before tearing down. */
+    spin_lock(&en_dev->vqs_list_lock);
+    list_del(&_vq->list);
+    spin_unlock(&en_dev->vqs_list_lock);
+
+    if (vq->we_own_ring) {
+        vring_free_queue(vq->vq.vdev, vq->packed.ring_size_in_bytes,
+                         vq->packed.vring.desc, vq->packed.ring_dma_addr);
+
+        vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes,
+                         vq->packed.vring.driver,
+                         vq->packed.driver_event_dma_addr);
+
+        vring_free_queue(vq->vq.vdev, vq->packed.event_size_in_bytes,
+                         vq->packed.vring.device,
+                         vq->packed.device_event_dma_addr);
+
+        kfree(vq->packed.desc_state);
+        vq->packed.desc_state = NULL;
+        kfree(vq->packed.desc_extra);
+        vq->packed.desc_extra = NULL;
+    }
+
+    kfree(vq);
+    vq = NULL;
+}
+
+/* Tear down one queue: unbind it from its interrupt channel, unmap its
+ * notify area, then destroy the vring itself. */
+void del_vq(struct zxdh_pci_vq_info *info)
+{
+    struct virtqueue *vq = info->vq;
+    struct zxdh_en_priv *en_priv = netdev_priv(vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+
+    en_dev->ops->vq_unbind_channel(en_dev->parent, vq->phy_index);
+
+    en_dev->ops->vp_modern_unmap_vq_notify(en_dev->parent, vq->priv);
+
+    vring_del_virtqueue(vq);
+}
+
+/* Remove @vq's bookkeeping node from the device list (irq-safe, the list
+ * is also touched from interrupt context), destroy the queue and free its
+ * info slot in en_dev->vqs[]. */
+void vp_del_vq(struct virtqueue *vq)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_pci_vq_info *info = en_dev->vqs[vq->index];
+    unsigned long flags;
+
+    spin_lock_irqsave(&en_dev->lock, flags);
+    list_del(&info->node);
+    spin_unlock_irqrestore(&en_dev->lock, flags);
+
+    del_vq(info);
+    kfree(info);
+    en_dev->vqs[vq->index] = NULL;
+}
+
+/* Destroy every virtqueue on the device's vqs_list. @para is the
+ * net_device, passed as void* so this can serve as a generic callback;
+ * _safe iteration because vp_del_vq unlinks entries as it goes. */
+void vp_detach_vqs(void *para)
+{
+    struct net_device *netdev = para;
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct virtqueue *vq;
+    struct virtqueue *n;
+
+    list_for_each_entry_safe(vq, n, &en_dev->vqs_list, list) {
+        vp_del_vq(vq);
+    }
+}
+
+/* Tear down all virtqueues, then release the per-queue info pointer
+ * array allocated at find-vqs time. */
+void vp_del_vqs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+
+    vp_detach_vqs(netdev);
+
+    kfree(en_dev->vqs);
+    en_dev->vqs = NULL;
+}
+
+/**
+ * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * @_vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring. This is mainly used for boasting to
+ * userspace. Unlike other operations, this need not be serialized.
+ */
+uint32_t virtqueue_get_vring_size(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    return vq->packed.vring.num;
+}
+
+/* DMA address of the descriptor ring; only valid when the driver
+ * allocated the ring itself (BUG otherwise). */
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    BUG_ON(!vq->we_own_ring);
+
+    return vq->packed.ring_dma_addr;
+}
+
+/* DMA address of the driver event area (packed-ring analogue of the
+ * split ring's "avail"); ring must be driver-owned. */
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    BUG_ON(!vq->we_own_ring);
+
+    return vq->packed.driver_event_dma_addr;
+}
+
+/* DMA address of the device event area (packed-ring analogue of the
+ * split ring's "used"); ring must be driver-owned. */
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    BUG_ON(!vq->we_own_ring);
+
+    return vq->packed.device_event_dma_addr;
+}
+
+/* True when the legacy "device uses physical addresses" quirk applies,
+ * i.e. the ACCESS_PLATFORM feature was NOT negotiated. */
+bool vqm_has_dma_quirk(struct net_device *netdev)
+{
+    /*
+     * Note the reverse polarity of the quirk feature (compared to most
+     * other features), this is for compatibility with legacy systems.
+     */
+    return !zxdh_has_feature(netdev, ZXDH_F_ACCESS_PLATFORM);
+}
+
+/* Decide whether ring memory must be allocated/freed through the DMA
+ * API: required when ACCESS_PLATFORM was negotiated, or when running as
+ * a Xen guest; otherwise raw pages with physical addresses are used. */
+bool vring_use_dma_api(struct net_device *netdev)
+{
+    if (!vqm_has_dma_quirk(netdev)) {
+        return true;
+    }
+
+    /* Otherwise, we are left to guess. */
+    /*
+     * In theory, it's possible to have a buggy QEMU-supposed
+     * emulated Q35 IOMMU and Xen enabled at the same time. On
+     * such a configuration, zxdh has never worked and will
+     * not work without an even larger kludge. Instead, enable
+     * the DMA API if we're a Xen guest, which at least allows
+     * all of the sensible Xen configurations to work correctly.
+     */
+    if (xen_domain()) {
+        return true;
+    }
+
+    return false;
+}
+
+/* Free ring memory previously obtained from vring_alloc_queue(), using
+ * the matching strategy (DMA-coherent buffer vs raw exact pages). */
+void vring_free_queue(struct net_device *netdev, size_t size, void *queue,
+                      dma_addr_t dma_handle)
+{
+    if (vring_use_dma_api(netdev)) {
+        dma_free_coherent(netdev->dev.parent, size, queue, dma_handle);
+    } else {
+        free_pages_exact(queue, PAGE_ALIGN(size));
+    }
+}
+
+/* Allocate @size bytes of ring memory and return its bus address in
+ * *@dma_handle: DMA-coherent when the DMA API is in use, otherwise raw
+ * pages whose physical address doubles as the handle. Returns NULL on
+ * failure (or on a non-representable physical address). */
+void *vring_alloc_queue(struct net_device *netdev, size_t size,
+                        dma_addr_t *dma_handle, gfp_t flag)
+{
+    if (vring_use_dma_api(netdev)) {
+        return dma_alloc_coherent(netdev->dev.parent, size, dma_handle, flag);
+    } else {
+        void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
+
+        if (queue) {
+            phys_addr_t phys_addr = virt_to_phys(queue);
+            *dma_handle = (dma_addr_t)phys_addr;
+
+            /*
+             * Sanity check: make sure we didn't truncate
+             * the address. The only arches I can find that
+             * have 64-bit phys_addr_t but 32-bit dma_addr_t
+             * are certain non-highmem MIPS and x86
+             * configurations, but these configurations
+             * should never allocate physical pages above 32
+             * bits, so this is fine. Just in case, throw a
+             * warning and abort if we end up with an
+             * unrepresentable address.
+             */
+            if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+                free_pages_exact(queue, PAGE_ALIGN(size));
+                return NULL;
+            }
+        }
+        return queue;
+    }
+}
+
+/* Allocate a zeroed desc_extra array of @num entries with each entry's
+ * `next` pre-linked into a free chain (0 -> 1 -> ... -> num-1; the last
+ * entry's next stays 0 from the zeroing).
+ * Returns NULL on allocation failure or when @num is 0. */
+struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
+                                                uint32_t num)
+{
+    struct vring_desc_extra *desc_extra = NULL;
+    uint32_t i = 0;
+
+    /* Guard: with num == 0 the loop bound (num - 1) would wrap to
+     * UINT32_MAX and scribble far past the allocation. */
+    if (unlikely(num == 0)) {
+        return NULL;
+    }
+
+    /* kcalloc zeroes the array and checks the n*size product for
+     * overflow, replacing the kmalloc_array + memset pair. */
+    desc_extra = kcalloc(num, sizeof(struct vring_desc_extra), GFP_KERNEL);
+    if (unlikely(desc_extra == NULL)) {
+        return NULL;
+    }
+
+    for (i = 0; i < num - 1; i++) {
+        desc_extra[i].next = i + 1;
+    }
+
+    return desc_extra;
+}
+
+/* Allocate and initialize a packed-ring virtqueue: descriptor ring plus
+ * driver/device event areas, the vring_virtqueue bookkeeping struct, and
+ * per-descriptor state/extra arrays. On success the vq is appended to
+ * the device's vqs_list and its struct virtqueue is returned; on any
+ * failure everything allocated so far is unwound and NULL is returned.
+ * NOTE(review): vq->vq.priv and vq->vq.phy_index are not set here —
+ * presumably the caller (vp_setup_vq) fills them in; confirm. */
+struct virtqueue *vring_create_virtqueue_packed(
+    uint32_t index, uint32_t num, uint32_t vring_align,
+    struct net_device *netdev, bool weak_barriers, bool may_reduce_num,
+    bool context, bool (*notify)(struct virtqueue *),
+    void (*callback)(struct virtqueue *), const char *name)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct vring_virtqueue *vq = NULL;
+    struct vring_packed_desc *ring = NULL;
+    struct vring_packed_desc_event *driver = NULL;
+    struct vring_packed_desc_event *device = NULL;
+    dma_addr_t ring_dma_addr;
+    dma_addr_t driver_event_dma_addr;
+    dma_addr_t device_event_dma_addr;
+    size_t ring_size_in_bytes;
+    size_t event_size_in_bytes;
+
+    ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
+
+    /* Descriptor ring itself. */
+    ring = vring_alloc_queue(netdev, ring_size_in_bytes, &ring_dma_addr,
+                             GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+    if (unlikely(ring == NULL)) {
+        goto err_ring;
+    }
+
+    event_size_in_bytes = sizeof(struct vring_packed_desc_event);
+
+    /* Driver event suppression area (written by us, read by device). */
+    driver = vring_alloc_queue(netdev, event_size_in_bytes,
+                               &driver_event_dma_addr,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+    if (unlikely(driver == NULL)) {
+        goto err_driver;
+    }
+
+    /* Device event suppression area (written by device, read by us). */
+    device = vring_alloc_queue(netdev, event_size_in_bytes,
+                               &device_event_dma_addr,
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+    if (unlikely(device == NULL)) {
+        goto err_device;
+    }
+
+    /* Not zeroed: every field used below is assigned explicitly. */
+    vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+    if (unlikely(vq == NULL)) {
+        goto err_vq;
+    }
+
+    vq->vq.callback = callback;
+    vq->vq.vdev = netdev;
+    vq->vq.name = name;
+    vq->vq.num_free = num;
+    vq->vq.index = index;
+    vq->we_own_ring = true;
+    vq->notify = notify;
+    vq->weak_barriers = weak_barriers;
+    vq->broken = false;
+    vq->last_used_idx = 0;
+    vq->event_triggered = false;
+    vq->num_added = 0;
+    vq->packed_ring = true;
+    vq->use_dma_api = vring_use_dma_api(netdev);
+#ifdef DEBUG
+    vq->in_use = false;
+    vq->last_add_time_valid = false;
+#endif
+
+    vq->indirect = zxdh_has_feature(netdev, ZXDH_RING_F_INDIRECT_DESC) &&
+                   !context;
+    vq->event = zxdh_has_feature(netdev, ZXDH_RING_F_EVENT_IDX);
+
+    /* ORDER_PLATFORM forces mandatory (non-SMP) barriers. */
+    if (zxdh_has_feature(netdev, ZXDH_F_ORDER_PLATFORM)) {
+        vq->weak_barriers = false;
+    }
+
+    vq->packed.ring_dma_addr = ring_dma_addr;
+    vq->packed.driver_event_dma_addr = driver_event_dma_addr;
+    vq->packed.device_event_dma_addr = device_event_dma_addr;
+
+    vq->packed.ring_size_in_bytes = ring_size_in_bytes;
+    vq->packed.event_size_in_bytes = event_size_in_bytes;
+
+    vq->packed.vring.num = num;
+    vq->packed.vring.desc = ring;
+    vq->packed.vring.driver = driver;
+    vq->packed.vring.device = device;
+
+    /* Packed rings start with both wrap counters at 1 and the avail
+     * flag set in the shadow used for new descriptors. */
+    vq->packed.next_avail_idx = 0;
+    vq->packed.avail_wrap_counter = 1;
+    vq->packed.used_wrap_counter = 1;
+    vq->packed.event_flags_shadow = 0;
+    vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+    vq->packed.desc_state = kmalloc_array(
+        num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+    if (unlikely(vq->packed.desc_state == NULL)) {
+        LOG_ERR("vq->packed.desc_state kmalloc_array failed\n");
+        goto err_desc_state;
+    }
+
+    memset(vq->packed.desc_state, 0,
+           num * sizeof(struct vring_desc_state_packed));
+
+    /* Put everything in free lists. */
+    vq->free_head = 0;
+
+    vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
+    if (unlikely(vq->packed.desc_extra == NULL)) {
+        LOG_ERR("vq->packed.desc_extra vring_alloc_desc_extra failed\n");
+        goto err_desc_extra;
+    }
+
+    /* No callback? Tell other side not to bother us. */
+    if (!callback) {
+        vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+        vq->packed.vring.driver->flags =
+            cpu_to_le16(vq->packed.event_flags_shadow);
+    }
+
+    spin_lock(&en_dev->vqs_list_lock);
+    list_add_tail(&vq->vq.list, &en_dev->vqs_list);
+    spin_unlock(&en_dev->vqs_list_lock);
+
+    return &vq->vq;
+
+/* Unwind in reverse order of allocation. */
+err_desc_extra:
+    kfree(vq->packed.desc_state);
+    vq->packed.desc_state = NULL;
+err_desc_state:
+    kfree(vq);
+    vq = NULL;
+err_vq:
+    vring_free_queue(netdev, event_size_in_bytes, device,
+                     device_event_dma_addr);
+err_device:
+    vring_free_queue(netdev, event_size_in_bytes, driver,
+                     driver_event_dma_addr);
+err_driver:
+    vring_free_queue(netdev, ring_size_in_bytes, ring, ring_dma_addr);
+err_ring:
+    return NULL;
+}
+
+/* Notify callback installed when a virtqueue is created.
+ *
+ * Kicks the device by writing the queue's physical selector into the
+ * notification register that was mapped into vq->priv at setup time.
+ * Always reports success.
+ */
+bool vp_notify(struct virtqueue *vq)
+{
+    void __iomem *notify_reg = (void __iomem *)vq->priv;
+
+    /* The selector write is the doorbell for the other end. */
+    iowrite16(vq->phy_index, notify_reg);
+
+    return true;
+}
+
+/**
+ * vp_setup_vq - allocate, create and activate one packed virtqueue
+ * @netdev: owning net device
+ * @index: logical queue index (translated via en_dev->phy_index[])
+ * @callback: completion callback, or NULL for a queue without interrupts
+ * @name: queue name
+ * @ctx: whether buffers carry per-buffer context pointers
+ * @channel_num: interrupt channel to bind the queue to
+ *
+ * Returns the new virtqueue on success, or an ERR_PTR() on failure.
+ * On failure everything acquired so far is torn down.
+ */
+struct virtqueue *vp_setup_vq(struct net_device *netdev, unsigned index,
+                              void (*callback)(struct virtqueue *vq),
+                              const char *name, bool ctx, uint16_t channel_num)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct zxdh_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+    struct virtqueue *vq = NULL;
+    struct virtqueue *pos = NULL;
+    struct virtqueue *n = NULL;
+    unsigned long flags;
+    uint16_t num = 0;
+    int32_t err = 0;
+    struct dh_vq_handler vq_handler;
+
+    /* fill out our structure that represents an active queue */
+    if (unlikely(info == NULL)) {
+        LOG_ERR("info kmalloc failed\n");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    /* Check if queue is either not available or already active. */
+    num = en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[index]);
+    /* Ring size is currently forced to the PF minimum; it must be a
+     * power of two for the packed ring. */
+    num = ZXDH_PF_MIN_DESC_NUM;
+
+    if (num & (num - 1)) {
+        LOG_ERR("bad queue size %u\n", num);
+        err = -ENOMEM;
+        goto out_info;
+    }
+
+    /* create the vring */
+    vq = vring_create_virtqueue_packed(index, num, SMP_CACHE_BYTES,
+                                       en_dev->netdev, true, true, ctx,
+                                       vp_notify, callback, name);
+    if (vq == NULL) {
+        LOG_ERR("create the vring failed\n");
+        err = -ENOMEM;
+        goto out_info;
+    }
+
+    /* activate the queue */
+    en_dev->ops->activate_phy_vq(en_dev->parent, en_dev->phy_index[index],
+                                 virtqueue_get_vring_size(vq),
+                                 virtqueue_get_desc_addr(vq),
+                                 virtqueue_get_avail_addr(vq),
+                                 virtqueue_get_used_addr(vq));
+
+    vq->priv = (void __force *)en_dev->ops->vp_modern_map_vq_notify(
+        en_dev->parent, en_dev->phy_index[index], NULL);
+    if (!vq->priv) {
+        err = -ENOMEM;
+        goto err_map_notify;
+    }
+
+    vq->phy_index = en_dev->phy_index[index];
+    vq->index = index;
+    info->channel_num = channel_num;
+
+    memset(&vq_handler, 0, sizeof(struct dh_vq_handler));
+    vq_handler.callback = dh_eq_vqs_vring_int;
+    if (channel_num < (en_dev->ops->get_channels_num(en_dev->parent))) {
+        err = en_dev->ops->vqs_channel_bind_handler(en_dev->parent, channel_num,
+                                                    &vq_handler);
+        if (err < 0) {
+            LOG_ERR("vqs_channel_bind_handler failed: %d\n", err);
+            goto err_vqs_channel_bind_handler;
+        }
+    }
+
+    /* An out-of-range channel falls back to the last available one. */
+    if (channel_num >= (en_dev->ops->get_channels_num(en_dev->parent))) {
+        channel_num = en_dev->ops->get_channels_num(en_dev->parent) - 1;
+    }
+    err = en_dev->ops->vq_bind_channel(en_dev->parent, channel_num,
+                                       en_dev->phy_index[index]);
+    if (err < 0) {
+        LOG_ERR("vq_bind_channel failed: %d\n", err);
+        goto err_vq_bind_channel;
+    }
+
+    if (callback) {
+        spin_lock_irqsave(&en_dev->lock, flags);
+        err = en_dev->ops->vqs_bind_eqs(en_dev->parent, channel_num,
+                                        &info->node);
+        spin_unlock_irqrestore(&en_dev->lock, flags);
+        if (err < 0) {
+            LOG_ERR("vqs_bind_eqs failed: %d\n", err);
+            goto err_vqs_bind_eqs;
+        }
+    } else {
+        INIT_LIST_HEAD(&info->node);
+    }
+
+    info->vq = vq;
+    en_dev->vqs[index] = info;
+    return vq;
+
+err_vqs_bind_eqs:
+    /* Iterate with a dedicated cursor (@pos) so @vq stays valid: the
+     * previous code reused @vq as the list iterator and then
+     * dereferenced the clobbered pointer in the fall-through labels
+     * below (vq->priv, vring_del_virtqueue(vq)). */
+    list_for_each_entry_safe(pos, n, &en_dev->vqs_list, list) {
+        en_dev->ops->vq_unbind_channel(en_dev->parent, pos->phy_index);
+    }
+err_vq_bind_channel:
+    if (channel_num < (en_dev->ops->get_channels_num(en_dev->parent))) {
+        en_dev->ops->vqs_channel_unbind_handler(en_dev->parent, channel_num);
+    }
+err_vqs_channel_bind_handler:
+    en_dev->ops->vp_modern_unmap_vq_notify(en_dev->parent,
+                                           (void __iomem __force *)vq->priv);
+err_map_notify:
+    vring_del_virtqueue(vq);
+out_info:
+    kfree(info);
+    en_dev->vqs[index] = NULL;
+    return ERR_PTR(err);
+}
+
+/* Choose an allocation length for a mergeable receive buffer.
+ *
+ * With reserved @room the remainder of the page is used; otherwise the
+ * length tracks the EWMA of recent packet sizes, clamped to the queue
+ * minimum and the page, and cache-line aligned.
+ */
+uint32_t get_mergeable_buf_len(struct receive_queue *rq,
+                               struct ewma_pkt_len *avg_pkt_len, uint32_t room)
+{
+    const size_t hdr_len = sizeof(struct zxdh_net_hdr);
+    uint32_t pkt_estimate;
+
+    if (room != 0)
+        return PAGE_SIZE - room;
+
+    pkt_estimate = clamp_t(uint32_t, ewma_pkt_len_read(avg_pkt_len),
+                           rq->min_buf_len, PAGE_SIZE - hdr_len);
+
+    return ALIGN(hdr_len + pkt_estimate, L1_CACHE_BYTES);
+}
+
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess. For now, we use the parent device for DMA ops.
+ */
+/* Return the struct device used for all DMA mappings of this vring. */
+static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+    /* NOTE(review): assumes vdev->dev.parent is the DMA-capable PCI
+     * device — the original "todo" marker suggests this is unconfirmed. */
+    return vq->vq.vdev->dev.parent; // todo
+}
+
+/* Map one scatterlist entry for DMA in @direction.
+ *
+ * dma_map_sg() is deliberately avoided: the scatterlist is not
+ * guaranteed to outlive the mapping, so each entry is mapped
+ * individually with dma_map_page().
+ */
+dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+                            struct scatterlist *sg,
+                            enum dma_data_direction direction)
+{
+    /* Without the DMA API the physical address is handed over as-is. */
+    if (!vq->use_dma_api)
+        return (dma_addr_t)sg_phys(sg);
+
+    return dma_map_page(vring_dma_dev(vq), sg_page(sg), sg->offset,
+                        sg->length, direction);
+}
+
+/* Map a kernel-virtual buffer of @size bytes for DMA in @direction. */
+dma_addr_t vring_map_single(const struct vring_virtqueue *vq, void *cpu_addr,
+                            size_t size, enum dma_data_direction direction)
+{
+    /* Bypass the DMA API entirely when it is not in use. */
+    if (!vq->use_dma_api)
+        return (dma_addr_t)virt_to_phys(cpu_addr);
+
+    return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction);
+}
+
+/* Check whether a previous vring_map_* call failed.
+ * Returns non-zero on mapping error, 0 otherwise (always 0 when the
+ * DMA API is bypassed, since raw physical addresses cannot fail).
+ */
+int32_t vring_mapping_error(const struct vring_virtqueue *vq, dma_addr_t addr)
+{
+    if (!vq->use_dma_api)
+        return 0;
+
+    return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
+/*
+ * Packed ring specific functions - *_packed().
+ */
+/* Undo the DMA mapping recorded in a desc_extra entry.
+ *
+ * Indirect-table mappings were created with dma_map_single() and
+ * everything else with dma_map_page(); the INDIRECT flag selects the
+ * matching unmap call. No-op when the DMA API is bypassed.
+ */
+void vring_unmap_state_packed(const struct vring_virtqueue *vq,
+                              struct vring_desc_extra *state)
+{
+    uint16_t flags;
+    enum dma_data_direction dir;
+
+    if (!vq->use_dma_api)
+        return;
+
+    flags = state->flags;
+    dir = (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+    if (flags & VRING_DESC_F_INDIRECT)
+        dma_unmap_single(vring_dma_dev(vq), state->addr, state->len, dir);
+    else
+        dma_unmap_page(vring_dma_dev(vq), state->addr, state->len, dir);
+}
+
+/* Undo the DMA mapping described by a (little-endian) packed descriptor.
+ *
+ * Counterpart of vring_unmap_state_packed() for descriptors whose
+ * address/length/flags live in the ring itself rather than in
+ * desc_extra. No-op when the DMA API is bypassed.
+ */
+void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
+                             struct vring_packed_desc *desc)
+{
+    uint16_t flags;
+    enum dma_data_direction dir;
+
+    if (!vq->use_dma_api)
+        return;
+
+    flags = le16_to_cpu(desc->flags);
+    dir = (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+    if (flags & VRING_DESC_F_INDIRECT)
+        dma_unmap_single(vring_dma_dev(vq), le64_to_cpu(desc->addr),
+                         le32_to_cpu(desc->len), dir);
+    else
+        dma_unmap_page(vring_dma_dev(vq), le64_to_cpu(desc->addr),
+                       le32_to_cpu(desc->len), dir);
+}
+
+/* Pack @truesize and @headroom into one opaque context pointer.
+ * Inverse of mergeable_ctx_to_truesize()/mergeable_ctx_to_headroom().
+ */
+void *mergeable_len_to_ctx(uint32_t truesize, uint32_t headroom)
+{
+    /* Headroom occupies the bits above MRG_CTX_HEADER_SHIFT. */
+    uint32_t packed = (headroom << MRG_CTX_HEADER_SHIFT) | truesize;
+
+    return (void *)(unsigned long)packed;
+}
+
+/* Decide whether a request should go through an indirect descriptor
+ * table: the feature must be negotiated, the request must span more
+ * than one buffer, and a ring slot must be free for the table entry.
+ * FIXME: tune this threshold.
+ */
+inline bool virtqueue_use_indirect(struct virtqueue *_vq, unsigned int total_sg)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    if (!vq->indirect)
+        return false;
+
+    return total_sg > 1 && vq->vq.num_free;
+}
+
+/* Allocate an indirect descriptor table for @total_sg entries.
+ *
+ * Lowmem is mandatory: virt_to_phys() on a highmem mapping would yield
+ * a bogus address for the virtqueue, so __GFP_HIGHMEM is stripped.
+ * Returns NULL on allocation failure.
+ */
+struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
+                                                gfp_t gfp)
+{
+    return kmalloc_array(total_sg, sizeof(struct vring_packed_desc),
+                         gfp & ~__GFP_HIGHMEM);
+}
+
+/* Queue an sg request through an indirect descriptor table.
+ *
+ * The sg entries are written into a separately allocated table, which
+ * is then published via a single ring descriptor, so only one ring
+ * slot is consumed regardless of @total_sg.
+ *
+ * Returns 0 on success, -ENOMEM on allocation/mapping failure,
+ * -ENOSPC when no ring slot is free.
+ */
+int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
+                                  struct scatterlist *sgs[],
+                                  unsigned int total_sg, unsigned int out_sgs,
+                                  unsigned int in_sgs, void *data, gfp_t gfp)
+{
+    struct vring_packed_desc *desc = NULL;
+    struct scatterlist *sg = NULL;
+    uint32_t i = 0;
+    uint32_t n = 0;
+    uint32_t err_idx = 0;
+    uint16_t head = 0;
+    uint16_t id = 0;
+    dma_addr_t addr;
+
+    head = vq->packed.next_avail_idx;
+    desc = alloc_indirect_packed(total_sg, gfp);
+    if (desc == NULL) {
+        return -ENOMEM;
+    }
+
+    /* The whole request occupies exactly one ring descriptor. */
+    if (unlikely(vq->vq.num_free < 1)) {
+        LOG_DEBUG("can't add buf len 1 - avail = 0\n");
+        kfree(desc);
+        /* NOTE(review): END_USE without a matching START_USE in this
+         * function — presumably balanced by the caller; confirm. */
+        END_USE(vq);
+        return -ENOSPC;
+    }
+
+    i = 0;
+    id = vq->free_head;
+    BUG_ON(id == vq->packed.vring.num);
+
+    /* Fill the indirect table: one descriptor per sg entry, out
+     * buffers first, then in (device-writable) buffers. */
+    for (n = 0; n < out_sgs + in_sgs; n++) {
+        for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+            addr = vring_map_one_sg(
+                vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+            if (vring_mapping_error(vq, addr)) {
+                goto unmap_release;
+            }
+
+            desc[i].flags = cpu_to_le16(n < out_sgs ? 0 : VRING_DESC_F_WRITE);
+            desc[i].addr = cpu_to_le64(addr);
+            desc[i].len = cpu_to_le32(sg->length);
+            i++;
+        }
+    }
+
+    /* Now that the indirect table is filled in, map it. */
+    addr = vring_map_single(vq, desc,
+                            total_sg * sizeof(struct vring_packed_desc),
+                            DMA_TO_DEVICE);
+    if (vring_mapping_error(vq, addr)) {
+        goto unmap_release;
+    }
+
+    /* Publish the table through the ring slot at @head. */
+    vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
+    vq->packed.vring.desc[head].len =
+        cpu_to_le32(total_sg * sizeof(struct vring_packed_desc));
+    vq->packed.vring.desc[head].id = cpu_to_le16(id);
+
+    /* Remember the mapping so it can be unmapped on completion. */
+    if (vq->use_dma_api) {
+        vq->packed.desc_extra[id].addr = addr;
+        vq->packed.desc_extra[id].len =
+            total_sg * sizeof(struct vring_packed_desc);
+        vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
+                                          vq->packed.avail_used_flags;
+    }
+
+    /*
+     * A driver MUST NOT make the first descriptor in the list
+     * available before all subsequent descriptors comprising
+     * the list are made available.
+     */
+    vqm_wmb(vq->weak_barriers);
+    vq->packed.vring.desc[head].flags =
+        cpu_to_le16(VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags);
+
+    /* We're using some buffers from the free list. */
+    vq->vq.num_free -= 1;
+
+    /* Update free pointer; flip the wrap counter when passing the end. */
+    n = head + 1;
+    if (n >= vq->packed.vring.num) {
+        n = 0;
+        vq->packed.avail_wrap_counter ^= 1;
+        vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL |
+                                       1 << VRING_PACKED_DESC_F_USED;
+    }
+    vq->packed.next_avail_idx = n;
+    vq->free_head = vq->packed.desc_extra[id].next;
+
+    /* Store token and indirect buffer state. */
+    vq->packed.desc_state[id].num = 1;
+    vq->packed.desc_state[id].data = data;
+    vq->packed.desc_state[id].indir_desc = desc;
+    vq->packed.desc_state[id].last = id;
+
+    vq->num_added += 1;
+
+    LOG_DEBUG("added buffer head %i to %p\n", head, vq);
+    END_USE(vq);
+
+    return 0;
+
+unmap_release:
+    /* Unmap every sg entry mapped so far, then drop the table. */
+    err_idx = i;
+
+    for (i = 0; i < err_idx; i++) {
+        vring_unmap_desc_packed(vq, &desc[i]);
+    }
+
+    kfree(desc);
+
+    END_USE(vq);
+    return -ENOMEM;
+}
+
+/* Expose a scatter/gather request to the device on a packed ring.
+ *
+ * Tries the indirect path first when worthwhile (falling back to
+ * direct descriptors only on -ENOMEM), otherwise writes one ring
+ * descriptor per sg entry. The head descriptor's flags are published
+ * last, after a write barrier, per the packed-ring spec.
+ *
+ * @data: caller token returned later by virtqueue_get_buf (required).
+ * @ctx: per-buffer context; mutually exclusive with indirect mode.
+ * Returns 0 on success, -EIO (broken ring or mapping failure),
+ * -ENOSPC (ring full), or an error from the indirect path.
+ */
+int32_t virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[],
+                             uint32_t total_sg, uint32_t out_sgs,
+                             uint32_t in_sgs, void *data, void *ctx, gfp_t gfp)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    struct vring_packed_desc *desc = NULL;
+    struct scatterlist *sg = NULL;
+    uint32_t i = 0;
+    uint32_t n = 0;
+    uint32_t c = 0;
+    uint32_t descs_used = 0;
+    uint32_t err_idx = 0;
+    __le16 head_flags = 0;
+    __le16 flags = 0;
+    uint16_t head = 0;
+    uint16_t id = 0;
+    uint16_t prev = 0;
+    uint16_t curr = 0;
+    uint16_t avail_used_flags = 0;
+    int32_t err = 0;
+
+    START_USE(vq);
+
+    BUG_ON(data == NULL);
+    BUG_ON(ctx && vq->indirect);
+
+    if (unlikely(vq->broken)) {
+        END_USE(vq);
+        return -EIO;
+    }
+
+    LAST_ADD_TIME_UPDATE(vq);
+
+    BUG_ON(total_sg == 0);
+
+    if (virtqueue_use_indirect(_vq, total_sg)) {
+        err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs,
+                                            data, gfp);
+        if (err != -ENOMEM) {
+            END_USE(vq);
+            return err;
+        }
+        /* fall back on direct */
+    }
+
+    /* Snapshot avail/used flags so the error path can roll back. */
+    head = vq->packed.next_avail_idx;
+    avail_used_flags = vq->packed.avail_used_flags;
+
+    WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
+
+    desc = vq->packed.vring.desc;
+    i = head;
+    descs_used = total_sg;
+
+    if (unlikely(vq->vq.num_free < descs_used)) {
+        LOG_ERR("can't add buf len %i - avail = %i\n", descs_used,
+                vq->vq.num_free);
+        END_USE(vq);
+        return -ENOSPC;
+    }
+
+    id = vq->free_head;
+    BUG_ON(id == vq->packed.vring.num);
+
+    /* Walk the free-list (desc_extra[].next) while filling the ring. */
+    curr = id;
+    c = 0;
+    for (n = 0; n < out_sgs + in_sgs; n++) {
+        for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+            dma_addr_t addr = vring_map_one_sg(
+                vq, sg, n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+            if (vring_mapping_error(vq, addr)) {
+                goto unmap_release;
+            }
+
+            /* NEXT on all but the last descriptor; WRITE on in-buffers. */
+            flags = cpu_to_le16(vq->packed.avail_used_flags |
+                                (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
+                                (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
+
+            desc[i].addr = cpu_to_le64(addr);
+            desc[i].len = cpu_to_le32(sg->length);
+            desc[i].id = cpu_to_le16(id);
+
+            /* The head's flags are held back until everything else
+             * is visible (published after the barrier below). */
+            if (i == head) {
+                head_flags = flags;
+            } else {
+                desc[i].flags = flags;
+            }
+
+            if (unlikely(vq->use_dma_api)) {
+                vq->packed.desc_extra[curr].addr = addr;
+                vq->packed.desc_extra[curr].len = sg->length;
+                vq->packed.desc_extra[curr].flags = le16_to_cpu(flags);
+            }
+            prev = curr;
+            curr = vq->packed.desc_extra[curr].next;
+
+            /* Wrap the ring index and flip avail/used flag bits. */
+            if ((unlikely(++i >= vq->packed.vring.num))) {
+                i = 0;
+                vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL |
+                                               1 << VRING_PACKED_DESC_F_USED;
+            }
+        }
+    }
+
+    if (i < head) {
+        vq->packed.avail_wrap_counter ^= 1;
+    }
+
+    /* We're using some buffers from the free list. */
+    vq->vq.num_free -= descs_used;
+
+    /* Update free pointer */
+    vq->packed.next_avail_idx = i;
+    vq->free_head = curr;
+
+    /* Store token. */
+    vq->packed.desc_state[id].num = descs_used;
+    vq->packed.desc_state[id].data = data;
+    vq->packed.desc_state[id].indir_desc = ctx;
+    vq->packed.desc_state[id].last = prev;
+
+    /*
+     * A driver MUST NOT make the first descriptor in the list
+     * available before all subsequent descriptors comprising
+     * the list are made available.
+     */
+    vqm_wmb(vq->weak_barriers);
+    vq->packed.vring.desc[head].flags = head_flags;
+    vq->num_added += descs_used;
+
+    // LOG_INFO("added buffer head %i to %p\n", head, vq);
+    END_USE(vq);
+
+    return 0;
+
+unmap_release:
+    /* Roll back: restore flags and unmap everything mapped so far. */
+    err_idx = i;
+    i = head;
+    curr = vq->free_head;
+
+    vq->packed.avail_used_flags = avail_used_flags;
+
+    for (n = 0; n < total_sg; n++) {
+        if (i == err_idx) {
+            break;
+        }
+
+        vring_unmap_state_packed(vq, &vq->packed.desc_extra[curr]);
+        curr = vq->packed.desc_extra[curr].next;
+        i++;
+        if (i >= vq->packed.vring.num) {
+            i = 0;
+        }
+    }
+
+    END_USE(vq);
+    return -EIO;
+}
+
+/**
+ * virtqueue_add_inbuf_ctx - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @ctx: extra context for the token
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Thin wrapper over virtqueue_add_packed() with 0 out / 1 in sg list.
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg,
+                                uint32_t num, void *data, void *ctx, gfp_t gfp)
+{
+    return virtqueue_add_packed(vq, &sg, num, 0, 1, data, ctx, gfp);
+}
+
+/* Report whether the descriptor at @idx has been used by the device.
+ *
+ * Per the packed-ring spec a descriptor is "used" when its AVAIL and
+ * USED flag bits are equal and match the expected wrap counter.
+ */
+bool is_used_desc_packed(struct vring_virtqueue *vq, uint16_t idx,
+                         bool used_wrap_counter)
+{
+    uint16_t flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
+    bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
+    bool used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
+
+    return avail == used && used == used_wrap_counter;
+}
+
+/* Test for pending used buffers against an @off_wrap snapshot
+ * (wrap counter in the top bit, used index in the rest), as returned
+ * by virtqueue_enable_cb_prepare().
+ */
+bool virtqueue_poll_packed(struct virtqueue *_vq, uint16_t off_wrap)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    bool wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
+    uint16_t used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+
+    return is_used_desc_packed(vq, used_idx, wrap_counter);
+}
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @_vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    if (unlikely(vq->broken))
+        return false;
+
+    /* Order the flag read below against prior ring updates. */
+    vqm_mb(vq->weak_barriers);
+
+    return virtqueue_poll_packed(_vq, last_used_idx);
+}
+
+/* Re-enable device notifications on a packed ring.
+ *
+ * Returns an opaque snapshot (used index | wrap counter in the top
+ * bit) for a later virtqueue_poll() race check.
+ */
+unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    START_USE(vq);
+
+    /*
+     * We optimistically turn back on interrupts, then check if there was
+     * more to do.
+     */
+    if (vq->event) {
+        /* With EVENT_IDX, tell the device which descriptor to
+         * interrupt on. */
+        vq->packed.vring.driver->off_wrap = cpu_to_le16(
+            vq->last_used_idx | (vq->packed.used_wrap_counter
+                                 << VRING_PACKED_EVENT_F_WRAP_CTR));
+        /*
+         * We need to update event offset and event wrap
+         * counter first before updating event flags.
+         */
+        vqm_wmb(vq->weak_barriers);
+    }
+
+    /* Only flip the flags if callbacks were actually disabled. */
+    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
+        vq->packed.event_flags_shadow = vq->event ?
+                                        VRING_PACKED_EVENT_FLAG_DESC :
+                                        VRING_PACKED_EVENT_FLAG_ENABLE;
+        vq->packed.vring.driver->flags =
+            cpu_to_le16(vq->packed.event_flags_shadow);
+    }
+
+    END_USE(vq);
+    return vq->last_used_idx | ((uint16_t)vq->packed.used_wrap_counter
+                                << VRING_PACKED_EVENT_F_WRAP_CTR);
+}
+
+/* Re-enable callbacks and return a snapshot for virtqueue_poll().
+ *
+ * A pending event_triggered flag is cleared first so that a later
+ * virtqueue_disable_cb() is not skipped.
+ */
+int32_t virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    if (vq->event_triggered)
+        vq->event_triggered = false;
+
+    return virtqueue_enable_cb_prepare_packed(_vq);
+}
+
+/* True when the device has published at least one used descriptor
+ * beyond the driver's current consumption point. */
+bool more_used_packed(struct vring_virtqueue *vq)
+{
+    uint16_t next = vq->last_used_idx;
+
+    return is_used_desc_packed(vq, next, vq->packed.used_wrap_counter);
+}
+
+/* Release all descriptors of the completed request @id back to the
+ * free list, unmap its DMA state, and free/return its indirect table.
+ *
+ * @ctx: when non-NULL (non-indirect rings), receives the per-buffer
+ * context stored at add time.
+ */
+void detach_buf_packed(struct vring_virtqueue *vq, uint32_t id, void **ctx)
+{
+    struct vring_desc_state_packed *state = NULL;
+    struct vring_packed_desc *desc = NULL;
+    uint32_t i = 0;
+    uint32_t curr = 0;
+
+    state = &vq->packed.desc_state[id];
+
+    /* Clear data ptr. */
+    state->data = NULL;
+
+    /* Splice the request's descriptor chain onto the free list. */
+    vq->packed.desc_extra[state->last].next = vq->free_head;
+    vq->free_head = id;
+    vq->vq.num_free += state->num;
+
+    /* Unmap each descriptor of the chain (DMA API only). */
+    if (unlikely(vq->use_dma_api)) {
+        curr = id;
+        for (i = 0; i < state->num; i++) {
+            vring_unmap_state_packed(vq, &vq->packed.desc_extra[curr]);
+            curr = vq->packed.desc_extra[curr].next;
+        }
+    }
+
+    if (vq->indirect) {
+        uint32_t len;
+
+        /* Free the indirect table, if any, now that it's unmapped. */
+        desc = state->indir_desc;
+        if (!desc) {
+            return;
+        }
+
+        if (vq->use_dma_api) {
+            /* desc_extra[id].len is the table size in bytes. */
+            len = vq->packed.desc_extra[id].len;
+            for (i = 0; i < len / sizeof(struct vring_packed_desc); i++) {
+                vring_unmap_desc_packed(vq, &desc[i]);
+            }
+        }
+        kfree(desc);
+        state->indir_desc = NULL;
+    } else if (ctx) {
+        /* Non-indirect rings reuse indir_desc to stash the context. */
+        *ctx = state->indir_desc;
+    }
+}
+
+/* Fetch the next used buffer from a packed ring.
+ *
+ * Returns the token passed to virtqueue_add_*, or NULL when the ring
+ * is broken, empty, or corrupted. @len receives the device-written
+ * length; @ctx (optional) receives the per-buffer context.
+ */
+void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len,
+                                   void **ctx)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    uint16_t last_used = 0;
+    uint16_t id = 0;
+    void *ret = NULL;
+
+    START_USE(vq);
+
+    if (unlikely(vq->broken)) {
+        END_USE(vq);
+        return NULL;
+    }
+
+    if (!more_used_packed(vq)) {
+        // LOG_ERR("no more buffers in queue\n");
+        END_USE(vq);
+        return NULL;
+    }
+
+    /* Only get used elements after they have been exposed by host. */
+    vqm_rmb(vq->weak_barriers);
+
+    last_used = vq->last_used_idx;
+    id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
+    *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
+
+    /* Sanity-check the id the device handed back before using it to
+     * index desc_state. NOTE(review): these BAD_RING paths return
+     * without END_USE — presumably intentional, confirm. */
+    if (unlikely(id >= vq->packed.vring.num)) {
+        zxdh_print_vring_info(_vq, 0, vq->packed.vring.num);
+        BAD_RING(vq, "id %u out of range\n", id);
+        return NULL;
+    }
+    if (unlikely(!vq->packed.desc_state[id].data)) {
+        zxdh_print_vring_info(_vq, 0, vq->packed.vring.num);
+        BAD_RING(vq, "id %u is not a head!\n", id);
+        return NULL;
+    }
+
+    /* detach_buf_packed clears data, so grab it now. */
+    ret = vq->packed.desc_state[id].data;
+    detach_buf_packed(vq, id, ctx);
+
+    /* Advance past the whole chain; flip wrap counter on wrap-around. */
+    vq->last_used_idx += vq->packed.desc_state[id].num;
+    if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
+        vq->last_used_idx -= vq->packed.vring.num;
+        vq->packed.used_wrap_counter ^= 1;
+    }
+
+    /*
+     * If we expect an interrupt for the next entry, tell host
+     * by writing event index and flush out the write before
+     * the read in the next get_buf call.
+     */
+    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
+        vqm_store_mb(vq->weak_barriers, &vq->packed.vring.driver->off_wrap,
+                     cpu_to_le16(vq->last_used_idx |
+                                 (vq->packed.used_wrap_counter
+                                  << VRING_PACKED_EVENT_F_WRAP_CTR)));
+
+    LAST_ADD_TIME_INVALID(vq);
+
+    END_USE(vq);
+    return ret;
+}
+
+/* Fetch the next used buffer; convenience wrapper that discards the
+ * per-buffer context. See virtqueue_get_buf_ctx_packed(). */
+void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len)
+{
+    return virtqueue_get_buf_ctx_packed(_vq, len, NULL);
+}
+
+/*
+ * page->private chains pages together for big packets. Return a chain
+ * to the receive queue, splicing it onto the FRONT of rq->pages so the
+ * most recently used pages are reused first.
+ */
+void give_pages(struct receive_queue *rq, struct page *page)
+{
+    struct page *tail = page;
+
+    /* Walk to the last page of the chain being returned. */
+    while (tail->private)
+        tail = (struct page *)tail->private;
+
+    /* Link the existing list behind it and make @page the new head. */
+    tail->private = (unsigned long)rq->pages;
+    rq->pages = page;
+}
+
+/* Reclaim completed TX skbs from @sq and fold the totals into both the
+ * per-queue and net_device statistics.
+ * @in_napi: true when called from NAPI context (passed to
+ * napi_consume_skb for bulk freeing).
+ */
+void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq,
+                        bool in_napi)
+{
+    uint32_t len = 0;
+    uint32_t packets = 0;
+    uint32_t bytes = 0;
+    struct sk_buff *skb = NULL;
+    void *ptr = NULL;
+
+    for (;;) {
+        ptr = virtqueue_get_buf(sq->vq, &len);
+        if (ptr == NULL)
+            break;
+
+        skb = ptr;
+        bytes += skb->len;
+        napi_consume_skb(skb, in_napi);
+        packets++;
+    }
+
+    /* Avoid overhead when no packets have been processed
+     * happens when called speculatively from start_xmit.
+     */
+    if (packets == 0)
+        return;
+
+    u64_stats_update_begin(&sq->stats.syncp);
+    sq->stats.bytes += bytes;
+    sq->stats.packets += packets;
+    netdev->stats.tx_bytes += bytes;
+    netdev->stats.tx_packets += packets;
+    u64_stats_update_end(&sq->stats.syncp);
+}
+
+/* Ask the device to stop sending used-buffer notifications for this
+ * packed ring. Skips the register write if already disabled. */
+void virtqueue_disable_cb_packed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE)
+        return;
+
+    vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+    vq->packed.vring.driver->flags =
+        cpu_to_le16(vq->packed.event_flags_shadow);
+}
+
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+void virtqueue_disable_cb(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    /* A device that already fired its event won't fire another one;
+     * there is nothing left to suppress. */
+    if (vq->event_triggered)
+        return;
+
+    virtqueue_disable_cb_packed(_vq);
+}
+
+/* Schedule @napi for @vq, masking further virtqueue callbacks while
+ * the poll routine runs. No-op if NAPI is already scheduled. */
+void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq)
+{
+    if (!napi_schedule_prep(napi))
+        return;
+
+    virtqueue_disable_cb(vq);
+    __napi_schedule(napi);
+}
+
+/* Enable @napi for @vq and immediately schedule a poll pass. */
+void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+{
+    napi_enable(napi);
+
+    /* If all buffers were filled by other side before we napi_enabled, we
+     * won't get another interrupt, so process any outstanding packets now.
+     * Call local_bh_enable after to trigger softIRQ processing.
+     */
+    local_bh_disable();
+    virtqueue_napi_schedule(napi, vq);
+    local_bh_enable();
+}
+
+/* Enable the TX NAPI instance for @vq, but only when TX NAPI is in
+ * use (a zero weight means TX interrupts are not polled). */
+void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq,
+                            struct napi_struct *napi)
+{
+    if (napi->weight)
+        virtnet_napi_enable(vq, napi);
+}
+
+/* Disable TX NAPI; counterpart of virtnet_napi_tx_enable(), so it is
+ * likewise a no-op when TX NAPI was never enabled (zero weight). */
+void virtnet_napi_tx_disable(struct napi_struct *napi)
+{
+    if (napi->weight) {
+        napi_disable(napi);
+    }
+}
+
+/* NAPI poll handler for a TX queue: reclaims completed skbs under the
+ * tx lock, wakes the queue when space frees up, then re-enables
+ * callbacks and re-schedules itself if completions raced in.
+ * Always reports 0 work done (TX reclaim doesn't consume RX budget).
+ */
+int virtnet_poll_tx(struct napi_struct *napi, int budget)
+{
+    struct send_queue *sq = container_of(napi, struct send_queue, napi);
+    struct zxdh_en_priv *en_priv = netdev_priv(sq->vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t index = vq2txq(sq->vq);
+    struct netdev_queue *txq = NULL;
+    int32_t opaque = 0;
+    bool done = false;
+
+    txq = netdev_get_tx_queue(en_dev->netdev, index);
+    __netif_tx_lock(txq, raw_smp_processor_id());
+    virtqueue_disable_cb(sq->vq);
+    free_old_xmit_skbs(en_dev->netdev, sq, true);
+
+    /* Enough room for a worst-case packet again: let the stack resume. */
+    if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+        netif_tx_wake_queue(txq);
+    }
+
+    /* Snapshot ring state before completing NAPI, so the race check
+     * below can detect completions that arrived in between. */
+    opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+    done = napi_complete_done(napi, 0);
+
+    /* NAPI stayed scheduled: keep callbacks off, we'll poll again. */
+    if (!done) {
+        virtqueue_disable_cb(sq->vq);
+    }
+
+    __netif_tx_unlock(txq);
+
+    if (done) {
+        /* A completion slipped in after enable_cb_prepare: re-arm. */
+        if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+            if (napi_schedule_prep(napi)) {
+                __netif_tx_lock(txq, raw_smp_processor_id());
+                virtqueue_disable_cb(sq->vq);
+                __netif_tx_unlock(txq);
+                __napi_schedule(napi);
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Re-enable callbacks on a packed ring, asking the device (when
+ * EVENT_IDX is negotiated) to delay the interrupt until ~3/4 of the
+ * outstanding buffers have been used.
+ *
+ * Returns false if used buffers are already pending (caller should
+ * poll again), true otherwise.
+ */
+bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    uint16_t used_idx = 0;
+    uint16_t wrap_counter = 0;
+    uint16_t bufs = 0;
+
+    START_USE(vq);
+
+    /*
+     * We optimistically turn back on interrupts, then check if there was
+     * more to do.
+     */
+
+    if (vq->event) {
+        /* TODO: tune this threshold */
+        bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
+        wrap_counter = vq->packed.used_wrap_counter;
+
+        /* The event index may land past the ring end: wrap it and
+         * flip the expected wrap counter accordingly. */
+        used_idx = vq->last_used_idx + bufs;
+        if (used_idx >= vq->packed.vring.num) {
+            used_idx -= vq->packed.vring.num;
+            wrap_counter ^= 1;
+        }
+
+        vq->packed.vring.driver->off_wrap = cpu_to_le16(
+            used_idx | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+
+        /*
+         * We need to update event offset and event wrap
+         * counter first before updating event flags.
+         */
+        vqm_wmb(vq->weak_barriers);
+    }
+
+    /* Only flip the flags if callbacks were actually disabled. */
+    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
+        vq->packed.event_flags_shadow = vq->event ?
+                                        VRING_PACKED_EVENT_FLAG_DESC :
+                                        VRING_PACKED_EVENT_FLAG_ENABLE;
+        vq->packed.vring.driver->flags =
+            cpu_to_le16(vq->packed.event_flags_shadow);
+    }
+
+    /*
+     * We need to update event suppression structure first
+     * before re-checking for more used buffers.
+     */
+    vqm_mb(vq->weak_barriers);
+
+    /* Race check: buffers were used while we re-armed. */
+    if (is_used_desc_packed(vq, vq->last_used_idx,
+                            vq->packed.used_wrap_counter)) {
+        END_USE(vq);
+        return false;
+    }
+
+    END_USE(vq);
+    return true;
+}
+/* Convert a 16-bit virtio value to CPU byte order, honoring the
+ * device endianness indicated by @little_endian. */
+uint16_t __vqm16_to_cpu(bool little_endian, __vqm16 val)
+{
+    return little_endian ? le16_to_cpu((__force __le16)val) :
+                           be16_to_cpu((__force __be16)val);
+}
+
+/* Legacy (pre-VERSION_1) devices use guest-native byte order: report
+ * whether this build targets a little-endian CPU. */
+static inline bool zxdh_legacy_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+    return true;
+#else
+    return false;
+#endif
+}
+
+/* Device data is little-endian when VERSION_1 is negotiated (modern
+ * devices), or when a legacy device runs on a little-endian CPU. */
+bool zxdh_is_little_endian(struct net_device *dev)
+{
+    return zxdh_has_feature(dev, ZXDH_F_VERSION_1) ||
+           zxdh_legacy_is_little_endian();
+}
+
+/* Memory accessors */
+/* Convert a device-order 16-bit value to CPU order for @netdev. */
+uint16_t vqm16_to_cpu(struct net_device *netdev, __vqm16 val)
+{
+    return __vqm16_to_cpu(zxdh_is_little_endian(netdev), val);
+}
+
+/* Extract the headroom packed into a mergeable-buffer context pointer
+ * by mergeable_len_to_ctx(). */
+uint32_t mergeable_ctx_to_headroom(void *mrg_ctx)
+{
+    unsigned long ctx = (unsigned long)mrg_ctx;
+
+    return ctx >> MRG_CTX_HEADER_SHIFT;
+}
+
+/* Extract the truesize packed into a mergeable-buffer context pointer
+ * by mergeable_len_to_ctx(). */
+uint32_t mergeable_ctx_to_truesize(void *mrg_ctx)
+{
+    unsigned long ctx = (unsigned long)mrg_ctx;
+
+    return ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
+}
+
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    /* Clear a pending event so disable_cb() works again later. */
+    if (vq->event_triggered)
+        vq->event_triggered = false;
+
+    return virtqueue_enable_cb_delayed_packed(_vq);
+}
+
+/* Opportunistically reclaim TX completions from the send queue paired
+ * with @rq (same index), called from the RX poll path. Only runs when
+ * TX NAPI is in use and the tx lock can be taken without blocking.
+ */
+void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t index = vq2rxq(rq->vq);
+    struct send_queue *sq = &en_dev->sq[index];
+    struct netdev_queue *txq = netdev_get_tx_queue(en_dev->netdev, index);
+
+    /* Zero weight: TX NAPI not used, nothing to clean here. */
+    if (!sq->napi.weight) {
+        return;
+    }
+
+    if (__netif_tx_trylock(txq)) {
+        /* Loop until callbacks re-arm without racing a completion. */
+        do {
+            virtqueue_disable_cb(sq->vq);
+            free_old_xmit_skbs(en_dev->netdev, sq, true);
+        } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+
+        /* Room for a worst-case packet again: let the stack resume. */
+        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+            netif_tx_wake_queue(txq);
+        }
+
+        __netif_tx_unlock(txq);
+    }
+}
+
+/* The per-packet zxdh header is stashed in skb->cb (48 bytes); return
+ * it as a typed pointer. */
+inline struct zxdh_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
+{
+    return (struct zxdh_net_hdr *)skb->cb;
+}
+
+/* Called from bottom half context */
+/* Build an sk_buff from a received page fragment.
+ *
+ * Strips the device header (pd_len * HDR_2B_UNIT bytes) from the
+ * payload, copies it into skb->cb, and either wraps the page with
+ * build_skb() (zero-copy, when alignment and tailroom allow) or
+ * allocates a fresh skb and copies/attaches the data.
+ * Returns NULL on allocation failure.
+ */
+struct sk_buff *page_to_skb(struct zxdh_en_device *en_dev,
+                            struct receive_queue *rq, struct page *page,
+                            uint32_t offset, uint32_t len, uint32_t truesize,
+                            uint32_t metasize, uint32_t headroom)
+{
+    struct sk_buff *skb = NULL;
+    struct zxdh_net_hdr *hdr = NULL;
+    uint32_t copy = 0;
+    uint32_t hdr_len = 0;
+    struct page *page_to_free = NULL;
+    int32_t tailroom = 0;
+    int32_t shinfo_size = 0;
+    char *p = NULL;
+    char *hdr_p = NULL;
+    char *buf = NULL;
+    uint32_t hdr_len_tmp = 0;
+
+    p = page_address(page) + offset;
+    hdr_p = p;
+
+    /* Device header length is carried in the header itself (pd_len,
+     * in 2-byte units). */
+    hdr_len = (((struct zxdh_net_hdr *)hdr_p)->pd_len) * HDR_2B_UNIT;
+
+    /* If headroom is not 0, there is an offset between the beginning of the
+     * data and the allocated space, otherwise the data and the allocated
+     * space are aligned.
+     *
+     * Buffers with headroom use PAGE_SIZE as alloc size, see
+     * add_recvbuf_mergeable() + get_mergeable_buf_len()
+     */
+    truesize = headroom ? PAGE_SIZE : truesize;
+    tailroom = truesize - headroom;
+    buf = p - headroom;
+
+    /* Skip the device header: the skb payload starts after it. */
+    len -= hdr_len;
+    offset += hdr_len;
+    p += hdr_len;
+    tailroom -= hdr_len + len;
+
+    shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+    /* Zero-copy path: wrap the page directly when the data is aligned
+     * and there is room for the shared info at the tail. */
+    if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
+        skb = build_skb(buf, truesize);
+        if (unlikely(!skb)) {
+            return NULL;
+        }
+
+        skb_reserve(skb, p - buf);
+        skb_put(skb, len);
+
+        /* Return any chained pages (big packets) to the rx pool. */
+        page = (struct page *)page->private;
+        if (page) {
+            give_pages(rq, page);
+        }
+        goto ok;
+    }
+
+    /* copy small packet so we can reuse these pages for small data */
+    skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
+    if (unlikely(!skb)) {
+        return NULL;
+    }
+
+    /* Copy all frame if it fits skb->head */
+    if (len <= skb_tailroom(skb)) {
+        copy = len;
+    } else {
+        copy = ETH_HLEN + metasize;
+    }
+    skb_put_data(skb, p, copy);
+
+    len -= copy;
+    offset += copy;
+
+    /* Remainder stays in the page, attached as a fragment; otherwise
+     * the page can be released once the header is saved. */
+    if (len) {
+        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+    } else {
+        page_to_free = page;
+    }
+
+ok:
+    /* Preserve the device header in skb->cb for later processing.
+     * NOTE(review): clamped to 48 — presumably sizeof(skb->cb); a
+     * pd_len beyond that is silently truncated. Confirm. */
+    hdr = skb_vnet_hdr(skb);
+    hdr_len_tmp = hdr_len > 48 ? 48 : hdr_len; // todo
+    memcpy(hdr, hdr_p, hdr_len_tmp);
+    // memcpy(hdr, hdr_p, hdr_len);
+
+    if (page_to_free) {
+        put_page(page_to_free);
+    }
+
+    if (metasize) {
+        __skb_pull(skb, metasize);
+        skb_metadata_set(skb, metasize);
+    }
+
+    return skb;
+}
+/**
+ * virtqueue_add_outbuf - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (must be well-formed and terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Thin wrapper over virtqueue_add_packed() with 1 out / 0 in sg list.
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg,
+                             uint32_t num, void *data, gfp_t gfp)
+{
+    return virtqueue_add_packed(vq, &sg, num, 1, 0, data, NULL, gfp);
+}
+
+/* Reassemble one frame that the device scattered over several receive
+ * buffers (mergeable-buffer layout).  The first buffer begins with the
+ * zxdh_net_hdr whose num_buffers field says how many buffers belong to
+ * this frame; follow-up buffers are attached to the head skb as page
+ * frags, or chained as extra skbs once MAX_SKB_FRAGS is reached.
+ *
+ * Returns the head skb on success.  On error every buffer of the frame is
+ * released, the drop/error counters are updated, and NULL is returned.
+ */
+struct sk_buff *receive_mergeable(struct net_device *netdev,
+                                  struct zxdh_en_device *en_dev,
+                                  struct receive_queue *rq, void *buf,
+                                  void *ctx, uint32_t len,
+                                  struct virtnet_rq_stats *stats)
+{
+    struct zxdh_net_hdr *hdr = buf;
+    uint16_t num_buf = vqm16_to_cpu(netdev, hdr->num_buffers);
+    struct page *page = virt_to_head_page(buf);
+    int32_t offset = buf - page_address(page);
+    struct sk_buff *head_skb = NULL;
+    struct sk_buff *curr_skb = NULL;
+    /* truesize/headroom were encoded into the opaque ctx when the buffer
+     * was posted (see add_recvbuf_mergeable / mergeable_len_to_ctx). */
+    uint32_t truesize = mergeable_ctx_to_truesize(ctx);
+    uint32_t headroom = mergeable_ctx_to_headroom(ctx);
+    uint32_t metasize = 0;
+
+    /* Byte counters exclude the PD header; pd_len is in 2-byte units. */
+    stats->bytes += (len - (hdr->pd_len * HDR_2B_UNIT));
+    netdev->stats.rx_bytes += (len - (hdr->pd_len * HDR_2B_UNIT));
+
+    /* The device must never report more data than the buffer we posted. */
+    if (unlikely(len > truesize)) {
+        LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", netdev->name,
+                len, (unsigned long)ctx);
+        netdev->stats.rx_length_errors++;
+        netdev->stats.rx_errors++;
+        goto err_skb;
+    }
+
+    /* Build the head skb from the first buffer. */
+    head_skb = page_to_skb(en_dev, rq, page, offset, len, truesize, metasize,
+                           headroom);
+    curr_skb = head_skb;
+
+    if (unlikely(!curr_skb)) {
+        goto err_skb;
+    }
+    /* Pull in the remaining num_buf - 1 buffers of this frame. */
+    while (--num_buf) {
+        int32_t num_skb_frags;
+
+        buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx);
+        if (unlikely(!buf)) {
+            LOG_ERR("%s: rx error: %d buffers out of %d missing\n",
+                    netdev->name, num_buf,
+                    vqm16_to_cpu(netdev, hdr->num_buffers));
+            netdev->stats.rx_length_errors++;
+            netdev->stats.rx_errors++;
+            goto err_buf;
+        }
+
+        stats->bytes += len;
+        netdev->stats.rx_bytes += len;
+        page = virt_to_head_page(buf);
+
+        truesize = mergeable_ctx_to_truesize(ctx);
+        if (unlikely(len > truesize)) {
+            LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", netdev->name,
+                    len, (unsigned long)ctx);
+            netdev->stats.rx_length_errors++;
+            netdev->stats.rx_errors++;
+            goto err_skb;
+        }
+
+        /* Out of frag slots: chain a fresh zero-size skb and continue
+         * filling that one (frag_list on the head, ->next afterwards). */
+        num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+        if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+            struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+            if (unlikely(!nskb)) {
+                goto err_skb;
+            }
+            if (curr_skb == head_skb) {
+                skb_shinfo(curr_skb)->frag_list = nskb;
+            } else {
+                curr_skb->next = nskb;
+            }
+            curr_skb = nskb;
+            head_skb->truesize += nskb->truesize;
+            num_skb_frags = 0;
+        }
+
+        /* Data landing in a chained skb still counts on the head skb. */
+        if (curr_skb != head_skb) {
+            head_skb->data_len += len;
+            head_skb->len += len;
+            head_skb->truesize += truesize;
+        }
+        offset = buf - page_address(page);
+
+        /* If this chunk is contiguous with the last frag, extend it and
+         * drop the extra page reference; otherwise add a new frag (which
+         * consumes our reference). */
+        if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+            put_page(page);
+            skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, len, truesize);
+        } else {
+            skb_add_rx_frag(curr_skb, num_skb_frags, page, offset, len,
+                            truesize);
+        }
+    }
+
+    /* Feed the EWMA used to size future mergeable buffers. */
+    ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
+    return head_skb;
+
+err_skb:
+    /* Release the buffer we were processing, then drain and release any
+     * remaining buffers of this frame still sitting in the ring. */
+    put_page(page);
+    while (num_buf-- > 1) {
+        buf = virtqueue_get_buf(rq->vq, &len);
+        if (unlikely(!buf)) {
+            LOG_ERR("%s: rx error: %d buffers missing\n", netdev->name,
+                    num_buf);
+            netdev->stats.rx_length_errors++;
+            netdev->stats.rx_errors++;
+            break;
+        }
+        stats->bytes += len;
+        page = virt_to_head_page(buf);
+        put_page(page);
+    }
+err_buf:
+    stats->drops++;
+    netdev->stats.rx_dropped++;
+    dev_kfree_skb(head_skb);
+    return NULL;
+}
+
+/* Process one completed receive buffer: validate the minimum length,
+ * reassemble the (possibly multi-buffer) frame, apply VLAN-strip and
+ * checksum offload results from the PD/PI headers, then hand the skb to
+ * the stack via GRO.  Errors release the buffer/skb and update counters.
+ */
+void receive_buf(struct zxdh_en_device *en_dev, struct receive_queue *rq,
+                 void *buf, uint32_t len, void **ctx,
+                 struct virtnet_rq_stats *stats)
+{
+    struct net_device *netdev = en_dev->netdev;
+    struct sk_buff *skb = NULL;
+    struct zxdh_net_hdr_rcv *hdr_rcv = (struct zxdh_net_hdr_rcv *)buf;
+
+#ifdef TIME_STAMP_1588
+    int32_t ret = 0;
+#endif
+
+    /* Frame must at least hold the PD header (pd_len is in 2-byte units)
+     * plus an Ethernet header. */
+    if (unlikely(len < (hdr_rcv->pd_len * HDR_2B_UNIT) + ETH_HLEN)) {
+        LOG_ERR("%s: short packet %i\n", netdev->name, len);
+        netdev->stats.rx_length_errors++;
+        netdev->stats.rx_errors++;
+
+        put_page(virt_to_head_page(buf));
+        return;
+    }
+
+    skb = receive_mergeable(netdev, en_dev, rq, buf, ctx, len, stats);
+
+    if (unlikely(!skb)) {
+        /* receive_mergeable already dropped the buffers and counted it. */
+        return;
+    }
+
+    /* rx packet contain the strip label & open rxvlan offloading*/
+    if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+        hdr_rcv->pd_hdr.flags & (1 << 4)) {
+        /* NOTE(review): htons() on striped_ctci assumes the field arrives
+         * in CPU order — confirm ntohs isn't intended here. */
+        u16 vid = htons(hdr_rcv->pd_hdr.striped_ctci) & 0xfff;
+        LOG_DEBUG("recived packtes vlan stripped, vlan-id: %d\n", vid);
+        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+    }
+
+    /* Checksum verified by hardware only when none of the L3/L4/outer-IP
+     * error codes are set. */
+    if ((netdev->features & NETIF_F_RXCSUM) &&
+        !(hdr_rcv->pi_hdr.error_code[0] & PI_HDR_L4_CHKSUM_ERROR_CODE) &&
+        !(hdr_rcv->pi_hdr.error_code[1] & PI_HDR_L3_CHKSUM_ERROR_CODE) &&
+        !(hdr_rcv->pd_hdr.flags & OUTER_IP_CHKSUM_ERROT_CODE)) {
+        skb->ip_summed = CHECKSUM_UNNECESSARY;
+    } else {
+        skb->ip_summed = CHECKSUM_NONE;
+    }
+
+#ifdef TIME_STAMP_1588
+    ret = pkt_1588_proc_rcv(skb, hdr_rcv, en_dev->clock_no, en_dev);
+    if ((ret != PTP_SUCCESS) && (ret != IS_NOT_PTP_MSG)) {
+        LOG_ERR("pkt_1588_proc_rcv err!!!\n");
+        /* Fix: the skb never reaches the stack on this path — free it
+         * instead of leaking it. */
+        dev_kfree_skb(skb);
+        return;
+    }
+#endif
+
+    skb_record_rx_queue(skb, vq2rxq(rq->vq));
+    skb->protocol = eth_type_trans(skb, netdev);
+
+    napi_gro_receive(&rq->napi, skb);
+    return;
+}
+
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @_vq: the struct virtqueue
+ *
+ * Rings the device doorbell.  Needs no serialization.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
+ */
+bool virtqueue_notify(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+
+    if (unlikely(vq->broken)) {
+        return false;
+    }
+
+    /* Prod other side to tell it about changes. */
+    if (vq->notify(_vq)) {
+        return true;
+    }
+
+    /* Doorbell failed: mark the queue unusable from now on. */
+    vq->broken = true;
+    return false;
+}
+
+/* Post one mergeable receive buffer, carved out of the per-queue page
+ * frag allocator and sized from the EWMA of recent packet lengths.
+ * Returns 0 on success, -ENOMEM if the frag refill fails, or the
+ * virtqueue_add_inbuf_ctx() error. */
+int32_t add_recvbuf_mergeable(struct zxdh_en_device *en_dev,
+                              struct receive_queue *rq, gfp_t gfp)
+{
+    struct page_frag *alloc_frag = &rq->alloc_frag;
+    uint32_t headroom = 0;
+    uint32_t tailroom = 0;
+    /* headroom/tailroom are both 0 here, so room is 0 as well. */
+    uint32_t room = SKB_DATA_ALIGN(headroom + tailroom);
+    char *buf = NULL;
+    void *ctx = NULL;
+    int32_t err = 0;
+    uint32_t len = 0;
+    uint32_t hole = 0;
+
+    /* Extra tailroom is needed to satisfy XDP's assumption. This
+     * means rx frags coalescing won't work, but consider we've
+     * disabled GSO for XDP, it won't be a big issue.
+     */
+    len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+    if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) {
+        return -ENOMEM;
+    }
+
+    buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+    buf += headroom; /* advance address leaving hole at front of pkt */
+    /* Take our own reference on the frag page before publishing the
+     * buffer; the matching put_page happens on the RX/teardown path. */
+    get_page(alloc_frag->page);
+    alloc_frag->offset += len + room;
+    hole = alloc_frag->size - alloc_frag->offset;
+    if (hole < len + room) {
+        /* To avoid internal fragmentation, if there is very likely not
+         * enough space for another buffer, add the remaining space to
+         * the current buffer.
+         */
+        len += hole;
+        alloc_frag->offset += hole;
+    }
+
+    sg_init_one(rq->sg, buf, len);
+    /* Encode len + headroom into the opaque ctx; receive_mergeable()
+     * decodes it to recover truesize/headroom. */
+    ctx = mergeable_len_to_ctx(len, headroom);
+    err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+    if (err < 0) {
+        /* Posting failed — drop the reference we took above. */
+        put_page(virt_to_head_page(buf));
+    }
+
+    return err;
+}
+
+/* Assuming a given event_idx value from the other side, if
+ * we have just incremented index from old to new_idx,
+ * should we trigger an event? */
+int32_t vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
+{
+    /* Note: Xen has similar logic for notification hold-off
+     * in include/xen/interface/io/ring.h with req_event and req_prod
+     * corresponding to event_idx + 1 and new_idx respectively.
+     * Note also that req_event and req_prod in Xen start at 1,
+     * event indexes in custom queue start at 0.
+     *
+     * An event is due exactly when event_idx lies inside the half-open
+     * window (old, new_idx], evaluated in modulo-2^16 arithmetic. */
+    __u16 event_dist = new_idx - event_idx - 1;
+    __u16 added_dist = new_idx - old;
+
+    return event_dist < added_dist;
+}
+
+/* Decide whether the device needs a doorbell after buffers were added to
+ * a packed ring, honouring the device's event-suppression area.
+ * Returns true if the caller should notify the device. */
+bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    uint16_t new = 0;
+    uint16_t old = 0;
+    uint16_t off_wrap = 0;
+    uint16_t flags = 0;
+    uint16_t wrap_counter = 0;
+    uint16_t event_idx = 0;
+    bool needs_kick = false;
+    /* Union lets the whole event-suppression struct be read in a single
+     * 32-bit load (see snapshot.u32 below). */
+    union {
+        struct {
+            __le16 off_wrap;
+            __le16 flags;
+        };
+        uint32_t u32;
+    } snapshot;
+
+    START_USE(vq);
+
+    /*
+     * We need to expose the new flags value before checking notification
+     * suppressions.
+     */
+    vqm_mb(vq->weak_barriers);
+
+    /* Window of descriptors added since the last kick: (old, new]. */
+    old = vq->packed.next_avail_idx - vq->num_added;
+    new = vq->packed.next_avail_idx;
+    vq->num_added = 0;
+
+    /* NOTE(review): single plain 32-bit load of the device area — assumes
+     * naturally-aligned access is atomic on the target; confirm. */
+    snapshot.u32 = *(uint32_t *)vq->packed.vring.device;
+    flags = le16_to_cpu(snapshot.flags);
+
+    LAST_ADD_TIME_CHECK(vq);
+    LAST_ADD_TIME_INVALID(vq);
+
+    /* Not in descriptor-event mode: kick unless notifications are
+     * disabled outright. */
+    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
+        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
+        goto out;
+    }
+
+    off_wrap = le16_to_cpu(snapshot.off_wrap);
+
+    /* Split off_wrap into wrap counter (top bit) and event index. */
+    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
+    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+    if (wrap_counter != vq->packed.avail_wrap_counter) {
+        /* Event index refers to the previous lap of the ring. */
+        event_idx -= vq->packed.vring.num;
+    }
+
+    needs_kick = vring_need_event(event_idx, new, old);
+out:
+    END_USE(vq);
+    return needs_kick;
+}
+
+/*
+ * Refill the receive ring until it is full or allocation fails, then
+ * kick the device if it asked to be notified.
+ *
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
+bool try_fill_recv(struct net_device *netdev, struct receive_queue *rq,
+                   gfp_t gfp)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    unsigned long irq_flags = 0;
+    bool oom = false;
+    int32_t rc = 0;
+
+    for (;;) {
+        rc = add_recvbuf_mergeable(en_dev, rq, gfp);
+        if (rc) {
+            oom = (rc == -ENOMEM);
+            break;
+        }
+        if (!rq->vq->num_free) {
+            break;
+        }
+    }
+
+    /* Doorbell only if the device's event suppression asks for it. */
+    if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) {
+        irq_flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
+        rq->stats.kicks++;
+        u64_stats_update_end_irqrestore(&rq->stats.syncp, irq_flags);
+    }
+
+    return !oom;
+}
+
+/* NAPI receive worker: drain up to @budget completed buffers from the
+ * ring, refill it when it drops below half, and fold the local stat
+ * counters into the per-queue stats under the u64_stats seqlock.
+ * Returns the number of packets processed. */
+int32_t virtnet_receive(struct receive_queue *rq, int32_t budget)
+{
+    /* NOTE(review): rq->vq->vdev is used directly both as the
+     * struct net_device and as the netdev_priv() argument — confirm
+     * vdev really stores the net_device pointer in this driver. */
+    struct net_device *netdev = rq->vq->vdev;
+    struct zxdh_en_priv *en_priv = netdev_priv(rq->vq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct virtnet_rq_stats stats = {};
+    uint32_t len = 0;
+    void *buf = NULL;
+    int32_t i = 0;
+    void *ctx = NULL;
+
+    while (stats.packets < budget &&
+           (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx))) {
+        receive_buf(en_dev, rq, buf, len, ctx, &stats);
+        stats.packets++;
+        netdev->stats.rx_packets++;
+    }
+
+    /* Refill when more than half the ring (capped by budget) is empty;
+     * if allocation fails, defer to the refill worker. */
+    if (rq->vq->num_free >
+        min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
+        if (!try_fill_recv(rq->vq->vdev, rq, GFP_ATOMIC)) {
+            schedule_delayed_work(&en_dev->refill, 0);
+        }
+    }
+
+    /* Merge the on-stack counters into rq->stats in one write section. */
+    u64_stats_update_begin(&rq->stats.syncp);
+    for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+        size_t offset = virtnet_rq_stats_desc[i].offset;
+        uint64_t *item;
+
+        item = (uint64_t *)((uint8_t *)&rq->stats + offset);
+        *item += *(uint64_t *)((uint8_t *)&stats + offset);
+    }
+    u64_stats_update_end(&rq->stats.syncp);
+
+    return stats.packets;
+}
+
+/* Finish a NAPI poll cycle: re-arm the virtqueue callback and close the
+ * race where buffers arrived between the last poll and re-arming by
+ * rescheduling NAPI if the ring is already non-empty again. */
+void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq,
+                             int32_t processed)
+{
+    int32_t opaque = virtqueue_enable_cb_prepare(vq);
+
+    if (!napi_complete_done(napi, processed)) {
+        /* NAPI stays scheduled — keep callbacks suppressed. */
+        virtqueue_disable_cb(vq);
+        return;
+    }
+
+    if (unlikely(virtqueue_poll(vq, opaque))) {
+        /* Work appeared while re-arming: poll again. */
+        virtqueue_napi_schedule(napi, vq);
+    }
+}
+
+/* NAPI poll handler for a receive queue: reclaim TX completions first,
+ * then consume up to @budget RX buffers, completing NAPI if the budget
+ * was not exhausted. */
+int virtnet_poll(struct napi_struct *napi, int budget)
+{
+    struct receive_queue *rq = container_of(napi, struct receive_queue, napi);
+    uint32_t done = 0;
+
+    virtnet_poll_cleantx(rq);
+
+    done = virtnet_receive(rq, budget);
+
+    /* Budget not used up => ring is drained; re-enable interrupts. */
+    if (done < budget) {
+        virtqueue_napi_complete(napi, rq->vq, done);
+    }
+
+    return done;
+}
+
+/* Allocate the send/receive queue arrays (sized max_queue_pairs) and
+ * initialize NAPI, scatterlists, EWMA state and stats seqlocks for the
+ * active pairs.  Returns 0 or -ENOMEM (partial allocations are undone).
+ *
+ * NOTE(review): arrays are sized by max_queue_pairs but the init loop
+ * runs over curr_queue_pairs; if max > curr (see the ZXDH_MSGQ case in
+ * zxdh_vqs_init) the trailing entries have no netif_napi_add — confirm
+ * virtnet_free_queues never netif_napi_del's an uninitialized entry. */
+int32_t virtnet_alloc_queues(struct zxdh_en_device *en_dev)
+{
+    int32_t i = 0;
+
+    en_dev->sq =
+        kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->sq), GFP_KERNEL);
+    if (unlikely(en_dev->sq == NULL)) {
+        LOG_ERR("vi->sq kcalloc failed\n");
+        goto err_sq;
+    }
+
+    en_dev->rq =
+        kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->rq), GFP_KERNEL);
+    if (unlikely(en_dev->rq == NULL)) {
+        LOG_ERR("vi->rq kcalloc failed\n");
+        goto err_rq;
+    }
+
+    INIT_DELAYED_WORK(&en_dev->refill, refill_work);
+
+    for (i = 0; i < en_dev->curr_queue_pairs; i++) {
+        en_dev->rq[i].pages = NULL;
+        netif_napi_add(en_dev->netdev, &en_dev->rq[i].napi, virtnet_poll,
+                       NAPI_POLL_WEIGHT);
+        netif_tx_napi_add(en_dev->netdev, &en_dev->sq[i].napi, virtnet_poll_tx,
+                          NAPI_POLL_WEIGHT);
+
+        sg_init_table(en_dev->rq[i].sg, ARRAY_SIZE(en_dev->rq[i].sg));
+        ewma_pkt_len_init(&en_dev->rq[i].mrg_avg_pkt_len);
+        sg_init_table(en_dev->sq[i].sg, ARRAY_SIZE(en_dev->sq[i].sg));
+
+        u64_stats_init(&en_dev->rq[i].stats.syncp);
+        u64_stats_init(&en_dev->sq[i].stats.syncp);
+    }
+
+    return 0;
+
+err_rq:
+    kfree(en_dev->sq);
+    en_dev->sq = NULL;
+err_sq:
+    return -ENOMEM;
+}
+
+/**
+ * virtqueue_set_affinity - setting affinity for a virtqueue
+ * @vq: the virtqueue
+ * @cpu_mask: the cpu no.
+ *
+ * Pay attention the function are best-effort: the affinity hint may not be set
+ * due to config support, irq type and sharing.
+ *
+ * Returns 0, or -EINVAL when the queue has no callback registered.
+ */
+int32_t virtqueue_set_affinity(struct virtqueue *vq,
+                               const struct cpumask *cpu_mask)
+{
+    if (vq->callback == NULL) {
+        LOG_ERR("vq->callback is null\n");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/* Deferred ring refill: with NAPI disabled per queue, try to repopulate
+ * each receive ring with GFP_KERNEL; if a ring still cannot be filled,
+ * re-arm the delayed work so we retry later. */
+void refill_work(struct work_struct *work)
+{
+    struct zxdh_en_device *en_dev =
+        container_of(work, struct zxdh_en_device, refill.work);
+    struct receive_queue *rq = NULL;
+    bool empty_again = false;
+    int32_t qid = 0;
+
+    for (qid = 0; qid < en_dev->curr_queue_pairs; qid++) {
+        rq = &en_dev->rq[qid];
+
+        napi_disable(&rq->napi);
+        empty_again = !try_fill_recv(en_dev->netdev, rq, GFP_KERNEL);
+        virtnet_napi_enable(rq->vq, &rq->napi);
+
+        /* In theory, this can happen: if we don't get any buffers in
+         * we will *never* try to fill again.
+         */
+        if (empty_again) {
+            schedule_delayed_work(&en_dev->refill, HZ / 2);
+        }
+    }
+}
+
+/* Event-queue notifier: walk every virtqueue bound to this event queue
+ * and invoke its callback when it has new used buffers.  Always returns
+ * 0 (notifier convention). */
+int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action,
+                            void *data)
+{
+    struct dh_eq_vq *eq_vq = container_of(nb, struct dh_eq_vq, irq_nb);
+    struct dh_eq_vqs *eq_vqs = container_of(eq_vq, struct dh_eq_vqs, vq_s);
+    struct zxdh_pci_vq_info *info = NULL;
+    struct vring_virtqueue *vq = NULL;
+    struct list_head *pos = NULL;
+
+    list_for_each(pos, &eq_vqs->vqs) {
+        info = list_entry(pos, struct zxdh_pci_vq_info, node);
+        vq = to_vvq(info->vq);
+
+        if (!more_used_packed(vq)) {
+            continue;
+        }
+
+        if (unlikely(vq->broken)) {
+            LOG_ERR("vq:%d is broken\n", info->vq->phy_index);
+            continue;
+        }
+
+        /* Just a hint for performance: so it's ok that this can be racy! */
+        if (vq->event) {
+            vq->event_triggered = true;
+        }
+
+        if (vq->vq.callback) {
+            vq->vq.callback(&vq->vq);
+        }
+    }
+
+    return 0;
+}
+
+/* Serializes physical-queue acquisition across devices. */
+static DEFINE_SPINLOCK(vp_find_lock);
+
+/* Acquire @nvqs physical queue indices from the parent device, create the
+ * corresponding virtqueues and enable them.  On failure all acquired
+ * physical queues are released and any created vqs destroyed.
+ * Returns 0 or a negative error. */
+int32_t vp_find_vqs_msix(struct net_device *netdev, unsigned nvqs,
+                         struct virtqueue *vqs[], vq_callback_t *callbacks[],
+                         const char *const names[], const bool *ctx)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t err = 0;
+    uint16_t qidx = 0;
+    int32_t phy_index = -1;
+
+    en_dev->vqs = kcalloc(nvqs, sizeof(*en_dev->vqs), GFP_KERNEL);
+    if (unlikely(en_dev->vqs == NULL)) {
+        LOG_ERR("zxdev->vqs kcalloc failed\n");
+        return -ENOMEM;
+    }
+
+    spin_lock(&vp_find_lock);
+    for (qidx = 0; qidx < nvqs; ++qidx) {
+        phy_index = en_dev->ops->get_phy_vq(en_dev->parent, qidx);
+        if (phy_index < 0) {
+            LOG_ERR("get_phy_vq failed: %d\n", phy_index);
+            err = phy_index;
+            goto err;
+        }
+        en_dev->phy_index[qidx] = phy_index;
+
+        vqs[qidx] = vp_setup_vq(netdev, qidx, callbacks[qidx], names[qidx],
+                                ctx ? ctx[qidx] : false, qidx);
+        if (IS_ERR_OR_NULL(vqs[qidx])) {
+            err = PTR_ERR(vqs[qidx]);
+            LOG_ERR("vp_setup_vq failed: %d\n", err);
+            goto err;
+        }
+
+        en_dev->ops->set_queue_enable(en_dev->parent, phy_index, true);
+    }
+    spin_unlock(&vp_find_lock);
+    return 0;
+
+err:
+    /* NOTE(review): qidx + 1 slots are released even when get_phy_vq
+     * failed for slot qidx (whose phy_index[] entry is still the 0xFF
+     * fill) — confirm release_phy_vq tolerates the unset slot. */
+    en_dev->ops->release_phy_vq(en_dev->parent, en_dev->phy_index, qidx + 1);
+    spin_unlock(&vp_find_lock);
+    vp_del_vqs(netdev);
+    return err;
+}
+
+/* How large should a single buffer be so a queue full of these can fit at
+ * least one full packet?
+ * Logic below assumes the mergeable buffer header is used.
+ */
+uint32_t mergeable_min_buf_len(struct zxdh_en_device *en_dev,
+                               struct virtqueue *vq)
+{
+    const uint32_t hdr_len = sizeof(struct zxdh_net_hdr);
+    uint32_t ring_entries = virtqueue_get_vring_size(vq);
+    uint32_t frame_len =
+        hdr_len + ETH_HLEN + VLAN_HLEN + en_dev->netdev->max_mtu;
+    /* Spread one maximal frame across the whole ring... */
+    uint32_t per_buf_len = DIV_ROUND_UP(frame_len, ring_entries);
+
+    /* ...but never go below GOOD_PACKET_LEN of payload per buffer. */
+    return max(max(per_buf_len, hdr_len) - hdr_len,
+               (uint32_t)GOOD_PACKET_LEN);
+}
+
+/* RX virtqueue callback: hand the queue over to its NAPI context. */
+void zxdh_en_recv_pkts(struct virtqueue *rvq)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(rvq->vdev);
+    struct receive_queue *rq = &en_priv->edev.rq[vq2rxq(rvq)];
+
+    virtqueue_napi_schedule(&rq->napi, rvq);
+}
+
+/* TX virtqueue callback: either defer completion reclaim to TX NAPI, or
+ * (when TX NAPI is off) wake the stalled subqueue directly. */
+void zxdh_en_xmit_pkts(struct virtqueue *tvq)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(tvq->vdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t txq = vq2txq(tvq);
+    struct napi_struct *napi = &en_dev->sq[txq].napi;
+
+    /* Suppress further interrupts. */
+    virtqueue_disable_cb(tvq);
+
+    if (napi->weight) {
+        virtqueue_napi_schedule(napi, tvq);
+        return;
+    }
+
+    /* We were probably waiting for more output buffers. */
+    netif_wake_subqueue(en_dev->netdev, txq);
+    en_dev->hw_stats.q_stats[txq].q_tx_wake++;
+}
+
+/* Build the parameter arrays for vp_find_vqs_msix (one RX + one TX vq per
+ * queue pair), create the virtqueues, and wire them into en_dev->rq/sq.
+ * The temporary arrays are freed on success and failure alike (the fall-
+ * through into err_find on success is intentional).
+ * Returns 0 or a negative error. */
+int32_t virtnet_find_vqs(struct zxdh_en_device *en_dev)
+{
+    vq_callback_t **callbacks = NULL;
+    struct virtqueue **vqs = NULL;
+    int32_t ret = -ENOMEM;
+    int32_t i = 0;
+    int32_t total_vqs = 0;
+    const char **names = NULL;
+    bool *ctx = NULL;
+
+    /* One RX and one TX virtqueue per pair. */
+    total_vqs = en_dev->max_queue_pairs * 2;
+
+    /* Allocate space for find_vqs parameters */
+    vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+    if (unlikely(vqs == NULL)) {
+        LOG_ERR("vqs kcalloc failed\n");
+        goto err_vq;
+    }
+
+    callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+    if (unlikely(callbacks == NULL)) {
+        LOG_ERR("callbacks kmalloc_array failed\n");
+        goto err_callback;
+    }
+
+    names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
+    if (unlikely(names == NULL)) {
+        LOG_ERR("names kmalloc_array failed\n");
+        goto err_names;
+    }
+
+    ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
+    if (unlikely(ctx == NULL)) {
+        LOG_ERR("ctx kmalloc failed\n");
+        goto err_ctx;
+    }
+
+    /* Allocate/initialize parameters for services send/receive virtqueues */
+    for (i = 0; i < en_dev->max_queue_pairs; i++) {
+        callbacks[rxq2vq(i)] = zxdh_en_recv_pkts;
+        callbacks[txq2vq(i)] = zxdh_en_xmit_pkts;
+        sprintf(en_dev->rq[i].name, "input.%d", i);
+        sprintf(en_dev->sq[i].name, "output.%d", i);
+        names[rxq2vq(i)] = en_dev->rq[i].name;
+        names[txq2vq(i)] = en_dev->sq[i].name;
+        /* Fix: ctx is guaranteed non-NULL here (checked right after its
+         * allocation above), so the former "if (ctx)" guard was dead. */
+        ctx[rxq2vq(i)] = true;
+    }
+
+    ret = vp_find_vqs_msix(en_dev->netdev, total_vqs, vqs, callbacks, names,
+                           ctx);
+    if (ret) {
+        LOG_ERR("vp_find_vqs_msix failed: %d\n", ret);
+        goto err_find;
+    }
+
+    for (i = 0; i < en_dev->max_queue_pairs; i++) {
+        en_dev->rq[i].vq = vqs[rxq2vq(i)];
+        en_dev->rq[i].min_buf_len =
+            mergeable_min_buf_len(en_dev, en_dev->rq[i].vq);
+        en_dev->sq[i].vq = vqs[txq2vq(i)];
+    }
+
+err_find:
+    kfree(ctx);
+    ctx = NULL;
+err_ctx:
+    kfree(names);
+    names = NULL;
+err_names:
+    kfree(callbacks);
+    callbacks = NULL;
+err_callback:
+    kfree(vqs);
+    vqs = NULL;
+err_vq:
+    return ret;
+}
+
+/* Tear down NAPI contexts for every queue pair and free the rq/sq arrays
+ * after an RCU grace period.
+ *
+ * NOTE(review): this deletes NAPI for all max_queue_pairs entries while
+ * virtnet_alloc_queues only netif_napi_add'ed curr_queue_pairs of them —
+ * confirm max == curr whenever this runs, or the trailing entries are
+ * netif_napi_del'ed without ever being added. */
+void virtnet_free_queues(struct zxdh_en_device *en_dev)
+{
+    int32_t i = 0;
+
+    for (i = 0; i < en_dev->max_queue_pairs; i++) {
+        netif_napi_del(&en_dev->rq[i].napi);
+        netif_napi_del(&en_dev->sq[i].napi);
+    }
+
+    /* We called __netif_napi_del(),
+     * we need to respect an RCU grace period before freeing zxdev->rq
+     */
+    synchronize_net();
+
+    kfree(en_dev->rq);
+    kfree(en_dev->sq);
+}
+
+/* Pull one still-pending (never completed) buffer token out of a packed
+ * ring; callers loop until NULL, at which point the ring must be fully
+ * free (asserted via BUG_ON). */
+void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
+{
+    struct vring_virtqueue *vq = to_vvq(_vq);
+    void *token = NULL;
+    uint32_t idx = 0;
+
+    START_USE(vq);
+
+    for (idx = 0; idx < vq->packed.vring.num; idx++) {
+        /* detach clears desc_state data, so grab the token first. */
+        token = vq->packed.desc_state[idx].data;
+        if (token != NULL) {
+            detach_buf_packed(vq, idx, NULL);
+            END_USE(vq);
+            return token;
+        }
+    }
+
+    /* That should have freed everything. */
+    BUG_ON(vq->vq.num_free != vq->packed.vring.num);
+
+    END_USE(vq);
+    return NULL;
+}
+
+/* Release every buffer still sitting in the rings at teardown: TX tokens
+ * are skbs (or raw msgq buffers on the last pair when ZXDH_MSGQ is on),
+ * RX tokens are page-frag pointers. */
+void zxdh_free_unused_bufs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct virtqueue *vq = NULL;
+    void *buf = NULL;
+    int32_t qid = 0;
+
+    /* Drain the send queues. */
+    for (qid = 0; qid < en_dev->max_queue_pairs; qid++) {
+        vq = en_dev->sq[qid].vq;
+        while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL) {
+#ifdef ZXDH_MSGQ
+            /* The last pair carries message-queue buffers, not skbs. */
+            if (en_dev->need_msgq && (qid == (en_dev->max_queue_pairs - 1))) {
+                ZXDH_FREE_PTR(buf);
+                continue;
+            }
+#endif
+            dev_kfree_skb(buf);
+        }
+    }
+
+    /* Drain the receive queues. */
+    for (qid = 0; qid < en_dev->max_queue_pairs; qid++) {
+        vq = en_dev->rq[qid].vq;
+        while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL) {
+            put_page(virt_to_head_page(buf));
+        }
+    }
+}
+
+/* Hand out a page for the queue: pop one from the rq->pages chain
+ * (linked through page->private) if available, else allocate fresh. */
+struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
+{
+    struct page *head = rq->pages;
+
+    if (!head) {
+        return alloc_page(gfp_mask);
+    }
+
+    rq->pages = (struct page *)head->private;
+    /* clear private here, it is used to chain pages */
+    head->private = 0;
+    return head;
+}
+
+/* Return every page chained on every receive queue to the allocator. */
+void _free_receive_bufs(struct zxdh_en_device *en_dev)
+{
+    int32_t qid = 0;
+
+    for (qid = 0; qid < en_dev->max_queue_pairs; qid++) {
+        while (en_dev->rq[qid].pages != NULL) {
+            __free_pages(get_a_page(&en_dev->rq[qid], GFP_KERNEL), 0);
+        }
+    }
+}
+
+/* RTNL-protected wrapper around _free_receive_bufs(). */
+void zxdh_free_receive_bufs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+
+    rtnl_lock();
+    _free_receive_bufs(&en_priv->edev);
+    rtnl_unlock();
+}
+
+/* Drop the reference each receive queue's page_frag allocator still
+ * holds on its current page. */
+void zxdh_free_receive_page_frags(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    struct page *frag_page = NULL;
+    int32_t qid = 0;
+
+    for (qid = 0; qid < en_dev->max_queue_pairs; qid++) {
+        frag_page = en_dev->rq[qid].alloc_frag.page;
+        if (frag_page != NULL) {
+            put_page(frag_page);
+        }
+    }
+}
+
+/* Destroy all virtqueues, unbind them from their event queues/channels
+ * (index of the last vq = 2 * pairs - 1), then free the queue arrays. */
+void zxdh_virtnet_del_vqs(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    uint32_t last_vq = en_dev->max_queue_pairs * 2 - 1;
+
+    vp_del_vqs(netdev);
+    en_dev->ops->vqs_unbind_eqs(en_dev->parent, last_vq);
+    en_dev->ops->vqs_channel_unbind_handler(en_dev->parent, last_vq);
+    virtnet_free_queues(en_dev);
+}
+
+/* Full teardown of the queue subsystem.  Order matters: reset the device
+ * first so it stops touching the rings, stop the refill worker, drain
+ * buffers, then destroy the virtqueues and give the physical queue
+ * indices back to the parent device. */
+void zxdh_vqs_uninit(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+
+    /* Quiesce the device before touching ring memory. */
+    zxdh_vp_reset(netdev);
+
+    /* No refill may run concurrently with buffer draining. */
+    cancel_delayed_work_sync(&en_dev->refill);
+    zxdh_free_unused_bufs(netdev);
+    zxdh_free_receive_bufs(netdev);
+    zxdh_free_receive_page_frags(netdev);
+    zxdh_virtnet_del_vqs(netdev);
+    en_dev->ops->release_phy_vq(en_dev->parent, en_dev->phy_index,
+                                en_dev->max_queue_pairs * 2);
+}
+
+/* Initialize the queue subsystem: decide the queue-pair count (bond and
+ * optional message-queue adjustments), allocate the rq/sq arrays, create
+ * the virtqueues and publish the real queue counts to the stack.
+ * Returns 0 or a negative error (allocations undone on failure). */
+int32_t zxdh_vqs_init(struct net_device *netdev)
+{
+    struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+    struct zxdh_en_device *en_dev = &en_priv->edev;
+    int32_t ret = 0;
+
+    en_dev->hdr_len = sizeof(struct zxdh_net_hdr);
+    en_dev->any_header_sg = zxdh_has_feature(netdev, ZXDH_F_ANY_LAYOUT);
+    en_dev->netdev->needed_headroom = en_dev->hdr_len;
+    en_dev->max_queue_pairs = max_pairs;
+    /* 0xFF marks "no physical queue acquired" in every slot. */
+    memset(en_dev->phy_index, 0xFF, sizeof(en_dev->phy_index));
+
+    if (en_dev->ops->is_bond(en_dev->parent)) {
+        en_dev->max_queue_pairs = ZXDH_BOND_ETH_MQ_PAIRS_NUM;
+    }
+    en_dev->curr_queue_pairs = en_dev->max_queue_pairs;
+
+#ifdef ZXDH_MSGQ
+    /* PFs may carry an extra message-queue pair beyond the data pairs;
+     * note this makes max_queue_pairs exceed curr_queue_pairs. */
+    if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) &&
+        ((!en_dev->ops->is_bond(en_dev->parent)) ||
+         (en_dev->ops->is_bond(en_dev->parent) &&
+          en_dev->ops->if_init(en_dev->parent)))) {
+        en_dev->need_msgq = true;
+        en_dev->max_queue_pairs += ZXDH_PQ_PAIRS_NUM;
+        LOG_INFO("max_queue_pairs: %d\n", en_dev->max_queue_pairs);
+    }
+#endif
+
+    INIT_LIST_HEAD(&en_dev->vqs_list);
+    spin_lock_init(&en_dev->vqs_list_lock);
+
+    INIT_LIST_HEAD(&en_dev->virtqueues);
+    spin_lock_init(&en_dev->lock);
+
+    /* Allocate services send & receive queues */
+    ret = virtnet_alloc_queues(en_dev);
+    if (ret) {
+        LOG_ERR("virtnet_alloc_queues failed: %d\n", ret);
+        return ret;
+    }
+
+    ret = virtnet_find_vqs(en_dev);
+    if (ret) {
+        LOG_ERR("virtnet_find_vqs failed: %d\n", ret);
+        goto err_find_vqs;
+    }
+
+    /* Fix: take the RTNL once for both queue-count updates instead of
+     * two back-to-back lock/unlock cycles. */
+    rtnl_lock();
+    netif_set_real_num_tx_queues(en_dev->netdev, en_dev->curr_queue_pairs);
+    netif_set_real_num_rx_queues(en_dev->netdev, en_dev->curr_queue_pairs);
+    rtnl_unlock();
+
+    return 0;
+
+err_find_vqs:
+    virtnet_free_queues(en_dev);
+    return ret;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_aux/queue.h b/src/net/drivers/net/ethernet/dinghai/en_aux/queue.h
index 0c370d71c6c75f5917d015f8c2d2fd9be43f4a0f..77a72bc00caa79885da91644eafb5de47532c607 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_aux/queue.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_aux/queue.h
@@ -1,830 +1,792 @@
-#ifndef __ZXDH_QUEUE_H__
-#define __ZXDH_QUEUE_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/*========================================================
- * 是否走1588处理流程(1588pd头宏开关):
- * 在提交代码时候这里注释掉,在使用1588功能时需要打开。
- *=========================================================*/
-// #define TIME_STAMP_1588
-
-/*========================================================
- * 是否打开依赖PTP驱动的接口调用代码:
- * 在提交代码时候这里注释掉,在实际调试时需要打开。
- *=========================================================*/
-#define PTP_DRIVER_INTERFACE_EN
-
-/**========================================================
- * 是否打开依赖os时间戳补丁接口的代码:
- * 在提交代码时候这里注释掉,在实际调试时需要打开。
- *=========================================================*/
-/* #define CGEL_TSTAMP_2_PATCH_EN TODO 补丁不可用*/
-
-/* 判断两个值是否相等,相等表示出错,打印信息后返回指定值 */
-#define CHECK_EQUAL_ERR(a, b, c, fmt, arg...) \
-do { \
- if (unlikely(a == b)) \
- { \
- LOG_ERR(fmt, ##arg); \
- return c; \
- } \
-} while(0)
-
-/* 判断两个值是否不等,不等表示出错,打印信息后返回指定值 */
-#define CHECK_UNEQUAL_ERR(a, b, c, fmt, arg...) \
-do { \
- if (unlikely(a != b)) \
- { \
- LOG_ERR(fmt, ##arg); \
- return c; \
- } \
-} while(0)
-
-#define ZXDH_MQ_PAIRS_NUM 8
-#define ZXDH_PQ_PAIRS_NUM 1
-#define ZXDH_MAX_PAIRS_NUM 128
-#define ZXDH_BOND_ETH_MQ_PAIRS_NUM 1
-#define ZXDH_MAX_QUEUES_NUM 4096
-#define ZXDH_PF_MAX_BAR_VAL 0x5
-#define ZXDH_PF_BAR0 0
-#define ZXDH_PF_MAX_DESC_NUM (32 * 1024)
-#define ZXDH_PF_MIN_DESC_NUM 1024
-#define ZXDH_INDIR_RQT_SIZE 256
-#define ZXDH_NET_HASH_KEY_SIZE 40
-
-#define VQM_HOST_BAR_OFFSET 0x0
-#define PHY_VQ_REG_OFFSET 0x5000
-#define LOCK_VQ_REG_OFFSET 0x90
-#define ZXDH_PHY_REG_BITS 32
-#define ZXDH_PF_LOCK_ENABLE_MASK 0x1
-#define ZXDH_PF_RELEASE_LOCK_VAL 0
-#define ZXDH_PF_GET_PHY_INDEX_DONE 1
-#define ZXDH_PF_GET_PHY_INDEX_BIT 1
-#define ZXDH_PF_WAIT_COUNT 2048
-#define ZXDH_PF_DELAY_US 10
-#define ZXDH_PF_RQ_TYPE 0
-#define ZXDH_PF_TQ_TYPE 1
-#define ZXDH_PF_POWER_INDEX2 2
-
-#define MSG_PAYLOAD_FIX_FIELD 8
-#define MSG_CHAN_PF_MODULE_ID 0
-#define MSG_PAYLOAD_TYPE_WRITE 1
-#define MSG_PAYLOAD_FIELD_MSG_CHL 2
-#define MSG_PAYLOAD_FIELD_DATA_CHL 3
-#define MSG_PAYLOAD_MSG_CHL_SLEN 4
-#define MSG_RECV_BUF_LEN 6
-
-#define ZXDH_MAC_NUM 6
-#define ZXDH_MAX_MTU 14000
-#define ZXDH_DEFAULT_MTU 1500
-
-/* The feature bitmap for zxdh net */
-#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
-#define ZXDH_NET_F_STATUS 16 /* net_config.status available */
-#define ZXDH_NET_F_CTRL_VQ 17 /* Control channel available */
-#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */
-#define ZXDH_F_ANY_LAYOUT 27 /* Can the device handle any descriptor layout? */
-#define ZXDH_RING_F_INDIRECT_DESC 28 /* We support indirect buffer descriptors */
-
-/* The Guest publishes the used index for which it expects an interrupt
- * at the end of the avail ring. Host should ignore the avail->flags field. */
-/* The Host publishes the avail index for which it expects a kick
- * at the end of the used ring. Guest should ignore the used->flags field. */
-#define ZXDH_RING_F_EVENT_IDX 29
-
-#define ZXDH_F_VERSION_1 32 /* v1.0 compliant */
-
-/*
- * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
- * If set - use platform DMA tools to access the memory.
- *
- * Note the reverse polarity (compared to most other features),
- * this is for compatibility with legacy systems.
- */
-#define ZXDH_F_ACCESS_PLATFORM 33
-
-/* This feature indicates support for the packed virtqueue layout. */
-#define ZXDH_F_RING_PACKED 34
-
-/*
- * This feature indicates that memory accesses by the driver and the
- * device are ordered in a way described by the platform.
- */
-#define ZXDH_F_ORDER_PLATFORM 36
-
-/* This marks a buffer as continuing via the next field. */
-#define VRING_DESC_F_NEXT 1
-/* This marks a buffer as write-only (otherwise read-only). */
-#define VRING_DESC_F_WRITE 2
-/* This means the buffer contains a list of buffer descriptors. */
-#define VRING_DESC_F_INDIRECT 4
-
-/*
- * Mark a descriptor as available or used in packed ring.
- * Notice: they are defined as shifts instead of shifted values.
- */
-#define VRING_PACKED_DESC_F_AVAIL 7
-#define VRING_PACKED_DESC_F_USED 15
-
-/* The Host uses this in used->flags to advise the Guest: don't kick me when
- * you add a buffer. It's unreliable, so it's simply an optimization. Guest
- * will still kick if it's out of buffers. */
-#define VRING_USED_F_NO_NOTIFY 1
-/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
- * when you consume a buffer. It's unreliable, so it's simply an
- * optimization. */
-#define VRING_AVAIL_F_NO_INTERRUPT 1
-
-/* Enable events in packed ring. */
-#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
-/* Disable events in packed ring. */
-#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
-/*
- * Enable events for a specific descriptor in packed ring.
- * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
- * Only valid if ZXDH_RING_F_EVENT_IDX has been negotiated.
- */
-#define VRING_PACKED_EVENT_FLAG_DESC 0x2
-
-/*
- * Wrap counter bit shift in event suppression structure
- * of packed ring.
- */
-#define VRING_PACKED_EVENT_F_WRAP_CTR 15
-
-/* Alignment requirements for vring elements */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
-
-#define MRG_CTX_HEADER_SHIFT 22
-
-
-/* FIXME: MTU in config. */
-#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
-#define GOOD_COPY_LEN 128
-
-
-#define TX_PORT_NP 0x00
-#define TX_PORT_DRS 0x01
-#define TX_PORT_DTP 0x02
-#define HDR_2B_UNIT 2
-#define ENABLE_PI_FLAG_32B 0x1
-#define DISABLE_PI_FIELD_PARSE 0x80
-#define IPV4_TYPE 0x0
-#define IPV6_TYPE 0x1
-#define NOT_IP_TYPE 0x2
-#define PKT_SRC_NP 0x0
-#define PKT_SRC_CPU 0x1
-#define PCODE_IP 0x1
-#define PCODE_TCP 0x2
-#define PCODE_UDP 0x3
-#define INVALID_ETH_PORT_ID 0xff
-#define ETH_MTU_4B_UNIT 4
-#define IP_FRG_CSUM_FLAG 0x8000
-#define NOT_IP_FRG_CSUM_FLAG 0x6000
-#define TCP_FRG_CSUM_FLAG 0x24
-#define NOT_TCP_FRG_CSUM_FLAG 0x30
-#define HDR_2B_UNIT 2
-
-#define HDR_BUFFER_LEN 100
-#define IP_BASE_HLEN 20
-#define IPV6_BASE_HLEN 40
-#define TCP_BASE_HLEN 20
-
-#define OUTER_IP_CHECKSUM_OFFSET (12)
-#define INNER_IP_CHECKSUM_OFFSET (15)
-#define INNER_L4_CHECKSUM_OFFSET (2)
-#define PI_HDR_L3_CHKSUM_ERROR_CODE (0xff)
-#define PI_HDR_L4_CHKSUM_ERROR_CODE (0xff)
-#define OUTER_IP_CHKSUM_ERROT_CODE (0x20)
-
-#define RX_VLAN_STRIPED_MASK (1 << 4)
-#define RX_QINQ_STRIPED_MASK (1 << 14)
-#define RX_TPID_VLAN_ID_MASK (0xfff)
-
-/* PD header offload flags */
-#define PANELID_EN (1 << 15)
-
-/* PD header sk_prio */
-#define ZXDH_DCBNL_SET_SK_PRIO(sk_prio) ((0x7 & sk_prio) << 8)
-
-/*
- * __vqm{16,32,64} have the following meaning:
- * - __u{16,32,64} for zxdh devices in legacy mode, accessed in native endian
- * - __le{16,32,64} for standard-compliant zxdh devices
- */
-typedef __u16 __bitwise __vqm16;
-typedef __u32 __bitwise __vqm32;
-typedef __u64 __bitwise __vqm64;
-
-
-/* Constants for MSI-X */
-/* Use first vector for configuration changes, second and the rest for
- * virtqueues Thus, we need at least 2 vectors for MSI. */
-enum
-{
- VP_MSIX_CONFIG_VECTOR = 0,
- VP_MSIX_VQ_VECTOR = 1,
-};
-
-
-struct vring_packed_desc_event
-{
- /* Descriptor Ring Change Event Offset/Wrap Counter. */
- __le16 off_wrap;
- /* Descriptor Ring Change Event Flags. */
- __le16 flags;
-};
-
-struct vring_packed_desc
-{
- /* Buffer Address. */
- __le64 addr;
- /* Buffer Length. */
- __le32 len;
- /* Buffer ID. */
- __le16 id;
- /* The flags depending on descriptor type. */
- __le16 flags;
-};
-
-struct vring_desc_state_packed
-{
- void *data; /* Data for callback. */
- struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
- uint16_t num; /* Descriptor list length. */
- uint16_t last; /* The last desc state in a list. */
-};
-
-struct vring_desc_extra
-{
- dma_addr_t addr; /* Buffer DMA addr. */
- uint32_t len; /* Buffer length. */
- uint16_t flags; /* Descriptor flags. */
- uint16_t next; /* The next desc state in a list. */
-};
-
-union pkt_type_t
-{
- uint8_t pkt_type;
- struct
- {
- uint8_t pkt_code:5;
- uint8_t pkt_src:1;
- uint8_t ip_type:2;
- }type_ctx;
-}__attribute__((packed));
-
-struct pi_net_hdr
-{
- uint8_t bttl_pi_len;
- union pkt_type_t pt;
- uint16_t vlan_id;
- uint32_t ipv6_exp_flags;
- uint16_t hdr_l3_offset;
- uint16_t hdr_l4_offset;
- uint8_t eth_port_id;
- uint8_t pkt_action_flag2;
- uint16_t pkt_action_flag1;
- uint8_t sa_index[8];
- uint8_t error_code[2];
- uint8_t rsv[6];
-}__attribute__((packed));
-
-struct pd_net_hdr_tx
-{
-#define TXCAP_STAG_INSERT_EN_BIT (1 << 14)
-#define TXCAP_CTAG_INSERT_EN_BIT (1 << 13)
-#define DELAY_STATISTICS_INSERT_EN_BIT (1 << 7)
- uint16_t ol_flag;
- uint8_t rsv;
- uint8_t panel_id;
- uint8_t tag_idx;
- uint8_t tag_data;
- uint16_t vfid;
- struct {
- uint16_t tpid;
- uint16_t tci;
- } svlan;
- struct {
- uint16_t tpid;
- uint16_t tci;
- } cvlan;
-}__attribute__((packed));
-
-
-struct pd_net_hdr_rx
-{
-#define RX_PD_HEAD_VLAN_STRIP_BIT (1 << 28)
- uint32_t flags;
- uint32_t rss_hash;
- uint32_t fd;
- uint16_t striped_stci;
- uint16_t striped_ctci;
- uint8_t tag_idx;
- uint8_t tag_data;
- uint16_t src_port;
- uint16_t outer_pkt_type;
- uint16_t inner_pkt_type;
-}__attribute__((packed));
-
-/* zxdh net header */
-struct zxdh_net_hdr
-{
- uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
- uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
- uint8_t num_buffers; //表示接收方向num buffers字段
- uint8_t rsv; //保留
-
- struct pi_net_hdr pi_hdr;
- struct pd_net_hdr_tx pd_hdr;
-}__attribute__((packed));
-
-struct zxdh_net_1588_hdr
-{
- uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
- uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
- uint8_t num_buffers; //表示接收方向num buffers字段
- uint8_t rsv; //保留
-
- struct pi_net_hdr pi_hdr;
- struct pd_net_hdr_tx pd_hdr;
-
- uint8_t ptp_type[3]; /* 低bit0-16预留,bit17-19 pkt_type, bit23 ptp_udp */
- uint8_t ts_offset;
- uint32_t cpu_tx;
- uint8_t port; /* egress_port/ingress_port, L4报文此字段无用 */
- uint8_t rsv1[4];
- uint8_t sec_1588_key[3];
-}__attribute__((packed));
-
-struct zxdh_net_hdr_rcv
-{
- uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
- uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
- uint8_t num_buffers; //表示接收方向num buffers字段
- uint8_t rsv; //保留
-
- struct pi_net_hdr pi_hdr;
- struct pd_net_hdr_rx pd_hdr;
-}__attribute__((packed));
-
-struct zxdh_net_1588_hdr_rcv
-{
- uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
- uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
- uint8_t num_buffers; //表示接收方向num buffers字段
- uint8_t rsv; //保留
-
- struct pi_net_hdr pi_hdr;
- struct pd_net_hdr_rx pd_hdr;
-
- uint8_t egress_port;
- uint8_t ptp_type[2]; /* 低bit0-8预留,bit9-11 pkt_type, bit 12-14预留,bit15 ptp_udp */
- uint8_t ts_offset;
- uint32_t rx_ts;
-}__attribute__((packed));
-
-#ifdef DEBUG
-/* For development, we want to crash whenever the ring is screwed. */
-#define BAD_RING(_vq, fmt, args...) \
- do { \
- LOG_ERR("%s:"fmt, (_vq)->vq.name, ##args); \
- BUG(); \
- } while (0)
-/* Caller is supposed to guarantee no reentry. */
-#define START_USE(_vq) \
- do { \
- if ((_vq)->in_use) \
- panic("%s:in_use = %i\n", \
- (_vq)->vq.name, (_vq)->in_use); \
- (_vq)->in_use = __LINE__; \
- } while (0)
-#define END_USE(_vq) \
- do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
-#define LAST_ADD_TIME_UPDATE(_vq) \
- do { \
- ktime_t now = ktime_get(); \
- \
- /* No kick or get, with .1 second between? Warn. */ \
- if ((_vq)->last_add_time_valid) \
- WARN_ON(ktime_to_ms(ktime_sub(now, \
- (_vq)->last_add_time)) > 100); \
- (_vq)->last_add_time = now; \
- (_vq)->last_add_time_valid = true; \
- } while (0)
-#define LAST_ADD_TIME_CHECK(_vq) \
- do { \
- if ((_vq)->last_add_time_valid) { \
- WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
- (_vq)->last_add_time)) > 100); \
- } \
- } while (0)
-#define LAST_ADD_TIME_INVALID(_vq) \
- ((_vq)->last_add_time_valid = false)
-#else
-#define BAD_RING(_vq, fmt, args...) \
- do { \
- LOG_ERR("%s:"fmt, (_vq)->vq.name, ##args); \
- (_vq)->broken = true; \
- } while (0)
-#define START_USE(vq)
-#define END_USE(vq)
-#define LAST_ADD_TIME_UPDATE(vq)
-#define LAST_ADD_TIME_CHECK(vq)
-#define LAST_ADD_TIME_INVALID(vq)
-#endif
-
-
-#define vqm_store_mb(weak_barriers, p, v) \
-do { \
- if (weak_barriers) { \
- virt_store_mb(*p, v); \
- } else { \
- WRITE_ONCE(*p, v); \
- mb(); \
- } \
-} while (0) \
-
-
-/* This is the PCI capability header: */
-struct zxdh_pci_cap
-{
- __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
- __u8 cap_next; /* Generic PCI field: next ptr. */
- __u8 cap_len; /* Generic PCI field: capability length */
- __u8 cfg_type; /* Identifies the structure. */
- __u8 bar; /* Where to find it. */
- __u8 id; /* Multiple capabilities of the same type */
- __u8 padding[2]; /* Pad to full dword. */
- __le32 offset; /* Offset within bar. */
- __le32 length; /* Length of the structure, in bytes. */
-};
-
-struct zxdh_pci_notify_cap
-{
- struct zxdh_pci_cap cap;
- __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
-};
-
-struct virtqueue
-{
- struct list_head list;
- void (*callback)(struct virtqueue *vq);
- const char *name;
- struct net_device *vdev;
- uint32_t index;
- uint32_t phy_index;
- uint32_t num_free;
- void *priv;
-};
-
-/* custom queue ring descriptors: 16 bytes. These can chain together via "next". */
-struct vring_desc
-{
- /* Address (guest-physical). */
- uint64_t addr;
- /* Length. */
- uint32_t len;
- /* The flags as indicated above. */
- uint16_t flags;
- /* We chain unused descriptors via this, too */
- uint16_t next;
-};
-
-struct vring_avail
-{
- uint16_t flags;
- uint16_t idx;
- uint16_t ring[];
-};
-
-/* u32 is used here for ids for padding reasons. */
-struct vring_used_elem
-{
- /* Index of start of used descriptor chain. */
- uint32_t id;
- /* Total length of the descriptor chain which was used (written to) */
- uint32_t len;
-};
-
-typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
- vring_used_elem_t;
-
-struct vring_used
-{
- uint16_t flags;
- uint16_t idx;
- vring_used_elem_t ring[];
-};
-
-typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
- vring_desc_t;
-typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
- vring_avail_t;
-typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
- vring_used_t;
-
-struct vring
-{
- uint32_t num;
-
- vring_desc_t *desc;
-
- vring_avail_t *avail;
-
- vring_used_t *used;
-};
-
-struct vring_virtqueue
-{
- struct virtqueue vq;
-
- /* Is this a packed ring? */
- bool packed_ring;
-
- /* Is DMA API used? */
- bool use_dma_api;
-
- /* Can we use weak barriers? */
- bool weak_barriers;
-
- /* Other side has made a mess, don't try any more. */
- bool broken;
-
- /* Host supports indirect buffers */
- bool indirect;
-
- /* Host publishes avail event idx */
- bool event;
-
- /* Head of free buffer list. */
- uint32_t free_head;
- /* Number we've added since last sync. */
- uint32_t num_added;
-
- /* Last used index we've seen. */
- uint16_t last_used_idx;
-
- /* Hint for event idx: already triggered no need to disable. */
- bool event_triggered;
-
- /* Available for packed ring */
- struct
- {
- /* Actual memory layout for this queue. */
- struct
- {
- uint32_t num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Device ring wrap counter. */
- bool used_wrap_counter;
-
- /* Avail used flags. */
- uint16_t avail_used_flags;
-
- /* Index of the next avail descriptor. */
- uint16_t next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- uint16_t event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
-
- /* How to notify other side. FIXME: commonalize hcalls! */
- bool (*notify)(struct virtqueue *vq);
-
- /* DMA, allocation, and size information */
- bool we_own_ring;
-
-#ifdef DEBUG
- /* They're supposed to lock for us. */
- uint32_t in_use;
-
- /* Figure out if their kicks are too delayed. */
- bool last_add_time_valid;
- ktime_t last_add_time;
-#endif
-};
-
-struct zxdh_pci_vq_info
-{
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
-
- /* channel num map 1-1 to vector*/
- unsigned channel_num;
-};
-
-struct virtnet_stat_desc
-{
- char desc[ETH_GSTRING_LEN];
- size_t offset;
-};
-
-struct virtnet_sq_stats
-{
- struct u64_stats_sync syncp;
- uint64_t packets;
- uint64_t bytes;
- uint64_t xdp_tx;
- uint64_t xdp_tx_drops;
- uint64_t kicks;
- uint64_t tx_timeouts;
-};
-
-struct virtnet_rq_stats
-{
- struct u64_stats_sync syncp;
- uint64_t packets;
- uint64_t bytes;
- uint64_t drops;
- uint64_t xdp_packets;
- uint64_t xdp_tx;
- uint64_t xdp_redirects;
- uint64_t xdp_drops;
- uint64_t kicks;
-};
-#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
-
-static const struct virtnet_stat_desc virtnet_sq_stats_desc[] =
-{
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
- { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
- { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
- { "kicks", VIRTNET_SQ_STAT(kicks) },
- { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
-};
-
-static const struct virtnet_stat_desc virtnet_rq_stats_desc[] =
-{
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
- { "drops", VIRTNET_RQ_STAT(drops) },
- { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
- { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
- { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
- { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
- { "kicks", VIRTNET_RQ_STAT(kicks) },
-};
-
-#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
-#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
-
-/* RX packet size EWMA. The average packet size is used to determine the packet
- * buffer size when refilling RX rings. As the entire RX ring may be refilled
- * at once, the weight is chosen so that the EWMA will be insensitive to short-
- * term, transient changes in packet size.
- */
-DECLARE_EWMA(pkt_len, 0, 64)
-
-
-/* Internal representation of a send virtqueue */
-struct send_queue
-{
- /* Virtqueue associated with this send _queue */
- struct virtqueue *vq;
-
- /* TX: fragments + linear part + custom queue header */
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
-
- /* Name of the send queue: output.$index */
- char name[40];
-
- struct virtnet_sq_stats stats;
-
- struct napi_struct napi;
-
- uint8_t hdr_buf[HDR_BUFFER_LEN];
-};
-
-/* Internal representation of a receive virtqueue */
-struct receive_queue
-{
- /* Virtqueue associated with this receive_queue */
- struct virtqueue *vq;
-
- struct napi_struct napi;
-
- struct bpf_prog __rcu *xdp_prog;
-
- struct virtnet_rq_stats stats;
-
- /* Chain pages by the private ptr. */
- struct page *pages;
-
- /* Average packet length for mergeable receive buffers. */
- struct ewma_pkt_len mrg_avg_pkt_len; //todo
-
- /* Page frag for packet buffer allocation. */
- struct page_frag alloc_frag;
-
- /* RX: fragments + linear part + custom queue header */
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
-
- /* Min single buffer size for mergeable buffers case. */
- uint32_t min_buf_len;
-
- /* Name of this receive queue: input.$index */
- char name[40];
-
- struct xdp_rxq_info xdp_rxq;
-};
-
-#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
-
-typedef void vq_callback_t(struct virtqueue *);
-
-
-void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index, uint32_t desc_num);
-void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi);
-void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq, struct napi_struct *napi);
-void virtnet_napi_tx_disable(struct napi_struct *napi);
-void refill_work(struct work_struct *work);
-int virtnet_poll(struct napi_struct *napi, int budget);
-int virtnet_poll_tx(struct napi_struct *napi, int budget);
-int32_t txq2vq(int32_t txq);
-int32_t rxq2vq(int32_t rxq);
-uint16_t vqm16_to_cpu(struct net_device *netdev, __vqm16 val);
-uint8_t vp_get_status(struct net_device *netdev);
-void vp_set_status(struct net_device *netdev, uint8_t status);
-void vp_set_reset_status(struct net_device *netdev, uint8_t status);
-void zxdh_add_status(struct net_device *netdev, uint32_t status);
-void zxdh_vp_enable_cbs(struct net_device *netdev);
-void zxdh_vp_disable_cbs(struct net_device *netdev);
-void zxdh_vp_reset(struct net_device *netdev);
-void vring_free_queue(struct net_device *netdev, size_t size, void *queue, dma_addr_t dma_handle);
-netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool try_fill_recv(struct net_device *netdev, struct receive_queue *rq, gfp_t gfp);
-inline struct zxdh_net_hdr *skb_vnet_hdr(struct sk_buff *skb);
-int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, gfp_t gfp);
-void virtqueue_disable_cb(struct virtqueue *_vq);
-void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, bool in_napi);
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq);
-bool virtqueue_kick_prepare_packed(struct virtqueue *_vq);
-bool virtqueue_notify(struct virtqueue *_vq);
-void zxdh_pf_features_init(struct net_device *netdev);
-bool zxdh_has_feature(struct net_device *netdev, uint32_t fbit);
-bool zxdh_has_status(struct net_device *netdev, uint32_t sbit);
-void zxdh_free_unused_bufs(struct net_device *netdev);
-void zxdh_free_receive_bufs(struct net_device *netdev);
-void zxdh_free_receive_page_frags(struct net_device *netdev);
-void zxdh_virtnet_del_vqs(struct net_device *netdev);
-void zxdh_vqs_uninit(struct net_device *netdev);
-int32_t zxdh_vqs_init(struct net_device *netdev);
-int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action, void *data);
-int32_t vq2rxq(struct virtqueue *vq);
-void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len);
-void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len, void **ctx);
-uint32_t virtqueue_get_vring_size(struct virtqueue *_vq);
-void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, int32_t processed);
-int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq,
- struct scatterlist *sg, uint32_t num,
- void *data,
- void *ctx,
- gfp_t gfp);
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+#ifndef __ZXDH_QUEUE_H__
+#define __ZXDH_QUEUE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/ethtool.h>
+#include <linux/average.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/scatterlist.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/dma-mapping.h>
+#include <net/xdp.h>
+
+/*========================================================
+ * 是否走1588处理流程(1588pd头宏开关):
+ * 在提交代码时候这里注释掉,在使用1588功能时需要打开。
+ *=========================================================*/
+// #define TIME_STAMP_1588
+
+/*========================================================
+ * 是否打开依赖PTP驱动的接口调用代码:
+ * 在提交代码时候这里注释掉,在实际调试时需要打开。
+ *=========================================================*/
+#define PTP_DRIVER_INTERFACE_EN
+
+/**========================================================
+ * 是否打开依赖os时间戳补丁接口的代码:
+ * 在提交代码时候这里注释掉,在实际调试时需要打开。
+ *=========================================================*/
+/* #define CGEL_TSTAMP_2_PATCH_EN TODO 补丁不可用*/
+
+/* 判断两个值是否相等,相等表示出错,打印信息后返回指定值 */
+#define CHECK_EQUAL_ERR(a, b, c, fmt, arg...) \
+ do { \
+ if (unlikely(a == b)) { \
+ LOG_ERR(fmt, ##arg); \
+ return c; \
+ } \
+ } while (0)
+
+/* 判断两个值是否不等,不等表示出错,打印信息后返回指定值 */
+#define CHECK_UNEQUAL_ERR(a, b, c, fmt, arg...) \
+ do { \
+ if (unlikely(a != b)) { \
+ LOG_ERR(fmt, ##arg); \
+ return c; \
+ } \
+ } while (0)
+
+#define ZXDH_MQ_PAIRS_NUM 8
+#define ZXDH_PQ_PAIRS_NUM 1
+#define ZXDH_MAX_PAIRS_NUM 128
+#define ZXDH_BOND_ETH_MQ_PAIRS_NUM 1
+#define ZXDH_MAX_QUEUES_NUM 4096
+#define ZXDH_PF_MAX_BAR_VAL 0x5
+#define ZXDH_PF_BAR0 0
+#define ZXDH_PF_MAX_DESC_NUM (32 * 1024)
+#define ZXDH_PF_MIN_DESC_NUM 1024
+#define ZXDH_INDIR_RQT_SIZE 256
+#define ZXDH_NET_HASH_KEY_SIZE 40
+
+#define VQM_HOST_BAR_OFFSET 0x0
+#define PHY_VQ_REG_OFFSET 0x5000
+#define LOCK_VQ_REG_OFFSET 0x90
+#define ZXDH_PHY_REG_BITS 32
+#define ZXDH_PF_LOCK_ENABLE_MASK 0x1
+#define ZXDH_PF_RELEASE_LOCK_VAL 0
+#define ZXDH_PF_GET_PHY_INDEX_DONE 1
+#define ZXDH_PF_GET_PHY_INDEX_BIT 1
+#define ZXDH_PF_WAIT_COUNT 2048
+#define ZXDH_PF_DELAY_US 10
+#define ZXDH_PF_RQ_TYPE 0
+#define ZXDH_PF_TQ_TYPE 1
+#define ZXDH_PF_POWER_INDEX2 2
+
+#define MSG_PAYLOAD_FIX_FIELD 8
+#define MSG_CHAN_PF_MODULE_ID 0
+#define MSG_PAYLOAD_TYPE_WRITE 1
+#define MSG_PAYLOAD_FIELD_MSG_CHL 2
+#define MSG_PAYLOAD_FIELD_DATA_CHL 3
+#define MSG_PAYLOAD_MSG_CHL_SLEN 4
+#define MSG_RECV_BUF_LEN 6
+
+#define ZXDH_MAC_NUM 6
+#define ZXDH_MAX_MTU 14000
+#define ZXDH_DEFAULT_MTU 1500
+
+/* The feature bitmap for zxdh net */
+#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define ZXDH_NET_F_STATUS 16 /* net_config.status available */
+#define ZXDH_NET_F_CTRL_VQ 17 /* Control channel available */
+#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */
+#define ZXDH_F_ANY_LAYOUT \
+ 27 /* Can the device handle any descriptor layout? \
+ */
+#define ZXDH_RING_F_INDIRECT_DESC \
+ 28 /* We support indirect buffer descriptors */
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field. */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field. */
+#define ZXDH_RING_F_EVENT_IDX 29
+
+#define ZXDH_F_VERSION_1 32 /* v1.0 compliant */
+
+/*
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
+ *
+ * Note the reverse polarity (compared to most other features),
+ * this is for compatibility with legacy systems.
+ */
+#define ZXDH_F_ACCESS_PLATFORM 33
+
+/* This feature indicates support for the packed virtqueue layout. */
+#define ZXDH_F_RING_PACKED 34
+
+/*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define ZXDH_F_ORDER_PLATFORM 36
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer. It's unreliable, so it's simply an optimization. Guest
+ * will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer. It's unreliable, so it's simply an
+ * optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if ZXDH_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
+/* Alignment requirements for vring elements */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
+#define MRG_CTX_HEADER_SHIFT 22
+
+/* FIXME: MTU in config. */
+#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define GOOD_COPY_LEN 128
+
+#define TX_PORT_NP 0x00
+#define TX_PORT_DRS 0x01
+#define TX_PORT_DTP 0x02
+#define HDR_2B_UNIT 2
+#define ENABLE_PI_FLAG_32B 0x1
+#define DISABLE_PI_FIELD_PARSE 0x80
+#define IPV4_TYPE 0x0
+#define IPV6_TYPE 0x1
+#define NOT_IP_TYPE 0x2
+#define PKT_SRC_NP 0x0
+#define PKT_SRC_CPU 0x1
+#define PCODE_IP 0x1
+#define PCODE_TCP 0x2
+#define PCODE_UDP 0x3
+#define INVALID_ETH_PORT_ID 0xff
+#define ETH_MTU_4B_UNIT 4
+#define IP_FRG_CSUM_FLAG 0x8000
+#define NOT_IP_FRG_CSUM_FLAG 0x6000
+#define TCP_FRG_CSUM_FLAG 0x24
+#define NOT_TCP_FRG_CSUM_FLAG 0x30
+#define HDR_2B_UNIT 2
+
+#define HDR_BUFFER_LEN 100
+#define IP_BASE_HLEN 20
+#define IPV6_BASE_HLEN 40
+#define TCP_BASE_HLEN 20
+
+#define OUTER_IP_CHECKSUM_OFFSET (12)
+#define INNER_IP_CHECKSUM_OFFSET (15)
+#define INNER_L4_CHECKSUM_OFFSET (2)
+#define PI_HDR_L3_CHKSUM_ERROR_CODE (0x1)
+#define PI_HDR_L4_CHKSUM_ERROR_CODE (0x60)
+#define OUTER_IP_CHKSUM_ERROT_CODE (0x20)
+
+/* PD header offload flags */
+#define PANELID_EN (1 << 15)
+
+/* PD header sk_prio */
+#define ZXDH_DCBNL_SET_SK_PRIO(sk_prio) ((0x7 & sk_prio) << 8)
+
+/*
+ * __vqm{16,32,64} have the following meaning:
+ * - __u{16,32,64} for zxdh devices in legacy mode, accessed in native endian
+ * - __le{16,32,64} for standard-compliant zxdh devices
+ */
+typedef __u16 __bitwise __vqm16;
+typedef __u32 __bitwise __vqm32;
+typedef __u64 __bitwise __vqm64;
+
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues Thus, we need at least 2 vectors for MSI. */
+enum {
+ VP_MSIX_CONFIG_VECTOR = 0,
+ VP_MSIX_VQ_VECTOR = 1,
+};
+
+struct vring_packed_desc_event {
+ /* Descriptor Ring Change Event Offset/Wrap Counter. */
+ __le16 off_wrap;
+ /* Descriptor Ring Change Event Flags. */
+ __le16 flags;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ __le64 addr;
+ /* Buffer Length. */
+ __le32 len;
+ /* Buffer ID. */
+ __le16 id;
+ /* The flags depending on descriptor type. */
+ __le16 flags;
+};
+
+struct vring_desc_state_packed {
+ void *data; /* Data for callback. */
+ struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
+ uint16_t num; /* Descriptor list length. */
+ uint16_t last; /* The last desc state in a list. */
+};
+
+struct vring_desc_extra {
+ dma_addr_t addr; /* Buffer DMA addr. */
+ uint32_t len; /* Buffer length. */
+ uint16_t flags; /* Descriptor flags. */
+ uint16_t next; /* The next desc state in a list. */
+};
+
+union pkt_type_t {
+ uint8_t pkt_type;
+ struct {
+ uint8_t pkt_code : 5;
+ uint8_t pkt_src : 1;
+ uint8_t ip_type : 2;
+ } type_ctx;
+} __attribute__((packed));
+
+struct pi_net_hdr {
+ uint8_t bttl_pi_len;
+ union pkt_type_t pt;
+ uint16_t vlan_id;
+ uint32_t ipv6_exp_flags;
+ uint16_t hdr_l3_offset;
+ uint16_t hdr_l4_offset;
+ uint8_t eth_port_id;
+ uint8_t pkt_action_flag2;
+ uint16_t pkt_action_flag1;
+ uint8_t sa_index[8];
+ uint8_t error_code[2];
+ uint8_t rsv[6];
+} __attribute__((packed));
+
+struct pd_net_hdr_tx {
+#define TXCAP_STAG_INSERT_EN_BIT (1 << 14)
+#define TXCAP_CTAG_INSERT_EN_BIT (1 << 13)
+#define DELAY_STATISTICS_INSERT_EN_BIT (1 << 7)
+ uint16_t ol_flag;
+ uint8_t rsv;
+ uint8_t panel_id;
+ uint8_t tag_idx;
+ uint8_t tag_data;
+ uint16_t vfid;
+ struct {
+ uint16_t tpid;
+ uint16_t tci;
+ } svlan;
+ struct {
+ uint16_t tpid;
+ uint16_t tci;
+ } cvlan;
+} __attribute__((packed));
+
+struct pd_net_hdr_rx {
+#define RX_PD_HEAD_VLAN_STRIP_BIT (1 << 28)
+ uint32_t flags;
+ uint32_t rss_hash;
+ uint32_t fd;
+ uint16_t striped_stci;
+ uint16_t striped_ctci;
+ uint8_t tag_idx;
+ uint8_t tag_data;
+ uint16_t src_port;
+ uint16_t outer_pkt_type;
+ uint16_t inner_pkt_type;
+} __attribute__((packed));
+
+/* zxdh net header */
+struct zxdh_net_hdr {
+ uint8_t tx_port; // bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
+ uint8_t pd_len; // bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
+ uint8_t num_buffers; //表示接收方向num buffers字段
+ uint8_t rsv; //保留
+
+ struct pi_net_hdr pi_hdr;
+ struct pd_net_hdr_tx pd_hdr;
+} __attribute__((packed));
+
+struct zxdh_net_1588_hdr {
+ uint8_t tx_port; // bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
+ uint8_t pd_len; // bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
+ uint8_t num_buffers; //表示接收方向num buffers字段
+ uint8_t rsv; //保留
+
+ struct pi_net_hdr pi_hdr;
+ struct pd_net_hdr_tx pd_hdr;
+
+ uint8_t ptp_type[3]; /* 低bit0-16预留,bit17-19 pkt_type, bit23 ptp_udp */
+ uint8_t ts_offset;
+ uint32_t cpu_tx;
+ uint8_t port; /* egress_port/ingress_port, L4报文此字段无用 */
+ uint8_t rsv1[4];
+ uint8_t sec_1588_key[3];
+} __attribute__((packed));
+
+struct zxdh_net_hdr_rcv {
+ uint8_t tx_port; // bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
+ uint8_t pd_len; // bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
+ uint8_t num_buffers; //表示接收方向num buffers字段
+ uint8_t rsv; //保留
+
+ struct pi_net_hdr pi_hdr;
+ struct pd_net_hdr_rx pd_hdr;
+} __attribute__((packed));
+
+struct zxdh_net_1588_hdr_rcv {
+ uint8_t tx_port; // bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP
+ uint8_t pd_len; // bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位
+ uint8_t num_buffers; //表示接收方向num buffers字段
+ uint8_t rsv; //保留
+
+ struct pi_net_hdr pi_hdr;
+ struct pd_net_hdr_rx pd_hdr;
+
+ uint8_t egress_port;
+ /* 低bit0-8预留,bit9-11 pkt_type, bit 12-14预留,bit15 ptp_udp */
+ uint8_t ptp_type[2];
+ uint8_t ts_offset;
+ uint32_t rx_ts;
+} __attribute__((packed));
+
+#ifdef DEBUG
+/* For development, we want to crash whenever the ring is screwed. */
+#define BAD_RING(_vq, fmt, args...) \
+ do { \
+ LOG_ERR("%s:" fmt, (_vq)->vq.name, ##args); \
+ BUG(); \
+ } while (0)
+/* Caller is supposed to guarantee no reentry. */
+#define START_USE(_vq) \
+ do { \
+ if ((_vq)->in_use) \
+ panic("%s:in_use = %i\n", (_vq)->vq.name, (_vq)->in_use); \
+ (_vq)->in_use = __LINE__; \
+ } while (0)
+#define END_USE(_vq) \
+ do { \
+ BUG_ON(!(_vq)->in_use); \
+ (_vq)->in_use = 0; \
+ } while (0)
+#define LAST_ADD_TIME_UPDATE(_vq) \
+ do { \
+ ktime_t now = ktime_get(); \
+ /* No kick or get, with .1 second between? Warn. */ \
+ if ((_vq)->last_add_time_valid) \
+ WARN_ON(ktime_to_ms(ktime_sub(now, (_vq)->last_add_time)) > 100); \
+ (_vq)->last_add_time = now; \
+ (_vq)->last_add_time_valid = true; \
+ } while (0)
+#define LAST_ADD_TIME_CHECK(_vq) \
+ do { \
+ if ((_vq)->last_add_time_valid) { \
+ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
+ (_vq)->last_add_time)) > 100); \
+ } \
+ } while (0)
+#define LAST_ADD_TIME_INVALID(_vq) ((_vq)->last_add_time_valid = false)
+#else
+#define BAD_RING(_vq, fmt, args...) \
+ do { \
+ LOG_ERR("%s:" fmt, (_vq)->vq.name, ##args); \
+ (_vq)->broken = true; \
+ } while (0)
+#define START_USE(vq)
+#define END_USE(vq)
+#define LAST_ADD_TIME_UPDATE(vq)
+#define LAST_ADD_TIME_CHECK(vq)
+#define LAST_ADD_TIME_INVALID(vq)
+#endif
+
+#define vqm_store_mb(weak_barriers, p, v) \
+ do { \
+ if (weak_barriers) { \
+ virt_store_mb(*p, v); \
+ } else { \
+ WRITE_ONCE(*p, v); \
+ mb(); \
+ } \
+ } while (0)
+
+/* This is the PCI capability header: */
+struct zxdh_pci_cap {
+ __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ __u8 cap_next; /* Generic PCI field: next ptr. */
+ __u8 cap_len; /* Generic PCI field: capability length */
+ __u8 cfg_type; /* Identifies the structure. */
+ __u8 bar; /* Where to find it. */
+ __u8 id; /* Multiple capabilities of the same type */
+ __u8 padding[2]; /* Pad to full dword. */
+ __le32 offset; /* Offset within bar. */
+ __le32 length; /* Length of the structure, in bytes. */
+};
+
+struct zxdh_pci_notify_cap {
+ struct zxdh_pci_cap cap;
+ __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+struct virtqueue {
+ struct list_head list;
+ void (*callback)(struct virtqueue *vq);
+ const char *name;
+ struct net_device *vdev;
+ uint32_t index;
+ uint32_t phy_index;
+ uint32_t num_free;
+ void *priv;
+};
+
+/* custom queue ring descriptors: 16 bytes. These can chain together via "next".
+ */
+struct vring_desc {
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+};
+
+/* u32 is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was used (written to) */
+ uint32_t len;
+};
+
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+vring_used_elem_t;
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ vring_used_elem_t ring[];
+};
+
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+vring_used_t;
+
+struct vring {
+ uint32_t num;
+
+ vring_desc_t *desc;
+
+ vring_avail_t *avail;
+
+ vring_used_t *used;
+};
+
+struct vring_virtqueue {
+ struct virtqueue vq;
+
+ /* Is this a packed ring? */
+ bool packed_ring;
+
+ /* Is DMA API used? */
+ bool use_dma_api;
+
+ /* Can we use weak barriers? */
+ bool weak_barriers;
+
+ /* Other side has made a mess, don't try any more. */
+ bool broken;
+
+ /* Host supports indirect buffers */
+ bool indirect;
+
+ /* Host publishes avail event idx */
+ bool event;
+
+ /* Head of free buffer list. */
+ uint32_t free_head;
+ /* Number we've added since last sync. */
+ uint32_t num_added;
+
+ /* Last used index we've seen. */
+ uint16_t last_used_idx;
+
+ /* Hint for event idx: already triggered no need to disable. */
+ bool event_triggered;
+
+ /* Available for packed ring */
+ struct {
+ /* Actual memory layout for this queue. */
+ struct {
+ uint32_t num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Device ring wrap counter. */
+ bool used_wrap_counter;
+
+ /* Avail used flags. */
+ uint16_t avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ uint16_t next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ uint16_t event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+ } packed;
+
+ /* How to notify other side. FIXME: commonalize hcalls! */
+ bool (*notify)(struct virtqueue *vq);
+
+ /* DMA, allocation, and size information */
+ bool we_own_ring;
+
+#ifdef DEBUG
+ /* They're supposed to lock for us. */
+ uint32_t in_use;
+
+ /* Figure out if their kicks are too delayed. */
+ bool last_add_time_valid;
+ ktime_t last_add_time;
+#endif
+};
+
+struct zxdh_pci_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+
+ /* channel num map 1-1 to vector*/
+ unsigned channel_num;
+};
+
+struct virtnet_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t xdp_tx;
+ uint64_t xdp_tx_drops;
+ uint64_t kicks;
+ uint64_t tx_timeouts;
+};
+
+struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t drops;
+ uint64_t xdp_packets;
+ uint64_t xdp_tx;
+ uint64_t xdp_redirects;
+ uint64_t xdp_drops;
+ uint64_t kicks;
+};
+#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
+#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
+ { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
+ { "kicks", VIRTNET_SQ_STAT(kicks) },
+ { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "drops", VIRTNET_RQ_STAT(drops) },
+ { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
+ { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
+ { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
+ { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
+ { "kicks", VIRTNET_RQ_STAT(kicks) },
+};
+
+#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
+#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
+ */
+DECLARE_EWMA(pkt_len, 0, 64)
+
+/* Internal representation of a send virtqueue */
+struct send_queue {
+ /* Virtqueue associated with this send _queue */
+ struct virtqueue *vq;
+
+ /* TX: fragments + linear part + custom queue header */
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+ /* Name of the send queue: output.$index */
+ char name[40];
+
+ struct virtnet_sq_stats stats;
+
+ struct napi_struct napi;
+
+ uint8_t hdr_buf[HDR_BUFFER_LEN];
+};
+
+/* Internal representation of a receive virtqueue */
+struct receive_queue {
+ /* Virtqueue associated with this receive_queue */
+ struct virtqueue *vq;
+
+ struct napi_struct napi;
+
+ struct bpf_prog __rcu *xdp_prog;
+
+ struct virtnet_rq_stats stats;
+
+ /* Chain pages by the private ptr. */
+ struct page *pages;
+
+ /* Average packet length for mergeable receive buffers. */
+ struct ewma_pkt_len mrg_avg_pkt_len; // todo
+
+ /* Page frag for packet buffer allocation. */
+ struct page_frag alloc_frag;
+
+ /* RX: fragments + linear part + custom queue header */
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+ /* Min single buffer size for mergeable buffers case. */
+ uint32_t min_buf_len;
+
+ /* Name of this receive queue: input.$index */
+ char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
+};
+
+#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+
+typedef void vq_callback_t(struct virtqueue *);
+
+void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index,
+ uint32_t desc_num);
+void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi);
+void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq,
+ struct napi_struct *napi);
+void virtnet_napi_tx_disable(struct napi_struct *napi);
+void refill_work(struct work_struct *work);
+int virtnet_poll(struct napi_struct *napi, int budget);
+int virtnet_poll_tx(struct napi_struct *napi, int budget);
+int32_t txq2vq(int32_t txq);
+int32_t rxq2vq(int32_t rxq);
+uint16_t vqm16_to_cpu(struct net_device *netdev, __vqm16 val);
+uint8_t vp_get_status(struct net_device *netdev);
+void vp_set_status(struct net_device *netdev, uint8_t status);
+void vp_set_reset_status(struct net_device *netdev, uint8_t status);
+void zxdh_add_status(struct net_device *netdev, uint32_t status);
+void zxdh_vp_enable_cbs(struct net_device *netdev);
+void zxdh_vp_disable_cbs(struct net_device *netdev);
+void zxdh_vp_reset(struct net_device *netdev);
+void vring_free_queue(struct net_device *netdev, size_t size, void *queue,
+ dma_addr_t dma_handle);
+netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool try_fill_recv(struct net_device *netdev, struct receive_queue *rq,
+ gfp_t gfp);
+inline struct zxdh_net_hdr *skb_vnet_hdr(struct sk_buff *skb);
+int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg,
+ uint32_t num, void *data, gfp_t gfp);
+void virtqueue_disable_cb(struct virtqueue *_vq);
+void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq,
+ bool in_napi);
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq);
+bool virtqueue_kick_prepare_packed(struct virtqueue *_vq);
+bool virtqueue_notify(struct virtqueue *_vq);
+void zxdh_pf_features_init(struct net_device *netdev);
+bool zxdh_has_feature(struct net_device *netdev, uint32_t fbit);
+bool zxdh_has_status(struct net_device *netdev, uint32_t sbit);
+void zxdh_free_unused_bufs(struct net_device *netdev);
+void zxdh_free_receive_bufs(struct net_device *netdev);
+void zxdh_free_receive_page_frags(struct net_device *netdev);
+void zxdh_virtnet_del_vqs(struct net_device *netdev);
+void zxdh_vqs_uninit(struct net_device *netdev);
+int32_t zxdh_vqs_init(struct net_device *netdev);
+int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action,
+ void *data);
+int32_t vq2rxq(struct virtqueue *vq);
+void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len);
+void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len,
+ void **ctx);
+uint32_t virtqueue_get_vring_size(struct virtqueue *_vq);
+void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq,
+ int32_t processed);
+int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg,
+ uint32_t num, void *data, void *ctx, gfp_t gfp);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c b/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c
index 3794403df3383bac7aa3c0158f1b80aa4edc00ea..2ce0f226f1ec22aa731e18d3550295d178f883d4 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c
@@ -22,1363 +22,1316 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MAX_DRV_VERSION_LEN 32
#define PCI_BUS(PCI_BDF) ((PCI_BDF >> 8) & 0xff)
-#define ZXDH_EN_LINK_MODE_ADD(ks, name, sup) \
-do \
-{ \
- if (sup) \
- { \
- ethtool_link_ksettings_add_link_mode((ks), supported, name); \
- } \
- else \
- { \
- ethtool_link_ksettings_add_link_mode((ks), advertising, name); \
- } \
-} while (0)
-
-#define ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, bit, sup) \
- sup ? ((en_dev->supported_speed_modes) & BIT(bit)) == BIT(bit) : \
- ((en_dev->advertising_speed_modes) & BIT(bit)) == BIT(bit)
-
-#define GET_FEC_LINK_FLAG (0)
-#define GET_FEC_CFG_FLAG (1)
-#define GET_FEC_CAP_FLAG (2)
-
-static const uint32_t fec_2_ethtool_fecparam[] =
-{
- [SPM_FEC_NONE] = ETHTOOL_FEC_OFF,
- [SPM_FEC_BASER] = ETHTOOL_FEC_BASER,
- [SPM_FEC_RS528] = ETHTOOL_FEC_RS,
- [SPM_FEC_RS544] = ETHTOOL_FEC_RS,
+#define ZXDH_EN_LINK_MODE_ADD(ks, name, sup) \
+ do { \
+ if (sup) { \
+ ethtool_link_ksettings_add_link_mode((ks), supported, name); \
+ } else { \
+ ethtool_link_ksettings_add_link_mode((ks), advertising, name); \
+ } \
+ } while (0)
+
+#define ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, bit, sup) \
+ (sup ? ((en_dev->supported_speed_modes) & BIT(bit)) == BIT(bit) : \
+ ((en_dev->advertising_speed_modes) & BIT(bit)) == BIT(bit))
+
+#define GET_FEC_LINK_FLAG (0)
+#define GET_FEC_CFG_FLAG (1)
+#define GET_FEC_CAP_FLAG (2)
+
+static const uint32_t fec_2_ethtool_fecparam[] = {
+ [SPM_FEC_NONE] = ETHTOOL_FEC_OFF,
+ [SPM_FEC_BASER] = ETHTOOL_FEC_BASER,
+ [SPM_FEC_RS528] = ETHTOOL_FEC_RS,
+ [SPM_FEC_RS544] = ETHTOOL_FEC_RS,
};
-static uint32_t zxdh_en_fec_to_ethtool_fecparam(uint32_t fec_mode, uint32_t flag)
+static uint32_t zxdh_en_fec_to_ethtool_fecparam(uint32_t fec_mode,
+ uint32_t flag)
{
- int32_t i;
- uint32_t fecparam_cap = 0;
+ int32_t i;
+ uint32_t fecparam_cap = 0;
- if(!fec_mode)
- {
- if(flag == GET_FEC_LINK_FLAG)
- return ETHTOOL_FEC_NONE;
- else if(flag == GET_FEC_CFG_FLAG)
- return ETHTOOL_FEC_AUTO;
- }
+ if (!fec_mode) {
+ if (flag == GET_FEC_LINK_FLAG)
+ return ETHTOOL_FEC_NONE;
+ else if (flag == GET_FEC_CFG_FLAG)
+ return ETHTOOL_FEC_AUTO;
+ }
- for(i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++)
- {
- if(fec_mode & BIT(i))
- {
- fecparam_cap |= fec_2_ethtool_fecparam[i];
- }
- }
+ for (i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++) {
+ if (fec_mode & BIT(i)) {
+ fecparam_cap |= fec_2_ethtool_fecparam[i];
+ }
+ }
- if(flag == GET_FEC_CAP_FLAG)
- fecparam_cap |= ETHTOOL_FEC_AUTO;
+ if (flag == GET_FEC_CAP_FLAG)
+ fecparam_cap |= ETHTOOL_FEC_AUTO;
- return fecparam_cap;
+ return fecparam_cap;
}
static void zxdh_en_fec_to_link_ksettings(uint32_t fec_mode,
- struct ethtool_link_ksettings *ks,
- bool sup)
+ struct ethtool_link_ksettings *ks,
+ bool sup)
{
- if(fec_mode & BIT(SPM_FEC_NONE))
- ZXDH_EN_LINK_MODE_ADD(ks, FEC_NONE, sup);
- if(fec_mode & BIT(SPM_FEC_BASER))
- ZXDH_EN_LINK_MODE_ADD(ks, FEC_BASER, sup);
- if(fec_mode & BIT(SPM_FEC_RS528) ||
- fec_mode & BIT(SPM_FEC_RS544))
- ZXDH_EN_LINK_MODE_ADD(ks, FEC_RS, sup);
+ if (fec_mode & BIT(SPM_FEC_NONE))
+ ZXDH_EN_LINK_MODE_ADD(ks, FEC_NONE, sup);
+ if (fec_mode & BIT(SPM_FEC_BASER))
+ ZXDH_EN_LINK_MODE_ADD(ks, FEC_BASER, sup);
+ if (fec_mode & BIT(SPM_FEC_RS528) || fec_mode & BIT(SPM_FEC_RS544))
+ ZXDH_EN_LINK_MODE_ADD(ks, FEC_RS, sup);
}
static void zxdh_en_fec_link_ksettings_get(struct zxdh_en_device *en_dev,
- struct ethtool_link_ksettings *ks)
+ struct ethtool_link_ksettings *ks)
{
- int32_t ret;
- uint32_t fec_cap;
- uint32_t fec_active;
+ int32_t ret;
+ uint32_t fec_cap;
+ uint32_t fec_active;
- ret = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, &fec_active);
- if(ret)
- {
- LOG_ERR("zxdh_en_fec_mode_get failed!\n");
- return;
- }
- //LOG_INFO("fec_cap=0x%x, fec_active=0x%x\n", fec_cap, fec_active);
+ ret = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, &fec_active);
+ if (ret) {
+ LOG_ERR("zxdh_en_fec_mode_get failed!\n");
+ return;
+ }
+ // LOG_INFO("fec_cap=0x%x, fec_active=0x%x\n", fec_cap, fec_active);
- zxdh_en_fec_to_link_ksettings(fec_cap, ks, true);
- zxdh_en_fec_to_link_ksettings(fec_active, ks, false);
+ zxdh_en_fec_to_link_ksettings(fec_cap, ks, true);
+ zxdh_en_fec_to_link_ksettings(fec_active, ks, false);
- return;
+ return;
}
static void zxdh_en_pause_link_ksettings_get(struct zxdh_en_device *en_dev,
- struct ethtool_link_ksettings *ks)
-{
- int32_t err;
- uint32_t fc_mode;
-
- err = zxdh_en_fc_mode_get(en_dev, &fc_mode);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fc_mode_get failed!\n");
- return;
- }
-
- ZXDH_EN_LINK_MODE_ADD(ks, Pause, true);
-
- if(fc_mode == BIT(SPM_FC_PAUSE_FULL))
- ZXDH_EN_LINK_MODE_ADD(ks, Pause, false);
- else if(fc_mode == BIT(SPM_FC_PAUSE_RX) || fc_mode == BIT(SPM_FC_PAUSE_TX))
- ZXDH_EN_LINK_MODE_ADD(ks, Asym_Pause, false);
-
- return;
-}
-
-static void zxdh_en_phytype_to_ethtool(struct zxdh_en_device *en_dev, struct ethtool_link_ksettings *ks, bool sup)
-{
- //0x20000020020
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_1G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 1000baseT_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 1000baseKX_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 1000baseX_Full, sup);
- }
-
- //0x5C0000081000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_10G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseT_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseKR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseCR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseSR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseLR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 10000baseER_Full, sup);
- }
-
- //0x380000000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_25G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 25000baseCR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 25000baseKR_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 25000baseSR_Full, sup);
- }
-
- //0x10C00000000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_50G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 50000baseCR2_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 50000baseKR2_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 50000baseSR2_Full, sup);
- }
-
- //0x7800000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_40G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 40000baseKR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 40000baseCR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 40000baseSR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 40000baseLR4_Full, sup);
- }
-
- //0xF000000000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_100G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR4_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR4_ER4_Full, sup);
- }
-
- //0x1E00000000000000
- if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_2X_100G, sup))
- {
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR2_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR2_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR2_Full, sup);
- ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR2_ER2_FR2_Full, sup);
- }
-
- return;
-}
-
-static void zxdh_en_ethtool_to_phytype(struct ethtool_link_ksettings *ks, uint32_t *speed_modes)
-{
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseT_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_1X_1G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseT_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_1X_10G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseCR_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_1X_25G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 50000baseCR2_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_1X_50G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseKR4_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_4X_40G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 100000baseKR4_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_4X_100G);
- }
-
- if (ethtool_link_ksettings_test_link_mode(ks, advertising, 100000baseKR2_Full))
- {
- *speed_modes |= BIT(SPM_SPEED_2X_100G);
- }
-
- return;
-}
-
-static int32_t zxdh_en_speed_to_speed_modes(uint32_t speed, uint32_t *speed_modes, uint32_t sup_modes)
-{
- switch (speed)
- {
- case SPEED_1000:
- {
- *speed_modes |= BIT(SPM_SPEED_1X_1G);
- break;
- }
- case SPEED_10000:
- {
- *speed_modes |= BIT(SPM_SPEED_1X_10G);
- break;
- }
- case SPEED_25000:
- {
- *speed_modes |= BIT(SPM_SPEED_1X_25G);
- break;
- }
- case SPEED_40000:
- {
- *speed_modes |= BIT(SPM_SPEED_4X_40G);
- break;
- }
- case SPEED_50000:
- {
- *speed_modes |= BIT(SPM_SPEED_1X_50G);
- break;
- }
- case SPEED_100000:
- {
- *speed_modes |= BIT(SPM_SPEED_2X_100G);
- *speed_modes |= BIT(SPM_SPEED_4X_100G);
- break;
- }
- default:
- {
- return -EINVAL;
- }
- }
-
- *speed_modes &= sup_modes;
- if (*speed_modes == 0)
- {
- return -EINVAL;
- }
-
- return 0;
+ struct ethtool_link_ksettings *ks)
+{
+ int32_t err;
+ uint32_t fc_mode;
+
+ err = zxdh_en_fc_mode_get(en_dev, &fc_mode);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fc_mode_get failed!\n");
+ return;
+ }
+
+ ZXDH_EN_LINK_MODE_ADD(ks, Pause, true);
+
+ if (fc_mode == BIT(SPM_FC_PAUSE_FULL))
+ ZXDH_EN_LINK_MODE_ADD(ks, Pause, false);
+ else if (fc_mode == BIT(SPM_FC_PAUSE_RX) || fc_mode == BIT(SPM_FC_PAUSE_TX))
+ ZXDH_EN_LINK_MODE_ADD(ks, Asym_Pause, false);
+
+ return;
+}
+
+static void zxdh_en_phytype_to_ethtool(struct zxdh_en_device *en_dev,
+ struct ethtool_link_ksettings *ks,
+ bool sup)
+{
+ // 0x20000020020
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_1G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 1000baseT_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 1000baseKX_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 1000baseX_Full, sup);
+ }
+
+ // 0x5C0000081000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_10G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseT_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseKR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseCR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseSR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseLR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 10000baseER_Full, sup);
+ }
+
+ // 0x380000000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_25G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 25000baseCR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 25000baseKR_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 25000baseSR_Full, sup);
+ }
+
+ // 0x10C00000000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_50G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 50000baseCR2_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 50000baseKR2_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 50000baseSR2_Full, sup);
+ }
+
+ // 0x7800000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_40G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 40000baseKR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 40000baseCR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 40000baseSR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 40000baseLR4_Full, sup);
+ }
+
+ // 0xF000000000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_100G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR4_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR4_ER4_Full, sup);
+ }
+
+ // 0x1E00000000000000
+ if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_2X_100G, sup)) {
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR2_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR2_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR2_Full, sup);
+ ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR2_ER2_FR2_Full, sup);
+ }
+
+ return;
+}
+
+static void zxdh_en_ethtool_to_phytype(struct ethtool_link_ksettings *ks,
+ uint32_t *speed_modes)
+{
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_1X_1G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_1X_10G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_1X_25G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseCR2_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_1X_50G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_4X_40G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR4_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_4X_100G);
+ }
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR2_Full)) {
+ *speed_modes |= BIT(SPM_SPEED_2X_100G);
+ }
+
+ return;
+}
+
+static int32_t zxdh_en_speed_to_speed_modes(uint32_t speed,
+ uint32_t *speed_modes,
+ uint32_t sup_modes)
+{
+ switch (speed) {
+ case SPEED_1000: {
+ *speed_modes |= BIT(SPM_SPEED_1X_1G);
+ break;
+ }
+ case SPEED_10000: {
+ *speed_modes |= BIT(SPM_SPEED_1X_10G);
+ break;
+ }
+ case SPEED_25000: {
+ *speed_modes |= BIT(SPM_SPEED_1X_25G);
+ break;
+ }
+ case SPEED_40000: {
+ *speed_modes |= BIT(SPM_SPEED_4X_40G);
+ break;
+ }
+ case SPEED_50000: {
+ *speed_modes |= BIT(SPM_SPEED_1X_50G);
+ break;
+ }
+ case SPEED_100000: {
+ *speed_modes |= BIT(SPM_SPEED_2X_100G);
+ *speed_modes |= BIT(SPM_SPEED_4X_100G);
+ break;
+ }
+ default: {
+ return -EINVAL;
+ }
+ }
+
+ *speed_modes &= sup_modes;
+ if (*speed_modes == 0) {
+ return -EINVAL;
+ }
+
+ return 0;
}
static int32_t zxdh_en_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- ethtool_link_ksettings_zero_link_mode(ks, supported);
- ethtool_link_ksettings_zero_link_mode(ks, advertising);
-
- ks->base.port = PORT_FIBRE;
- ks->base.autoneg = en_dev->autoneg_enable;
- ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-
- if (en_dev->autoneg_enable == AUTONEG_ENABLE)
- {
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- }
-
- ks->base.speed = en_dev->speed;
- if ((!netif_running(netdev)) || (!netif_carrier_ok(netdev)))
- {
- ks->base.speed = SPEED_UNKNOWN;
- }
- ks->base.duplex = ks->base.speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
-
- zxdh_en_phytype_to_ethtool(en_dev, ks, true);
- zxdh_en_phytype_to_ethtool(en_dev, ks, false);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- zxdh_en_fec_link_ksettings_get(en_dev, ks);
- zxdh_en_pause_link_ksettings_get(en_dev, ks);
- }
-
- return 0;
-}
-
-static int32_t zxdh_en_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *ks)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- struct ethtool_link_ksettings safe_ks;
- uint32_t advertising_link_modes = 0;
- uint32_t off_speed_modes = 0;
- uint32_t on_speed_modes = 0;
- int32_t err = 0;
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return 0;
- }
- if (ks->base.duplex == DUPLEX_HALF)
- {
- return -ENAVAIL;
- }
-
- memset(&safe_ks, 0, sizeof(safe_ks));
- ethtool_link_ksettings_zero_link_mode(&safe_ks, supported);
- ethtool_link_ksettings_zero_link_mode(&safe_ks, advertising);
-
- if (ks->base.autoneg == AUTONEG_DISABLE)
- {
- err = zxdh_en_speed_to_speed_modes(ks->base.speed, &off_speed_modes,
- en_dev->supported_speed_modes);
- LOG_DEBUG("set speed: %d, off_speed_modes: 0x%x\n", ks->base.speed, off_speed_modes);
- if (err != 0)
- {
- LOG_ERR("zxdh_en_speed_to_speed_mode failed: %d\n", err);
- return -EOPNOTSUPP;
- }
-
- advertising_link_modes = off_speed_modes;
- }
- else
- {
- zxdh_en_phytype_to_ethtool(en_dev, &safe_ks, true);
- if (!bitmap_intersects(ks->link_modes.advertising,
- safe_ks.link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS))
- {
- LOG_ERR("link_mode not supported\n");
- return -EOPNOTSUPP;
- }
-
- bitmap_and(safe_ks.link_modes.advertising, ks->link_modes.advertising,
- safe_ks.link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- zxdh_en_ethtool_to_phytype(&safe_ks, &on_speed_modes);
- LOG_DEBUG("on_speed_modes: 0x%x\n", on_speed_modes);
- advertising_link_modes = on_speed_modes;
- }
-
- if ((advertising_link_modes == en_dev->advertising_speed_modes) &&
- (ks->base.autoneg == en_dev->autoneg_enable))
- {
- LOG_DEBUG("nothing changed\n");
- return 0;
- }
-
- safe_ks.base.speed = en_dev->speed;
- en_dev->speed = SPEED_UNKNOWN;
- LOG_INFO("autoneg %d, link_modes: 0x%x\n", ks->base.autoneg, advertising_link_modes);
- err = zxdh_en_autoneg_set(en_dev, ks->base.autoneg, advertising_link_modes);
- if (err != 0)
- {
- en_dev->speed = safe_ks.base.speed;
- LOG_ERR("zxdh_en_autoneg_set failed: %d\n", err);
- return err;
- }
- else
- {
- en_dev->autoneg_enable = ks->base.autoneg;
- en_dev->advertising_speed_modes = advertising_link_modes;
- en_dev->link_up = false;
- netif_carrier_off(netdev);
- en_dev->ops->set_pf_link_up(en_dev->parent, FALSE); //TODO:是否需要更新pf信息?
- queue_work(en_priv->events->wq, &en_priv->edev.vf_link_info_update_work);
- queue_work(en_priv->events->wq, &en_priv->edev.link_info_irq_update_np_work);
- }
-
- return err;
+ struct ethtool_link_ksettings *ks)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ ks->base.port = PORT_FIBRE;
+ ks->base.autoneg = en_dev->autoneg_enable;
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+
+ if (en_dev->autoneg_enable == AUTONEG_ENABLE) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ }
+
+ ks->base.speed = en_dev->speed;
+ if ((!netif_running(netdev)) || (!netif_carrier_ok(netdev))) {
+ ks->base.speed = SPEED_UNKNOWN;
+ }
+ ks->base.duplex = ks->base.speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN :
+ DUPLEX_FULL;
+
+ zxdh_en_phytype_to_ethtool(en_dev, ks, true);
+ zxdh_en_phytype_to_ethtool(en_dev, ks, false);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ zxdh_en_fec_link_ksettings_get(en_dev, ks);
+ zxdh_en_pause_link_ksettings_get(en_dev, ks);
+ }
+
+ return 0;
+}
+
+static int32_t
+zxdh_en_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct ethtool_link_ksettings safe_ks;
+ uint32_t advertising_link_modes = 0;
+ uint32_t off_speed_modes = 0;
+ uint32_t on_speed_modes = 0;
+ int32_t err = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return 0;
+ }
+ if (ks->base.duplex == DUPLEX_HALF) {
+ return -ENAVAIL;
+ }
+
+ memset(&safe_ks, 0, sizeof(safe_ks));
+ ethtool_link_ksettings_zero_link_mode(&safe_ks, supported);
+ ethtool_link_ksettings_zero_link_mode(&safe_ks, advertising);
+
+ if (ks->base.autoneg == AUTONEG_DISABLE) {
+ err = zxdh_en_speed_to_speed_modes(ks->base.speed, &off_speed_modes,
+ en_dev->supported_speed_modes);
+ LOG_DEBUG("set speed: %d, off_speed_modes: 0x%x\n", ks->base.speed,
+ off_speed_modes);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_speed_to_speed_mode failed: %d\n", err);
+ return -EOPNOTSUPP;
+ }
+
+ advertising_link_modes = off_speed_modes;
+ } else {
+ zxdh_en_phytype_to_ethtool(en_dev, &safe_ks, true);
+ if (!bitmap_intersects(ks->link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ LOG_ERR("link_mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ bitmap_and(safe_ks.link_modes.advertising, ks->link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ zxdh_en_ethtool_to_phytype(&safe_ks, &on_speed_modes);
+ LOG_DEBUG("on_speed_modes: 0x%x\n", on_speed_modes);
+ advertising_link_modes = on_speed_modes;
+ }
+
+ if ((advertising_link_modes == en_dev->advertising_speed_modes) &&
+ (ks->base.autoneg == en_dev->autoneg_enable)) {
+ LOG_DEBUG("nothing changed\n");
+ return 0;
+ }
+
+ safe_ks.base.speed = en_dev->speed;
+ en_dev->speed = SPEED_UNKNOWN;
+ LOG_INFO("autoneg %d, link_modes: 0x%x\n", ks->base.autoneg,
+ advertising_link_modes);
+ err = zxdh_en_autoneg_set(en_dev, ks->base.autoneg, advertising_link_modes);
+ if (err != 0) {
+ en_dev->speed = safe_ks.base.speed;
+ LOG_ERR("zxdh_en_autoneg_set failed: %d\n", err);
+ return err;
+ } else {
+ en_dev->autoneg_enable = ks->base.autoneg;
+ en_dev->advertising_speed_modes = advertising_link_modes;
+ en_dev->link_up = false;
+ netif_carrier_off(netdev);
+ en_dev->ops->set_pf_link_up(en_dev->parent,
+ FALSE); // TODO:是否需要更新pf信息?
+ queue_work(en_priv->events->wq,
+ &en_priv->edev.vf_link_info_update_work);
+ queue_work(en_priv->events->wq,
+ &en_priv->edev.link_info_irq_update_np_work);
+ }
+
+ return err;
}
static uint32_t zxdh_en_get_link(struct net_device *netdev)
{
- return netif_carrier_ok(netdev) ? 1 : 0;
+ return netif_carrier_ok(netdev) ? 1 : 0;
}
static int zxdh_en_get_eeprom_len(struct net_device *netdev)
{
- return 0;
+ return 0;
}
-static int zxdh_en_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes)
+static int zxdh_en_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
- return 0;
+ return 0;
}
-static int zxdh_en_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes)
+static int zxdh_en_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
- return 0;
+ return 0;
}
-static void zxdh_en_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void zxdh_en_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
- ring->rx_max_pending = ZXDH_PF_MAX_DESC_NUM;
- ring->tx_max_pending = ZXDH_PF_MAX_DESC_NUM;
- ring->rx_pending = en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[0]);
- ring->tx_pending = en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[1]);
+ ring->rx_max_pending = ZXDH_PF_MAX_DESC_NUM;
+ ring->tx_max_pending = ZXDH_PF_MAX_DESC_NUM;
+ ring->rx_pending =
+ en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[0]);
+ ring->tx_pending =
+ en_dev->ops->get_queue_size(en_dev->parent, en_dev->phy_index[1]);
- return;
+ return;
}
-static int zxdh_en_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
-{
- LOG_ERR("not supported\n");
- return -1;
-}
-
-static void zxdh_en_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
-{
- int32_t err;
- uint32_t fc_mode;
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return;
- }
-
- err = zxdh_en_fc_mode_get(en_dev, &fc_mode);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fc_mode_get failed!\n");
- return;
- }
-
- pause->autoneg = 0;
-
- switch(fc_mode)
- {
- case BIT(SPM_FC_PAUSE_FULL):
- {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- break;
- }
- case BIT(SPM_FC_PAUSE_RX):
- {
- pause->rx_pause = 1;
- pause->tx_pause = 0;
- break;
- }
- case BIT(SPM_FC_PAUSE_TX):
- {
- pause->rx_pause = 0;
- pause->tx_pause = 1;
- break;
- }
- default:
- {
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- break;
- }
- }
-
- return;
-}
-
-static int32_t zxdh_en_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
-{
- int32_t err;
- uint32_t fc_mode_cur;
- uint32_t fc_mode_cfg;
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
-
- if(pause->autoneg)
- {
- LOG_ERR("not support pause autoneg!\n");
- return -EOPNOTSUPP;
- }
-
- err = zxdh_en_fc_mode_get(en_dev, &fc_mode_cur);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fc_mode_get failed!\n");
- return err;
- }
-
- if((pause->rx_pause || pause->tx_pause) && (fc_mode_cur == BIT(SPM_FC_PFC_FULL)))
- {
- LOG_ERR("warning, ethtool cfg pause on, this will lead to pfc off!\n");
- }
-
- if(pause->rx_pause && pause->tx_pause)
- {
- fc_mode_cfg = BIT(SPM_FC_PAUSE_FULL);
- }
- else if(pause->rx_pause)
- {
- fc_mode_cfg = BIT(SPM_FC_PAUSE_RX);
- }
- else if(pause->tx_pause)
- {
- fc_mode_cfg = BIT(SPM_FC_PAUSE_TX);
- }
- else
- {
- if(fc_mode_cur == BIT(SPM_FC_PFC_FULL))
- fc_mode_cfg = BIT(SPM_FC_PFC_FULL);
- else
- fc_mode_cfg = BIT(SPM_FC_NONE);
- }
-
- if(fc_mode_cfg != fc_mode_cur)
- {
- err = zxdh_en_fc_mode_set(en_dev, fc_mode_cfg);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fc_mode_set failed!\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static int32_t zxdh_en_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
-{
- int32_t err;
- uint32_t fec_cfg;
- uint32_t fec_active;
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
-
- err = zxdh_en_fec_mode_get(en_dev, NULL, &fec_cfg, &fec_active);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fec_mode_get failed!\n");
- return err;
- }
-
- fecparam->fec = zxdh_en_fec_to_ethtool_fecparam(fec_cfg, GET_FEC_CFG_FLAG);
- fecparam->active_fec = zxdh_en_fec_to_ethtool_fecparam(fec_active, GET_FEC_LINK_FLAG);
-
- //LOG_INFO("fec_cfg=0x%x, fecparam->fec=0x%x, fec_active=0x%x, fecparam->active_fec=0x%x\n",
- // fec_cfg, fecparam->fec, fec_active, fecparam->active_fec);
-
- return 0;
-}
-
-static int32_t zxdh_en_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
-{
- int32_t i;
- int32_t err;
- uint32_t fec_cap;
- uint32_t fec_cfg = 0;
- uint32_t fecparam_cap;
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
-
- err = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, NULL);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fec_mode_get failed!\n");
- return err;
- }
- fecparam_cap = zxdh_en_fec_to_ethtool_fecparam(fec_cap, GET_FEC_CAP_FLAG);
-
- if((fecparam->fec | fecparam_cap) != fecparam_cap)
- {
- LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec);
- return -EOPNOTSUPP;
- }
-
- for(i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++)
- {
- if(fecparam->fec == fec_2_ethtool_fecparam[i])
- {
- fec_cfg |= BIT(i);
- }
- }
-
- if(!fec_cfg && (fecparam->fec != ETHTOOL_FEC_AUTO))
- {
- LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec);
- return -EOPNOTSUPP;
- }
-
- //LOG_INFO("fecparam_cap=0x%x, fec_cap=0x%x, fecparam->fec=0x%x, fec_cfg=0x%x\n",
- // fecparam_cap, fec_cap, fecparam->fec, fec_cfg);
-
- err = zxdh_en_fec_mode_set(en_dev, fec_cfg);
- if(err != 0)
- {
- LOG_ERR("zxdh_en_fec_mode_set failed!\n");
- return err;
- }
-
- return 0;
-}
-
-static int32_t zxdh_en_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo)
-{
- uint32_t read_bytes;
- uint8_t data[2] = {0};
- struct zxdh_en_module_eeprom_param query = {0};
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
-
- query.i2c_addr = SFF_I2C_ADDRESS_LOW;
- query.page = 0;
- query.offset = 0;
- query.length = 2;
- read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, data);
- if(read_bytes != query.length)
- {
- LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
- return -EIO;
- }
-
- switch(data[0])
- {
- case ZXDH_MODULE_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
- break;
- case ZXDH_MODULE_ID_QSFP:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
- break;
- case ZXDH_MODULE_ID_QSFP_PLUS:
- case ZXDH_MODULE_ID_QSFP28:
- if(data[1] < 3)
- {
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
- }
- else
- {
- modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
- }
- break;
- default:
- LOG_ERR("can not recognize module identifier 0x%x!\n", data[0]);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int32_t zxdh_en_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data)
-{
- struct zxdh_en_module_eeprom_param query = {0};
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
- uint32_t offset = ee->offset;
- uint32_t length = ee->len;
- uint8_t identifier;
- uint32_t offset_boundary = 0;
- uint32_t total_read_bytes = 0;
- uint32_t read_bytes = 0;
-
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
-
- //LOG_INFO("offset %u, len %u\n", ee->offset, ee->len);
-
- if(!ee->len)
- return -EINVAL;
-
- memset(data, 0, ee->len);
-
- query.i2c_addr = SFF_I2C_ADDRESS_LOW;
- query.bank = 0;
- query.page = 0;
- query.offset = 0;
- query.length = 1;
- read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, &identifier);
- if(read_bytes != query.length)
- {
- LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
- return -EIO;
- }
-
- while(total_read_bytes < ee->len)
- {
- if(identifier == ZXDH_MODULE_ID_SFP)
- {
- if(offset < 256)
- {
- query.i2c_addr = SFF_I2C_ADDRESS_LOW;
- query.page = 0;
- query.offset = offset;
- }
- else
- {
- query.i2c_addr = SFF_I2C_ADDRESS_HIGH;
- query.page = 0;
- query.offset = offset - 256;
- }
- offset_boundary = (query.offset < 128) ? 128 : 256;
- query.length = ((query.offset + length) > offset_boundary) ? (offset_boundary - query.offset) : length;
- }
- else if(identifier == ZXDH_MODULE_ID_QSFP ||
- identifier == ZXDH_MODULE_ID_QSFP_PLUS ||
- identifier == ZXDH_MODULE_ID_QSFP28)
- {
- query.i2c_addr = SFF_I2C_ADDRESS_LOW;
- if(offset < 256)
- {
- query.page = 0;
- query.offset = offset;
- }
- else
- {
- query.page = (offset - 256) / 128 + 1;
- query.offset = offset - 128 * query.page;
- }
- offset_boundary = (query.offset < 128) ? 128 : 256;
- query.length = ((query.offset + length) > offset_boundary) ? (offset_boundary - query.offset) : length;
- }
- else
- {
- LOG_ERR("can not recognize module identifier 0x%x!\n", identifier);
- return -EINVAL;
- }
-
- read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, data + total_read_bytes);
- if(read_bytes != query.length)
- {
- LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
- return -EIO;
- }
-
- total_read_bytes += read_bytes;
- offset += read_bytes;
- length -= read_bytes;
- }
-
- return 0;
+static int zxdh_en_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ LOG_ERR("not supported\n");
+ return -1;
+}
+
+static void zxdh_en_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ int32_t err;
+ uint32_t fc_mode;
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return;
+ }
+
+ err = zxdh_en_fc_mode_get(en_dev, &fc_mode);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fc_mode_get failed!\n");
+ return;
+ }
+
+ pause->autoneg = 0;
+
+ switch (fc_mode) {
+ case BIT(SPM_FC_PAUSE_FULL): {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ break;
+ }
+ case BIT(SPM_FC_PAUSE_RX): {
+ pause->rx_pause = 1;
+ pause->tx_pause = 0;
+ break;
+ }
+ case BIT(SPM_FC_PAUSE_TX): {
+ pause->rx_pause = 0;
+ pause->tx_pause = 1;
+ break;
+ }
+ default: {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ break;
+ }
+ }
+
+ return;
+}
+
+static int32_t zxdh_en_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ int32_t err;
+ uint32_t fc_mode_cur;
+ uint32_t fc_mode_cfg;
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
+
+ if (pause->autoneg) {
+ LOG_ERR("not support pause autoneg!\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = zxdh_en_fc_mode_get(en_dev, &fc_mode_cur);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fc_mode_get failed!\n");
+ return err;
+ }
+
+ if ((pause->rx_pause || pause->tx_pause) &&
+ (fc_mode_cur == BIT(SPM_FC_PFC_FULL))) {
+ LOG_ERR("warning, ethtool cfg pause on, this will lead to pfc off!\n");
+ }
+
+ if (pause->rx_pause && pause->tx_pause) {
+ fc_mode_cfg = BIT(SPM_FC_PAUSE_FULL);
+ } else if (pause->rx_pause) {
+ fc_mode_cfg = BIT(SPM_FC_PAUSE_RX);
+ } else if (pause->tx_pause) {
+ fc_mode_cfg = BIT(SPM_FC_PAUSE_TX);
+ } else {
+ if (fc_mode_cur == BIT(SPM_FC_PFC_FULL))
+ fc_mode_cfg = BIT(SPM_FC_PFC_FULL);
+ else
+ fc_mode_cfg = BIT(SPM_FC_NONE);
+ }
+
+ if (fc_mode_cfg != fc_mode_cur) {
+ err = zxdh_en_fc_mode_set(en_dev, fc_mode_cfg);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fc_mode_set failed!\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t zxdh_en_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ int32_t err;
+ uint32_t fec_cfg;
+ uint32_t fec_active;
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
+
+ err = zxdh_en_fec_mode_get(en_dev, NULL, &fec_cfg, &fec_active);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fec_mode_get failed!\n");
+ return err;
+ }
+
+ fecparam->fec = zxdh_en_fec_to_ethtool_fecparam(fec_cfg, GET_FEC_CFG_FLAG);
+ fecparam->active_fec =
+ zxdh_en_fec_to_ethtool_fecparam(fec_active, GET_FEC_LINK_FLAG);
+
+ // LOG_INFO("fec_cfg=0x%x, fecparam->fec=0x%x, fec_active=0x%x,
+ // fecparam->active_fec=0x%x\n",
+ // fec_cfg, fecparam->fec, fec_active, fecparam->active_fec);
+
+ return 0;
+}
+
+static int32_t zxdh_en_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ int32_t i;
+ int32_t err;
+ uint32_t fec_cap;
+ uint32_t fec_cfg = 0;
+ uint32_t fecparam_cap;
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
+
+ err = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, NULL);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fec_mode_get failed!\n");
+ return err;
+ }
+ fecparam_cap = zxdh_en_fec_to_ethtool_fecparam(fec_cap, GET_FEC_CAP_FLAG);
+
+ if ((fecparam->fec | fecparam_cap) != fecparam_cap) {
+ LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec);
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++) {
+ if (fecparam->fec == fec_2_ethtool_fecparam[i]) {
+ fec_cfg |= BIT(i);
+ }
+ }
+
+ if (!fec_cfg && (fecparam->fec != ETHTOOL_FEC_AUTO)) {
+ LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec);
+ return -EOPNOTSUPP;
+ }
+
+ // LOG_INFO("fecparam_cap=0x%x, fec_cap=0x%x, fecparam->fec=0x%x,
+ // fec_cfg=0x%x\n",
+ // fecparam_cap, fec_cap, fecparam->fec, fec_cfg);
+
+ err = zxdh_en_fec_mode_set(en_dev, fec_cfg);
+ if (err != 0) {
+ LOG_ERR("zxdh_en_fec_mode_set failed!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int32_t zxdh_en_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ uint32_t read_bytes;
+ uint8_t data[2] = { 0 };
+ struct zxdh_en_module_eeprom_param query = { 0 };
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
+
+ query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+ query.page = 0;
+ query.offset = 0;
+ query.length = 2;
+ read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, data);
+ if (read_bytes != query.length) {
+ LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
+ return -EIO;
+ }
+
+ switch (data[0]) {
+ case ZXDH_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case ZXDH_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case ZXDH_MODULE_ID_QSFP_PLUS:
+ case ZXDH_MODULE_ID_QSFP28:
+ if (data[1] < 3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ default:
+ LOG_ERR("can not recognize module identifier 0x%x!\n", data[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int32_t zxdh_en_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct zxdh_en_module_eeprom_param query = { 0 };
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+ uint32_t offset = ee->offset;
+ uint32_t length = ee->len;
+ uint8_t identifier;
+ uint32_t offset_boundary = 0;
+ uint32_t total_read_bytes = 0;
+ uint32_t read_bytes = 0;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
+
+ // LOG_INFO("offset %u, len %u\n", ee->offset, ee->len);
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+ query.bank = 0;
+ query.page = 0;
+ query.offset = 0;
+ query.length = 1;
+ read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, &identifier);
+ if (read_bytes != query.length) {
+ LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
+ return -EIO;
+ }
+
+ while (total_read_bytes < ee->len) {
+ if (identifier == ZXDH_MODULE_ID_SFP) {
+ if (offset < 256) {
+ query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+ query.page = 0;
+ query.offset = offset;
+ } else {
+ query.i2c_addr = SFF_I2C_ADDRESS_HIGH;
+ query.page = 0;
+ query.offset = offset - 256;
+ }
+ offset_boundary = (query.offset < 128) ? 128 : 256;
+ query.length = ((query.offset + length) > offset_boundary) ?
+ (offset_boundary - query.offset) :
+ length;
+ } else if (identifier == ZXDH_MODULE_ID_QSFP ||
+ identifier == ZXDH_MODULE_ID_QSFP_PLUS ||
+ identifier == ZXDH_MODULE_ID_QSFP28) {
+ query.i2c_addr = SFF_I2C_ADDRESS_LOW;
+ if (offset < 256) {
+ query.page = 0;
+ query.offset = offset;
+ } else {
+ query.page = (offset - 256) / 128 + 1;
+ query.offset = offset - 128 * query.page;
+ }
+ offset_boundary = (query.offset < 128) ? 128 : 256;
+ query.length = ((query.offset + length) > offset_boundary) ?
+ (offset_boundary - query.offset) :
+ length;
+ } else {
+ LOG_ERR("can not recognize module identifier 0x%x!\n", identifier);
+ return -EINVAL;
+ }
+
+ read_bytes = zxdh_en_module_eeprom_read(en_dev, &query,
+ data + total_read_bytes);
+ if (read_bytes != query.length) {
+ LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
+ return -EIO;
+ }
+
+ total_read_bytes += read_bytes;
+ offset += read_bytes;
+ length -= read_bytes;
+ }
+
+ return 0;
}
#ifdef HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE
-static int32_t zxdh_en_get_module_eeprom_by_page(struct net_device *netdev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int32_t
+zxdh_en_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct zxdh_en_module_eeprom_param query = {0};
- struct zxdh_en_device *en_dev = netdev_priv(netdev);
- uint32_t read_bytes = 0;
+ struct zxdh_en_module_eeprom_param query = { 0 };
+ struct zxdh_en_device *en_dev = netdev_priv(netdev);
+ uint32_t read_bytes = 0;
- if(en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)
- {
- return -EOPNOTSUPP;
- }
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) {
+ return -EOPNOTSUPP;
+ }
- //LOG_INFO("offset %u, length %u, page %u, bank %u, i2c_address %u\n",
- // page_data->offset, page_data->length, page_data->page, page_data->bank, page_data->i2c_address);
+ // LOG_INFO("offset %u, length %u, page %u, bank %u, i2c_address %u\n",
+ // page_data->offset, page_data->length, page_data->page,
+ // page_data->bank, page_data->i2c_address);
- if(!page_data->length)
- return -EINVAL;
+ if (!page_data->length)
+ return -EINVAL;
- memset(page_data->data, 0, page_data->length);
+ memset(page_data->data, 0, page_data->length);
- query.i2c_addr = page_data->i2c_address;
- query.bank = page_data->bank;
- query.page = page_data->page;
- query.offset = page_data->offset;
- query.length = page_data->length;
- read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, page_data->data);
- if(read_bytes != query.length)
- {
- LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
- return -EIO;
- }
+ query.i2c_addr = page_data->i2c_address;
+ query.bank = page_data->bank;
+ query.page = page_data->page;
+ query.offset = page_data->offset;
+ query.length = page_data->length;
+ read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, page_data->data);
+ if (read_bytes != query.length) {
+ LOG_ERR("zxdh_en_module_eeprom_read failed!\n");
+ return -EIO;
+ }
- return read_bytes;
+ return read_bytes;
}
#endif
-static void zxdh_en_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data)
+static void zxdh_en_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
{
-
}
static int32_t zxdh_lldp_enable_proc(struct net_device *netdev, bool enable)
{
- int32_t ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ int32_t ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- ret = zxdh_lldp_enable_set(&en_priv->edev, enable);
- if (0 != ret)
- {
- LOG_ERR("%s lldp failed!\n", enable ? "enable" : "disable");
- return ret;
- }
+ ret = zxdh_lldp_enable_set(&en_priv->edev, enable);
+ if (0 != ret) {
+ LOG_ERR("%s lldp failed!\n", enable ? "enable" : "disable");
+ return ret;
+ }
- return ret;
+ return ret;
}
static int32_t zxdh_sshd_enable_proc(struct net_device *netdev, bool enable)
{
- int32_t ret = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ int32_t ret = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- ret = zxdh_sshd_enable_set(&en_priv->edev, enable);
- if (0 != ret)
- {
- LOG_ERR("%s riscv sshd failed!\n", enable ? "enable" : "disable");
- return ret;
- }
+ ret = zxdh_sshd_enable_set(&en_priv->edev, enable);
+ if (0 != ret) {
+ LOG_ERR("%s riscv sshd failed!\n", enable ? "enable" : "disable");
+ return ret;
+ }
- return ret;
+ return ret;
}
typedef int32_t (*zxdh_pflag_handler)(struct net_device *netdev, bool enable);
-struct flag_desc
-{
- uint8_t name[ETH_GSTRING_LEN];
- uint32_t bitno;
- zxdh_pflag_handler handler;
+struct flag_desc {
+ uint8_t name[ETH_GSTRING_LEN];
+ uint32_t bitno;
+ zxdh_pflag_handler handler;
};
-#define ZXDH_PRIV_DESC(_name, _bitno, _handler) \
-{ \
- .name = _name, \
- .bitno = _bitno, \
- .handler = _handler, \
-}
-
-static const struct flag_desc zxdh_gstrings_priv_flags[] =
-{
- ZXDH_PRIV_DESC("enable_lldp", ZXDH_PFLAG_ENABLE_LLDP, zxdh_lldp_enable_proc),
- ZXDH_PRIV_DESC("enable_sshd", ZXDH_PFLAG_ENABLE_SSHD, zxdh_sshd_enable_proc),
- ZXDH_PRIV_DESC("debug_ip", ZXDH_PFLAG_IP, NULL),
+#define ZXDH_PRIV_DESC(_name, _bitno, _handler) \
+ { \
+ .name = _name, .bitno = _bitno, .handler = _handler, \
+ }
+
+static const struct flag_desc zxdh_gstrings_priv_flags[] = {
+ ZXDH_PRIV_DESC("enable_lldp", ZXDH_PFLAG_ENABLE_LLDP,
+ zxdh_lldp_enable_proc),
+ ZXDH_PRIV_DESC("enable_sshd", ZXDH_PFLAG_ENABLE_SSHD,
+ zxdh_sshd_enable_proc),
+ ZXDH_PRIV_DESC("debug_ip", ZXDH_PFLAG_IP, NULL),
};
#define ZXDH_PRIV_FALG_ARRAY_SIZE ARRAY_SIZE(zxdh_gstrings_priv_flags)
-static void zxdh_en_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint16_t i = 0;
- int8_t ip[20] = {0};
- uint8_t is_zios = 0;
- int32_t ret = 0;
-
- switch (stringset)
- {
- case ETH_SS_STATS:
- {
- snprintf(data, ETH_GSTRING_LEN, "rx_packets");//get stat from netdev->stats
- ZXDH_ADD_STRING(data, "tx_packets");
- ZXDH_ADD_STRING(data, "rx_bytes");
- ZXDH_ADD_STRING(data, "tx_bytes");
- ZXDH_ADD_STRING(data, "tx_queue_wake");
- ZXDH_ADD_STRING(data, "tx_queue_stopped");
- ZXDH_ADD_STRING(data, "tx_queue_dropped");
-
- ZXDH_ADD_STRING(data, "rx_vport_packets");//get stat from np & vqm
- ZXDH_ADD_STRING(data, "tx_vport_packets");
- ZXDH_ADD_STRING(data, "rx_vport_bytes");
- ZXDH_ADD_STRING(data, "tx_vport_bytes");
- ZXDH_ADD_STRING(data, "rx_vport_dropped");
- ZXDH_ADD_STRING(data, "rx_vport_broadcast_packets");
- ZXDH_ADD_STRING(data, "tx_vport_broadcast_packets");
- ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_packets");
- ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_packets");
- ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_bytes");
- ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_bytes");
- ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_packets");
- ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_packets");
- ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_bytes");
- ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_bytes");
-
- ZXDH_ADD_STRING(data, "rx_packets_phy");//get stat from mac
- ZXDH_ADD_STRING(data, "tx_packets_phy");
- ZXDH_ADD_STRING(data, "rx_bytes_phy");
- ZXDH_ADD_STRING(data, "tx_bytes_phy");
- ZXDH_ADD_STRING(data, "rx_errors_phy");
- ZXDH_ADD_STRING(data, "tx_errors_phy");
- ZXDH_ADD_STRING(data, "rx_drop_phy");
- ZXDH_ADD_STRING(data, "tx_drop_phy");
- ZXDH_ADD_STRING(data, "rx_multicast_phy");
- ZXDH_ADD_STRING(data, "tx_multicast_phy");
- ZXDH_ADD_STRING(data, "rx_broadcast_phy");
- ZXDH_ADD_STRING(data, "tx_broadcast_phy");
- ZXDH_ADD_STRING(data, "rx_size_64_phy");
- ZXDH_ADD_STRING(data, "rx_size_65_127");
- ZXDH_ADD_STRING(data, "rx_size_128_255");
- ZXDH_ADD_STRING(data, "rx_size_256_511");
- ZXDH_ADD_STRING(data, "rx_size_512_1023");
- ZXDH_ADD_STRING(data, "rx_size_1024_1518");
- ZXDH_ADD_STRING(data, "rx_size_1519_mru");
- ZXDH_ADD_STRING(data, "rx_pause");
- ZXDH_ADD_STRING(data, "tx_pause");
-
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- ZXDH_ADD_QUEUE_STRING(data, "rx_pkts", i);
- ZXDH_ADD_QUEUE_STRING(data, "tx_pkts", i);
- ZXDH_ADD_QUEUE_STRING(data, "rx_bytes", i);
- ZXDH_ADD_QUEUE_STRING(data, "tx_bytes", i);
- ZXDH_ADD_QUEUE_STRING(data, "tx_stopped", i);
- ZXDH_ADD_QUEUE_STRING(data, "tx_wake", i);
- ZXDH_ADD_QUEUE_STRING(data, "tx_dropped", i);
- }
- break;
- }
- case ETH_SS_PRIV_FLAGS:
- {
- for (i = 0; i < ZXDH_NUM_PFLAGS; i++)
- {
- strncpy(data + i * ETH_GSTRING_LEN, zxdh_gstrings_priv_flags[i].name, ETH_GSTRING_LEN);
- }
-
- /* 获取riscv的os类型 */
- ret = zxdh_riscv_os_type_get(en_dev, &is_zios);
- if (ret != 0)
- {
- LOG_ERR("zxdh_riscv_os_type_get failed");
- break;
- }
-
- /* 修改登录方式 */
- if (is_zios == ZIOS_TYPE)
- {
- strncpy(data + ZXDH_PFLAG_ENABLE_SSHD * ETH_GSTRING_LEN, "enable_telnetd", ETH_GSTRING_LEN);
- }
-
- /* 获取debug口的ip地址*/
- ret = zxdh_debug_ip_get(en_dev, ip);
- if (ret != 0)
- {
- LOG_ERR("ip get failed");
- break;
- }
- strncpy(data + ZXDH_PFLAG_IP * ETH_GSTRING_LEN, ip, ETH_GSTRING_LEN);
- break;
- }
- default:
- {
- LOG_ERR("invalid para\n");
- break;
- }
- }
-
- return;
+static void zxdh_en_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint16_t i = 0;
+ int8_t ip[20] = { 0 };
+ int32_t ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_STATS: {
+ snprintf(data, ETH_GSTRING_LEN, "rx_packets"); // get stat from
+ // netdev->stats
+ ZXDH_ADD_STRING(data, "tx_packets");
+ ZXDH_ADD_STRING(data, "rx_bytes");
+ ZXDH_ADD_STRING(data, "tx_bytes");
+ ZXDH_ADD_STRING(data, "tx_queue_wake");
+ ZXDH_ADD_STRING(data, "tx_queue_stopped");
+ ZXDH_ADD_STRING(data, "tx_queue_dropped");
+
+ ZXDH_ADD_STRING(data, "rx_vport_packets"); // get stat from np & vqm
+ ZXDH_ADD_STRING(data, "tx_vport_packets");
+ ZXDH_ADD_STRING(data, "rx_vport_bytes");
+ ZXDH_ADD_STRING(data, "tx_vport_bytes");
+ ZXDH_ADD_STRING(data, "rx_vport_dropped");
+ ZXDH_ADD_STRING(data, "rx_vport_broadcast_packets");
+ ZXDH_ADD_STRING(data, "tx_vport_broadcast_packets");
+ ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_packets");
+ ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_packets");
+ ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_bytes");
+ ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_bytes");
+ ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_packets");
+ ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_packets");
+ ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_bytes");
+ ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_bytes");
+
+ ZXDH_ADD_STRING(data, "rx_packets_phy"); // get stat from mac
+ ZXDH_ADD_STRING(data, "tx_packets_phy");
+ ZXDH_ADD_STRING(data, "rx_bytes_phy");
+ ZXDH_ADD_STRING(data, "tx_bytes_phy");
+ ZXDH_ADD_STRING(data, "rx_errors_phy");
+ ZXDH_ADD_STRING(data, "tx_errors_phy");
+ ZXDH_ADD_STRING(data, "rx_drop_phy");
+ ZXDH_ADD_STRING(data, "tx_drop_phy");
+ ZXDH_ADD_STRING(data, "rx_multicast_phy");
+ ZXDH_ADD_STRING(data, "tx_multicast_phy");
+ ZXDH_ADD_STRING(data, "rx_broadcast_phy");
+ ZXDH_ADD_STRING(data, "tx_broadcast_phy");
+ ZXDH_ADD_STRING(data, "rx_size_64_phy");
+ ZXDH_ADD_STRING(data, "rx_size_65_127");
+ ZXDH_ADD_STRING(data, "rx_size_128_255");
+ ZXDH_ADD_STRING(data, "rx_size_256_511");
+ ZXDH_ADD_STRING(data, "rx_size_512_1023");
+ ZXDH_ADD_STRING(data, "rx_size_1024_1518");
+ ZXDH_ADD_STRING(data, "rx_size_1519_mru");
+
+ for (i = 0; i < en_dev->curr_queue_pairs; i++) {
+ ZXDH_ADD_QUEUE_STRING(data, "rx_pkts", i);
+ ZXDH_ADD_QUEUE_STRING(data, "tx_pkts", i);
+ ZXDH_ADD_QUEUE_STRING(data, "rx_bytes", i);
+ ZXDH_ADD_QUEUE_STRING(data, "tx_bytes", i);
+ ZXDH_ADD_QUEUE_STRING(data, "tx_stopped", i);
+ ZXDH_ADD_QUEUE_STRING(data, "tx_wake", i);
+ ZXDH_ADD_QUEUE_STRING(data, "tx_dropped", i);
+ }
+ break;
+ }
+ case ETH_SS_PRIV_FLAGS: {
+ for (i = 0; i < ZXDH_NUM_PFLAGS; i++) {
+ strncpy(data + i * ETH_GSTRING_LEN,
+ zxdh_gstrings_priv_flags[i].name, ETH_GSTRING_LEN);
+ }
+		/* Get the debug-port IP address */
+ ret = zxdh_debug_ip_get(en_dev, ip);
+ if (ret != 0) {
+ LOG_ERR("ip get failed");
+ break;
+ }
+ strncpy(data + 2 * ETH_GSTRING_LEN, ip, ETH_GSTRING_LEN);
+ break;
+ }
+ default: {
+ LOG_ERR("invalid para\n");
+ break;
+ }
+ }
+
+ return;
}
static int32_t zxdh_handle_pflag(struct net_device *netdev,
- uint32_t wanted_flags,
- enum zxdh_priv_flag flag)
+ uint32_t wanted_flags,
+ enum zxdh_priv_flag flag)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- bool enable = !!(wanted_flags & BIT(flag));
- uint32_t changes = wanted_flags ^ en_priv->edev.pflags;
- int32_t err = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ bool enable = !!(wanted_flags & BIT(flag));
+ uint32_t changes = wanted_flags ^ en_priv->edev.pflags;
+ int32_t err = 0;
- /* 判断设置的值是否改变&改变的位是否为flag位 */
- if (!(changes & BIT(flag)))
- {
- return 0;
- }
+	/* Check whether the requested value changed and whether the changed bit is this flag */
+ if (!(changes & BIT(flag))) {
+ return 0;
+ }
- if (flag == ZXDH_PFLAG_IP)
- {
- LOG_INFO("debug ip can not be changed");
- return 0;
- }
+ if (flag == ZXDH_PFLAG_IP) {
+ LOG_INFO("debug ip can not be changed");
+ return 0;
+ }
- err = zxdh_gstrings_priv_flags[flag].handler(netdev, enable);
- if (0 != err)
- {
- LOG_ERR("%s private flag '%s' failed err %d\n", \
- enable ? "Enable" : "Disable", zxdh_gstrings_priv_flags[flag].name, err);
- return err;
- }
+ err = zxdh_gstrings_priv_flags[flag].handler(netdev, enable);
+ if (0 != err) {
+ LOG_ERR("%s private flag '%s' failed err %d\n",
+ enable ? "Enable" : "Disable",
+ zxdh_gstrings_priv_flags[flag].name, err);
+ return err;
+ }
- ZXDH_SET_PFLAG(en_priv->edev.pflags, flag, enable);
+ ZXDH_SET_PFLAG(en_priv->edev.pflags, flag, enable);
- if (flag == ZXDH_PFLAG_ENABLE_SSHD) /* 同步debug的ip状态*/
- {
- ZXDH_SET_PFLAG(en_priv->edev.pflags, ZXDH_PFLAG_IP, enable);
- }
- return 0;
+	/* Keep the debug-IP flag in sync with the sshd flag */
+ if (flag == ZXDH_PFLAG_ENABLE_SSHD) {
+ ZXDH_SET_PFLAG(en_priv->edev.pflags, ZXDH_PFLAG_IP, enable);
+ }
+ return 0;
}
-
-static int32_t zxdh_en_set_priv_flags(struct net_device *netdev, uint32_t pflags)
+static int32_t zxdh_en_set_priv_flags(struct net_device *netdev,
+ uint32_t pflags)
{
- enum zxdh_priv_flag pflag = 0;
- int32_t err = 0;
+ enum zxdh_priv_flag pflag = 0;
+ int32_t err = 0;
- for (pflag = 0; pflag < ZXDH_NUM_PFLAGS; pflag++)
- {
- err = zxdh_handle_pflag(netdev, pflags, pflag);
- if (0 != err)
- {
- break;
- }
- }
+ for (pflag = 0; pflag < ZXDH_NUM_PFLAGS; pflag++) {
+ err = zxdh_handle_pflag(netdev, pflags, pflag);
+ if (0 != err) {
+ break;
+ }
+ }
- return err;
+ return err;
}
static uint32_t zxdh_en_get_priv_flags(struct net_device *netdev)
{
- int32_t ret = 0;
- uint32_t flag_lldp = 0;
- uint32_t lldp_mask = 0;
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ int32_t ret = 0;
+ uint32_t flag_lldp = 0;
+ uint32_t lldp_mask = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- ret = zxdh_lldp_enable_get(&en_priv->edev, &flag_lldp);
- if ((ret != 0) && (flag_lldp != 0) && (flag_lldp != 1))
- {
- LOG_ERR("zxdh_lldp_enable_get err, ret(%d), flag_lldp(%u).\n", ret, flag_lldp);
- return en_priv->edev.pflags;
- }
+ ret = zxdh_lldp_enable_get(&en_priv->edev, &flag_lldp);
+ if ((ret != 0) && (flag_lldp != 0) && (flag_lldp != 1)) {
+ LOG_ERR("zxdh_lldp_enable_get err, ret(%d), flag_lldp(%u).\n", ret,
+ flag_lldp);
+ return en_priv->edev.pflags;
+ }
- flag_lldp = flag_lldp << ZXDH_PFLAG_ENABLE_LLDP;
+ flag_lldp = flag_lldp << ZXDH_PFLAG_ENABLE_LLDP;
- lldp_mask = 0xFFFFFFFF ^ BIT(ZXDH_PFLAG_ENABLE_LLDP);
- en_priv->edev.pflags = (en_priv->edev.pflags & lldp_mask) | flag_lldp;
+ lldp_mask = 0xFFFFFFFF ^ BIT(ZXDH_PFLAG_ENABLE_LLDP);
+ en_priv->edev.pflags = (en_priv->edev.pflags & lldp_mask) | flag_lldp;
- return en_priv->edev.pflags;
+ return en_priv->edev.pflags;
}
static int zxdh_en_get_regs_len(struct net_device *netdev)
{
#define ZXDH_REGS_LEN (128 * 1024)
- return ZXDH_REGS_LEN * sizeof(uint32_t);
+ return ZXDH_REGS_LEN * sizeof(uint32_t);
}
-static void zxdh_en_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+static void zxdh_en_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
{
-
}
-static void zxdh_en_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void zxdh_en_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
- wol->supported = en_dev->wol_support;
- if (wol->supported == 0)
- {
- return;
- }
- wol->wolopts = en_dev->wolopts;
+ wol->supported = en_dev->wol_support;
+ if (wol->supported == 0) {
+ return;
+ }
+ wol->wolopts = en_dev->wolopts;
}
-static int zxdh_en_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int zxdh_en_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
- return 0;
+ return 0;
}
static uint32_t zxdh_en_get_msglevel(struct net_device *netdev)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
- return en_dev->msglevel;
+ return en_dev->msglevel;
}
static void zxdh_en_set_msglevel(struct net_device *netdev, uint32_t data)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- en_dev->msglevel = data;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ en_dev->msglevel = data;
}
static int zxdh_en_nway_reset(struct net_device *netdev)
{
- return 0;
+ return 0;
}
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
-static int zxdh_en_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
-
- switch (state)
- {
- case ETHTOOL_ID_ACTIVE:
- {
- msg.payload.mac_set_msg.blink_enable = 1;
- break;
- }
- case ETHTOOL_ID_INACTIVE:
- {
- msg.payload.mac_set_msg.blink_enable = 0;
- break;
- }
- default:
- return -EOPNOTSUPP;
- }
- msg.payload.hdr_to_agt.op_code = AGENT_MAC_LED_BLINK;
- msg.payload.hdr_to_agt.phyport = en_dev->phy_port;
- LOG_DEBUG("send phyport %d, blink_enable=%d\n", en_dev->phy_port, msg.payload.mac_set_msg.blink_enable);
-
- return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &msg);
+static int zxdh_en_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE: {
+ msg.mac_set_msg.blink_enable = 1;
+ break;
+ }
+ case ETHTOOL_ID_INACTIVE: {
+ msg.mac_set_msg.blink_enable = 0;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ msg.hdr_to_agt.op_code = AGENT_MAC_LED_BLINK;
+ msg.hdr_to_agt.phyport = en_dev->phy_port;
+ LOG_DEBUG("send phyport %d, blink_enable=%d\n", en_dev->phy_port,
+ msg.mac_set_msg.blink_enable);
+
+ return zxdh_send_command_to_specify(en_dev, MODULE_MAC, &msg, &reps);
}
#else
static int zxdh_en_phys_id(struct net_device *netdev, u32 data)
{
- return 0;
+ return 0;
}
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
int32_t zxdh_en_self_test_num(void)
{
- return 0;
+ return 0;
}
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
static int32_t zxdh_en_get_sset_count(struct net_device *netdev, int sset)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
-
- switch (sset)
- {
- case ETH_SS_STATS:
- {
- return ZXDH_NET_PF_STATS_NUM(en_dev);
- }
- case ETH_SS_PRIV_FLAGS:
- {
- return ZXDH_NUM_PFLAGS;
- }
- case ETH_SS_TEST:
- {
- return zxdh_en_self_test_num();
- }
- default:
- {
- return -EOPNOTSUPP;
- }
- }
-
- return 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+
+ switch (sset) {
+ case ETH_SS_STATS: {
+ return ZXDH_NET_PF_STATS_NUM(en_dev);
+ }
+ case ETH_SS_PRIV_FLAGS: {
+ return ZXDH_NUM_PFLAGS;
+ }
+ case ETH_SS_TEST: {
+ return zxdh_en_self_test_num();
+ }
+ default: {
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
}
#endif
-static void zxdh_en_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static void zxdh_en_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
- uint8_t drv_name_len = 0;
- uint8_t drv_version[MAX_DRV_VERSION_LEN] = {0};
- uint8_t drv_version_len = 0;
- uint16_t vport = 0;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
+ uint8_t drv_name_len = 0;
+ uint8_t drv_version[MAX_DRV_VERSION_LEN] = { 0 };
+ uint8_t drv_version_len = 0;
+ uint16_t vport = 0;
- ret = en_dev->ops->get_pf_drv_msg(en_dev->parent, drv_version, &drv_version_len);
- if (drv_version_len > MAX_DRV_NAME_LEN)
- {
- LOG_ERR("drv_version_len(%hhu) greater than %u", drv_version_len, MAX_DRV_NAME_LEN);
- drv_version_len = MAX_DRV_NAME_LEN;
- }
+ ret = en_dev->ops->get_pf_drv_msg(en_dev->parent, drv_version,
+ &drv_version_len);
+ if (drv_version_len > MAX_DRV_NAME_LEN) {
+ LOG_ERR("drv_version_len(%hhu) greater than %u", drv_version_len,
+ MAX_DRV_NAME_LEN);
+ drv_version_len = MAX_DRV_NAME_LEN;
+ }
- vport = en_dev->vport;
- drv_name_len = sizeof(DRV_NAME(vport));
- if (drv_name_len > MAX_DRV_NAME_LEN)
- {
- LOG_ERR("drv_name_len(%hhu) greater than %u", drv_name_len, MAX_DRV_NAME_LEN);
- drv_name_len = MAX_DRV_NAME_LEN;
- }
+ vport = en_dev->vport;
+ drv_name_len = sizeof(DRV_NAME(vport));
+ if (drv_name_len > MAX_DRV_NAME_LEN) {
+ LOG_ERR("drv_name_len(%hhu) greater than %u", drv_name_len,
+ MAX_DRV_NAME_LEN);
+ drv_name_len = MAX_DRV_NAME_LEN;
+ }
- memcpy(drvinfo->driver, DRV_NAME(vport), drv_name_len);
- memcpy(drvinfo->version, drv_version, drv_version_len);
+ memcpy(drvinfo->driver, DRV_NAME(vport), drv_name_len);
+ memcpy(drvinfo->version, drv_version, drv_version_len);
- strlcpy(drvinfo->bus_info, dev_name(en_dev->parent->parent->device), sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->bus_info, dev_name(en_dev->parent->parent->device),
+ sizeof(drvinfo->bus_info));
- drvinfo->n_priv_flags = ZXDH_NUM_PFLAGS;
- drvinfo->n_stats = ZXDH_NET_PF_STATS_NUM(en_dev);
- drvinfo->eedump_len = zxdh_en_get_eeprom_len(netdev);
- drvinfo->regdump_len = zxdh_en_get_regs_len(netdev);
- drvinfo->testinfo_len = zxdh_en_self_test_num();
+ drvinfo->n_priv_flags = ZXDH_NUM_PFLAGS;
+ drvinfo->n_stats = ZXDH_NET_PF_STATS_NUM(en_dev);
+ drvinfo->eedump_len = zxdh_en_get_eeprom_len(netdev);
+ drvinfo->regdump_len = zxdh_en_get_regs_len(netdev);
+ drvinfo->testinfo_len = zxdh_en_self_test_num();
- memcpy(drvinfo->fw_version, en_dev->fw_version, en_dev->fw_version_len);
+ memcpy(drvinfo->fw_version, en_dev->fw_version, en_dev->fw_version_len);
}
int32_t zxdh_stats_update(struct zxdh_en_device *en_dev)
{
- uint16_t i = 0;
- int32_t ret = 0;
-
- ret = zxdh_vport_stats_get(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
- return -1;
- }
+ uint16_t i = 0;
+ int32_t ret = 0;
- ret = zxdh_mac_stats_get(en_dev);
- if (ret != 0)
- {
- LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
- return -1;
- }
-
- memset(&en_dev->hw_stats.netdev_stats, 0, sizeof(en_dev->hw_stats.netdev_stats));
- for (i = 0; i < en_dev->curr_queue_pairs; i++)
- {
- /* queue software statistics */
- en_dev->hw_stats.q_stats[i].q_rx_pkts = en_dev->rq[i].stats.packets;
- en_dev->hw_stats.q_stats[i].q_tx_pkts = en_dev->sq[i].stats.packets;
- en_dev->hw_stats.q_stats[i].q_rx_bytes = en_dev->rq[i].stats.bytes;
- en_dev->hw_stats.q_stats[i].q_tx_bytes = en_dev->sq[i].stats.bytes;
+#if STATS_CLEAR_AFTER_READ
+ memset(&en_dev->hw_stats, 0, sizeof(struct zxdh_en_hw_stats));
+#endif
- en_dev->hw_stats.netdev_stats.rx_packets += en_dev->rq[i].stats.packets;
- en_dev->hw_stats.netdev_stats.tx_packets += en_dev->sq[i].stats.packets;
- en_dev->hw_stats.netdev_stats.rx_bytes += en_dev->rq[i].stats.bytes;
- en_dev->hw_stats.netdev_stats.tx_bytes += en_dev->sq[i].stats.bytes;
- en_dev->hw_stats.netdev_stats.tx_queue_wake += en_dev->hw_stats.q_stats[i].q_tx_wake;
- en_dev->hw_stats.netdev_stats.tx_queue_stopped += en_dev->hw_stats.q_stats[i].q_tx_stopped;
- en_dev->hw_stats.netdev_stats.tx_queue_dropped += en_dev->hw_stats.q_stats[i].q_tx_dropped;
- }
+ ret = zxdh_vport_stats_get(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
+ return -1;
+ }
+
+ ret = zxdh_mac_stats_get(en_dev);
+ if (ret != 0) {
+ LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret);
+ return -1;
+ }
+
+ memset(&en_dev->hw_stats.netdev_stats, 0,
+ sizeof(en_dev->hw_stats.netdev_stats));
+ for (i = 0; i < en_dev->curr_queue_pairs; i++) {
+ /* queue software statistics */
+ en_dev->hw_stats.q_stats[i].q_rx_pkts = en_dev->rq[i].stats.packets;
+ en_dev->hw_stats.q_stats[i].q_tx_pkts = en_dev->sq[i].stats.packets;
+ en_dev->hw_stats.q_stats[i].q_rx_bytes = en_dev->rq[i].stats.bytes;
+ en_dev->hw_stats.q_stats[i].q_tx_bytes = en_dev->sq[i].stats.bytes;
+
+ en_dev->hw_stats.netdev_stats.rx_packets += en_dev->rq[i].stats.packets;
+ en_dev->hw_stats.netdev_stats.tx_packets += en_dev->sq[i].stats.packets;
+ en_dev->hw_stats.netdev_stats.rx_bytes += en_dev->rq[i].stats.bytes;
+ en_dev->hw_stats.netdev_stats.tx_bytes += en_dev->sq[i].stats.bytes;
+ en_dev->hw_stats.netdev_stats.tx_queue_wake +=
+ en_dev->hw_stats.q_stats[i].q_tx_wake;
+ en_dev->hw_stats.netdev_stats.tx_queue_stopped +=
+ en_dev->hw_stats.q_stats[i].q_tx_stopped;
+ en_dev->hw_stats.netdev_stats.tx_queue_dropped +=
+ en_dev->hw_stats.q_stats[i].q_tx_dropped;
+
+ /* the stats below are incremented when queue events occur:
+ en_dev->hw_stats.q_stats[i].q_tx_stopped
+ en_dev->hw_stats.q_stats[i].q_tx_wake
+ en_dev->hw_stats.q_stats[i].q_tx_dropped */
+
+#if STATS_CLEAR_AFTER_READ
+ memset(&en_dev->sq[i].stats, 0, sizeof(struct virtnet_sq_stats));
+ memset(&en_dev->rq[i].stats, 0, sizeof(struct virtnet_rq_stats));
+#endif
+ }
+
+#ifdef ZXDH_MSGQ
+ if (en_dev->need_msgq) {
+ LOG_DEBUG("msgq_rx_pkts: %lld\n", en_dev->rq[i].stats.packets);
+ LOG_DEBUG("msgq_rx_kicks: %lld\n", en_dev->rq[i].stats.kicks);
+ LOG_DEBUG("msgq_rx_bytes: %lld\n", en_dev->rq[i].stats.bytes);
+ LOG_DEBUG("msgq_rx_drops: %lld\n", en_dev->rq[i].stats.drops);
+ LOG_DEBUG("msgq_rx_errs: %lld\n", en_dev->rq[i].stats.xdp_drops);
+
+ LOG_DEBUG("msgq_tx_pkts: %lld\n", en_dev->sq[i].stats.packets);
+ LOG_DEBUG("msgq_tx_bytes: %lld\n", en_dev->sq[i].stats.bytes);
+ LOG_DEBUG("msgq_tx_kicks: %lld\n", en_dev->sq[i].stats.kicks);
+ LOG_DEBUG("msgq_tx_timeouts: %lld\n", en_dev->sq[i].stats.tx_timeouts);
+ LOG_DEBUG("msgq_tx_errs: %lld\n", en_dev->sq[i].stats.xdp_tx_drops);
+ }
+#endif
- return ret;
+ return ret;
}
-static void zxdh_en_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data)
+static void zxdh_en_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- uint32_t offset = ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + ZXDH_VPORT_STATS_NUM;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ uint32_t offset =
+ ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + ZXDH_VPORT_STATS_NUM;
- zxdh_stats_update(en_dev);
- memcpy(data, &en_dev->hw_stats, ZXDH_NET_PF_STATS_NUM(en_dev) * sizeof(uint64_t));
- memcpy(data + offset, en_dev->hw_stats.q_stats, (en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM) * sizeof(uint64_t));
+ zxdh_stats_update(en_dev);
+ memcpy(data, &en_dev->hw_stats,
+ ZXDH_NET_PF_STATS_NUM(en_dev) * sizeof(uint64_t));
+ memcpy(data + offset, en_dev->hw_stats.q_stats,
+ (en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM) *
+ sizeof(uint64_t));
- return;
+ return;
}
-static int zxdh_en_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+static int zxdh_en_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
- return 0;
+ return 0;
}
-static int zxdh_en_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+static int zxdh_en_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
- return 0;
+ return 0;
}
-static int zxdh_en_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+static int zxdh_en_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
{
- return 0;
+ return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int zxdh_en_ethtool_begin(struct net_device *netdev)
{
- return 0;
+ return 0;
}
static void zxdh_en_ethtool_complete(struct net_device *netdev)
{
-
}
#endif
#ifndef HAVE_NDO_SET_FEATURES
static int zxdh_en_get_rx_csum(struct net_device *netdev)
{
- return 0;
+ return 0;
}
static int zxdh_en_set_rx_csum(struct net_device *netdev, u32 data)
{
- return 0;
+ return 0;
}
static int zxdh_en_set_tx_csum(struct net_device *netdev, u32 data)
{
- return 0;
+ return 0;
}
#ifdef NETIF_F_TSO
static int zxdh_en_set_tso(struct net_device *netdev, u32 data)
{
- return 0;
+ return 0;
}
#endif /* NETIF_F_TSO */
#ifdef ETHTOOL_GFLAGS
static int zxdh_en_set_flags(struct net_device *netdev, u32 data)
{
- return 0;
+ return 0;
}
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
static int zxdh_en_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return 0;
+ return 0;
}
static int zxdh_en_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return 0;
+ return 0;
}
#ifdef ETHTOOL_GRXFHINDIR
#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
static u32 zxdh_en_get_rxfh_indir_size(struct net_device *netdev)
{
- return ZXDH_INDIR_RQT_SIZE;
+ return ZXDH_INDIR_RQT_SIZE;
}
static u32 zxdh_en_get_rxfh_key_size(struct net_device *netdev)
{
- return ZXDH_NET_HASH_KEY_SIZE;
+ return ZXDH_NET_HASH_KEY_SIZE;
}
#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
#ifdef HAVE_RXFH_HASHFUNC
-static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
#else
static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
#endif /* HAVE_RXFH_HASHFUNC */
@@ -1386,612 +1339,572 @@ static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
static int zxdh_en_get_rxfh_indir(struct net_device *netdev, u32 *indir)
#endif /* HAVE_ETHTOOL_GSRSSH */
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- int32_t ret = 0;
- uint8_t func = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("zxdh_en_get_rxfh start\n");
- if (indir != NULL)
- {
- memcpy(indir, en_dev->indir_rqt, sizeof(uint32_t) * ZXDH_INDIR_RQT_SIZE);
- }
-
- if (key != NULL)
- {
- LOG_INFO("get key is called\n");
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_thash_key_get(&pf_info, key, ZXDH_NET_HASH_KEY_SIZE);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_THASH_KEY_GET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- if (ret == 0)
- {
- memcpy(key, msg.reps.thash_key_set_msg.key_map, ZXDH_NET_HASH_KEY_SIZE);
- }
- }
- }
-
- if (hfunc != NULL)
- {
- func = en_dev->hash_func;
- switch (func)
- {
- case ZXDH_FUNC_TOP:
- {
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- }
- case ZXDH_FUNC_XOR:
- {
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- }
- case ZXDH_FUNC_CRC32:
- {
- *hfunc = ETH_RSS_HASH_CRC32;
- break;
- }
- default:
- {
- return -EOPNOTSUPP;
- }
- }
- }
-
- return ret;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t ret = 0;
+ uint8_t func = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ LOG_INFO("zxdh_en_get_rxfh start\n");
+ if (indir != NULL) {
+ memcpy(indir, en_dev->indir_rqt,
+ sizeof(uint32_t) * ZXDH_INDIR_RQT_SIZE);
+ }
+
+ if (key != NULL) {
+ LOG_INFO("get key is called\n");
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_thash_key_get(&pf_info, key, ZXDH_NET_HASH_KEY_SIZE);
+ } else {
+ msg.hdr.op_code = ZXDH_THASH_KEY_GET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ ret = en_dev->ops->msg_send_cmd(
+ en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &reps, true);
+ if (ret == 0) {
+ memcpy(key, reps.thash_key_set_msg.key_map,
+ ZXDH_NET_HASH_KEY_SIZE);
+ }
+ }
+ }
+
+ if (hfunc != NULL) {
+ func = en_dev->hash_func;
+ switch (func) {
+ case ZXDH_FUNC_TOP: {
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ }
+ case ZXDH_FUNC_XOR: {
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ }
+ case ZXDH_FUNC_CRC32: {
+ *hfunc = ETH_RSS_HASH_CRC32;
+ break;
+ }
+ default: {
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ return ret;
}
#else
-static int zxdh_en_get_rxfh_indir(struct net_device *netdev, struct ethtool_rxfh_indir *indir)
+static int zxdh_en_get_rxfh_indir(struct net_device *netdev,
+ struct ethtool_rxfh_indir *indir)
{
- return 0;
+ return 0;
}
#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
#endif /* ETHTOOL_GRXFHINDIR */
-static int32_t zxdh_indir_to_queue_map(struct zxdh_en_device *en_dev, const uint32_t *indir)
-{
- uint32_t *queue_map = NULL;
- int32_t err = 0;
- uint16_t i = 0;
- uint16_t j = 0;
-
- queue_map = kzalloc(ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t), GFP_KERNEL);
- if (queue_map == NULL)
- {
- LOG_ERR("queue_map is NULL\n");
- return -ENOMEM;
- }
- for (i = 0; i < ZXDH_INDIR_RQT_SIZE; i++)
- {
- j = indir[i];
- queue_map[i] = en_dev->phy_index[2 * j];
- }
- err = zxdh_rxfh_set(en_dev, queue_map);
- kfree(queue_map);
- if (err != 0)
- {
- LOG_ERR("zxdh_rxfh_set failed: %d\n", err);
- return err;
- }
-
- memcpy(en_dev->indir_rqt, indir, ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t));
-
- return err;
+static int32_t zxdh_indir_to_queue_map(struct zxdh_en_device *en_dev,
+ const uint32_t *indir)
+{
+ uint32_t *queue_map = NULL;
+ int32_t err = 0;
+ uint16_t i = 0;
+ uint16_t j = 0;
+
+ queue_map = kzalloc(ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t), GFP_KERNEL);
+ if (queue_map == NULL) {
+ LOG_ERR("queue_map is NULL\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < ZXDH_INDIR_RQT_SIZE; i++) {
+ j = indir[i];
+ queue_map[i] = en_dev->phy_index[2 * j];
+ }
+ err = zxdh_rxfh_set(en_dev, queue_map);
+ kfree(queue_map);
+ if (err != 0) {
+ LOG_ERR("zxdh_rxfh_set failed: %d\n", err);
+ return err;
+ }
+
+ memcpy(en_dev->indir_rqt, indir, ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t));
+
+ return err;
}
#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
#ifdef HAVE_RXFH_HASHFUNC
-static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc)
+static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
#else
-static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key)
+static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
#endif /* HAVE_RXFH_HASHFUNC */
#else
static int zxdh_en_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
#endif /* HAVE_ETHTOOL_GSRSSH */
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- int32_t ret = 0;
- uint8_t func = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("zxdh_en_set_rxfh_indir start\n");
- switch (hfunc)
- {
- case ETH_RSS_HASH_NO_CHANGE:
- {
- break;
- }
- case ETH_RSS_HASH_TOP:
- {
- func = ZXDH_FUNC_TOP;
- break;
- }
- case ETH_RSS_HASH_XOR:
- {
- func = ZXDH_FUNC_XOR;
- break;
- }
- case ETH_RSS_HASH_CRC32:
- {
- func = ZXDH_FUNC_CRC32;
- break;
- }
- default:
- {
- return -EOPNOTSUPP;
- }
- }
-
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (func != en_dev->hash_func))
- {
- LOG_DEBUG("func: %u\n", func);
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_vport_hash_funcs_set(&pf_info, func);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_HASH_FUNC_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.hfunc_set_msg.func = func;
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- }
- if (ret != 0)
- {
- LOG_ERR("hunc set failed: %d", ret);
- return ret;
- }
- en_dev->hash_func = func;
- }
-
- if (indir != NULL)
- {
- LOG_DEBUG("set indir is called\n");
- ret = zxdh_indir_to_queue_map(en_dev, indir);
- if (ret != 0)
- {
- LOG_ERR("indir set failed: %d", ret);
- return ret;
- }
- }
-
- if (key != NULL)
- {
- LOG_DEBUG("set thash key is called\n");
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_thash_key_set(&pf_info, (uint8_t *)key, ZXDH_NET_HASH_KEY_SIZE);
- }
- else
- {
- memset(&msg, 0, sizeof(union zxdh_msg));
- msg.payload.hdr.op_code = ZXDH_THASH_KEY_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- memcpy(msg.payload.thash_key_set_msg.key_map, key, ZXDH_NET_HASH_KEY_SIZE);
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- }
- }
-
- return ret;
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ int32_t ret = 0;
+ uint8_t func = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ LOG_INFO("zxdh_en_set_rxfh_indir start\n");
+ switch (hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE: {
+ break;
+ }
+ case ETH_RSS_HASH_TOP: {
+ func = ZXDH_FUNC_TOP;
+ break;
+ }
+ case ETH_RSS_HASH_XOR: {
+ func = ZXDH_FUNC_XOR;
+ break;
+ }
+ case ETH_RSS_HASH_CRC32: {
+ func = ZXDH_FUNC_CRC32;
+ break;
+ }
+ default: {
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (func != en_dev->hash_func)) {
+ LOG_DEBUG("func: %u\n", func);
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_vport_hash_funcs_set(&pf_info, func);
+ } else {
+ msg.hdr.op_code = ZXDH_HASH_FUNC_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.hfunc_set_msg.func = func;
+ ret = en_dev->ops->msg_send_cmd(
+ en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &reps, true);
+ }
+ if (ret != 0) {
+ LOG_ERR("hunc set failed: %d", ret);
+ return ret;
+ }
+ en_dev->hash_func = func;
+ }
+
+ if (indir != NULL) {
+ LOG_DEBUG("set indir is called\n");
+ ret = zxdh_indir_to_queue_map(en_dev, indir);
+ if (ret != 0) {
+ LOG_ERR("indir set failed: %d", ret);
+ return ret;
+ }
+ }
+
+ if (key != NULL) {
+ LOG_DEBUG("set thash key is called\n");
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_thash_key_set(&pf_info, (uint8_t *)key,
+ ZXDH_NET_HASH_KEY_SIZE);
+ } else {
+ memset(&msg, 0, sizeof(zxdh_msg_info));
+ msg.hdr.op_code = ZXDH_THASH_KEY_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ memcpy(msg.thash_key_set_msg.key_map, key, ZXDH_NET_HASH_KEY_SIZE);
+ ret = en_dev->ops->msg_send_cmd(
+ en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &reps, true);
+ }
+ }
+
+ return ret;
}
#else
-static int zxdh_en_set_rxfh_indir(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int zxdh_en_set_rxfh_indir(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
{
- return 0;
+ return 0;
}
#endif
#ifdef ETHTOOL_GCHANNELS
-static void zxdh_en_get_channels(struct net_device *netdev, struct ethtool_channels *ch)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- int32_t err = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- ch->max_combined = max_pairs;
- ch->combined_count = en_dev->curr_queue_pairs;
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- err = dpp_rxfh_get(&pf_info, msg.payload.rxfh_set_msg.queue_map, ZXDH_INDIR_RQT_SIZE);
- if (err != 0)
- {
- LOG_ERR("dpp_rxfh_get failed: %d\n", err);
- return;
- }
-
- LOG_DEBUG("*******pf_queue_map*******\n");
- zxdh_u32_array_print(msg.payload.rxfh_set_msg.queue_map, ZXDH_INDIR_RQT_SIZE);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_RXFH_GET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- if (err != 0)
- {
- LOG_ERR("dpp_rxfh_get failed: %d\n", err);
- return;
- }
-
- LOG_DEBUG("*******vf_queue_map*******\n");
- zxdh_u32_array_print(msg.reps.rxfh_get_msg.queue_map, ZXDH_INDIR_RQT_SIZE);
- }
+static void zxdh_en_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ int32_t err = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ ch->max_combined = max_pairs;
+ ch->combined_count = en_dev->curr_queue_pairs;
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ err = dpp_rxfh_get(&pf_info, msg.rxfh_set_msg.queue_map,
+ ZXDH_INDIR_RQT_SIZE);
+ if (err != 0) {
+ LOG_ERR("dpp_rxfh_get failed: %d\n", err);
+ return;
+ }
+
+ LOG_DEBUG("*******pf_queue_map*******\n");
+ zxdh_u32_array_print(msg.rxfh_set_msg.queue_map, ZXDH_INDIR_RQT_SIZE);
+ } else {
+ msg.hdr.op_code = ZXDH_RXFH_GET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &msg, true);
+ if (err != 0) {
+ LOG_ERR("dpp_rxfh_get failed: %d\n", err);
+ return;
+ }
+ }
}
#endif /* ETHTOOL_GCHANNELS */
-int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev, uint16_t num_changed)
+int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev,
+ uint16_t num_changed)
{
- uint32_t indir[ZXDH_INDIR_RQT_SIZE] = {0};
- int32_t err = 0;
- uint16_t i = 0;
+ uint32_t indir[ZXDH_INDIR_RQT_SIZE] = { 0 };
+ int32_t err = 0;
+ uint16_t i = 0;
- if (!netif_is_rxfh_configured(en_dev->netdev))
- {
- LOG_INFO("indir_is_default\n");
- for (i = 0; i < ZXDH_INDIR_RQT_SIZE; ++i)
- {
- indir[i] = i % num_changed;
- }
+ LOG_DEBUG("num_changed: %d\n", num_changed);
+ if (!netif_is_rxfh_configured(en_dev->netdev)) {
+ LOG_INFO("indir_is_default\n");
+ for (i = 0; i < ZXDH_INDIR_RQT_SIZE; ++i) {
+ indir[i] = i % num_changed;
+ }
- err = zxdh_indir_to_queue_map(en_dev, indir);
- if (err != 0)
- {
- LOG_ERR("zxdh_indir_to_queue_map failed: %d\n", err);
- return err;
- }
- }
+ err = zxdh_indir_to_queue_map(en_dev, indir);
+ if (err != 0) {
+ LOG_ERR("zxdh_indir_to_queue_map failed: %d\n", err);
+ return err;
+ }
+ }
- en_dev->curr_queue_pairs = num_changed;
+ en_dev->curr_queue_pairs = num_changed;
- return err;
+ return err;
}
#ifdef ETHTOOL_SCHANNELS
-static int zxdh_en_set_channels(struct net_device *netdev, struct ethtool_channels *ch)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- int32_t ret = 0;
-
- LOG_INFO("zxdh_en_set_channels start\n");
- /* verify that the number of channels does not invalidate any current
- * flow director rules
- */
- //TODO
-
- /* We don't support separate rx/tx channels.
- * We don't allow setting 'other' channels.
- */
- if (ch->rx_count || ch->tx_count || ch->other_count)
- {
- LOG_ERR("not supported\n");
- return -EINVAL;
- }
-
- if ((ch->combined_count > max_pairs) || (ch->combined_count == 0))
- {
- LOG_ERR("invalid para\n");
- return -EINVAL;
- }
-
- if (ch->combined_count == en_dev->curr_queue_pairs)
- {
- return 0;
- }
-
- ret = zxdh_num_channels_changed(en_dev, ch->combined_count);
- if (ret != 0)
- {
- LOG_ERR("zxdh_num_channels_changed failed: %d\n", ret);
- return -1;
- }
-
- netif_set_real_num_tx_queues(netdev, en_dev->curr_queue_pairs);
- netif_set_real_num_rx_queues(netdev, en_dev->curr_queue_pairs);
-
- return 0;
+static int zxdh_en_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ int32_t ret = 0;
+
+ LOG_INFO("zxdh_en_set_channels start\n");
+ /* verify that the number of channels does not invalidate any current
+ * flow director rules
+ */
+ // TODO
+
+ /* We don't support separate rx/tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (ch->rx_count || ch->tx_count || ch->other_count) {
+ LOG_ERR("not supported\n");
+ return -EINVAL;
+ }
+
+ if ((ch->combined_count > max_pairs) || (ch->combined_count == 0)) {
+ LOG_ERR("invalid para\n");
+ return -EINVAL;
+ }
+
+ if (ch->combined_count == en_dev->curr_queue_pairs) {
+ return 0;
+ }
+
+ ret = zxdh_num_channels_changed(en_dev, ch->combined_count);
+ if (ret != 0) {
+ LOG_ERR("zxdh_num_channels_changed failed: %d\n", ret);
+ return -1;
+ }
+
+ netif_set_real_num_tx_queues(netdev, en_dev->curr_queue_pairs);
+ netif_set_real_num_rx_queues(netdev, en_dev->curr_queue_pairs);
+
+ return 0;
}
#endif
-static int zxdh_en_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+static int zxdh_en_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
- void *rule_locs)
+ void *rule_locs)
#else
- u32 *rule_locs)
+ u32 *rule_locs)
#endif
{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- uint32_t hash_mode = 0;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("zxdh_en_get_rxnfc start\n");
- if (cmd->cmd == ETHTOOL_GRXRINGS)
- {
- cmd->data = en_dev->curr_queue_pairs;
- return 0;
- }
-
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_rx_flow_hash_get(&pf_info, &hash_mode);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_RX_FLOW_HASH_GET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- hash_mode = msg.reps.rx_flow_hash_set_msg.hash_mode;
- }
- if (ret != 0)
- {
- return ret;
- }
-
- LOG_DEBUG("hash_mode: %u\n", hash_mode);
- switch (hash_mode)
- {
- case ZXDH_NET_RX_FLOW_HASH_MV:
- {
- cmd->data = RXH_L2DA + RXH_VLAN;
- break;
- }
- case ZXDH_NET_RX_FLOW_HASH_SDT:
- {
- cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST;
- break;
- }
- case ZXDH_NET_RX_FLOW_HASH_SDFNT:
- {
- cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + RXH_L4_B_0_1 + RXH_L4_B_2_3;
- break;
- }
- default:
- {
- LOG_ERR("invalid hash_mode\n");
- return -1;
- }
- }
-
- return 0;
-}
-
-static int zxdh_en_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct zxdh_en_priv *en_priv = netdev_priv(netdev);
- struct zxdh_en_device *en_dev = &en_priv->edev;
- union zxdh_msg msg = {0};
- uint32_t hash_mode = 0;
- int32_t ret = 0;
- DPP_PF_INFO_T pf_info = {0};
-
- pf_info.slot = en_dev->slot_id;
- pf_info.vport = en_dev->vport;
-
- LOG_INFO("zxdh_en_set_rxnfc start\n");
- switch (cmd->data)
- {
- /* input parameter mv */
- case (RXH_L2DA + RXH_VLAN):
- {
- hash_mode = ZXDH_NET_RX_FLOW_HASH_MV;
- break;
- }
- /* input parameter sdt */
- case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST):
- {
- hash_mode = ZXDH_NET_RX_FLOW_HASH_SDT;
- break;
- }
- /* input parameter sdfnt */
- case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + RXH_L4_B_0_1 + RXH_L4_B_2_3):
- {
- hash_mode = ZXDH_NET_RX_FLOW_HASH_SDFNT;
- break;
- }
- default:
- {
- LOG_ERR("invalid para, support mv, sdt, sdfnt\n");
- return -EOPNOTSUPP;
- }
- }
-
- LOG_DEBUG("hash_mode: %u\n", hash_mode);
- if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF)
- {
- ret = dpp_rx_flow_hash_set(&pf_info, hash_mode);
- }
- else
- {
- msg.payload.hdr.op_code = ZXDH_RX_FLOW_HASH_SET;
- msg.payload.hdr.vport = en_dev->vport;
- msg.payload.hdr.pcie_id = en_dev->pcie_id;
- msg.payload.rx_flow_hash_set_msg.hash_mode = hash_mode;
- ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, &msg, &msg, true);
- }
-
- return ret;
-}
-
-static const struct ethtool_ops zxdh_en_ethtool_ops =
-{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ uint32_t hash_mode = 0;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ LOG_INFO("zxdh_en_get_rxnfc start\n");
+ if (cmd->cmd == ETHTOOL_GRXRINGS) {
+ cmd->data = en_dev->curr_queue_pairs;
+ return 0;
+ }
+
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_rx_flow_hash_get(&pf_info, &hash_mode);
+ } else {
+ msg.hdr.op_code = ZXDH_RX_FLOW_HASH_GET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &reps, true);
+ hash_mode = reps.rx_flow_hash_set_msg.hash_mode;
+ }
+ if (ret != 0) {
+ return ret;
+ }
+
+ LOG_DEBUG("hash_mode: %u\n", hash_mode);
+ switch (hash_mode) {
+ case ZXDH_NET_RX_FLOW_HASH_MV: {
+ cmd->data = RXH_L2DA + RXH_VLAN;
+ break;
+ }
+ case ZXDH_NET_RX_FLOW_HASH_SDT: {
+ cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST;
+ break;
+ }
+ case ZXDH_NET_RX_FLOW_HASH_SDFNT: {
+ cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + RXH_L4_B_0_1 +
+ RXH_L4_B_2_3;
+ break;
+ }
+ default: {
+ LOG_ERR("invalid hash_mode\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int zxdh_en_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct zxdh_en_priv *en_priv = netdev_priv(netdev);
+ struct zxdh_en_device *en_dev = &en_priv->edev;
+ zxdh_msg_info msg = { 0 };
+ zxdh_reps_info reps = { 0 };
+ uint32_t hash_mode = 0;
+ int32_t ret = 0;
+ DPP_PF_INFO_T pf_info = { 0 };
+
+ pf_info.slot = en_dev->slot_id;
+ pf_info.vport = en_dev->vport;
+
+ LOG_INFO("zxdh_en_set_rxnfc start\n");
+ switch (cmd->data) {
+ /* input parameter mv */
+ case (RXH_L2DA + RXH_VLAN): {
+ hash_mode = ZXDH_NET_RX_FLOW_HASH_MV;
+ break;
+ }
+ /* input parameter sdt */
+ case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST): {
+ hash_mode = ZXDH_NET_RX_FLOW_HASH_SDT;
+ break;
+ }
+ /* input parameter sdfnt */
+ case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + RXH_L4_B_0_1 +
+ RXH_L4_B_2_3): {
+ hash_mode = ZXDH_NET_RX_FLOW_HASH_SDFNT;
+ break;
+ }
+ default: {
+ LOG_ERR("invalid para, support mv, sdt, sdfnt\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ LOG_DEBUG("hash_mode: %u\n", hash_mode);
+ if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) {
+ ret = dpp_rx_flow_hash_set(&pf_info, hash_mode);
+ } else {
+ msg.hdr.op_code = ZXDH_RX_FLOW_HASH_SET;
+ msg.hdr.vport = en_dev->vport;
+ msg.hdr.pcie_id = en_dev->pcie_id;
+ msg.rx_flow_hash_set_msg.hash_mode = hash_mode;
+ ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+ &msg, &reps, true);
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops zxdh_en_ethtool_ops = {
#ifdef HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, //ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params =
+ ETHTOOL_COALESCE_MAX_FRAMES, // ETHTOOL_COALESCE_USECS,
#endif
- .get_drvinfo = zxdh_en_get_drvinfo,
- .get_link_ksettings = zxdh_en_get_link_ksettings,
- .set_link_ksettings = zxdh_en_set_link_ksettings,
- .get_regs_len = zxdh_en_get_regs_len,
- .get_regs = zxdh_en_get_regs,
- .get_wol = zxdh_en_get_wol,
- .set_wol = zxdh_en_set_wol,
- .get_msglevel = zxdh_en_get_msglevel,
- .set_msglevel = zxdh_en_set_msglevel,
- .nway_reset = zxdh_en_nway_reset,
- .get_link = zxdh_en_get_link,
- .get_eeprom_len = zxdh_en_get_eeprom_len,
- .get_eeprom = zxdh_en_get_eeprom,
- .set_eeprom = zxdh_en_set_eeprom,
- .get_ringparam = zxdh_en_get_ringparam,
- .set_ringparam = zxdh_en_set_ringparam,
- .get_pauseparam = zxdh_en_get_pauseparam,
- .set_pauseparam = zxdh_en_set_pauseparam,
- .get_fecparam = zxdh_en_get_fecparam,
- .set_fecparam = zxdh_en_set_fecparam,
- .get_module_info = zxdh_en_get_module_info,
- .get_module_eeprom = zxdh_en_get_module_eeprom,
+ .get_drvinfo = zxdh_en_get_drvinfo,
+ .get_link_ksettings = zxdh_en_get_link_ksettings,
+ .set_link_ksettings = zxdh_en_set_link_ksettings,
+ .get_regs_len = zxdh_en_get_regs_len,
+ .get_regs = zxdh_en_get_regs,
+ .get_wol = zxdh_en_get_wol,
+ .set_wol = zxdh_en_set_wol,
+ .get_msglevel = zxdh_en_get_msglevel,
+ .set_msglevel = zxdh_en_set_msglevel,
+ .nway_reset = zxdh_en_nway_reset,
+ .get_link = zxdh_en_get_link,
+ .get_eeprom_len = zxdh_en_get_eeprom_len,
+ .get_eeprom = zxdh_en_get_eeprom,
+ .set_eeprom = zxdh_en_set_eeprom,
+ .get_ringparam = zxdh_en_get_ringparam,
+ .set_ringparam = zxdh_en_set_ringparam,
+ .get_pauseparam = zxdh_en_get_pauseparam,
+ .set_pauseparam = zxdh_en_set_pauseparam,
+ .get_fecparam = zxdh_en_get_fecparam,
+ .set_fecparam = zxdh_en_set_fecparam,
+ .get_module_info = zxdh_en_get_module_info,
+ .get_module_eeprom = zxdh_en_get_module_eeprom,
#ifdef HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE
- .get_module_eeprom_by_page = zxdh_en_get_module_eeprom_by_page,
+ .get_module_eeprom_by_page = zxdh_en_get_module_eeprom_by_page,
#endif
- .self_test = zxdh_en_diag_test,
- .get_strings = zxdh_en_get_strings,
- .get_priv_flags = zxdh_en_get_priv_flags,
- .set_priv_flags = zxdh_en_set_priv_flags,
+ .self_test = zxdh_en_diag_test,
+ .get_strings = zxdh_en_get_strings,
+ .get_priv_flags = zxdh_en_get_priv_flags,
+ .set_priv_flags = zxdh_en_set_priv_flags,
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
- .set_phys_id = zxdh_en_set_phys_id,
+ .set_phys_id = zxdh_en_set_phys_id,
#else
- .phys_id = zxdh_en_phys_id,
+ .phys_id = zxdh_en_phys_id,
#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
- .get_sset_count = zxdh_en_get_sset_count,
+ .get_sset_count = zxdh_en_get_sset_count,
#else
- .get_stats_count = zxdh_en_get_stats_count,
- .self_test_count = zxdh_en_diag_test_count,
+ .get_stats_count = zxdh_en_get_stats_count,
+ .self_test_count = zxdh_en_diag_test_count,
#endif
- .get_ethtool_stats = zxdh_en_get_ethtool_stats,
+ .get_ethtool_stats = zxdh_en_get_ethtool_stats,
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_perm_addr = ethtool_op_get_perm_addr,
#endif
- .get_coalesce = zxdh_en_get_coalesce,
- .set_coalesce = zxdh_en_set_coalesce,
+ .get_coalesce = zxdh_en_get_coalesce,
+ .set_coalesce = zxdh_en_set_coalesce,
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_GET_TS_INFO
- .get_ts_info = zxdh_en_get_ts_info,
+ .get_ts_info = zxdh_en_get_ts_info,
#endif /* HAVE_ETHTOOL_GET_TS_INFO */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef CONFIG_PM_RUNTIME
- .begin = zxdh_en_ethtool_begin,
- .complete = zxdh_en_ethtool_complete,
+ .begin = zxdh_en_ethtool_begin,
+ .complete = zxdh_en_ethtool_complete,
#endif /* CONFIG_PM_RUNTIME */
#ifndef HAVE_NDO_SET_FEATURES
- .get_rx_csum = zxdh_en_get_rx_csum,
- .set_rx_csum = zxdh_en_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = zxdh_en_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
+ .get_rx_csum = zxdh_en_get_rx_csum,
+ .set_rx_csum = zxdh_en_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = zxdh_en_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = zxdh_en_set_tso,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = zxdh_en_set_tso,
#endif
#ifdef ETHTOOL_GFLAGS
- .get_flags = ethtool_op_get_flags,
- .set_flags = zxdh_en_set_flags,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = zxdh_en_set_flags,
#endif /* ETHTOOL_GFLAGS */
#endif /* HAVE_NDO_SET_FEATURES */
#ifdef ETHTOOL_GADV_COAL
- .get_advcoal = zxdh_en_get_adv_coal,
- .set_advcoal = zxdh_en_set_dmac_coal,
+ .get_advcoal = zxdh_en_get_adv_coal,
+ .set_advcoal = zxdh_en_set_dmac_coal,
#endif /* ETHTOOL_GADV_COAL */
#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef ETHTOOL_GEEE
- .get_eee = zxdh_en_get_eee,
+ .get_eee = zxdh_en_get_eee,
#endif
#ifdef ETHTOOL_SEEE
- .set_eee = zxdh_en_set_eee,
+ .set_eee = zxdh_en_set_eee,
#endif
#ifdef ETHTOOL_GRXFHINDIR
#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
- .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size,
- .get_rxfh_key_size = zxdh_en_get_rxfh_key_size,
+ .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size,
+ .get_rxfh_key_size = zxdh_en_get_rxfh_key_size,
#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */
#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
- .get_rxfh = zxdh_en_get_rxfh,
+ .get_rxfh = zxdh_en_get_rxfh,
#else
- .get_rxfh_indir = zxdh_en_get_rxfh_indir,
+ .get_rxfh_indir = zxdh_en_get_rxfh_indir,
#endif /* HAVE_ETHTOOL_GSRSSH */
#endif /* ETHTOOL_GRXFHINDIR */
#ifdef ETHTOOL_SRXFHINDIR
#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
- .set_rxfh = zxdh_en_set_rxfh,
+ .set_rxfh = zxdh_en_set_rxfh,
#else
- .set_rxfh_indir = zxdh_en_set_rxfh_indir,
+ .set_rxfh_indir = zxdh_en_set_rxfh_indir,
#endif /* HAVE_ETHTOOL_GSRSSH */
#endif /* ETHTOOL_SRXFHINDIR */
#ifdef ETHTOOL_GCHANNELS
- .get_channels = zxdh_en_get_channels,
+ .get_channels = zxdh_en_get_channels,
#endif /* ETHTOOL_GCHANNELS */
#ifdef ETHTOOL_SCHANNELS
- .set_channels = zxdh_en_set_channels,
+ .set_channels = zxdh_en_set_channels,
#endif /* ETHTOOL_SCHANNELS */
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
#ifdef ETHTOOL_GRXFH
- .get_rxnfc = zxdh_en_get_rxnfc,
- .set_rxnfc = zxdh_en_set_rxnfc,
+ .get_rxnfc = zxdh_en_get_rxnfc,
+ .set_rxnfc = zxdh_en_set_rxnfc,
#endif
};
#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-static const struct ethtool_ops_ext zxdh_en_ethtool_ops_ext =
-{
- .size = sizeof(struct ethtool_ops_ext),
- .get_ts_info = zxdh_en_get_ts_info,
- .set_phys_id = zxdh_en_set_phys_id,
- .get_eee = zxdh_en_get_eee,
- .set_eee = zxdh_en_set_eee,
+static const struct ethtool_ops_ext zxdh_en_ethtool_ops_ext = {
+ .size = sizeof(struct ethtool_ops_ext),
+ .get_ts_info = zxdh_en_get_ts_info,
+ .set_phys_id = zxdh_en_set_phys_id,
+ .get_eee = zxdh_en_get_eee,
+ .set_eee = zxdh_en_set_eee,
#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
- .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size,
+ .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size,
#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */
- .get_rxfh_indir = zxdh_en_get_rxfh_indir,
- .set_rxfh_indir = zxdh_en_set_rxfh_indir,
- .get_channels = zxdh_en_get_channels,
- .set_channels = zxdh_en_set_channels,
+ .get_rxfh_indir = zxdh_en_get_rxfh_indir,
+ .set_rxfh_indir = zxdh_en_set_rxfh_indir,
+ .get_channels = zxdh_en_get_channels,
+ .set_channels = zxdh_en_set_channels,
};
void zxdh_en_set_ethtool_ops_ext(struct net_device *netdev)
{
- netdev->ethtool_ops = &zxdh_en_ethtool_ops;
- set_ethtool_ops_ext(netdev, &zxdh_en_ethtool_ops_ext);
+ netdev->ethtool_ops = &zxdh_en_ethtool_ops;
+ set_ethtool_ops_ext(netdev, &zxdh_en_ethtool_ops_ext);
}
#else
void zxdh_en_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &zxdh_en_ethtool_ops;
+ netdev->ethtool_ops = &zxdh_en_ethtool_ops;
}
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-
diff --git a/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h b/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h
index fb8fb3de7f2b804d33321814b0f089673ce24697..55964963b3b3e74eb4ba4208f0a24faffaa257c0 100644
--- a/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h
@@ -8,49 +8,45 @@ extern "C" {
#include
#include
-enum zxdh_priv_flag
-{
- ZXDH_PFLAG_ENABLE_LLDP,
- ZXDH_PFLAG_ENABLE_SSHD,
- ZXDH_PFLAG_IP=2,
- ZXDH_NUM_PFLAGS, /* Keep last */
+enum zxdh_priv_flag {
+ ZXDH_PFLAG_ENABLE_LLDP,
+ ZXDH_PFLAG_ENABLE_SSHD,
+ ZXDH_PFLAG_IP = 2,
+ ZXDH_NUM_PFLAGS, /* Keep last */
};
-#define ZXDH_SET_PFLAG(pflags, flag, enable) \
- do \
- { \
- if (enable) \
- { \
- pflags |= BIT(flag); \
- } \
- else \
- { \
- pflags &= ~(BIT(flag)); \
- } \
- } while (0)
-
-#define ZXDH_ADD_STRING(data, str) \
-do \
-{ \
- data += ETH_GSTRING_LEN; \
- snprintf(data, ETH_GSTRING_LEN, str); \
-} while (0)
-
-#define ZXDH_ADD_QUEUE_STRING(data, str, i) \
-do \
-{ \
- data += ETH_GSTRING_LEN; \
- snprintf(data, ETH_GSTRING_LEN, "queue[%u]_%s", i, str); \
-} while (0)
-
-#define ZXDH_NETDEV_STATS_NUM (sizeof(struct zxdh_en_netdev_stats) / sizeof(uint64_t))
-#define ZXDH_VPORT_STATS_NUM (sizeof(struct zxdh_en_vport_stats) / sizeof(uint64_t))
-#define ZXDH_MAC_STATS_NUM (sizeof(struct zxdh_en_phy_stats) / sizeof(uint64_t))
-#define ZXDH_QUEUE_STATS_NUM (sizeof(struct zxdh_en_queue_stats) / sizeof(uint64_t))
-
-#define ZXDH_NET_PF_STATS_NUM(en_dev) \
- (ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + ZXDH_VPORT_STATS_NUM + \
- en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM)
+#define ZXDH_SET_PFLAG(pflags, flag, enable) \
+ do { \
+ if (enable) { \
+ pflags |= BIT(flag); \
+ } else { \
+ pflags &= ~(BIT(flag)); \
+ } \
+ } while (0)
+
+#define ZXDH_ADD_STRING(data, str) \
+ do { \
+ data += ETH_GSTRING_LEN; \
+ snprintf(data, ETH_GSTRING_LEN, str); \
+ } while (0)
+
+#define ZXDH_ADD_QUEUE_STRING(data, str, i) \
+ do { \
+ data += ETH_GSTRING_LEN; \
+ snprintf(data, ETH_GSTRING_LEN, "queue[%u]_%s", i, str); \
+ } while (0)
+
+#define ZXDH_NETDEV_STATS_NUM \
+ (sizeof(struct zxdh_en_netdev_stats) / sizeof(uint64_t))
+#define ZXDH_VPORT_STATS_NUM \
+ (sizeof(struct zxdh_en_vport_stats) / sizeof(uint64_t))
+#define ZXDH_MAC_STATS_NUM (sizeof(struct zxdh_en_phy_stats) / sizeof(uint64_t))
+#define ZXDH_QUEUE_STATS_NUM \
+ (sizeof(struct zxdh_en_queue_stats) / sizeof(uint64_t))
+
+#define ZXDH_NET_PF_STATS_NUM(en_dev) \
+ (ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + ZXDH_VPORT_STATS_NUM + \
+ en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM)
#define ZXDH_GET_PFLAG(pflags, flag) (!!(pflags & (BIT(flag))))
@@ -60,7 +56,6 @@ void zxdh_en_set_ethtool_ops_ext(struct net_device *netdev);
void zxdh_en_set_ethtool_ops(struct net_device *netdev);
#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-#define ZIOS_TYPE 0XAA
#ifdef __cplusplus
}
#endif
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf.c b/src/net/drivers/net/ethernet/dinghai/en_mpf.c
old mode 100755
new mode 100644
index 1b65f40c549a9b4370c0de95a698cc3f44924369..1ca4722119e72131795e34b6da1aabb41f966b7d
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf.c
@@ -1,301 +1,296 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "./en_mpf/events.h"
-#include "./en_mpf/eq.h"
-#include "./en_mpf/irq.h"
-#include "en_mpf.h"
-#include "en_mpf/cfg_sf.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-
-uint32_t dh_debug_mask;
-module_param_named(debug_mask, dh_debug_mask, uint, 0644);
-MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
-
-extern struct devlink_ops dh_mpf_devlink_ops;
-extern struct dh_core_devlink_ops dh_mpf_core_devlink_ops;
-
-int32_t dh_mpf_pci_init(struct dh_core_dev *dev)
-{
- int32_t ret = 0;
- struct dh_en_mpf_dev *mpf_dev = NULL;
-
- pci_set_drvdata(dev->pdev, dev);
-
- ret = pci_enable_device(dev->pdev);
- if (ret != 0)
- {
- dev_err(dev->device, "pci_enable_device failed: %d\n", ret);
- return -ENOMEM;
- }
-
- ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(64));
- if (ret != 0)
- {
- ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(32));
- if (ret != 0)
- {
- dev_err(dev->device, "dma_set_mask_and_coherent failed: %d\n", ret);
- goto err_pci;
- }
- }
-
- ret = pci_request_selected_regions(dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM), "dh-mpf");
- if (ret != 0)
- {
- dev_err(dev->device, "pci_request_selected_regions failed: %d\n", ret);
- goto err_pci;
- }
-
- pci_enable_pcie_error_reporting(dev->pdev);
- pci_set_master(dev->pdev);
- ret = pci_save_state(dev->pdev);
- if (ret != 0)
- {
- dev_err(dev->device, "pci_save_state failed: %d\n", ret);
- goto err_pci_save_state;
- }
-
- mpf_dev = dh_core_priv(dev);
- mpf_dev->pci_ioremap_addr = (uint64_t)ioremap(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0));
- LOG_INFO("pci_ioremap_addr=0x%llx, ioremap(0x%llx, 0x%llx)\n", mpf_dev->pci_ioremap_addr, pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0));
- if (mpf_dev->pci_ioremap_addr == 0)
- {
- ret = -1;
- LOG_ERR("ioremap(0x%llx, 0x%llx) failed\n", pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0));
- goto err_pci_save_state;
- }
-
- return 0;
-
-err_pci_save_state:
- pci_disable_pcie_error_reporting(dev->pdev);
- pci_release_selected_regions(dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM));
-err_pci:
- pci_disable_device(dev->pdev);
- return ret;
-}
-
-static const struct pci_device_id dh_mpf_pci_table[] = {
- { PCI_DEVICE(ZXDH_MPF_VENDOR_ID, ZXDH_MPF_DEVICE_ID), 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, dh_mpf_pci_table);
-
-void dh_mpf_pci_close(struct dh_core_dev *dev)
-{
- struct dh_en_mpf_dev *mpf_dev = NULL;
-
- mpf_dev = dh_core_priv(dev);
- iounmap((void *)mpf_dev->pci_ioremap_addr);
- pci_disable_pcie_error_reporting(dev->pdev);
- pci_release_selected_regions(dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM));
- pci_disable_device(dev->pdev);
-
- return;
-}
-
-static int32_t dh_mpf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct dh_core_dev *dh_dev = NULL;
- struct devlink *devlink = NULL;
- int32_t err = 0;
-
- LOG_INFO("mpf driver start to probe\n");
-
- devlink = zxdh_devlink_alloc(&pdev->dev, &dh_mpf_devlink_ops, sizeof(struct dh_en_mpf_dev));
- if (devlink == NULL)
- {
- dev_err(&pdev->dev, "devlink alloc failed\n");
- return -ENOMEM;
- }
-
- dh_dev = devlink_priv(devlink);
- dh_dev->device = &pdev->dev;
- dh_dev->pdev = pdev;
- dh_dev->devlink_ops = &dh_mpf_core_devlink_ops;
-
- err = dh_mpf_pci_init(dh_dev);
- if (err != 0)
- {
- dev_err(&pdev->dev, "dh_mpf_pci_init failed: %d\n", err);
- goto err_devlink_cleanup;
- }
-
- err = dh_mpf_irq_table_init(dh_dev);
- if (err != 0)
- {
- dh_err(dh_dev, "Failed to alloc IRQs\n");
- goto err_pci;
- }
-
- err = dh_mpf_eq_table_init(dh_dev);
- if (err != 0)
- {
- dh_err(dh_dev, "Failed to alloc IRQs\n");
- goto err_eq_table_init;
- }
-
- err = dh_mpf_irq_table_create(dh_dev);
- if (err != 0)
- {
- dh_err(dh_dev, "Failed to alloc IRQs\n");
- goto err_irq_table_create;
- }
-
- err = dh_mpf_eq_table_create(dh_dev);
- if (err != 0)
- {
- dh_err(dh_dev, "Failed to alloc EQs\n");
- goto err_eq_table_create;
- }
-
- err = dh_mpf_events_init(dh_dev);
- if (err != 0)
- {
- dh_err(dh_dev, "failed to initialize events\n");
- goto err_events_init_cleanup;
- }
-
-#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
- zxdh_devlink_register(devlink);
-#else
- zxdh_devlink_register(devlink, &pdev->dev);
-#endif
-
- LOG_INFO("mpf driver probe completed\n");
- return 0;
-
-err_events_init_cleanup:
- dh_mpf_eq_table_destroy(dh_dev);
-err_eq_table_create:
- dh_mpf_irq_table_destroy(dh_dev);
-err_irq_table_create:
- dh_eq_table_cleanup(dh_dev);
-err_eq_table_init:
- dh_irq_table_cleanup(dh_dev);
-err_pci:
- dh_mpf_pci_close(dh_dev);
-err_devlink_cleanup:
- zxdh_devlink_free(devlink);
- return err;
-}
-
-static void dh_mpf_remove(struct pci_dev *pdev)
-{
- struct dh_core_dev *dh_dev = pci_get_drvdata(pdev);
- struct devlink *devlink = priv_to_devlink(dh_dev);
- LOG_INFO("mpf driver start to remove");
-
- zxdh_devlink_unregister(devlink);
- dh_mpf_events_uninit(dh_dev);
- dh_mpf_eq_table_destroy(dh_dev);
- dh_mpf_irq_table_destroy(dh_dev);
- dh_eq_table_cleanup(dh_dev);
- dh_irq_table_cleanup(dh_dev);
- dh_mpf_pci_close(dh_dev);
- zxdh_devlink_free(devlink);
-
- pci_set_drvdata(pdev, NULL);
- LOG_INFO("mpf driver remove completed\n");
-}
-
-static int32_t dh_mpf_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-
- return 0;
-}
-
-static int32_t dh_mpf_resume(struct pci_dev *pdev)
-{
-
- return 0;
-}
-
-static void dh_mpf_shutdown(struct pci_dev *pdev)
-{
- dh_mpf_remove(pdev);
-}
-
-static pci_ers_result_t dh_pci_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- return PCI_ERS_RESULT_NONE;
-}
-
-static pci_ers_result_t dh_mpf_pci_slot_reset(struct pci_dev *pdev)
-{
- return PCI_ERS_RESULT_NONE;
-}
-
-static void dh_mpf_pci_resume(struct pci_dev *pdev)
-{
-
-}
-
-static const struct pci_error_handlers dh_mpf_err_handler = {
- .error_detected = dh_pci_err_detected,
- .slot_reset = dh_mpf_pci_slot_reset,
- .resume = dh_mpf_pci_resume
-};
-
-static struct pci_driver dh_mpf_driver = {
- .name = KBUILD_MODNAME,
- .id_table = dh_mpf_pci_table,
- .probe = dh_mpf_probe,
- .remove = dh_mpf_remove,
- .suspend = dh_mpf_suspend,
- .resume = dh_mpf_resume,
- .shutdown = dh_mpf_shutdown,
- .err_handler = &dh_mpf_err_handler,
-};
-
-static int32_t __init init(void)
-{
- int32_t err = 0;
-
- err = pci_register_driver(&dh_mpf_driver);
- if (err != 0)
- {
- LOG_ERR("pci_register_driver failed: %d\n", err);
- return err;
- }
-
-#ifdef CONFIG_ZXDH_SF
- err = zxdh_mpf_sf_driver_register();
- if (err != 0)
- {
- LOG_ERR("zxdh_en_sf_driver_register failed: %d\n", err);
- goto err_sf;
- }
-#endif
-
- LOG_INFO("zxdh_mpf driver init success\n");
-
- return 0;
-
-err_sf:
- pci_unregister_driver(&dh_mpf_driver);
- return err;
-}
-
-static void __exit cleanup(void)
-{
-#ifdef CONFIG_ZXDH_SF
- zxdh_mpf_sf_driver_uregister();
-#endif
- pci_unregister_driver(&dh_mpf_driver);
-
- LOG_INFO("zxdh_mpf driver remove success\n");
-}
-
-module_init(init);
-module_exit(cleanup);
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "./en_mpf/events.h"
+#include "./en_mpf/eq.h"
+#include "./en_mpf/irq.h"
+#include "en_mpf.h"
+#include "en_mpf/cfg_sf.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+uint32_t dh_debug_mask;
+module_param_named(debug_mask, dh_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec "
+ "time, 3 = both. Default=0");
+
+extern struct devlink_ops dh_mpf_devlink_ops;
+extern struct dh_core_devlink_ops dh_mpf_core_devlink_ops;
+
+int32_t dh_mpf_pci_init(struct dh_core_dev *dev)
+{
+ int32_t ret = 0;
+ struct dh_en_mpf_dev *mpf_dev = NULL;
+
+ pci_set_drvdata(dev->pdev, dev);
+
+ ret = pci_enable_device(dev->pdev);
+ if (ret != 0) {
+ dev_err(dev->device, "pci_enable_device failed: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(64));
+ if (ret != 0) {
+ ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(32));
+ if (ret != 0) {
+ dev_err(dev->device, "dma_set_mask_and_coherent failed: %d\n", ret);
+ goto err_pci;
+ }
+ }
+
+ ret = pci_request_selected_regions(
+ dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM), "dh-mpf");
+ if (ret != 0) {
+ dev_err(dev->device, "pci_request_selected_regions failed: %d\n", ret);
+ goto err_pci;
+ }
+
+ pci_enable_pcie_error_reporting(dev->pdev);
+ pci_set_master(dev->pdev);
+ ret = pci_save_state(dev->pdev);
+ if (ret != 0) {
+ dev_err(dev->device, "pci_save_state failed: %d\n", ret);
+ goto err_pci_save_state;
+ }
+
+ mpf_dev = dh_core_priv(dev);
+ mpf_dev->pci_ioremap_addr = (uint64_t)ioremap(
+ pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0));
+ LOG_INFO("pci_ioremap_addr=0x%llx, ioremap(0x%llx, 0x%llx)\n",
+ mpf_dev->pci_ioremap_addr, pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+ if (mpf_dev->pci_ioremap_addr == 0) {
+ ret = -1;
+ LOG_ERR("ioremap(0x%llx, 0x%llx) failed\n",
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+ goto err_pci_save_state;
+ }
+
+ return 0;
+
+err_pci_save_state:
+ pci_disable_pcie_error_reporting(dev->pdev);
+ pci_release_selected_regions(dev->pdev,
+ pci_select_bars(dev->pdev, IORESOURCE_MEM));
+err_pci:
+ pci_disable_device(dev->pdev);
+ return ret;
+}
+
+static const struct pci_device_id dh_mpf_pci_table[] = {
+ { PCI_DEVICE(ZXDH_MPF_VENDOR_ID, ZXDH_MPF_DEVICE_ID), 0 },
+ {
+ 0,
+ }
+};
+
+MODULE_DEVICE_TABLE(pci, dh_mpf_pci_table);
+
+void dh_mpf_pci_close(struct dh_core_dev *dev)
+{
+ struct dh_en_mpf_dev *mpf_dev = NULL;
+
+ mpf_dev = dh_core_priv(dev);
+ iounmap((void *)mpf_dev->pci_ioremap_addr);
+ pci_disable_pcie_error_reporting(dev->pdev);
+ pci_release_selected_regions(dev->pdev,
+ pci_select_bars(dev->pdev, IORESOURCE_MEM));
+ pci_disable_device(dev->pdev);
+
+ return;
+}
+
+static int32_t dh_mpf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct dh_core_dev *dh_dev = NULL;
+ struct devlink *devlink = NULL;
+ int32_t err = 0;
+
+ LOG_INFO("mpf driver start to probe\n");
+
+ devlink = zxdh_devlink_alloc(&pdev->dev, &dh_mpf_devlink_ops,
+ sizeof(struct dh_en_mpf_dev));
+ if (devlink == NULL) {
+ dev_err(&pdev->dev, "devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ dh_dev = devlink_priv(devlink);
+ dh_dev->device = &pdev->dev;
+ dh_dev->pdev = pdev;
+ dh_dev->devlink_ops = &dh_mpf_core_devlink_ops;
+
+ err = dh_mpf_pci_init(dh_dev);
+ if (err != 0) {
+ dev_err(&pdev->dev, "dh_mpf_pci_init failed: %d\n", err);
+ goto err_devlink_cleanup;
+ }
+
+ err = dh_mpf_irq_table_init(dh_dev);
+ if (err != 0) {
+ dh_err(dh_dev, "Failed to alloc IRQs\n");
+ goto err_pci;
+ }
+
+ err = dh_mpf_eq_table_init(dh_dev);
+ if (err != 0) {
+ dh_err(dh_dev, "Failed to alloc IRQs\n");
+ goto err_eq_table_init;
+ }
+
+ err = dh_mpf_irq_table_create(dh_dev);
+ if (err != 0) {
+ dh_err(dh_dev, "Failed to alloc IRQs\n");
+ goto err_irq_table_create;
+ }
+
+ err = dh_mpf_eq_table_create(dh_dev);
+ if (err != 0) {
+ dh_err(dh_dev, "Failed to alloc EQs\n");
+ goto err_eq_table_create;
+ }
+
+ err = dh_mpf_events_init(dh_dev);
+ if (err != 0) {
+ dh_err(dh_dev, "failed to initialize events\n");
+ goto err_events_init_cleanup;
+ }
+
+#ifdef HAVE_DEVLINK_REGISTER_GET_1_PARAMS
+ zxdh_devlink_register(devlink);
+#else
+ zxdh_devlink_register(devlink, &pdev->dev);
+#endif
+
+ LOG_INFO("mpf driver probe completed\n");
+ return 0;
+
+err_events_init_cleanup:
+ dh_mpf_eq_table_destroy(dh_dev);
+err_eq_table_create:
+ dh_mpf_irq_table_destroy(dh_dev);
+err_irq_table_create:
+ dh_eq_table_cleanup(dh_dev);
+err_eq_table_init:
+ dh_irq_table_cleanup(dh_dev);
+err_pci:
+ dh_mpf_pci_close(dh_dev);
+err_devlink_cleanup:
+ zxdh_devlink_free(devlink);
+ return err;
+}
+
+static void dh_mpf_remove(struct pci_dev *pdev)
+{
+ struct dh_core_dev *dh_dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dh_dev);
+ LOG_INFO("mpf driver start to remove");
+
+ zxdh_devlink_unregister(devlink);
+ dh_mpf_events_uninit(dh_dev);
+ dh_mpf_eq_table_destroy(dh_dev);
+ dh_mpf_irq_table_destroy(dh_dev);
+ dh_eq_table_cleanup(dh_dev);
+ dh_irq_table_cleanup(dh_dev);
+ dh_mpf_pci_close(dh_dev);
+ zxdh_devlink_free(devlink);
+
+ pci_set_drvdata(pdev, NULL);
+ LOG_INFO("mpf driver remove completed\n");
+}
+
+static int32_t dh_mpf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int32_t dh_mpf_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static void dh_mpf_shutdown(struct pci_dev *pdev)
+{
+ dh_mpf_remove(pdev);
+}
+
+static pci_ers_result_t dh_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ return PCI_ERS_RESULT_NONE;
+}
+
+static pci_ers_result_t dh_mpf_pci_slot_reset(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void dh_mpf_pci_resume(struct pci_dev *pdev)
+{
+}
+
+static const struct pci_error_handlers dh_mpf_err_handler = {
+ .error_detected = dh_pci_err_detected,
+ .slot_reset = dh_mpf_pci_slot_reset,
+ .resume = dh_mpf_pci_resume
+};
+
+static struct pci_driver dh_mpf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = dh_mpf_pci_table,
+ .probe = dh_mpf_probe,
+ .remove = dh_mpf_remove,
+ .suspend = dh_mpf_suspend,
+ .resume = dh_mpf_resume,
+ .shutdown = dh_mpf_shutdown,
+ .err_handler = &dh_mpf_err_handler,
+};
+
+static int32_t __init init(void)
+{
+ int32_t err = 0;
+
+ err = pci_register_driver(&dh_mpf_driver);
+ if (err != 0) {
+ LOG_ERR("pci_register_driver failed: %d\n", err);
+ return err;
+ }
+
+#ifdef CONFIG_ZXDH_SF
+ err = zxdh_mpf_sf_driver_register();
+ if (err != 0) {
+ LOG_ERR("zxdh_en_sf_driver_register failed: %d\n", err);
+ goto err_sf;
+ }
+#endif
+
+ LOG_INFO("zxdh_mpf driver init success\n");
+
+ return 0;
+
+err_sf:
+ pci_unregister_driver(&dh_mpf_driver);
+ return err;
+}
+
+static void __exit cleanup(void)
+{
+#ifdef CONFIG_ZXDH_SF
+ zxdh_mpf_sf_driver_uregister();
+#endif
+ pci_unregister_driver(&dh_mpf_driver);
+
+ LOG_INFO("zxdh_mpf driver remove success\n");
+}
+
+module_init(init);
+module_exit(cleanup);
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf.h b/src/net/drivers/net/ethernet/dinghai/en_mpf.h
old mode 100755
new mode 100644
index cd1ee3c5d84d46908b795da7d1af8935094909eb..54e571128271d171df8be4deedad37f2be02b350
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf.h
@@ -1,32 +1,32 @@
-#ifndef __ZXDH_EN_MPF_H__
-#define __ZXDH_EN_MPF_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-
-#define ZXDH_MPF_VENDOR_ID 0x1111
-#define ZXDH_MPF_DEVICE_ID 0x1041
-
-#define ZXDH_BAR1_CHAN_OFFSET 0x2000//0x7801000
-#define ZXDH_BAR2_CHAN_OFFSET 0x3000//0x7802000
-
-struct dh_en_mpf_dev {
- uint16_t ep_bdf;
- uint16_t pcie_id;
- uint16_t vport;
-
- uint64_t pci_ioremap_addr;
-
- struct work_struct dh_np_sdk_from_risc;
- struct work_struct dh_np_sdk_from_pf;
-};
-
-#ifdef __cplusplus
-}
-#endif
-
+#ifndef __ZXDH_EN_MPF_H__
+#define __ZXDH_EN_MPF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+
+#define ZXDH_MPF_VENDOR_ID 0x1111
+#define ZXDH_MPF_DEVICE_ID 0x1041
+
+#define ZXDH_BAR1_CHAN_OFFSET 0x2000 // 0x7801000
+#define ZXDH_BAR2_CHAN_OFFSET 0x3000 // 0x7802000
+
+struct dh_en_mpf_dev {
+ uint16_t ep_bdf;
+ uint16_t pcie_id;
+ uint16_t vport;
+
+ uint64_t pci_ioremap_addr;
+
+ struct work_struct dh_np_sdk_from_risc;
+ struct work_struct dh_np_sdk_from_pf;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* __ZXDH_EN_MPF_H__ */
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.c b/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.c
old mode 100755
new mode 100644
index 1ddde92cff9a89f235e46bb414d04130f98168e3..14901598da1d76235c01c8786fdea49cb1000502
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.c
@@ -1,54 +1,64 @@
-#include
-#include
-#include
-
-#include "cfg_sf.h"
-
-static int32_t zxdh_cfg_resume(struct zxdh_auxiliary_device *adev)
-{
- return 0;
-}
-
-static int32_t zxdh_cfg_suspend(struct zxdh_auxiliary_device *adev, pm_message_t state)
-{
- return 0;
-}
-
-static int32_t zxdh_cfg_probe(struct zxdh_auxiliary_device *adev,
- const struct zxdh_auxiliary_device_id *id)
-{
- struct cfg_sf_dev * __attribute__((unused)) cfg_sf_dev = container_of(adev, struct cfg_sf_dev, adev);
-
- return 0;
-}
-
-static int32_t zxdh_cfg_remove(struct zxdh_auxiliary_device *adev)
-{
- return 0;
-}
-
-static const struct zxdh_auxiliary_device_id zxdh_cfg_id_table[] = {
- { .name = ZXDH_EN_SF_NAME ".mpf_cfg", },
- {},
-};
-
-//MODULE_DEVICE_TABLE(auxiliary_zxdh_id_table, zxdh_cfg_id_table);
-
-static struct zxdh_auxiliary_driver zxdh_cfg_driver = {
- .name = "mpf_cfg",
- .probe = zxdh_cfg_probe,
- .remove = zxdh_cfg_remove,
- .suspend = zxdh_cfg_suspend,
- .resume = zxdh_cfg_resume,
- .id_table = zxdh_cfg_id_table,
-};
-
-int32_t zxdh_mpf_sf_driver_register(void)
-{
- return zxdh_auxiliary_driver_register(&zxdh_cfg_driver);;
-}
-
-void zxdh_mpf_sf_driver_uregister(void)
-{
- zxdh_auxiliary_driver_unregister(&zxdh_cfg_driver);;
-}
+#include
+#ifdef AUX_BUS_NO_SUPPORT
+#include
+#else
+#include
+#endif
+#include
+
+#include "cfg_sf.h"
+
+static int32_t zxdh_cfg_resume(struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static int32_t zxdh_cfg_suspend(struct auxiliary_device *adev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int32_t zxdh_cfg_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct cfg_sf_dev *__attribute__((unused))
+ cfg_sf_dev = container_of(adev, struct cfg_sf_dev, adev);
+
+ return 0;
+}
+
+static int32_t zxdh_cfg_remove(struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static const struct auxiliary_device_id zxdh_cfg_id_table[] = {
+ {
+ .name = ZXDH_EN_SF_NAME ".mpf_cfg",
+ },
+ {},
+};
+
+// MODULE_DEVICE_TABLE(auxiliary_zxdh_id_table, zxdh_cfg_id_table);
+
+static struct auxiliary_driver zxdh_cfg_driver = {
+ .name = "mpf_cfg",
+ .probe = zxdh_cfg_probe,
+ .remove = zxdh_cfg_remove,
+ .suspend = zxdh_cfg_suspend,
+ .resume = zxdh_cfg_resume,
+ .id_table = zxdh_cfg_id_table,
+};
+
+int32_t zxdh_mpf_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&zxdh_cfg_driver);
+ ;
+}
+
+void zxdh_mpf_sf_driver_uregister(void)
+{
+ auxiliary_driver_unregister(&zxdh_cfg_driver);
+ ;
+}
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.h b/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.h
old mode 100755
new mode 100644
index ec0fdd081cedd95474b33d6fe96020044977270a..2d25fe42e856669e62f2e1a0ed5f0aad6618834b
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/cfg_sf.h
@@ -1,30 +1,31 @@
-#ifndef __ZXDH_MPF_CFG_SF_H__
-#define __ZXDH_MPF_CFG_SF_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-#include
-
-struct cfg_sf_ops {
-
-};
-
-struct cfg_sf_dev {
- struct zxdh_auxiliary_device adev;
- struct dh_core_dev *dh_dev;
- struct cfg_sf_ops *ops;
-};
-
-int32_t zxdh_mpf_sf_driver_register(void);
-void zxdh_mpf_sf_driver_uregister(void);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-
+#ifndef __ZXDH_MPF_CFG_SF_H__
+#define __ZXDH_MPF_CFG_SF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef AUX_BUS_NO_SUPPORT
+#include
+#else
+#include
+#endif
+#include
+
+struct cfg_sf_ops {
+};
+
+struct cfg_sf_dev {
+ struct auxiliary_device adev;
+ struct dh_core_dev *dh_dev;
+ struct cfg_sf_ops *ops;
+};
+
+int32_t zxdh_mpf_sf_driver_register(void);
+void zxdh_mpf_sf_driver_uregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.c b/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.c
old mode 100755
new mode 100644
index e89a752f121155b5a7f48b8bb2474eb209a61bca..9fdaebdbd4d2c0b11d8eb4f46f1bcb9deb5f81aa
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.c
@@ -1,130 +1,124 @@
-#include
-#include
-#include "devlink.h"
-
-struct devlink_ops dh_mpf_devlink_ops = {
-
-};
-
-enum {
- DH_MPF_PARAMS_MAX,
-};
-
-static int32_t __attribute__((unused)) sample_check(struct dh_core_dev *dev)
-{
- return 1;
-}
-
-enum dh_mpf_devlink_param_id {
- DH_MPF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- DH_MPF_DEVLINK_PARAM_ID_SAMPLE,
-};
-
-
-static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct dh_core_dev * __attribute__((unused)) dev = devlink_priv(devlink);
-
- return 0;
-}
-
-static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct dh_core_dev * __attribute__((unused)) dev = devlink_priv(devlink);
-
- return 0;
-}
-
-#ifdef HAVE_DEVLINK_PARAM_REGISTER
-static const struct devlink_params {
- const char *name;
- int32_t (*check)(struct dh_core_dev *dev);
- struct devlink_param param;
-} devlink_params[] = {
- [DH_MPF_PARAMS_MAX] = { .name = "sample",
- .check = &sample_check,
- .param = DEVLINK_PARAM_DRIVER(DH_MPF_DEVLINK_PARAM_ID_SAMPLE,
- "sample", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),dh_devlink_sample_get,
- dh_devlink_sample_set,
- NULL),
- }
-};
-
-static int32_t params_register(struct devlink *devlink)
-{
- int32_t i = 0;
- int32_t err = 0;
- struct dh_core_dev *dh_dev = devlink_priv(devlink);
-
- for (i = 0; i < ARRAY_SIZE(devlink_params); i++)
- {
- if(devlink_params[i].check(dh_dev))
- {
- err = devlink_param_register(devlink, &devlink_params[i].param);
- if (err)
- {
- goto rollback;
- }
- }
- }
-
- return 0;
-
-rollback:
- if (i == 0)
- {
- return err;
- }
-
- for (; i > 0; i--)
- {
- devlink_param_unregister(devlink, &devlink_params[i].param);
- }
-
- return err;
-}
-
-static int32_t params_unregister(struct devlink *devlink)
-{
- int32_t i = 0;
-
- for (i = 0; i < ARRAY_SIZE(devlink_params); i++)
- {
- devlink_param_unregister(devlink, &devlink_params[i].param);
- }
-
- return 0;
-}
-#else
-static struct devlink_param devlink_params [] = {
- [DH_MPF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER(DH_MPF_DEVLINK_PARAM_ID_SAMPLE,
- "sample", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),dh_devlink_sample_get,
- dh_devlink_sample_set,
- NULL),
-};
-
-static int32_t params_register(struct devlink *devlink)
-{
- struct dh_core_dev * __attribute__((unused)) dh_dev = devlink_priv(devlink);
- int32_t err = 0;
-
- err = devlink_params_register(devlink, devlink_params, ARRAY_SIZE(devlink_params));
-
- return err;
-}
-static int32_t params_unregister(struct devlink *devlink)
-{
- devlink_params_unregister(devlink, devlink_params, ARRAY_SIZE(devlink_params));
-
- return 0;
-}
-#endif
-
-struct dh_core_devlink_ops dh_mpf_core_devlink_ops = {
- .params_register = params_register,
- .params_unregister = params_unregister
-};
+#include
+#include
+#include "devlink.h"
+
+struct devlink_ops dh_mpf_devlink_ops = {
+
+};
+
+enum {
+ DH_MPF_PARAMS_MAX,
+};
+
+static int32_t __attribute__((unused)) sample_check(struct dh_core_dev *dev)
+{
+ return 1;
+}
+
+enum dh_mpf_devlink_param_id {
+ DH_MPF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ DH_MPF_DEVLINK_PARAM_ID_SAMPLE,
+};
+
+static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink);
+
+ return 0;
+}
+
+static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink);
+
+ return 0;
+}
+
+#ifdef HAVE_DEVLINK_PARAM_REGISTER
+static const struct devlink_params {
+ const char *name;
+ int32_t (*check)(struct dh_core_dev *dev);
+ struct devlink_param param;
+} devlink_params[] = { [DH_MPF_PARAMS_MAX] = {
+ .name = "sample",
+ .check = &sample_check,
+ .param = DEVLINK_PARAM_DRIVER(
+ DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ dh_devlink_sample_get,
+ dh_devlink_sample_set, NULL),
+ } };
+
+static int32_t params_register(struct devlink *devlink)
+{
+ int32_t i = 0;
+ int32_t err = 0;
+ struct dh_core_dev *dh_dev = devlink_priv(devlink);
+
+ for (i = 0; i < ARRAY_SIZE(devlink_params); i++) {
+ if (devlink_params[i].check(dh_dev)) {
+ err = devlink_param_register(devlink, &devlink_params[i].param);
+ if (err) {
+ goto rollback;
+ }
+ }
+ }
+
+ return 0;
+
+rollback:
+ if (i == 0) {
+ return err;
+ }
+
+ for (; i > 0; i--) {
+ devlink_param_unregister(devlink, &devlink_params[i].param);
+ }
+
+ return err;
+}
+
+static int32_t params_unregister(struct devlink *devlink)
+{
+ int32_t i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_params); i++) {
+ devlink_param_unregister(devlink, &devlink_params[i].param);
+ }
+
+ return 0;
+}
+#else
+static struct devlink_param devlink_params[] = {
+ [DH_MPF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER(
+ DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME), dh_devlink_sample_get,
+ dh_devlink_sample_set, NULL),
+};
+
+static int32_t params_register(struct devlink *devlink)
+{
+ struct dh_core_dev *__attribute__((unused)) dh_dev = devlink_priv(devlink);
+ int32_t err = 0;
+
+ err = devlink_params_register(devlink, devlink_params,
+ ARRAY_SIZE(devlink_params));
+
+ return err;
+}
+static int32_t params_unregister(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, devlink_params,
+ ARRAY_SIZE(devlink_params));
+
+ return 0;
+}
+#endif
+
+struct dh_core_devlink_ops dh_mpf_core_devlink_ops = {
+ .params_register = params_register,
+ .params_unregister = params_unregister
+};
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.h b/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.h
old mode 100755
new mode 100644
index a45fd1009d0f0a86da48b4a7acc40eaad9b9fa2e..7dea3be9aad73e4359eaf6eea720c2fcf773a933
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/devlink.h
@@ -1,16 +1,14 @@
-#ifndef __ZXDH_MPF_DEVLINK_H__
-#define __ZXDH_MPF_DEVLINK_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
+#ifndef __ZXDH_MPF_DEVLINK_H__
+#define __ZXDH_MPF_DEVLINK_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.c b/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.c
old mode 100755
new mode 100644
index 0d8b612a27c455bc3128823d500205deda2e886e..c6ed8d90328d60ae5f78beb84391e9c65b105cb4
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.c
@@ -1,235 +1,236 @@
-#include
-#include
-#include
-#include
-#include "irq.h"
-#include "eq.h"
-#include "../en_mpf.h"
-
-struct dh_mpf_eq_table {
- struct dh_irq **comp_irqs;
- struct dh_irq *async_risc_irq;
- struct dh_irq *async_pf_irq;
- struct dh_eq_async async_risc_eq;
- struct dh_eq_async async_pf_eq;
-};
-
-static int32_t create_async_eqs(struct dh_core_dev *dev);
-
-static int32_t __attribute__((unused)) create_eq_map(struct dh_eq_param *param)
-{
- int32_t err = 0;
-
- /* inform device*/
- return err;
-}
-
-int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev)
-{
- struct dh_eq_table *eq_table;
- struct dh_mpf_eq_table *table_priv = NULL;
- int32_t err = 0;
-
- eq_table = &dev->eq_table;
-
- table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL);
- if (unlikely(table_priv == NULL))
- {
- err = -ENOMEM;
- goto err_table_priv;
- }
-
- dh_eq_table_init(dev, table_priv);
-
- return 0;
-
-err_table_priv:
- kvfree(eq_table);
- return err;
-}
-
-/*todo*/
-int32_t dh_eq_get_comp_eqs(struct dh_core_dev *dev)
-{
- return 0;
-}
-
-static int32_t create_comp_eqs(struct dh_core_dev *dev)
-{
- return 0;
-}
-
-static int32_t destroy_async_eq(struct dh_core_dev *dev)
-{
- struct dh_eq_table *eq_table = &dev->eq_table;
-
- mutex_lock(&eq_table->lock);
- /*unmap inform device*/
- mutex_unlock(&eq_table->lock);
-
- return 0;
-}
-
-static void cleanup_async_eq(struct dh_core_dev *dev,
- struct dh_eq_async *eq, const char *name)
-{
- dh_eq_disable(dev, &eq->core, &eq->irq_nb);
-}
-
-static void destroy_async_eqs(struct dh_core_dev *dev)
-{
- struct dh_eq_table *table = &dev->eq_table;
- struct dh_mpf_eq_table *table_priv = table->priv;
-
- cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv");
- cleanup_async_eq(dev, &table_priv->async_pf_eq, "pf");
- destroy_async_eq(dev);
- dh_irqs_release_vectors(&table_priv->async_risc_irq, 1);
- dh_irqs_release_vectors(&table_priv->async_pf_irq, 1);
-}
-
-void destroy_comp_eqs(struct dh_core_dev *dev)
-{
-
-}
-
-void dh_mpf_eq_table_destroy(struct dh_core_dev *dev)
-{
- destroy_comp_eqs(dev);
- destroy_async_eqs(dev);
-}
-
-int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev)
-{
- int32_t err = 0;
-
- err = create_async_eqs(dev);
- if (err != 0)
- {
- dh_err(dev, "Failed to create async EQs\n");
- goto err_async_eqs;
- }
-
- err = create_comp_eqs(dev);
- if (err != 0)
- {
- dh_err(dev, "Failed to create completion EQs\n");
- goto err_comp_eqs;
- }
-
- return 0;
-
-err_comp_eqs:
- destroy_async_eqs(dev);
-err_async_eqs:
- return err;
-}
-
-/*create eventq*/
-static int32_t create_async_eq(struct dh_core_dev *dev, struct dh_irq *risc, struct dh_irq *pf)
-{
- struct dh_eq_table *eq_table = &dev->eq_table;
- struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev);
- struct msix_para in = {0};
- int32_t err = 0;
-
- in.vector_risc = risc->index;
- in.vector_pfvf = pf->index;
- in.vector_mpf = 0xff;
- in.driver_type = MSG_CHAN_END_PF;//TODO
- in.pdev = dev->pdev;
- in.virt_addr = mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET;
-
- mutex_lock(&eq_table->lock);
-
- err = zxdh_bar_enable_chan(&in, &mpf_dev->vport);
-
- mutex_unlock(&eq_table->lock);
-
- return err;
-}
-
-static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct dh_eq_async *eq_riscv_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv;
- struct dh_eq_table *eq_table = &dev->eq_table;
-
- atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF], DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF, NULL);
-
- return 0;
-}
-
-static int32_t dh_eq_async_mpf_int(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct dh_eq_async *eq_riscv_async = container_of(nb, struct dh_eq_async, irq_nb);
- struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv;
- struct dh_eq_table *eq_table = &dev->eq_table;
-
- atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_PF_TO_MPF], DH_EVENT_TYPE_NOTIFY_PF_TO_MPF, NULL);
-
- return 0;
-}
-
-static int32_t create_async_eqs(struct dh_core_dev *dev)
-{
- struct dh_eq_table *table = &dev->eq_table;
- struct dh_mpf_eq_table *table_priv = table->priv;
- struct dh_eq_param param = {};
- int32_t err = 0;
-
- dh_dbg(dev, "start\r\n");
- table_priv->async_risc_irq = dh_mpf_async_irq_request(dev);
- if (IS_ERR_OR_NULL(table_priv->async_risc_irq))
- {
- dh_err(dev, "Failed to get async_risc_irq\n");
- return PTR_ERR(table_priv->async_risc_irq);
- }
-
- table_priv->async_pf_irq = dh_mpf_async_irq_request(dev);
- if (IS_ERR_OR_NULL(table_priv->async_pf_irq))
- {
- err = PTR_ERR(table_priv->async_pf_irq);
- dh_err(dev, "Failed to get async_pf_irq\n");
- goto err_irq_request;
- }
-
- err = create_async_eq(dev, table_priv->async_risc_irq, table_priv->async_pf_irq);
- if (err != 0)
- {
- dh_err(dev, "Failed to create async_eq\n");
- goto err_create_async_eq;
- }
-
- param = (struct dh_eq_param) {
- .irq = table_priv->async_risc_irq,
- .nent = 10,
- .event_type = DH_EVENT_QUEUE_TYPE_RISCV /* used for inform dpu */
- };
- err = setup_async_eq(dev, &table_priv->async_risc_eq, ¶m, dh_eq_async_riscv_int, "riscv", dev);
- if (err != 0)
- {
- dh_err(dev, "Failed to setup async_risc_eq\n");
- goto err_setup_async_eq;
- }
-
- param.irq = table_priv->async_pf_irq,
- err = setup_async_eq(dev, &table_priv->async_pf_eq, ¶m, dh_eq_async_mpf_int, "pf", dev);
- if (err != 0)
- {
- dh_err(dev, "Failed to setup async_pf_eq\n");
- goto cleanup_async_eq;
- }
-
- return 0;
-
-cleanup_async_eq:
- cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv");
-err_setup_async_eq:
- destroy_async_eq(dev);
-err_create_async_eq:
- dh_irqs_release_vectors(&table_priv->async_pf_irq, 1);
-err_irq_request:
- dh_irqs_release_vectors(&table_priv->async_risc_irq, 1);
- return err;
+#include
+#include
+#include
+#include
+#include "irq.h"
+#include "eq.h"
+#include "../en_mpf.h"
+
+struct dh_mpf_eq_table {
+ struct dh_irq **comp_irqs;
+ struct dh_irq *async_risc_irq;
+ struct dh_irq *async_pf_irq;
+ struct dh_eq_async async_risc_eq;
+ struct dh_eq_async async_pf_eq;
+};
+
+static int32_t create_async_eqs(struct dh_core_dev *dev);
+
+static int32_t __attribute__((unused)) create_eq_map(struct dh_eq_param *param)
+{
+ int32_t err = 0;
+
+ /* inform device*/
+ return err;
+}
+
+int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev)
+{
+ struct dh_eq_table *eq_table;
+ struct dh_mpf_eq_table *table_priv = NULL;
+ int32_t err = 0;
+
+ eq_table = &dev->eq_table;
+
+ table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL);
+ if (unlikely(table_priv == NULL)) {
+ err = -ENOMEM;
+ goto err_table_priv;
+ }
+
+ dh_eq_table_init(dev, table_priv);
+
+ return 0;
+
+err_table_priv:
+ kvfree(eq_table);
+ return err;
+}
+
+/*todo*/
+int32_t dh_eq_get_comp_eqs(struct dh_core_dev *dev)
+{
+ return 0;
+}
+
+static int32_t create_comp_eqs(struct dh_core_dev *dev)
+{
+ return 0;
+}
+
+static int32_t destroy_async_eq(struct dh_core_dev *dev)
+{
+ struct dh_eq_table *eq_table = &dev->eq_table;
+
+ mutex_lock(&eq_table->lock);
+ /*unmap inform device*/
+ mutex_unlock(&eq_table->lock);
+
+ return 0;
+}
+
+static void cleanup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq,
+ const char *name)
+{
+ dh_eq_disable(dev, &eq->core, &eq->irq_nb);
+}
+
+static void destroy_async_eqs(struct dh_core_dev *dev)
+{
+ struct dh_eq_table *table = &dev->eq_table;
+ struct dh_mpf_eq_table *table_priv = table->priv;
+
+ cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv");
+ cleanup_async_eq(dev, &table_priv->async_pf_eq, "pf");
+ destroy_async_eq(dev);
+ dh_irqs_release_vectors(&table_priv->async_risc_irq, 1);
+ dh_irqs_release_vectors(&table_priv->async_pf_irq, 1);
+}
+
+void destroy_comp_eqs(struct dh_core_dev *dev)
+{
+}
+
+void dh_mpf_eq_table_destroy(struct dh_core_dev *dev)
+{
+ destroy_comp_eqs(dev);
+ destroy_async_eqs(dev);
+}
+
+int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev)
+{
+ int32_t err = 0;
+
+ err = create_async_eqs(dev);
+ if (err != 0) {
+ dh_err(dev, "Failed to create async EQs\n");
+ goto err_async_eqs;
+ }
+
+ err = create_comp_eqs(dev);
+ if (err != 0) {
+ dh_err(dev, "Failed to create completion EQs\n");
+ goto err_comp_eqs;
+ }
+
+ return 0;
+
+err_comp_eqs:
+ destroy_async_eqs(dev);
+err_async_eqs:
+ return err;
+}
+
+/*create eventq*/
+static int32_t create_async_eq(struct dh_core_dev *dev, struct dh_irq *risc,
+ struct dh_irq *pf)
+{
+ struct dh_eq_table *eq_table = &dev->eq_table;
+ struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev);
+ struct msix_para in = { 0 };
+ int32_t err = 0;
+
+ in.vector_risc = risc->index;
+ in.vector_pfvf = pf->index;
+ in.vector_mpf = 0xff;
+ in.driver_type = MSG_CHAN_END_PF; // TODO
+ in.pdev = dev->pdev;
+ in.virt_addr = mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET;
+
+ mutex_lock(&eq_table->lock);
+
+ err = zxdh_bar_enable_chan(&in, &mpf_dev->vport);
+
+ mutex_unlock(&eq_table->lock);
+
+ return err;
+}
+
+static int32_t dh_eq_async_riscv_int(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct dh_eq_async *eq_riscv_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv;
+ struct dh_eq_table *eq_table = &dev->eq_table;
+
+ atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF],
+ DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF, NULL);
+
+ return 0;
+}
+
+static int32_t dh_eq_async_mpf_int(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct dh_eq_async *eq_riscv_async =
+ container_of(nb, struct dh_eq_async, irq_nb);
+ struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv;
+ struct dh_eq_table *eq_table = &dev->eq_table;
+
+ atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_PF_TO_MPF],
+ DH_EVENT_TYPE_NOTIFY_PF_TO_MPF, NULL);
+
+ return 0;
+}
+
+static int32_t create_async_eqs(struct dh_core_dev *dev)
+{
+ struct dh_eq_table *table = &dev->eq_table;
+ struct dh_mpf_eq_table *table_priv = table->priv;
+ struct dh_eq_param param = {};
+ int32_t err = 0;
+
+ dh_dbg(dev, "start\r\n");
+ table_priv->async_risc_irq = dh_mpf_async_irq_request(dev);
+ if (IS_ERR_OR_NULL(table_priv->async_risc_irq)) {
+ dh_err(dev, "Failed to get async_risc_irq\n");
+ return PTR_ERR(table_priv->async_risc_irq);
+ }
+
+ table_priv->async_pf_irq = dh_mpf_async_irq_request(dev);
+ if (IS_ERR_OR_NULL(table_priv->async_pf_irq)) {
+ err = PTR_ERR(table_priv->async_pf_irq);
+ dh_err(dev, "Failed to get async_pf_irq\n");
+ goto err_irq_request;
+ }
+
+ err = create_async_eq(dev, table_priv->async_risc_irq,
+ table_priv->async_pf_irq);
+ if (err != 0) {
+ dh_err(dev, "Failed to create async_eq\n");
+ goto err_create_async_eq;
+ }
+
+ param = (struct dh_eq_param){
+ .irq = table_priv->async_risc_irq,
+ .nent = 10,
+ .event_type = DH_EVENT_QUEUE_TYPE_RISCV /* used for inform dpu */
+ };
+ err = setup_async_eq(dev, &table_priv->async_risc_eq, ¶m,
+ dh_eq_async_riscv_int, "riscv", dev);
+ if (err != 0) {
+ dh_err(dev, "Failed to setup async_risc_eq\n");
+ goto err_setup_async_eq;
+ }
+
+ param.irq = table_priv->async_pf_irq,
+ err = setup_async_eq(dev, &table_priv->async_pf_eq, ¶m,
+ dh_eq_async_mpf_int, "pf", dev);
+ if (err != 0) {
+ dh_err(dev, "Failed to setup async_pf_eq\n");
+ goto cleanup_async_eq;
+ }
+
+ return 0;
+
+cleanup_async_eq:
+ cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv");
+err_setup_async_eq:
+ destroy_async_eq(dev);
+err_create_async_eq:
+ dh_irqs_release_vectors(&table_priv->async_pf_irq, 1);
+err_irq_request:
+ dh_irqs_release_vectors(&table_priv->async_risc_irq, 1);
+ return err;
}
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.h b/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.h
old mode 100755
new mode 100644
index 4aaaaaee6b4c6426214c934fa70b0d9f56254c91..e962267a1ae63c17db0f671bdcedfe720c39818b
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/eq.h
@@ -1,19 +1,19 @@
-#ifndef __ZXDH_MPF_EQ_H__
-#define __ZXDH_MPF_EQ_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev);
-
-int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev);
-void dh_mpf_eq_table_destroy(struct dh_core_dev *dev);
-
-#ifdef __cplusplus
-}
-#endif
-
+#ifndef __ZXDH_MPF_EQ_H__
+#define __ZXDH_MPF_EQ_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev);
+
+int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev);
+void dh_mpf_eq_table_destroy(struct dh_core_dev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/events.c b/src/net/drivers/net/ethernet/dinghai/en_mpf/events.c
old mode 100755
new mode 100644
index 35aaf52711e8d6f2e741aad09a0d8ec5081412f7..d0d701329777398a9d2ba12a26a9c5a9897d71c9
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/events.c
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/events.c
@@ -1,132 +1,140 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "events.h"
-#include "../en_mpf.h"
-
-static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type, void *data);
-static int32_t pf_notifier(struct notifier_block *nb, unsigned long type, void *data);
-
-static struct dh_nb mpf_events[] = {
- {.nb.notifier_call = riscv_notifier, .event_type = DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF},
- {.nb.notifier_call = pf_notifier, .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_MPF}
-};
-
-static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type, void *data)
-{
- struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
- struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx;
- struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev);
-
- zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_risc);
-
- return NOTIFY_OK;
-}
-
-static int32_t pf_notifier(struct notifier_block *nb, unsigned long type, void *data)
-{
- struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
- struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx;
- struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev);
-
- zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_pf);
-
- return NOTIFY_OK;
-}
-
-void np_sdk_handler_from_risc(struct work_struct *p_work)
-{
- struct dh_en_mpf_dev *mpf_dev = container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_risc);
-
- LOG_INFO("is called\n");
- zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_MPF, mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET, NULL);
- return;
-}
-
-void np_sdk_handler_from_pf(struct work_struct *p_work)
-{
- struct dh_en_mpf_dev *mpf_dev = container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_pf);
-
- LOG_INFO("is called\n");
- zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_MPF, mpf_dev->pci_ioremap_addr + ZXDH_BAR2_CHAN_OFFSET, NULL);
- return;
-}
-
-void zxdh_events_start(struct dh_core_dev *dev)
-{
- struct dh_events *events = dev->events;
- int32_t i;
- int32_t err;
-
- for (i = 0; i < ARRAY_SIZE(mpf_events); i++)
- {
- events->notifiers[i].nb = mpf_events[i];
- events->notifiers[i].ctx = dev;
- err = dh_eq_notifier_register(&dev->eq_table, &events->notifiers[i].nb);
- if (err != 0)
- {
- LOG_ERR("i: %d, err: %d.\n", i, err);
- }
- }
-}
-
-int32_t dh_mpf_events_init(struct dh_core_dev *dev)
-{
- struct dh_events *events = NULL;
- struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev);
- int32_t ret = 0;
-
- events = kzalloc((sizeof(*events) + ARRAY_SIZE(mpf_events) * sizeof(struct dh_event_nb)), GFP_KERNEL);
- if (unlikely(events == NULL))
- {
- LOG_ERR("events kzalloc failed: %p\n", events);
- ret = -ENOMEM;
- goto err_events_kzalloc;
- }
-
- events->evt_num = ARRAY_SIZE(mpf_events);
- events->dev = dev;
- dev->events = events;
- events->wq = create_singlethread_workqueue("dh_mpf_events");
- if (!events->wq)
- {
- LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n", events->wq);
- ret = -ENOMEM;
- goto err_create_wq;
- }
-
- INIT_WORK(&mpf_dev->dh_np_sdk_from_risc, np_sdk_handler_from_risc);
- INIT_WORK(&mpf_dev->dh_np_sdk_from_pf, np_sdk_handler_from_pf);
-
- zxdh_events_start(dev);
-
- return 0;
-
-err_create_wq:
- kfree(events);
-err_events_kzalloc:
- return ret;
-}
-
-void dh_events_stop(struct dh_core_dev *dev)
-{
- struct dh_events *events = dev->events;
- int32_t i = 0;
-
- for (i = ARRAY_SIZE(mpf_events) - 1; i >= 0 ; i--)
- {
- dh_eq_notifier_unregister(&dev->eq_table, &events->notifiers[i].nb);
- }
-
- zxdh_events_cleanup(dev);
-}
-
-void dh_mpf_events_uninit(struct dh_core_dev *dev)
-{
- return dh_events_stop(dev);
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "events.h"
+#include "../en_mpf.h"
+
+static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type,
+ void *data);
+static int32_t pf_notifier(struct notifier_block *nb, unsigned long type,
+ void *data);
+
+static struct dh_nb mpf_events[] = {
+ { .nb.notifier_call = riscv_notifier,
+ .event_type = DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF },
+ { .nb.notifier_call = pf_notifier,
+ .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_MPF }
+};
+
+static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
+ struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx;
+ struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev);
+
+ zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_risc);
+
+ return NOTIFY_OK;
+}
+
+static int32_t pf_notifier(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb);
+ struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx;
+ struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev);
+
+ zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_pf);
+
+ return NOTIFY_OK;
+}
+
+void np_sdk_handler_from_risc(struct work_struct *p_work)
+{
+ struct dh_en_mpf_dev *mpf_dev =
+ container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_risc);
+
+ LOG_INFO("is called\n");
+ zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_MPF,
+ mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET, NULL);
+ return;
+}
+
+void np_sdk_handler_from_pf(struct work_struct *p_work)
+{
+ struct dh_en_mpf_dev *mpf_dev =
+ container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_pf);
+
+ LOG_INFO("is called\n");
+ zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_MPF,
+ mpf_dev->pci_ioremap_addr + ZXDH_BAR2_CHAN_OFFSET, NULL);
+ return;
+}
+
+void zxdh_events_start(struct dh_core_dev *dev)
+{
+ struct dh_events *events = dev->events;
+ int32_t i;
+ int32_t err;
+
+ for (i = 0; i < ARRAY_SIZE(mpf_events); i++) {
+ events->notifiers[i].nb = mpf_events[i];
+ events->notifiers[i].ctx = dev;
+ err = dh_eq_notifier_register(&dev->eq_table, &events->notifiers[i].nb);
+ if (err != 0) {
+ LOG_ERR("i: %d, err: %d.\n", i, err);
+ }
+ }
+}
+
+int32_t dh_mpf_events_init(struct dh_core_dev *dev)
+{
+ struct dh_events *events = NULL;
+ struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev);
+ int32_t ret = 0;
+
+ events = kzalloc((sizeof(*events) +
+ ARRAY_SIZE(mpf_events) * sizeof(struct dh_event_nb)),
+ GFP_KERNEL);
+ if (unlikely(events == NULL)) {
+ LOG_ERR("events kzalloc failed: %p\n", events);
+ ret = -ENOMEM;
+ goto err_events_kzalloc;
+ }
+
+ events->evt_num = ARRAY_SIZE(mpf_events);
+ events->dev = dev;
+ dev->events = events;
+ events->wq = create_singlethread_workqueue("dh_mpf_events");
+ if (!events->wq) {
+ LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n",
+ events->wq);
+ ret = -ENOMEM;
+ goto err_create_wq;
+ }
+
+ INIT_WORK(&mpf_dev->dh_np_sdk_from_risc, np_sdk_handler_from_risc);
+ INIT_WORK(&mpf_dev->dh_np_sdk_from_pf, np_sdk_handler_from_pf);
+
+ zxdh_events_start(dev);
+
+ return 0;
+
+err_create_wq:
+ kfree(events);
+err_events_kzalloc:
+ return ret;
+}
+
+void dh_events_stop(struct dh_core_dev *dev)
+{
+ struct dh_events *events = dev->events;
+ int32_t i = 0;
+
+ for (i = ARRAY_SIZE(mpf_events) - 1; i >= 0; i--) {
+ dh_eq_notifier_unregister(&dev->eq_table, &events->notifiers[i].nb);
+ }
+
+ zxdh_events_cleanup(dev);
+}
+
+void dh_mpf_events_uninit(struct dh_core_dev *dev)
+{
+ return dh_events_stop(dev);
}
\ No newline at end of file
diff --git a/src/net/drivers/net/ethernet/dinghai/en_mpf/events.h b/src/net/drivers/net/ethernet/dinghai/en_mpf/events.h
old mode 100755
new mode 100644
index 496097e7ffdb0a06d04c5c47e5bcb07f1000ada0..20496792f5c710c2027f36d1150d9599a1383a56
--- a/src/net/drivers/net/ethernet/dinghai/en_mpf/events.h
+++ b/src/net/drivers/net/ethernet/dinghai/en_mpf/events.h
@@ -1,18 +1,18 @@
-#ifndef __ZXDH_MPF_EVENTS_H__
-#define __ZXDH_MPF_EVENTS_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-int32_t dh_mpf_events_init(struct dh_core_dev *dev);
-void dh_mpf_events_uninit(struct dh_core_dev *dev);
-void zxdh_events_start(struct dh_core_dev *dev);
-
-#ifdef __cplusplus
-}
-#endif
-
+#ifndef __ZXDH_MPF_EVENTS_H__
+#define __ZXDH_MPF_EVENTS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include