diff --git a/tools/Boostkit_ISV_Tool.md b/tools/Boostkit_ISV_Tool.md
index bd7e360f0c420b7fe76780f34201278bfb4b98ba..a1f96773f4274199225e8738623ba5fa44178ad3 100644
--- a/tools/Boostkit_ISV_Tool.md
+++ b/tools/Boostkit_ISV_Tool.md
@@ -10,7 +10,7 @@
 System information collected: OS version, kernel version, and Kunpeng server model
 
-Software information collected: key log entries showing whether the features of the eight BoostKit solutions have taken effect
+Software information collected: key log entries showing whether the features of the eight BoostKit solutions have taken effect.
 
 Errors reported during collection for features that are not enabled are expected behavior.
 
@@ -98,9+98,41 @@
 Spark installation path, /home/ by default
 
 Name of the corresponding database; fill in the name of the database you created
 
+> omnimv_dir=/omnimv
+
+Path of the omnimv directory, used to verify the materialized view feature
+
+> omnidata_launcher_server=
+
+Hostname of the server where the launcher runs (passwordless SSH access required; defaults to the current server if left empty)
+
+> omnidata_launcher=/home/omm/omnidata-install/omnidata/bin/launcher
+
+Path of the launcher binary, used to verify the operator push-down feature
+
+> omnidata_install_path=/home/omm/omnidata-install
+
+OmniData installation directory, used to verify the operator push-down feature
+
+> push_down_jars=/usr/local/spark-plugin-jar/*
+
+Paths of the operator push-down jars, used to verify the operator push-down feature
+
+> push_down_conf=spark.executor.extraClassPath=./*
+
+Spark parameter to configure, used to verify the operator push-down feature
+
+> zookeeper_address=agent1:2181,agent2:2181,agent3:2181
+
+ZooKeeper connection address, used to verify the operator push-down feature
+
+> zookeeper_path=/sdi/status
+
+ZooKeeper directory that stores push-down resource information, used to verify the operator push-down feature
+
+> shuffle_jars=/home/ockadmin/opt/ock/jars/*
+
+Paths of the shuffle acceleration jars, used to verify the shuffle acceleration feature
 
 #### **1.2.2 Distributed Storage Configuration File Description**
 
@@ -122,7 +154,29 @@
 ceph osd erasure-code-profile set EC-profile k=4 m=2 plugin=ksal
 
 ceph osd pool create ceph_ecpool 32 32 erasure EC-profile
 
-
+> ceph_conf=/tmp/ceph.conf
+
+Path of the Ceph configuration file, used for acceptance of the compression algorithm feature
+
+> storage_maintain_bin=/tmp/maintain
+
+Path of the binary that uses the storage maintenance tool library feature
+
+> rocksdb_bin=/tmp/rock.bin
+
+Path of the binary that uses the RocksDB metadata acceleration feature
+
+> ucache_bin=/tmp/ucache.bin
+
+Path of the binary that uses the UCache intelligent read cache feature
+
+> non_ceph_bin=/tmp/non_ceph.bin
+
+Path of the software binary in non-Ceph scenarios, used for acceptance of the storage acceleration algorithm library feature
+
+> non_ceph_pid=38799
+
+Process ID of the software in non-Ceph scenarios, used for acceptance of the storage acceleration algorithm library feature
 
@@ -134,6 +188,14 @@
 check defaults to False; set it to True to enable collection of database feature enablement information
 
+> use_mysql=1
+
+Whether acceptance is performed in a MySQL scenario; fill in 1 or 0
+
+> other_db_bin=/usr/losql/pgsql
+
+In non-MySQL scenarios, fill in the path of the database binary
+
 > mysql_install_path=/usr/local/mysql
 
 Database installation path, /usr/local/mysql by default
 
@@ -146,10 +208,33 @@
 Password for logging in to the database, 123456 by default
 
+> mysql_port=3306
+
+Database port, 3306 by default
+
+> nvme_name=nvme0n1
+
+Name of the NVMe disk, used for acceptance of the "MySQL NVMe SSD atomic write" feature
+
 > database_name=xxxx
 
 Name of the corresponding database; fill in the name of the database you created
 
+> greenplum_username=root
+
+Greenplum username, used for acceptance of the "KAEzip compression and decompression optimization" feature
+
+> greenplum_port=5432
+
+Greenplum port, used for acceptance of the "KAEzip compression and decompression optimization" feature
+
+> kae_version=
+
+KAE version; fill in 1.0 or 2.0, used for acceptance of the "KAEzip compression and decompression optimization" feature
+
+> greenplum_kae_sql=
+
+Greenplum SQL statement that exercises KAE, used for acceptance of the "KAEzip compression and decompression optimization" feature
 
 #### **1.2.4 ARM Native Configuration File Description**
 
@@ -176,7 +261,7 @@ check defaults to False; set it to True to enable collection of ARM native feature enablement information
 
 ![bigdata](./BoostKit_pic/acc_lib.png)
 
-
+The tool verifies the KAE feature by default; no configuration file changes are required
 
 > check=False
 
@@ -194,6 +279,13 @@ check defaults to False; set it to True to enable collection of acceleration library feature enablement information
 
 Absolute path of the binary that references the math library
 
+> math_jar=/home/math.jar
+
+Location of the jar package that references the math library
+
+> math_java=/home/math.class
+
+Location of the bytecode file that references the math library
 
 #### **1.2.6 Virtualization Configuration File Description**
 
@@ -208,6 +300,9 @@
 
 2. The virtualization DPU offload feature requires running the collection tool on the DPU side; no configuration file changes are required
 
+3. Before verifying the OVS flow table normalization feature, prepare the environment so that a VM on physical machine B keeps pinging a VM on physical machine A, then run the collection tool on physical machine A.
+This feature is verified by default; no configuration file changes are required
+
 OpenStack verification requires the following information
 
 > server_name=vm1
 
@@ -220,6 +315,26 @@
 
 > availability_zone=nova:dpu01
 
+> vm_ip=71.14.48.104
+
+VM IP address, used for acceptance of the high-performance cloud disk optimization feature
+
+> vm_user=root
+
+VM username, used for acceptance of the high-performance cloud disk optimization feature
+
+> vm_password=Huawei12#$
+
+VM password, used for acceptance of the high-performance cloud disk optimization feature
+
+> vm_ceph_disk_name=vdb
+
+Name of the high-performance cloud disk used by the VM, used for acceptance of the high-performance cloud disk optimization feature
+
+> vm_name=vm1
+
+VM name, used for acceptance of the high-performance cloud disk optimization feature
+
 #### **1.2.7 Confidential Computing Configuration File Description**
 
 ![bigdata](./BoostKit_pic/virt.png)
 
@@ -262,6 +377,9 @@ check defaults to False; set it to True to enable collection of HPC feature enablement information
 
 Absolute path of the binary modified for HPC SME
 
+> sve_source_code=
+
+Fill in the absolute path of the source directory or file that uses SVE
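+
+A minimal sketch of what this check amounts to: per collect_msg.sh, the tool simply greps the configured path for `arm_sve.h` references (`/tmp/src` below is only the example value from config.ini, not a requirement):
+
+```
+# assuming sve_source_code=/tmp/src in config.ini
+grep -r arm_sve.h /tmp/src   # any match indicates SVE-enabled sources
+```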
 ### 1.3 Collection Tool Usage
 
@@ -279,4 +397,3 @@ check defaults to False; set it to True to enable collection of HPC feature enablement information
 
 bash collect_msg.sh # config.ini must sit in the same directory, with the parameters for the relevant solutions configured as described in section 1.2
 ```
-
diff --git a/tools/collect_msg.sh b/tools/collect_msg.sh
index e4f1355cb0b728ac6aebe87190c197ff65c72e40..c872fb781f70855e752d8316b32dbc368069766e 100644
--- a/tools/collect_msg.sh
+++ b/tools/collect_msg.sh
@@ -12,7 +12,8 @@ default_project="Bigdata Database Storage Arm Virt Acclib Virtual HPC"
 acquire_value(){
     project=$1
     key=$2
-    grep $project -A 100 $config_file |grep -m 1 $key|awk -F= '{print $2}'
+    opt="{sub(/$key=/,\"\")}1"
+    grep $project -A 100 $config_file |grep -m 1 "$key="|awk "$opt"
 }
 
 
@@ -145,40 +146,86 @@ collect_virtual_dpu(){
 }
 
 
+collect_virtual_ceph(){
+    vm_ip=$1
+    vm_user=$2
+    vm_password=$3
+    vm_ceph_disk_name=$4
+    vm_name=$5
+    set -x
+    sshpass -p "$vm_password" ssh "$vm_user"@"$vm_ip" ls -la /sys/block/ | grep $vm_ceph_disk_name | tail -n 1 > $log_path/virtual_ceph_disk.log
+    virsh dumpxml $vm_name > $log_path/virtual_ceph_vm_xml.log
+    rpc vhost_get_controllers > $log_path/virtual_ceph_vm_controllers.log
+    rpc bdev_get_bdevs > $log_path/virtual_ceph_bdev.log
+}
+
+
+collect_virtual_ovs_xpf(){
+    ovs-appctl hwoff/dump-hwoff-flows > $log_path/virtual_ovs_xpf.log
+    echo $? >> $log_path/virtual_ovs_xpf.log
+}
+
+
 ################Database feature information collection##################
 collect_database(){
     mysql_install_path=$1
     mysql_username=$2
     mysql_password=$3
-    database_name=$4
+    mysql_port=$4
+    database_name=$5
+    nvme_name=$6
     plugin_path=$1/lib/plugin
     timeout=60
     $mysql_install_path/bin/mysqld_safe --defaults-file=/etc/my.cnf &
     # loop to check whether the process has started
    for ((i=0; i<=$timeout; i++)); do
-    if ps -ef | grep -v grep | grep "mysql" > /dev/null; then
-        echo "Process mysql started."
-        # proceed with the next steps
-        sleep 5
-        $mysql_install_path/bin/mysql -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "select * from INFORMATION_SCHEMA.plugins where PLUGIN_NAME like 'thread_pool%'" > $log_path/database_mysql.log
-        $mysql_install_path/bin/mysql -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "select * from INFORMATION_SCHEMA.plugins where PLUGIN_NAME like 'kovae%'" >> $log_path/database_mysql.log
-        echo thread_pool: $(ls $plugin_path |grep thread_pool.so) >> $log_path/database_mysql.log
-        echo kovae_path: $(ls $plugin_path |grep ha_kovae.so) >> $log_path/database_mysql.log
-        readelf -a $mysql_install_path/bin/mysqld|grep bolt >> $log_path/database_mysql.log
-        echo no_lock: $(objdump -d $mysql_install_path/bin/mysqld|grep -c row_vers_build_for_semi_consistent_readP5trx_t) >> $log_path/database_mysql.log
-        objdump -d $mysql_install_path/bin/mysqld |grep crc32cb >> $log_path/database_mysql.log
-        pkill -9 mysql
-        break
-    fi
-    sleep 1
-    if [ $timeout -eq $i ];then
-        echo "Timeout error: mysql process not started in $timeout seconds.i"
-        exit 1
-    fi
+        if ps -ef | grep -v grep | grep "mysql" > /dev/null; then
+            echo "Process mysql started."
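+            # Once mysqld is up, the queries and binary inspections below
+            # record which BoostKit MySQL features are enabled: thread pool and
+            # kovae plugins, BOLT-optimized layout, lock-free optimization,
+            # crc32 instructions, parallel query, sched_affinity NUMA binding,
+            # NVMe SSD atomic write, and gazelle user-mode networking.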
+            # proceed with the next steps
+            sleep 5
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "select * from INFORMATION_SCHEMA.plugins where PLUGIN_NAME like 'thread_pool%'" > $log_path/database_mysql.log
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "select * from INFORMATION_SCHEMA.plugins where PLUGIN_NAME like 'kovae%'" >> $log_path/database_mysql.log
+            echo thread_pool: $(ls $plugin_path |grep thread_pool.so) >> $log_path/database_mysql.log
+            echo kovae_path: $(ls $plugin_path |grep ha_kovae.so) >> $log_path/database_mysql.log
+            readelf -a $mysql_install_path/bin/mysqld|grep bolt >> $log_path/database_mysql.log
+            echo no_lock: $(objdump -d $mysql_install_path/bin/mysqld|grep -c row_vers_build_for_semi_consistent_readP5trx_t) >> $log_path/database_mysql.log
+            objdump -d $mysql_install_path/bin/mysqld |grep crc32cb >> $log_path/database_mysql.log
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "show variables like '%parallel%'" >> $log_path/database_mysql.log
+            nm $mysql_install_path/bin/mysqld | grep -c Page_shards >> $log_path/database_mysql_page_shards.log
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "show variables like 'sched_affinity%'" >> $log_path/database_mysql.log
+            hioadm atomicwrite -d $nvme_name > $log_path/database_mysql_nvme.log
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "show variables like '%flush_method%'" >> $log_path/database_mysql_nvme.log
+            $mysql_install_path/bin/mysql -P $mysql_port -u $mysql_username -p$mysql_password -D $database_name -h127.0.0.1 -e "show variables like '%doublewrite%'" >> $log_path/database_mysql_nvme.log
+            gazellectl lstack show 1 -c | grep ":$mysql_port" > $log_path/database_mysql_gazelle.log
+            pkill -9 mysql
+            break
+        fi
+        sleep 1
+        if [ $timeout -eq $i ];then
+            echo "Timeout error: mysql process not started in $timeout seconds."
+            exit 1
+        fi
    done
 }
 
 
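+# Non-MySQL (Greenplum) scenario: inspect the database binary for BOLT and
+# crc32c optimizations, sample the KAE zip queues' available_instances in the
+# background (the sysfs path differs between KAE 1.0 and 2.0), then run the
+# user-supplied SQL so the sampling window can observe KAE activity.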
+collect_database_other_db(){
+    other_db_bin=$1
+    greenplum_username=$2
+    greenplum_port=$3
+    kae_version=$4
+    greenplum_kae_sql=$5
+    readelf -a $other_db_bin | grep bolt > $log_path/database_other_db.log
+    objdump -d $other_db_bin | grep crc32cb >> $log_path/database_other_db.log
+    if [ "$kae_version" == "1.0" ]; then
+        nohup timeout 20 watch -gt -n 0.2 cat /sys/class/uacce/hisi_zip*/attrs/available_instances > $log_path/database_greenplum_kae.log &
+    else
+        nohup timeout 20 watch -gt -n 0.2 cat /sys/class/uacce/hisi_zip*/available_instances > $log_path/database_greenplum_kae.log &
+    fi
+    psql -h 127.0.0.1 -p $greenplum_port -U $greenplum_username -c "$greenplum_kae_sql"
+}
+
+
 ####################Confidential computing feature information collection################
 collect_virtcca_msg(){
     cvm_name=$1
@@ -236,10 +283,14 @@ collect_acceleration_library(){
     system_lib=$1
     hmpp_lib=$2
     math_lib=$3
+    math_jar=$4
+    math_java=$5
     openssl speed -elapsed -engine kae rsa2048 > $log_path/acceleration_library.log 2>&1
     ldd $1 >> $log_path/acceleration_library.log
     ldd $2 >> $log_path/acceleration_library.log
     ldd $3 >> $log_path/acceleration_library.log
+    jar -tf "$math_jar" >> $log_path/acceleration_library_kml_java.log
+    javap -v "$math_java" >> $log_path/acceleration_library_kml_java.log
 }
 
 
@@ -247,6 +298,12 @@
 # $1: name of the ec_pool
 collect_storage_acc(){
     ec_pool=$1
+    ceph_conf=$2
+    storage_maintain_bin=$3
+    rocksdb_bin=$4
+    ucache_bin=$5
+    non_ceph_bin=$6
+    non_ceph_pid=$7
     # storage acceleration library
     ldd /usr/bin/ceph-osd > $log_path/storage_acc.log
     bcache_dev=$(ls /sys/class/block|grep -m 1 bcache)
@@ -262,6 +319,24 @@
     else
         echo "ec_pool not exist" >> $log_path/storage_acc.log
     fi
+
+    systemctl status ceph-boost.service > $log_path/storage_io.log
+    if ceph osd pool set vdbench compression_algorithm glz; then
+        \cp "$ceph_conf" "$ceph_conf".bak
+        sed -i '/^compressor_glz_level/d' "$ceph_conf"
+        echo "compressor_glz_level = 1" >> "$ceph_conf"
+        systemctl restart ceph.target
+        systemctl status ceph.target > $log_path/storage_compress.log
+        \cp "$ceph_conf".bak "$ceph_conf"
+    fi
+    ldd $storage_maintain_bin > $log_path/storage_maintain_tool.log
+    lib_rocksdb=$(ldd $rocksdb_bin | grep librocksdb | awk '{print $3}')
+    ldd $lib_rocksdb > $log_path/storage_rocksdb.log
+    ldd $ucache_bin > $log_path/storage_ucache.log
+
+    if ldd $non_ceph_bin | grep ksal; then
+        timeout 20 perf top -p $non_ceph_pid > $log_path/storage_non_ceph_perf_top.log
+    fi
 }
 
 
@@ -532,6 +607,89 @@
 }
 
 
+collect_bigdata_omnimv(){
+    omnimv_dir=$1
+    hdfs dfs -ls $omnimv_dir > $log_path/bigdata_omnimv.log
+}
+
+
+collect_bigdata_omni_push_down(){
+    omnidata_launcher_server=$1
+    omnidata_launcher=$2
+    push_down_jars=$3
+    push_down_conf=$4
+    spark_path=$5
+    database=$6
+    omnidata_install_path=$7
+    zookeeper_address=$8
+    zookeeper_path=$9
+
+    if [ -z "$omnidata_launcher_server" ]; then
+        sh $omnidata_launcher status > $log_path/bigdata_omni_launcher_status.log
+    else
+        ssh $omnidata_launcher_server sh $omnidata_launcher status > $log_path/bigdata_omni_launcher_status.log
+    fi
+    cat << EOF > /tmp/log4j.properties
+log4j.rootCategory=INFO, FILE
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+log4j.logger.org.apache.spark.sql.execution=DEBUG
+log4j.logger.org.apache.spark.repl.Main=INFO
+
+log4j.appender.FILE=org.apache.log4j.FileAppender
+log4j.appender.FILE.file=$log_path/bigdata_omni_push_down.log
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+
+log4j.appender.FILE.layout.ConversionPattern=%m%n
+EOF
+    # reuse the collection SQL from the operator check directly here
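+    # With spark.sql.execution DEBUG logs redirected into
+    # bigdata_omni_push_down.log, running one TPC-DS-style query (q1) under the
+    # ndp/OmniData settings records whether operators were actually pushed
+    # down; the driver/executor sizes below are tuned for the test bed and may
+    # need adjusting.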
+    $spark_path/bin/spark-sql --driver-class-path "$push_down_jars" --jars "$push_down_jars" \
+        --conf "$push_down_conf" \
+        --conf spark.sql.cbo.enabled=true \
+        --conf spark.sql.cbo.planStats.enabled=true \
+        --conf spark.sql.ndp.enabled=true \
+        --conf spark.sql.ndp.filter.selectivity.enable=true \
+        --conf spark.sql.ndp.filter.selectivity=0.5 \
+        --conf spark.sql.ndp.alive.omnidata=3 \
+        --conf spark.sql.ndp.table.size.threshold=10 \
+        --conf spark.sql.ndp.zookeeper.address=$zookeeper_address \
+        --conf spark.sql.ndp.zookeeper.path=$zookeeper_path \
+        --conf spark.sql.ndp.zookeeper.timeout=15000 \
+        --conf spark.driver.extraLibraryPath=$omnidata_install_path/haf-host/lib \
+        --conf spark.executor.extraLibraryPath=$omnidata_install_path/haf-host/lib \
+        --conf spark.executorEnv.HAF_CONFIG_PATH=$omnidata_install_path/haf-host/etc/ \
+        --name tpcds_test.sql --driver-memory 50G --driver-java-options -Dlog4j.configuration=file:/tmp/log4j.properties \
+        --executor-memory 32G --num-executors 30 --executor-cores 18 --database $database \
+        -e "WITH customer_total_return AS ( SELECT sr_customer_sk AS ctr_customer_sk, sr_store_sk AS ctr_store_sk, sum(sr_return_amt) AS ctr_total_return FROM store_returns, date_dim WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000 GROUP BY sr_customer_sk, sr_store_sk) SELECT c_customer_id FROM customer_total_return ctr1, store, customer WHERE ctr1.ctr_total_return > (SELECT avg(ctr_total_return) * 1.2 FROM customer_total_return ctr2 WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk) AND s_store_sk = ctr1.ctr_store_sk AND s_state = 'TN' AND ctr1.ctr_customer_sk = c_customer_sk ORDER BY c_customer_id LIMIT 100;"
+    rm -f /tmp/log4j.properties
+}
+
+
+collect_bigdata_omni_shuffle(){
+    spark_path=$1
+    shuffle_jars=$2
+    database=$3
+    cat << EOF > /tmp/ock_spark.conf
+spark.master yarn
+spark.task.cpus 1
+spark.shuffle.compress true
+EOF
+    timeout 20 $spark_path/bin/spark-sql --deploy-mode client --driver-cores 8 --driver-memory 40G --num-executors 24 --executor-cores 12 --executor-memory 25g --master yarn --conf spark.sql.codegen.wholeStage=false --jars $shuffle_jars --properties-file /tmp/ock_spark.conf --database $database > $log_path/bigdata_omni_shuffle.log 2>&1
+    rm -f /tmp/ock_spark.conf
+}
+
+
+collect_bigdata_components(){
+    spark_path=$1
+    $spark_path/bin/spark-sql --version > $log_path/bigdata_components.log 2>&1 && echo spark >> $log_path/bigdata_components.log
+    hive --version >> $log_path/bigdata_components.log 2>&1 && echo hive >> $log_path/bigdata_components.log
+    hbase version >> $log_path/bigdata_components.log 2>&1 && echo hbase >> $log_path/bigdata_components.log
+}
+
+
 #################HPC feature information collection##################
 
 # $1: path of the user's executable
@@ -558,6 +716,10 @@
 }
 
 
+collect_sve_source_code(){
+    grep -r arm_sve.h "$1" > $log_path/hpc_sve.log
+}
+
 ################Environment information collection#######################
 collect_os_msg(){
@@ -598,26 +760,47 @@ main(){
 
     elif [ $per_project = "Database" ]; then
         echo "start collect Database msg..."
+        use_mysql=$(acquire_value Database use_mysql)
         mysql_install_path=$(acquire_value Database mysql_install_path)
         mysql_username=$(acquire_value Database mysql_username)
         mysql_password=$(acquire_value Database mysql_password)
+        mysql_port=$(acquire_value Database mysql_port)
         database_name=$(acquire_value Database database_name)
-
-        collect_database $mysql_install_path $mysql_username $mysql_password $database_name
-        echo "Database collect msg Done..."
+        nvme_name=$(acquire_value Database nvme_name)
+
+        if [ "$use_mysql" == "1" ]; then
+            collect_database $mysql_install_path $mysql_username $mysql_password $mysql_port $database_name $nvme_name
+            echo "Database mysql collect msg Done..."
+        else
+            other_db_bin=$(acquire_value Database other_db_bin)
+            greenplum_username=$(acquire_value Database greenplum_username)
+            greenplum_port=$(acquire_value Database greenplum_port)
+            kae_version=$(acquire_value Database kae_version)
+            greenplum_kae_sql=$(acquire_value Database greenplum_kae_sql)
+            collect_database_other_db "$other_db_bin" "$greenplum_username" "$greenplum_port" "$kae_version" "$greenplum_kae_sql"
+            echo "Database other db collect msg Done..."
+        fi
 
     elif [ $per_project = "Acclib" ]; then
         echo "start collect acceleration_library msg..."
         system_lib=$(acquire_value Acclib system_lib)
         hmpp_lib=$(acquire_value Acclib HMPP_lib)
         math_lib=$(acquire_value Acclib math_lib)
-        collect_acceleration_library $system_lib $hmpp_lib $math_lib
+        math_jar=$(acquire_value Acclib math_jar)
+        math_java=$(acquire_value Acclib math_java)
+        collect_acceleration_library "$system_lib" "$hmpp_lib" "$math_lib" "$math_jar" "$math_java"
         echo "acceleration_library collect msg Done..."
 
     elif [ $per_project = "Storage" ]; then
        echo "start collect Storage msg..."
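+        # Storage acceptance gathers: KSAL EC-pool checks, ceph-boost service
+        # status, glz compression, the maintenance tool, RocksDB metadata
+        # acceleration, UCache, and (for non-Ceph software) KSAL usage via
+        # perf top.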
        ec_pool_name=$(acquire_value Storage ec_pool_name)
-        collect_storage_acc $ec_pool_name
+        ceph_conf=$(acquire_value Storage ceph_conf)
+        storage_maintain_bin=$(acquire_value Storage storage_maintain_bin)
+        rocksdb_bin=$(acquire_value Storage rocksdb_bin)
+        ucache_bin=$(acquire_value Storage ucache_bin)
+        non_ceph_bin=$(acquire_value Storage non_ceph_bin)
+        non_ceph_pid=$(acquire_value Storage non_ceph_pid)
+        collect_storage_acc "$ec_pool_name" "$ceph_conf" "$storage_maintain_bin" "$rocksdb_bin" "$ucache_bin" "$non_ceph_bin" "$non_ceph_pid"
         echo "Storage collect msg Done..."
 
     elif [ $per_project = "Bigdata" ]; then
@@ -632,10 +815,23 @@
         mysql_username=$(acquire_value Bigdata mysql_username)
         mysql_password=$(acquire_value Bigdata mysql_password)
         mysql_database_name=$(acquire_value Bigdata mysql_database_name)
+        omnimv_dir=$(acquire_value Bigdata omnimv_dir)
+        omnidata_launcher=$(acquire_value Bigdata omnidata_launcher)
+        omnidata_launcher_server=$(acquire_value Bigdata omnidata_launcher_server)
+        omnidata_install_path=$(acquire_value Bigdata omnidata_install_path)
+        push_down_jars=$(acquire_value Bigdata push_down_jars)
+        push_down_conf=$(acquire_value Bigdata push_down_conf)
+        zookeeper_address=$(acquire_value Bigdata zookeeper_address)
+        zookeeper_path=$(acquire_value Bigdata zookeeper_path)
+        shuffle_jars=$(acquire_value Bigdata shuffle_jars)
+        collect_bigdata_components $spark_path
         collect_bigdata_kal "${algorithms_list[@]}" $algorithms_path "${dataset_list[@]}"
         collect_bigdata_operator $spark_path $database $omnioperator_dir
         collect_bigdata_hbase
         collect_bigdata_tune_up $omniadvisor_dir $mysql_username $mysql_password $mysql_database_name
+        collect_bigdata_omnimv "$omnimv_dir"
+        collect_bigdata_omni_push_down "$omnidata_launcher_server" "$omnidata_launcher" "$push_down_jars" "$push_down_conf" "$spark_path" "$database" "$omnidata_install_path" "$zookeeper_address" "$zookeeper_path"
+        collect_bigdata_omni_shuffle "$spark_path" "$shuffle_jars" "$database"
         echo "Bigdata collect msg Done..."
 
     elif [ $per_project = "Virtual" ]; then
@@ -647,14 +843,23 @@
         volume=$(acquire_value Virtual volume)
         availability_zone=$(acquire_value Virtual availability_zone)
         collect_virtual_dpu $server_name $network $flavor $volume $availability_zone
+        vm_ip=$(acquire_value Virtual vm_ip)
+        vm_user=$(acquire_value Virtual vm_user)
+        vm_password=$(acquire_value Virtual vm_password)
+        vm_ceph_disk_name=$(acquire_value Virtual vm_ceph_disk_name)
+        vm_name=$(acquire_value Virtual vm_name)
+        collect_virtual_ceph "$vm_ip" "$vm_user" "$vm_password" "$vm_ceph_disk_name" "$vm_name"
+        collect_virtual_ovs_xpf
         echo "Virtual collect msg Done..."
 
     elif [ $per_project = "HPC" ]; then
         echo "start collect HPC msg..."
         acc_lib=$(acquire_value HPC acc_lib)
         sme=$(acquire_value HPC sme)
+        sve_source_code=$(acquire_value HPC sve_source_code)
         collect_hpc_acceleration_library $acc_lib
         collect_sme_acceleration_library $sme
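+        # SVE check: recursively grep the configured sources for arm_sve.h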
+        collect_sve_source_code "$sve_source_code"
         echo "HPC collect msg Done..."
     fi
 done
diff --git a/tools/config.ini b/tools/config.ini
index 1b6c48defa214a460c7d695d967095ee3e001fb3..1b54908ab159ec72b6b704c9d60f6501eb94496f 100644
--- a/tools/config.ini
+++ b/tools/config.ini
@@ -21,10 +21,19 @@ instuction_container=kbox_3
 
 [Database]
 check=False
+use_mysql=1
+other_db_bin=/usr/losql/pgsql
 mysql_install_path=/usr/local/mysql
 mysql_username=root
 mysql_password=123456
+mysql_port=3306
+nvme_name=nvme0n1
 database_name=xxxxx
+greenplum_username=root
+greenplum_port=5432
+# KAE version: fill in 1.0 or 2.0
+kae_version=
+greenplum_kae_sql=
 
 
 [Virtual]
@@ -35,16 +44,28 @@
 network=port1_vdpa01
 flavor=4U4G80G
 volume=ceph1_centos2
 availability_zone=nova:dpu01
+# high-performance cloud disk
+vm_ip=71.14.48.104
+vm_user=root
+vm_password=Huawei12#$
+vm_ceph_disk_name=vdb
+vm_name=vm1
 
 
 [Acclib]
 check=False
+# the tool verifies the KAE feature by default; no extra configuration is needed
 # location of the binary that references the system library
 system_lib=/home/system_lib.so
 # location of the binary that references the HMPP library
 HMPP_lib=/home/HMPP_lib.so
+# only one of the three math-library settings needs to be filled in
 # location of the binary that references the math library
 math_lib=/home/math_lib.so
+# location of the jar package that references the math library
+math_jar=/home/math.jar
+# location of the bytecode file that references the math library
+math_java=/home/math.class
 
 
 [Bigdata]
@@ -65,17 +86,35 @@
 mysql_username=root
 mysql_password=123456
 mysql_database_name=xxxxx
 
+# materialized view
+omnimv_dir=/omnimv
+
+# operator push-down
+omnidata_launcher_server=
+omnidata_launcher=/home/omm/omnidata-install/omnidata/bin/launcher
+omnidata_install_path=/home/omm/omnidata-install
+push_down_jars=/usr/local/spark-plugin-jar/*
+push_down_conf=spark.executor.extraClassPath=./*
+zookeeper_address=agent1:2181,agent2:2181,agent3:2181
+zookeeper_path=/sdi/status
+# shuffle acceleration
+shuffle_jars=/home/ockadmin/opt/ock/jars/*
 
 [Storage]
 check=False
 # create an EC pool first for the ksal feature
 ec_pool_name=ec_pool
-
+ceph_conf=/tmp/ceph.conf
+storage_maintain_bin=/tmp/maintain
+rocksdb_bin=/tmp/rock.bin
+ucache_bin=/tmp/ucache.bin
+non_ceph_bin=/tmp/non_ceph.bin
+non_ceph_pid=38799
 
 [HPC]
 check=False
 acc_lib=/home/lib
 sme=/home/lib
-
-
+# absolute path of a source directory or file that uses SVE
+sve_source_code=/tmp/src