diff --git a/distributed/delpoy_sharding.sh b/distributed/delpoy_sharding.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1773b8e27a3bfebc9b9c6b785a03baaa3867e147
--- /dev/null
+++ b/distributed/delpoy_sharding.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+#name: ShardingSphere instance name
+#dir: directory on the target node that holds the ShardingSphere config
+#hostname: Kubernetes node on which ShardingSphere is deployed
+
+name="$1";
+dir="$2";
+hostname="$3"
+
+# remove any previous deployment of this instance
+sudo kubectl delete pod ${name}-sha
+sudo kubectl delete svc ${name}-service-sha
+
+# look up the InternalIP of the target node
+res=`sudo kubectl describe node ${hostname} | grep InternalIP:`
+result=`echo $res | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'`
+
+sudo ssh root@${result} "mkdir -p ${dir}"
+sudo scp -r ./config-sharding_tmp.yaml root@${result}:${dir}config-sharding.yaml
+
+echo "
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ${name}-sha
+  labels:
+    app: ${name}-sha
+spec:
+  nodeName: ${hostname}
+  containers:
+  - name: ${name}-sha
+    image: shardingsphere:1.0.1
+    imagePullPolicy: Never
+    volumeMounts:
+    - name: config-file
+      mountPath: /tmp/
+    ports:
+    - containerPort: 3307
+      name: ${name}
+  volumes:
+  - name: config-file
+    hostPath:
+      path: ${dir}
+      type: Directory
+" > shardingsphere.yaml
+
+# NodePort service exposing the proxy on port 30400
+echo "
+apiVersion: v1
+kind: Service
+metadata:
+  name: ${name}-service-sha
+spec:
+  type: NodePort
+  ports:
+  - port: 3307
+    protocol: TCP
+    targetPort: 3307
+    nodePort: 30400
+  selector:
+    app: ${name}-sha
+" > shardingsphere-svc.yaml
+
+sudo kubectl create -f shardingsphere-svc.yaml
+sudo kubectl create -f shardingsphere.yaml
diff --git a/distributed/patroni.sh b/distributed/patroni.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ba57e713b9b336a9c4edbde973b42ecf39f1a9c2
--- /dev/null
+++ b/distributed/patroni.sh
@@ -0,0 +1,196 @@
+#!/bin/bash
+
+# build per-instance parameters (hostname, service name, peer lists) for the Patroni config
+getPara(){
+    hostname="${1}-${3}"
+    servicename="${1}-service-${3}"
+    peerIP=""
+    peerHost=""
+    for((j=1; j<=$2; j++))
+    do
+        if [ ${j} -ne ${3} ];then
+            if [ -z "${peerIP}" ];then
+                peerHost="${1}-${j}"
+                peerIP="${1}-service-${j}.${namespaces}"
+            else
+                peerHost="${peerHost},${1}-${j}"
+                peerIP="${peerIP},${1}-service-${j}.${namespaces}"
+            fi
+        fi
+    done
+
+}
+
+updatePodFile(){
+echo "apiVersion: v1
+kind: Pod
+metadata:
+  name: ${hostname}
+  namespace: ${namespaces}
+  labels:
+    app: ${hostname}
+spec:
+  restartPolicy: Never
+  containers:
+  - name: ${hostname}
+    image: opengauss:1.0.5
+    imagePullPolicy: Never
+    securityContext:
+      runAsUser: 0
+    volumeMounts:
+    - mountPath: /var/lib/opengauss/data/
+      name: opengauss-volume
+    ports:
+    - containerPort: 5432
+      name: opengauss
+    env:
+    - name: HOST_NAME
+      value: ${hostname}
+    - name: HOST_IP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+    - name: PEER_IPS
+      value: ${peerIP}
+    - name: PEER_HOST_NAMES
+      value: ${peerHost}
+    - name: PORT
+      value: \"5432\"
+    - name: GS_PASSWORD
+      value: \"${passwd}\"
+    - name: SERVER_MODE
+      value: ${state}
+    - name: db_config
+      value: ${db_config}
+  volumes:
+  - name: opengauss-volume
+    hostPath:
+      path: /data/${hostname}/
+      type: DirectoryOrCreate
+
+
+---
+" >> "${namespaces}-pod.yaml"
+}
+
+updateSVCFile(){
+echo "
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: ${namespaces}
+  name: ${servicename}
+spec:
+  ports:
+  - port: 5432
+    protocol: TCP
+    targetPort: 5432
+    name: gsql
+  - port: 5434
+    protocol: TCP
+    targetPort: 5434
+    name: localport
+  - port: 2380
+    protocol: TCP
+    targetPort: 2380
+    name: etcd1-service
+  - port: 2379
+    protocol: TCP
+    targetPort: 2379
+    name: etcd1-local
+  selector:
+    app: ${hostname}
+  clusterIP: None
+
+---
+" >> "${namespaces}-service.yaml"
+}
+
+createHA(){
+echo "
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ${name}-ha
+  namespace: ${namespaces}
+spec:
+  containers:
+  - name: ${name}-ha
+    image: haproxy:1.0.0
+    ports:
+    - containerPort: 7000
+      name: ${name}
+    env:
+    - name: ports
+      value: \"${haPorts}\"
+    - name: ips
+      value: \"${ips}\"
+" > "${namespaces}-ha.yaml"
+}
+
+#name: project name
+#num: number of database instances in this group
+#namespaces: Kubernetes namespace
+#passwd: openGauss password
+#db_config: database configuration
+
+name="$1";
+num="$2";
+namespaces="$3";
+passwd="$4";
+db_config="$5";
+
+if [ ${num} -lt 3 ] || [ ${num} -gt 9 ];then
+    echo "The number of databases in a single shard must be between 3 and 9"
+    exit 1
+fi
+
+#delete all pods and services in the namespace
+kubectl delete --all pods --namespace=${namespaces}
+kubectl delete --all svc --namespace=${namespaces}
+
+#create namespace config file
+echo "
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${namespaces}
+" > "${namespaces}.yaml"
+
+#create namespace
+kubectl create -f "${namespaces}.yaml"
+
+#delete old patroni config files
+sudo rm -rf "${namespaces}-pod.yaml"
+sudo rm -rf "${namespaces}-service.yaml"
+#create patroni config files
+for ((i=1; i<=$num; i++))
+do
+if [ ${i} -eq 1 ];then
+    state="primary"
+    haPorts="5432"
+else
+    state="standby"
+    haPorts="${haPorts},5432"
+fi
+getPara $1 $2 ${i}
+
+updatePodFile
+updateSVCFile
+
+
+done
+#create pods/services
+sudo kubectl create -f "${namespaces}-service.yaml"
+sudo kubectl create -f "${namespaces}-pod.yaml"
+
+
+#get openGauss pod IPs
+sleep 20s
+res=`sudo kubectl get pod -n ${namespaces} -o wide`
+result=`echo $res | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'`
+ips=`echo ${result} | sed 's/[ ][ ]*/,/g'`
+
+createHA
+
+sudo kubectl create -f "${namespaces}-ha.yaml"
diff --git a/distributed/sharding.sh b/distributed/sharding.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a1a8499d45c4ca838990908516e972d7081aa1f3
--- /dev/null
+++ b/distributed/sharding.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+#name: project name
+#sharding_num: number of database shards
+#patroni_num: number of database instances per shard
+#db_config: database configuration
+name="$1";
+sharding_num="$2";
+patroni_num="$3";
+db_config="$4";
+
+if [ ${patroni_num} -lt 3 ] || [ ${patroni_num} -gt 9 ];then
+    echo "The number of databases in a single shard must be between 3 and 9"
+    exit 1
+fi
+
+stty -echo
+read -p "Input openGauss password: " PASSWD
+echo ""
+read -p "Input openGauss password again: " PASSWD_AGAIN
+echo ""
+stty echo
+
+if [ -z "${PASSWD}" ]; then
+    echo "Password is empty"
+    exit 1
+fi
+
+if [ "${PASSWD}" != "${PASSWD_AGAIN}" ];then
+    echo "The two passwords are different"
+    exit 1
+fi
+
+
+sudo rm -rf user_input.yaml
+
+echo "
+#dataSources:
+#  - ip1 port1 database1 user1 password1
+#  - ip2 port2 database2 user2 password2
+#  - ip3 port3 database3 user3 password3
+#tables:
+#  - table1 shard_database_field shard_database_num shard_table_field shard_table_num
+#  - table2 shard_database_field shard_database_num shard_table_field shard_table_num
+#  - table3 shard_database_field shard_database_num shard_table_field shard_table_num
+dataSources:" > user_input.yaml
+
+for ((i=1; i<=$sharding_num; i++))
+do
+{
+
+    sudo sh patroni.sh "${name}-${i}" $patroni_num "${name}-${i}" $PASSWD "${db_config}"
+    sleep 10s
+    res=`sudo kubectl get pod -n "${name}-${i}" -o wide | grep "${name}-${i}-ha"`
+    result=`echo $res | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'`
+    echo " - ${result} 5000 postgres admin ${PASSWD}" >> user_input.yaml
+}&
+done
+wait
+echo "tables:" >> user_input.yaml
+
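For reference, a hypothetical invocation of the entry script, following the argument order documented in the comments at the top of sharding.sh; it is not part of the patch, and the project name, shard count, replica count, and configuration string are illustrative placeholders only:

# deploy 2 shards, each a 3-instance openGauss group; the script prompts twice for the openGauss password
sudo sh sharding.sh demo 2 3 "synchronous_commit=on"
# afterwards user_input.yaml holds one "ip port database user password" dataSources entry per shard
cat user_input.yaml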