diff --git a/script/mindspore-intelligence/README.md b/script/mindspore-intelligence/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..29e020bab1a0d5c5add984e6dc023e4d22c29d9f
--- /dev/null
+++ b/script/mindspore-intelligence/README.md
@@ -0,0 +1,31 @@
+# MindSpore-intelligence部署脚本
+
+本部署脚本用于自动化部署基于 vLLM + MindSpore 的 DeepSeek R1&V3 以及 openEuler Intelligence。脚本采用 Shell + Python 语言编写,涵盖镜像拉取、依赖安装、k3s 部署、Ollama 部署、DeepSeek 部署、Embeddings 部署、数据库安装、AuthHub 安装、openEuler Intelligence 安装的完整流程,支持一键式部署和分步部署,旨在简化部署过程,提高部署效率和准确性。
+
+### 1. 环境要求
+1. **操作系统**:openEuler 22.03 LTS SP4 及以上版本
+2. **软件依赖**:
+   - `docker`:大模型镜像下载、容器管理;
+   - `Python3`:用于脚本执行;
+   - `oedp`:应用快速安装部署平台;
+   - `k3s`:提供轻量级 Kubernetes 集群;
+   - `helm`:Kubernetes 包管理工具
+
+### 2. 脚本执行
+参考 mindspore-deepseek 项目下的《DeepSeek V3&R1 部署指南》第四章节配置 config.yaml 后执行:
+
+```bash
+cd mindspore-intelligence/script
+bash deploy.sh
+# 选择 0 开启一键式部署
+```
+
+### 3. FAQ
+
+1. 下载的权重是 CKPT 格式,而脚本默认为 safetensors 格式,如何修改?
+
+```shell
+# 修改 config.yaml 中的模型权重类型
+model_type: ckpt
+```
+
diff --git a/script/mindspore-intelligence/chart/README.md b/script/mindspore-intelligence/chart/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..75dd156d0d9b3cfa8fb1618f5e2857b37485eb93
--- /dev/null
+++ b/script/mindspore-intelligence/chart/README.md
@@ -0,0 +1,10 @@
+# EulerCopilot
+
+## 部署顺序
+
+1. databases [必须部署]
+2. authhub [必须部署]
+3. witchaind [必须部署]
+4. euler-copilot [必须部署]
+5. rca
+6. agents
diff --git a/script/mindspore-intelligence/chart/authhub/.helmignore b/script/mindspore-intelligence/chart/authhub/.helmignore
new file mode 100644
index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778
--- /dev/null
+++ b/script/mindspore-intelligence/chart/authhub/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
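Before running the one-click flow, a quick pre-flight check of the dependencies listed in the README above can save a failed run; this snippet is illustrative and not part of the repository:

```bash
#!/bin/bash
# Illustrative pre-flight check: confirm the tools required by the README
# (docker, Python3, oedp, k3s, helm) are on PATH before running deploy.sh.
for cmd in docker python3 oedp k3s helm; do
    if ! command -v "$cmd" >/dev/null 2>&1; then
        echo "missing dependency: $cmd" >&2
        exit 1
    fi
done
echo "all deployment dependencies found"
```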
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/script/mindspore-intelligence/chart/authhub/Chart.yaml b/script/mindspore-intelligence/chart/authhub/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1029a16829299cf198fbc698c7addc21b85686b --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: authhub-chart +description: AuthHub Helm部署包 +type: application +version: 0.9.6 +appVersion: "0.9.6" diff --git a/script/mindspore-intelligence/chart/authhub/configs/backend/aops-config.yml b/script/mindspore-intelligence/chart/authhub/configs/backend/aops-config.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0e1ca44bb3194f97759e6c54422970fa9abdd01 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/configs/backend/aops-config.yml @@ -0,0 +1,30 @@ +infrastructure: + mysql: + host: mysql-db.{{ .Release.Namespace }}.svc.cluster.local + port: 3306 + username: authhub + pool_size: 100 + pool_recycle: 7200 + database: oauth2 + password: ${mysql-password} + redis: + host: redis-db.{{ .Release.Namespace }}.svc.cluster.local + port: 6379 + password: ${redis-password} + +include: "/etc/aops/conf.d" +domain: '{{ regexFind "^[^:]+" (.Values.domain.authhub| default "http://127.0.0.1:30080" | replace "http://" "" | replace "https://" "") }}' +services: + log: + log_level: "INFO" + log_dir: "/var/log/aops" + max_bytes: 31457280 + backup_count: 40 + + email: + server: smtp.163.com + port: 25 + sender: xxx@163.com + authorization_code: xxx + smtp_ssl: false + enabled: false diff --git a/script/mindspore-intelligence/chart/authhub/configs/backend/authhub.yml b/script/mindspore-intelligence/chart/authhub/configs/backend/authhub.yml new file mode 100644 index 0000000000000000000000000000000000000000..78677bd2e3f094c10d0654fe6e6dd11e25420d7d --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/configs/backend/authhub.yml @@ -0,0 +1,4 @@ +uwsgi: + port: 11120 + processes: 1 + daemonize: /var/log/oauth2/uwsgi/oauthhub.log \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/authhub/configs/mysql/init.sql b/script/mindspore-intelligence/chart/authhub/configs/mysql/init.sql new file mode 100644 index 0000000000000000000000000000000000000000..9c1b9fc2a4c5796d51c9263c151a16506d88acd8 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/configs/mysql/init.sql @@ -0,0 +1,112 @@ +CREATE DATABASE IF NOT EXISTS oauth2 DEFAULT CHARACTER SET utf8mb4 DEFAULT COLLATE utf8mb4_bin; +GRANT ALL ON `oauth2`.* TO 'authhub'@'%'; +use oauth2; + +SET FOREIGN_KEY_CHECKS = 0; + +CREATE TABLE IF NOT EXISTS `manage_user` ( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `username` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `user` ( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `email` varchar(40) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `phone` varchar(11) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + 
PRIMARY KEY (`id`), + UNIQUE KEY `username` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `oauth2_client` ( + `id` int NOT NULL AUTO_INCREMENT, + `app_name` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `username` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `client_id` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `client_secret` varchar(120) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `client_id_issued_at` int NOT NULL, + `client_secret_expires_at` int NOT NULL, + `client_metadata` text, + PRIMARY KEY (`id`), + UNIQUE KEY `app_name` (`app_name`), + UNIQUE KEY `client_id` (`client_id`), + KEY `username` (`username`), + KEY `ix_oauth2_client_client_id` (`client_id`), + CONSTRAINT `oauth2_client_ibfk_1` FOREIGN KEY (`username`) REFERENCES `manage_user` (`username`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `login_records` ( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `login_time` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `client_id` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `logout_url` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `login_records_ibfk_1` FOREIGN KEY (`client_id`) REFERENCES `oauth2_client` (`client_id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `oauth2_client_scopes` ( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `client_id` int DEFAULT NULL, + `scopes` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `grant_at` int NOT NULL, + `expires_in` int NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `oauth2_client_scopes_ibfk_1` FOREIGN KEY (`client_id`) REFERENCES `oauth2_client` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `oauth2_code` ( + `id` int NOT NULL AUTO_INCREMENT, + `username` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `code` varchar(120) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `client_id` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `redirect_uri` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `response_type` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `scope` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `nonce` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `auth_time` int NOT NULL, + `code_challenge` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `code_challenge_method` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + +CREATE TABLE IF NOT EXISTS `oauth2_token` ( + `id` int NOT NULL AUTO_INCREMENT, + `user_id` int DEFAULT NULL, + `username` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `client_id` varchar(48) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `token_metadata` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, + `refresh_token_expires_in` int NOT NULL, + `token_type` varchar(40) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + `access_token` varchar(4096) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, + `refresh_token` 
varchar(4096) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL,
+  `scope` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin,
+  `issued_at` int NOT NULL,
+  `access_token_revoked_at` int NOT NULL,
+  `refresh_token_revoked_at` int NOT NULL,
+  `expires_in` int NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `user_id` (`user_id`),
+  CONSTRAINT `oauth2_token_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE,
+  CONSTRAINT `oauth2_token_ibfk_2` FOREIGN KEY (`client_id`) REFERENCES `oauth2_client` (`client_id`) ON DELETE CASCADE
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+
+SET FOREIGN_KEY_CHECKS = 1;
+
+SET @username := "admin";
+SET @password := "pbkdf2:sha256:260000$LEwtriXN8UQ1UIA7$4de6cc1d67263c6579907eab7c1cba7c7e857b32e957f9ff5429592529d7d1b0";
+SET @manage_username := "administrator";
+
+INSERT INTO user (username, password)
+SELECT @username, @password
+FROM DUAL
+WHERE NOT EXISTS(SELECT 1 FROM user WHERE username = @username);
+INSERT INTO manage_user (username, password)
+SELECT @manage_username, @password
+FROM DUAL
+WHERE NOT EXISTS(SELECT 1 FROM manage_user WHERE username = @manage_username);
\ No newline at end of file
diff --git a/script/mindspore-intelligence/chart/authhub/templates/NOTES.txt b/script/mindspore-intelligence/chart/authhub/templates/NOTES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2589763fe3986b0ab74926792e9153c0d9d4ec34
--- /dev/null
+++ b/script/mindspore-intelligence/chart/authhub/templates/NOTES.txt
@@ -0,0 +1,7 @@
+感谢您使用openEuler Intelligence!
+当前为0.9.6版本。
+当前Chart的功能为:AuthHub统一登录系统部署。
+
+说明:
+AuthHub的默认用户名为:administrator
+AuthHub的默认密码为:changeme
diff --git a/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend-config.yaml b/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0509245f79fb87ddb18be97a27ba610e9c51d431
--- /dev/null
+++ b/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend-config.yaml
@@ -0,0 +1,32 @@
+{{- if .Values.authhub.backend.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: authhub-backend-config
+  namespace: {{ .Release.Namespace }}
+data:
+  aops-config.yml: |-
+{{ tpl (.Files.Get "configs/backend/aops-config.yml") . | indent 4 }}
+  authhub.yml: |-
+{{ tpl (.Files.Get "configs/backend/authhub.yml") .
| indent 4 }} + copy-config.yml: |- + copy: + - from: /config/aops-config.yml + to: /config-rw/aops-config.yml + mode: + uid: 0 + gid: 0 + mode: "0o650" + secrets: + - /db-secrets + - /authhub-secrets + - from: /config/conf.d/authhub.yml + to: /config-rw/conf.d/authhub.yml + mode: + uid: 0 + gid: 0 + mode: "0o650" + secrets: + - /db-secrets + - /authhub-secrets +{{- end -}} diff --git a/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend.yaml b/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60246589b5a2a45909f0ff2479147f7a4d5edb45 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/backend/authhub-backend.yaml @@ -0,0 +1,101 @@ +{{- if .Values.authhub.backend.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: authhub-backend-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.authhub.backend.service.type }} + selector: + app: authhub-backend + ports: + - port: 11120 + targetPort: 11120 + nodePort: {{ default nil .Values.authhub.backend.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: authhub-backend-deploy + namespace: {{ .Release.Namespace }} + labels: + app: authhub-backend +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: authhub-backend + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/backend/authhub-backend-config.yaml") . | sha256sum }} + labels: + app: authhub-backend + spec: + automountServiceAccountToken: false + containers: + - name: authhub-backend + image: {{ .Values.authhub.backend.image | default (printf "%s/neocopilot/authhub:0.9.3-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 11120 + protocol: TCP + volumeMounts: + - name: authhub-shared + mountPath: /etc/aops + livenessProbe: + httpGet: + path: /oauth2/applications + port: 11120 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + resources: + requests: + cpu: 0.1 + memory: 128Mi + limits: + {{ toYaml .Values.authhub.backend.resourceLimits | nindent 14 }} + initContainers: + - name: authback-copy + image: {{ .Values.authhub.secret_inject.image | default (printf "%s/neocopilot/secret_inject:dev-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + command: + - python3 + - ./main.py + - --config + - /config/copy-config.yml + - --copy + volumeMounts: + - mountPath: /db-secrets + name: euler-copilot-database-vl + - mountPath: /authhub-secrets + name: authhub-secret-vl + - mountPath: /config-rw + name: authhub-shared + - mountPath: /config + name: authhub-config + volumes: + - name: authhub-shared + emptyDir: + medium: Memory + - name: authhub-config + configMap: + name: authhub-backend-config + items: + - key: aops-config.yml + path: aops-config.yml + - key: authhub.yml + path: conf.d/authhub.yml + - key: copy-config.yml + path: copy-config.yml + - name: authhub-secret-vl + secret: + secretName: authhub-secret + - name: euler-copilot-database-vl + secret: + secretName: euler-copilot-database +{{- end -}} diff --git 
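A rough sketch of how the AuthHub backend pieces above behave at install time; the release name, namespace, and chart path are illustrative:

```bash
# Install the AuthHub chart (paths and names are examples).
helm install authhub ./authhub -n euler-copilot --create-namespace

# The secret_inject init container substitutes ${mysql-password} etc. from the
# mounted secrets and copies the result into the shared emptyDir volume.
kubectl -n euler-copilot logs deploy/authhub-backend-deploy -c authback-copy

# Same endpoint the liveness probe uses (works only if curl exists in the image).
kubectl -n euler-copilot exec deploy/authhub-backend-deploy -- \
    curl -s http://127.0.0.1:11120/oauth2/applications
```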
a/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-config.yaml b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a92389afa3acb2937e48f5d7433c4128d23c94c --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-config.yaml @@ -0,0 +1,10 @@ +{{- if .Values.authhub.mysql.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: mysql-config + namespace: {{ .Release.Namespace }} +data: + init.sql: |- +{{ tpl (.Files.Get "configs/mysql/init.sql") . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-storage.yaml b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0677d68a79f38601b0f738685e5049ccdbc95429 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql-storage.yaml @@ -0,0 +1,16 @@ +{{- if .Values.authhub.mysql.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mysql-pvc + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClassName }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.authhub.mysql.persistentVolumeSize }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql.yaml b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql.yaml new file mode 100644 index 0000000000000000000000000000000000000000..098ee79c2e1e6f318c0ab2ed169a18e950f74232 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/mysql/mysql.yaml @@ -0,0 +1,90 @@ +{{- if .Values.authhub.mysql.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: mysql-db + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.authhub.mysql.service.type }} + selector: + app: mysql + ports: + - port: 3306 + targetPort: 3306 + nodePort: {{ default nil .Values.authhub.mysql.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mysql-deploy + namespace: {{ .Release.Namespace }} + labels: + app: mysql +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: mysql + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/mysql/mysql-config.yaml") . 
| sha256sum }} + labels: + app: mysql + spec: + automountServiceAccountToken: false + containers: + - name: mysql + image: {{ .Values.authhub.mysql.image | default (printf "%s/neocopilot/mysql:8-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + args: + - "--character-set-server=utf8mb4" + - "--collation-server=utf8mb4_unicode_ci" + ports: + - containerPort: 3306 + protocol: TCP + livenessProbe: + exec: + command: + - sh + - -c + - mysqladmin -h 127.0.0.1 -u $MYSQL_USER --password=$MYSQL_PASSWORD ping + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: MYSQL_USER + value: "authhub" + - name: MYSQL_RANDOM_ROOT_PASSWORD + value: "yes" + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: authhub-secret + key: mysql-password + volumeMounts: + - mountPath: /var/lib/mysql + name: mysql-data + - mountPath: /docker-entrypoint-initdb.d/init.sql + name: mysql-init + subPath: init.sql + resources: + requests: + cpu: 0.1 + memory: 384Mi + limits: + {{ toYaml .Values.authhub.mysql.resourceLimits | nindent 14 }} + restartPolicy: Always + volumes: + - name: mysql-data + persistentVolumeClaim: + claimName: mysql-pvc + - name: mysql-init + configMap: + name: mysql-config +{{- end -}} + diff --git a/script/mindspore-intelligence/chart/authhub/templates/secrets.yaml b/script/mindspore-intelligence/chart/authhub/templates/secrets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a274f0973ccfec72963b7fac4efb4866e5d2f22b --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/secrets.yaml @@ -0,0 +1,24 @@ +{{- $authhubSecret := (lookup "v1" "Secret" .Release.Namespace "authhub-secret") -}} +{{- if $authhubSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: authhub-secret + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + mysql-password: {{ index $authhubSecret.data "mysql-password" | b64dec }} +{{- else -}} +apiVersion: v1 +kind: Secret +metadata: + name: authhub-secret + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + mysql-password: {{ randAlphaNum 20 }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web-config.yaml b/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3d342fb6ea5794f5ff02d4718e170fc73b4d6a3a --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web-config.yaml @@ -0,0 +1,44 @@ +{{- if .Values.authhub.web.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: authhub-web-config + namespace: {{ .Release.Namespace }} +data: + authhub.nginx.conf: |- + server { + listen 8000; + server_name localhost; + + # gzip config + gzip on; + gzip_min_length 1k; + gzip_comp_level 6; + gzip_types text/plain text/css text/javascript application/json application/javascript application/x-javascript application/xml; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + location / { + proxy_set_header X-Real-IP $remote_addr; + root /opt/authhub/web/dist; + index index.html; + try_files $uri $uri/ /index.html; + } + + location /authhub { + add_header Access-Control-Allow-Origin *; + add_header Access-Control-Allow-Methods 
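To confirm that init.sql ran and that the generated credentials work, something like the following can be used, assuming the secret and Deployment names from the templates above:

```bash
NS=euler-copilot
# mysql-password is generated at install time and kept across upgrades
# (helm.sh/resource-policy: keep on the secret).
PASS=$(kubectl -n "$NS" get secret authhub-secret -o jsonpath='{.data.mysql-password}' | base64 -d)

# init.sql is mounted under /docker-entrypoint-initdb.d and only runs on an empty data dir.
kubectl -n "$NS" exec deploy/mysql-deploy -- \
    mysql -u authhub -p"$PASS" oauth2 -e 'SHOW TABLES;'
```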
'GET, POST, DELETE, PUT, OPTIONS'; + alias /opt/authhub/web/dist; + index index.html; + try_files $uri $uri/ /index.html last; + } + + location /oauth2 { + proxy_pass http://authhub-backend-service.{{ .Release.Namespace }}.svc.cluster.local:11120; + proxy_set_header Host $host; + proxy_set_header X-Real-URL $request_uri; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Request-Header $http_request_header; + } + } +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web.yaml b/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91bec2d9bf6e6a4a692d3381d45e9f8b852a3107 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/templates/web/authhub-web.yaml @@ -0,0 +1,67 @@ +{{- if .Values.authhub.web.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: authhub-web-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "NodePort" .Values.authhub.web.service.type }} + selector: + app: authhub-web + ports: + - port: 8000 + targetPort: 8000 + nodePort: {{ default 30081 .Values.authhub.web.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: authhub-web-deploy + namespace: {{ .Release.Namespace }} + labels: + app: authhub-web +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: authhub-web + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/web/authhub-web-config.yaml") . | sha256sum }} + labels: + app: authhub-web + spec: + automountServiceAccountToken: false + containers: + - name: authhub-web + image: {{ .Values.authhub.web.image | default (printf "%s/neocopilot/authhub-web:0.9.3-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: 8000 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + volumeMounts: + - name: web-config + mountPath: /etc/nginx/conf.d/authhub.nginx.conf + subPath: authhub.nginx.conf + resources: + requests: + cpu: 0.05 + memory: 64Mi + limits: + {{ toYaml .Values.authhub.web.resourceLimits | nindent 14 }} + volumes: + - name: web-config + configMap: + name: authhub-web-config +{{- end -}} diff --git a/script/mindspore-intelligence/chart/authhub/values.yaml b/script/mindspore-intelligence/chart/authhub/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e6fc8fa88ba50e202d98ea4304f6c52e4f3aaa49 --- /dev/null +++ b/script/mindspore-intelligence/chart/authhub/values.yaml @@ -0,0 +1,72 @@ +# 全局设置 +globals: + # 节点架构:默认是x86 + # [必填] 节点设置:["x86", "arm"] + arch: + # 镜像拉取策略;默认为IfNotPresent + imagePullPolicy: + # 副本数,默认为1 + replicaCount: + # 存储类名称;默认为local-path + storageClassName: + +storage: + # MySQL持久化存储大小,默认为10Gi + mysql: + +domain: + # [必填] AuthHub web端URL, 默认是http://127.0.0.1:30081 + authhub: + +# 部署AuthHub本地鉴权服务 +authhub: + # 配置文件工具 + secret_inject: + # 镜像设置;默认为hub.oepkgs.net/neocopilot/secret_inject:dev-x86 + # 镜像标签:["dev-x86", "dev-arm"] + image: + + web: + # [必填] 是否部署AuthHub前端服务 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/authhub-web:0.9.3-x86 + # 镜像标签:["0.9.3-x86", "0.9.3-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + 
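The settings in this values.yaml can also be overridden on the helm command line instead of editing the file; every value in this example is a placeholder:

```bash
# Example overrides for an arm node (all values are placeholders).
helm upgrade --install authhub ./authhub -n euler-copilot --create-namespace \
    --set globals.arch=arm \
    --set domain.authhub=http://192.168.10.10:30081 \
    --set authhub.web.service.nodePort=30081
```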
service: + # Service类型,例如NodePort + type: NodePort + # 当类型为NodePort时,填写主机的端口号 + nodePort: 30081 + + backend: + # [必填] 是否部署AuthHub后端服务 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/authhub:0.9.3-x86 + # 镜像标签:["0.9.3-x86", "0.9.3-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: + + mysql: + # [必填] 是否启用MySQL + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/mysql:8-x86 + # 镜像标签:["8-x86", "8-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: diff --git a/script/mindspore-intelligence/chart/databases/.helmignore b/script/mindspore-intelligence/chart/databases/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/script/mindspore-intelligence/chart/databases/Chart.yaml b/script/mindspore-intelligence/chart/databases/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f75228d5379e96352329ae44535054d427cfa12d --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: euler-copilot-databases +description: Euler Copilot 数据库 Helm部署包 +type: application +version: 0.9.6 +appVersion: "0.9.6" diff --git a/script/mindspore-intelligence/chart/databases/templates/NOTES.txt b/script/mindspore-intelligence/chart/databases/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..c3d1f59454b90bd0261ce8f4bd393f3760dc6785 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/NOTES.txt @@ -0,0 +1,3 @@ +感谢您使用openEuler Intelligence! +当前为0.9.6版本。 +当前Chart的功能为:数据库部署。 diff --git a/script/mindspore-intelligence/chart/databases/templates/_helpers.tpl b/script/mindspore-intelligence/chart/databases/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..17d02ea449a414f3471be820a98ed653b25f3c16 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/_helpers.tpl @@ -0,0 +1,15 @@ +{{- define "databases.generateGaussPassword" -}} + {{- /* 使用确定的特殊字符组合 */ -}} + {{- $special := "#!" -}} + + {{- /* 生成基础密码 */ -}} + {{- $base := randAlphaNum 10 -}} + {{- $upper := randAlphaNum 3 | upper -}} + {{- $digits := randNumeric 3 -}} + + {{- /* 组合密码 */ -}} + {{- $password := print $base $upper $digits $special -}} + + {{- /* 转义特殊字符 */ -}} + {{- $password | replace "!" "\\!" 
| replace "$" "\\$" | replace "&" "\\&" -}} +{{- end -}} diff --git a/script/mindspore-intelligence/chart/databases/templates/minio/minio-storage.yaml b/script/mindspore-intelligence/chart/databases/templates/minio/minio-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d70fdad490054e82409bf13b2591c8db2110aa70 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/minio/minio-storage.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.databases.minio.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: minio-storage + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.storage.minio }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/minio/minio.yaml b/script/mindspore-intelligence/chart/databases/templates/minio/minio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..185883b624962e1da4c3be9b4df413cf6ba3f1e7 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/minio/minio.yaml @@ -0,0 +1,79 @@ +{{- if .Values.databases.minio.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.databases.minio.service.type }} + selector: + app: minio + ports: + - port: 9000 + targetPort: 9000 + nodePort: {{ default nil .Values.databases.minio.service.dataNodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-deploy + namespace: {{ .Release.Namespace }} + labels: + app: minio +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: minio + template: + metadata: + labels: + app: minio + spec: + automountServiceAccountToken: false + containers: + - name: minio + image: {{ .Values.databases.minio.image | default (printf "%s/neocopilot/minio:empty-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + args: + - "server" + - "/data" + - "--console-address" + - ":9001" + ports: + - containerPort: 9000 + protocol: TCP + livenessProbe: + httpGet: + path: /minio/health/live + port: 9000 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: MINIO_ROOT_USER + value: minioadmin + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: euler-copilot-database + key: minio-password + volumeMounts: + - mountPath: "/data" + name: minio-data + resources: + requests: + cpu: 0.25 + memory: 256Mi + limits: + {{ toYaml .Values.databases.minio.resourceLimits | nindent 14 }} + volumes: + - name: minio-data + persistentVolumeClaim: + claimName: minio-storage +{{- end -}} diff --git a/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-config.yaml b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c9594fd33b2b1c6ff9fb4e41ebf301db9afa226 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-config.yaml @@ -0,0 +1,23 @@ +{{- if .Values.databases.mongo.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: mongo-config + namespace: {{ .Release.Namespace }} +data: + healthcheck.sh: | + #! /bin/bash + + if mongosh --quiet --eval "rs.status().ok" -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} &> /dev/null; then + echo "MongoDB集群状态正常" + exit 0 + else + echo "初始化MongoDB集群" + if ! mongosh --quiet --eval 'rs.initiate({_id: "rs0", members: [{ _id: 0, host: "127.0.0.1:27017" }]});' -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} &> /dev/null; then + echo "初始化MongoDB集群失败!" + exit 1 + fi + echo "初始化MongoDB集群成功!" + exit 0 + fi +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-storage.yaml b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db5c313fba6ab6c039a5529eba5862fd2ec66538 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo-storage.yaml @@ -0,0 +1,16 @@ +{{- if .Values.databases.mongo.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mongo-storage + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.storage.mongo }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/mongo/mongo.yaml b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..87d750f45c061addde3811f81db7ba512ec47706 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/mongo/mongo.yaml @@ -0,0 +1,102 @@ +{{- if .Values.databases.mongo.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: mongo-db + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.databases.mongo.service.type }} + selector: + app: mongo + ports: + - port: 27017 + targetPort: 27017 + nodePort: {{ default nil .Values.databases.mongo.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongo-deploy + namespace: {{ .Release.Namespace }} + labels: + app: mongo +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: mongo + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/mongo/mongo-config.yaml") . | sha256sum }} + labels: + app: mongo + spec: + automountServiceAccountToken: false + containers: + - name: mongo + image: {{ .Values.databases.mongo.image | default (printf "%s/neocopilot/mongo:7.0.16-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + command: + - bash + - -c + - | + if [ ! 
-f "/data/db/file.key" ]; then + openssl rand -base64 756 > /data/db/file.key; + fi + chmod 400 /data/db/file.key; + chown 999:999 /data/db/file.key; + exec docker-entrypoint.sh $$@ + args: + - "mongod" + - "--replSet" + - "rs0" + - "--bind_ip_all" + - "--keyFile" + - "/data/db/file.key" + ports: + - containerPort: 27017 + protocol: TCP + livenessProbe: + exec: + command: + - bash + - /tmp/healthcheck.sh + failureThreshold: 5 + initialDelaySeconds: 30 + periodSeconds: 60 + env: + - name: TZ + value: "Asia/Shanghai" + - name: MONGO_INITDB_ROOT_USERNAME + value: "euler_copilot" + - name: MONGO_INITDB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: euler-copilot-database + key: mongo-password + - name: MONGO_INITDB_DATABASE + value: euler_copilot + volumeMounts: + - mountPath: /data/db + name: mongo-data + - mountPath: /tmp/healthcheck.sh + name: mongo-init + subPath: healthcheck.sh + resources: + requests: + cpu: 0.25 + memory: 256Mi + limits: + {{ toYaml .Values.databases.mongo.resourceLimits | nindent 14 }} + restartPolicy: Always + volumes: + - name: mongo-data + persistentVolumeClaim: + claimName: mongo-storage + - name: mongo-init + configMap: + name: mongo-config +{{- end -}} diff --git a/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-config.yaml b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aca7d76ec00eb222bb5324e3fb6a83787a53f01e --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-config.yaml @@ -0,0 +1,16 @@ +{{- if .Values.databases.opengauss.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: opengauss-config + namespace: {{ .Release.Namespace }} +data: + init.sql: | + CREATE EXTENSION chparser; + CREATE TEXT SEARCH CONFIGURATION chparser (PARSER = chparser); + ALTER TEXT SEARCH CONFIGURATION chparser ADD MAPPING FOR n,v,a,i,e,l WITH simple; + post-init.sh: | + #!/bin/bash + su - omm -c "gs_guc reload -D /var/lib/opengauss/data -c \"behavior_compat_options = 'accept_empty_str'\"" +{{- end -}} + diff --git a/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-storage.yaml b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..307b464cb25b2c68e3d39ada22197ce336003467 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss-storage.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.databases.opengauss.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: opengauss-storage + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.storage.opengauss }} +{{- end -}} + diff --git a/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss.yaml b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss.yaml new file mode 100644 index 0000000000000000000000000000000000000000..215937b2092b9519d05f4d3c243981a18d8bb776 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/opengauss/opengauss.yaml @@ -0,0 +1,93 @@ +{{- if .Values.databases.opengauss.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: opengauss-db + namespace: {{ 
.Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.databases.opengauss.service.type }} + selector: + app: opengauss + ports: + - port: 5432 + targetPort: 5432 + nodePort: {{ default nil .Values.databases.opengauss.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opengauss-deploy + namespace: {{ .Release.Namespace }} + labels: + app: opengauss +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: opengauss + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/opengauss/opengauss-config.yaml") . | sha256sum }} + labels: + app: opengauss + spec: + automountServiceAccountToken: false + containers: + - name: opengauss + image: {{ .Values.databases.opengauss.image | default (printf "%s/neocopilot/opengauss:latest-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 5432 + protocol: TCP + livenessProbe: + exec: + command: ["/bin/bash", "/docker-entrypoint-initdb.d/post-init.sh"] + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: GS_DB + value: "postgres" + - name: GS_USERNAME + value: "postgres" + - name: GS_PASSWORD + valueFrom: + secretKeyRef: + name: euler-copilot-database + key: gauss-password + - name: GAUSSLOG + value: /var/log/opengauss + volumeMounts: + - mountPath: /var/lib/opengauss/data + name: opengauss-data + - mountPath: /docker-entrypoint-initdb.d/init.sql + name: opengauss-init + subPath: init.sql + - mountPath: /docker-entrypoint-initdb.d/post-init.sh + name: opengauss-init + subPath: post-init.sh + - mountPath: /var/log/opengauss + name: opengauss-log + resources: + requests: + cpu: 0.5 + memory: 1024Mi + limits: + {{ toYaml .Values.databases.opengauss.resourceLimits | indent 4 | trim }} + volumes: + - name: opengauss-data + persistentVolumeClaim: + claimName: opengauss-storage + - name: opengauss-init + configMap: + name: opengauss-config + defaultMode: 484 + - name: opengauss-log + emptyDir: {} +{{- end -}} + diff --git a/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-config.yaml b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f73664cdb897d645ab157be4e5b94cbb3a36f6e --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-config.yaml @@ -0,0 +1,13 @@ +{{- if .Values.databases.pgsql.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: pgsql-config + namespace: {{ .Release.Namespace }} +data: + init.sql: | + CREATE EXTENSION zhparser; + CREATE EXTENSION vector; + CREATE TEXT SEARCH CONFIGURATION zhparser (PARSER = zhparser); + ALTER TEXT SEARCH CONFIGURATION zhparser ADD MAPPING FOR n,v,a,i,e,l WITH simple; +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-storage.yaml b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b5a16a3189ad8c823811d464492b77fc6b6835b --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql-storage.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.databases.pgsql.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + 
name: pgsql-storage + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.storage.pgsql }} +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql.yaml b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97b92bc8ebe5683bf336f6f8ae6b0876e37c670a --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/pgsql/pgsql.yaml @@ -0,0 +1,84 @@ +{{- if .Values.databases.pgsql.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: opengauss-db + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.databases.pgsql.service.type }} + selector: + app: pgsql + ports: + - port: 5432 + targetPort: 5432 + nodePort: {{ default nil .Values.databases.pgsql.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pgsql-deploy + namespace: {{ .Release.Namespace }} + labels: + app: pgsql +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: pgsql + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/pgsql/pgsql-config.yaml") . | sha256sum }} + labels: + app: pgsql + spec: + automountServiceAccountToken: false + containers: + - name: pgsql + image: {{ .Values.databases.pgsql.image | default (printf "%s/neocopilot/pgsql-empty:pg16-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 5432 + protocol: TCP + livenessProbe: + exec: + command: + - pg_isready + - -d postgres -U postgres + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: POSTGRES_DB + value: "postgres" + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: euler-copilot-database + key: gauss-password + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: pgsql-data + - mountPath: /docker-entrypoint-initdb.d/init.sql + name: pgsql-init + subPath: init.sql + resources: + requests: + cpu: 0.25 + memory: 512Mi + limits: + {{ toYaml .Values.databases.pgsql.resourceLimits | nindent 14 }} + volumes: + - name: pgsql-data + persistentVolumeClaim: + claimName: pgsql-storage + - name: pgsql-init + configMap: + name: pgsql-config +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/databases/templates/redis/redis.yaml b/script/mindspore-intelligence/chart/databases/templates/redis/redis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e38113800238deb001582b352162c3392253fff --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/redis/redis.yaml @@ -0,0 +1,77 @@ +{{- if .Values.databases.redis.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: redis-db + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.databases.redis.service.type }} + selector: + app: redis + ports: + - port: 6379 + targetPort: 6379 + nodePort: {{ default nil .Values.databases.redis.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: redis-deploy + namespace: {{ .Release.Namespace }} + labels: + app: redis +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + automountServiceAccountToken: false + containers: + - name: redis + image: {{ .Values.databases.redis.image | default (printf "%s/neocopilot/redis:7.4-alpine-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + command: + - redis-server + - --requirepass $(REDIS_PASSWORD) + ports: + - containerPort: 6379 + protocol: TCP + livenessProbe: + exec: + command: + - sh + - -c + - redis-cli -a $REDIS_PASSWORD ping + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: euler-copilot-database + key: redis-password + volumeMounts: + - mountPath: /tmp + name: redis-tmp + resources: + requests: + cpu: 0.1 + memory: 64Mi + limits: + {{ toYaml .Values.databases.redis.resourceLimits | nindent 14 }} + restartPolicy: Always + volumes: + - name: redis-tmp + emptyDir: + medium: Memory +{{- end -}} diff --git a/script/mindspore-intelligence/chart/databases/templates/secrets.yaml b/script/mindspore-intelligence/chart/databases/templates/secrets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b146644ca30f8c1bba1f50ca42e1350f65951356 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/templates/secrets.yaml @@ -0,0 +1,30 @@ +{{- $databaseSecret := (lookup "v1" "Secret" .Release.Namespace "euler-copilot-database") -}} +{{- if $databaseSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: euler-copilot-database + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + redis-password: {{ index $databaseSecret.data "redis-password" | b64dec | quote }} + mongo-password: {{ index $databaseSecret.data "mongo-password" | b64dec | quote }} + minio-password: {{ index $databaseSecret.data "minio-password" | b64dec | quote }} + gauss-password: {{ index $databaseSecret.data "gauss-password" | b64dec | quote }} +{{- else -}} +apiVersion: v1 +kind: Secret +metadata: + name: euler-copilot-database + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + redis-password: {{ randAlphaNum 20 | quote }} + mongo-password: {{ randAlphaNum 20 | quote }} + minio-password: {{ randAlphaNum 20 | quote }} + gauss-password: {{ include "databases.generateGaussPassword" . 
| quote }} +{{- end -}} diff --git a/script/mindspore-intelligence/chart/databases/values.yaml b/script/mindspore-intelligence/chart/databases/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..afeb58c1d81e1a817b22cfc7ba306502bbf56ea4 --- /dev/null +++ b/script/mindspore-intelligence/chart/databases/values.yaml @@ -0,0 +1,107 @@ +# 全局设置 +globals: + # 节点架构:默认是x86 + # 节点设置:["x86", "arm"] + arch: + # 部署副本数,默认为1 + replicaCount: + # 镜像拉取策略,默认为IfNotPresent + imagePullPolicy: + # 存储类,默认为local-path + storageClass: + +# 存储设置 +storage: + # MinIO存储大小,默认为10GB + minio: + # MongoDB存储大小,默认为10GB + mongo: + # 向量数据库存储大小,默认为10GB + opengauss: + # PostgreSQL存储大小,默认为10GB + pgsql: + +# 域名设置 +domain: + # 需要修改为MinIO Console绑定的域名。单节点部署时,服务基于Host进行区分,无法使用IP地址 + minioConsole: + +databases: + minio: + # [必填] 是否部署MinIO实例 + enabled: true + # 镜像设置:默认为hub.oepkgs.net/neocopilot/minio:empty-x86 + # 镜像版本:["empty-x86", "empty-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写MinIO数据端口对应的主机的端口号 + dataNodePort: + # 当类型为NodePort时,填写MinIO控制台对应的主机的端口号 + consoleNodePort: + # Ingress设置 + ingress: + # [必填] 是否暴露MinIO的Console + enabled: true + # Ingress URL前缀 + prefix: / + mongo: + # [必填] 是否部署MySQL数据库实例 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/mongo:7.0.16-x86 + # 镜像版本: ["7.0.16-x86", "7.0.16-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # [必填] Service类型,例如NodePort + type: + # 当类型为nodePort时,填写主机的端口号 + nodePort: + redis: + # [必填] 是否部署Redis实例 + enabled: true + # 镜像设置,默认为hub.oepkgs.net/neocopilot/redis:7.4-alpine-x86 + # 镜像版本: ["7.4-alpine-x86", "7.4-alpine-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,如NodePort + type: + # 当类型为nodePort时,填写主机的端口号 + nodePort: + opengauss: + # [必填] 是否部署PostgreSQL实例 + enabled: true + # 镜像设置,默认为hub.oepkgs.net/neocopilot/opengauss:latest-x86 + # 镜像版本: ["latest-x86", "latest-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: + pgsql: + # [必填] 是否部署PostgreSQL实例 + enabled: false + # 镜像设置,默认为hub.oepkgs.net/neocopilot/pgsql-empty:pg16-x86 + # 镜像版本: ["pg16-x86", "pg16-arm"] + image: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: diff --git a/script/mindspore-intelligence/chart/euler_copilot/.helmignore b/script/mindspore-intelligence/chart/euler_copilot/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
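Since the pgsql Deployment above registers the same in-cluster Service name (opengauss-db) as openGauss, switching back ends is mostly a values change; a sketch, with illustrative release and namespace names:

```bash
# Deploy the databases chart with PostgreSQL instead of openGauss.
helm upgrade --install databases ./databases -n euler-copilot --create-namespace \
    --set databases.opengauss.enabled=false \
    --set databases.pgsql.enabled=true
```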
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/script/mindspore-intelligence/chart/euler_copilot/Chart.yaml b/script/mindspore-intelligence/chart/euler_copilot/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d9ffecff3beb67a173b713907461b49c835701f --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: euler-copilot +description: Euler Copilot Helm部署包 +type: application +version: 0.9.6 +appVersion: "0.9.6" diff --git a/script/mindspore-intelligence/chart/euler_copilot/configs/framework/config.toml b/script/mindspore-intelligence/chart/euler_copilot/configs/framework/config.toml new file mode 100644 index 0000000000000000000000000000000000000000..64a1a6a03600b1cfc48c504f32c1e8c9ce4350cb --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/configs/framework/config.toml @@ -0,0 +1,66 @@ +[deploy] +mode = 'local' +cookie = 'domain' +data_dir = '/app/data' + +[login] +provider = 'authhub' +[login.settings] +host = '{{ .Values.domain.authhub | default "http://127.0.0.1:30081" }}' +host_inner = 'http://authhub-backend-service.{{ .Release.Namespace }}.svc.cluster.local:11120' +login_api = '{{ .Values.domain.euler_copilot | default "http://127.0.0.1:30080" }}/api/auth/login' +app_id = '${clientId}' +app_secret = '${clientSecret}' + +[fastapi] +domain = '{{ regexFind "^[^:]+" (.Values.domain.euler_copilot | default "http://127.0.0.1:30080" | replace "http://" "" | replace "https://" "") }}' + +[security] +half_key1 = '${halfKey1}' +half_key2 = '${halfKey2}' +half_key3 = '${halfKey3}' +jwt_key = '${jwtKey}' + +[embedding] +type = '{{ default "openai" .Values.models.embedding.type }}' +endpoint = '{{ .Values.models.embedding.endpoint }}' +api_key = '{{ .Values.models.embedding.key }}' +model = '{{ default "bge-m3" .Values.models.embedding.name }}' + +[rag] +rag_service = 'http://rag-service.{{ .Release.Namespace }}.svc.cluster.local:9988' + +[mongodb] +host = 'mongo-db.{{ .Release.Namespace }}.svc.cluster.local' +port = 27017 +user = 'euler_copilot' +password = '${mongo-password}' +database = 'euler_copilot' + +[minio] +endpoint = 'minio-service.{{ .Release.Namespace }}.svc.cluster.local:9000' +access_key = 'minioadmin' +secret_key = '${minio-password}' +secure = false + +[llm] +endpoint = '{{ .Values.models.answer.endpoint }}' +key = '{{ .Values.models.answer.key }}' +model = '{{ .Values.models.answer.name }}' +max_tokens = {{ default 8192 .Values.models.answer.maxTokens }} +temperature = {{ default 0.7 .Values.models.answer.temperature }} + +[function_call] +backend = '{{ default "ollama" .Values.models.functionCall.backend }}' +endpoint = '{{ default .Values.models.answer.endpoint .Values.models.functionCall.endpoint }}' +model = '{{ default .Values.models.answer.name .Values.models.functionCall.name }}' +api_key = '{{ default .Values.models.answer.key .Values.models.functionCall.key }}' +max_tokens = {{ default .Values.models.answer.maxTokens .Values.models.functionCall.maxTokens }} +temperature = {{ default 0.7 .Values.models.functionCall.temperature }} + +[check] +enable = false +words_list = "" + +[extra] +sql_url = '' diff --git a/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env b/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env new file mode 100644 index 
0000000000000000000000000000000000000000..5e83f3f415091b156644b33bef9291a2d99d1931 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env @@ -0,0 +1,60 @@ +# Fastapi +UVICORN_IP=0.0.0.0 +UVICORN_PORT=9988 +SSL_CERTFILE= +SSL_KEYFILE= +SSL_ENABLE=false +LOG_METHOD=stdout + +# opengauss +DATABASE_TYPE=opengauss +DATABASE_HOST=opengauss-db.{{ .Release.Namespace }}.svc.cluster.local +DATABASE_PORT=5432 +DATABASE_USER=postgres +DATABASE_PASSWORD=${gauss-password} +DATABASE_DB=postgres + +# MinIO +MINIO_ENDPOINT=minio-service.{{ .Release.Namespace }}.svc.cluster.local:9000 +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=${minio-password} +MINIO_SECURE=false + +# MongoDB +MONGODB_USER = euler_copilot +MONGODB_PASSWORD = ${mongo-password} +MONGODB_HOST = mongo-db.{{ .Release.Namespace }}.svc.cluster.local +MONGODB_PORT = 27017 +MONGODB_DATABASE = euler_copilot + +# Task +TASK_RETRY_TIME=3 + +# Embedding +EMBEDDING_TYPE={{ default "openai" .Values.models.embedding.type }} +EMBEDDING_ENDPOINT={{ .Values.models.embedding.endpoint }}/embeddings +EMBEDDING_API_KEY={{ .Values.models.embedding.key }} +EMBEDDING_MODEL_NAME={{ .Values.models.embedding.name }} + +# Token +CSRF_KEY=${csrfKey} +SESSION_TTL=1440 + +# PROMPT_PATH +PROMPT_PATH=/rag-service/data_chain/common/prompt.yaml +# Stop Words PATH +STOP_WORDS_PATH=/rag-service/data_chain/common/stopwords.txt + +#Security +HALF_KEY1=${halfKey1} +HALF_KEY2=${halfKey2} +HALF_KEY3=${halfKey3} + +#LLM config +MODEL_NAME={{ .Values.models.answer.name }} +OPENAI_API_BASE={{ .Values.models.answer.endpoint }}/v1 +OPENAI_API_KEY={{ default "" .Values.models.answer.key }} +MAX_TOKENS={{ default 2048 .Values.models.answer.maxTokens }} + +# DOCUMENT PARSER +DOCUMENT_PARSE_USE_CPU_LIMIT = 4 diff --git a/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env-sql b/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env-sql new file mode 100644 index 0000000000000000000000000000000000000000..5553f277ae74a1b7e6678598860b5e5405cfabff --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/configs/rag/.env-sql @@ -0,0 +1,29 @@ +# FastAPI +UVICORN_IP=0.0.0.0 +UVICORN_PORT=9015 + +# Postgres +DATABASE_TYPE=opengauss +DATABASE_HOST=opengauss-db.{{ .Release.Namespace }}.svc.cluster.local +DATABASE_PORT=5432 +DATABASE_USER=postgres +DATABASE_PASSWORD=${gauss-password} +DATABASE_DB=postgres + +# QWEN +LLM_MODEL={{ .Values.models.answer.name }} +LLM_URL={{ .Values.models.answer.endpoint }}/v1 +LLM_KEY={{ default "" .Values.models.answer.key }} +LLM_MAX_TOKENS={{ default 2048 .Values.models.answer.maxTokens }} + + +# Embedding +EMBEDDING_TYPE={{ default "openai" .Values.models.embedding.type }} +EMBEDDING_ENDPOINT={{ .Values.models.embedding.endpoint }}/embeddings +EMBEDDING_API_KEY={{ .Values.models.embedding.key }} +EMBEDDING_MODEL_NAME={{ .Values.models.embedding.name }} + +# security +HALF_KEY1=${halfKey1} +HALF_KEY2=${halfKey2} +HALF_KEY3=${halfKey3} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/NOTES.txt b/script/mindspore-intelligence/chart/euler_copilot/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..ec7debb6b984f3e6a2aa2b3128301cd763bce5e6 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/NOTES.txt @@ -0,0 +1,5 @@ +感谢您使用openEuler Intelligence! 
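Before installing the euler-copilot chart, it can help to smoke-test the model endpoints that values.models.* will be templated into the config above; the URLs, paths, and model names below are placeholders, not values shipped with this repository:

```bash
# Placeholder endpoints/models -- replace with what you set in values.yaml.
LLM_ENDPOINT=http://127.0.0.1:1025
EMBEDDING_ENDPOINT=http://127.0.0.1:8080/v1

# OpenAI-compatible chat-completion check for the answer model.
curl -s "$LLM_ENDPOINT/v1/chat/completions" -H 'Content-Type: application/json' \
    -d '{"model":"DeepSeek-R1","messages":[{"role":"user","content":"ping"}]}'

# Embedding check, matching the .../embeddings usage in the RAG .env above.
curl -s "$EMBEDDING_ENDPOINT/embeddings" -H 'Content-Type: application/json' \
    -d '{"model":"bge-m3","input":"ping"}'
```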
+当前为0.9.6版本。 +当前Chart的功能为:openEuler Intelligence核心组件部署。 + +更多项目动态和分享会议,请关注openEuler sig-intelligence:https://www.openeuler.org/en/sig/sig-detail/?name=sig-intelligence diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-config.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..328803d4f537c5285baf0e8190d9a8a7d11f0237 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-config.yaml @@ -0,0 +1,21 @@ +{{- if .Values.euler_copilot.framework.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: framework-config + namespace: {{ .Release.Namespace }} +data: + config.toml: |- +{{ tpl (.Files.Get "configs/framework/config.toml") . | indent 4 }} + copy-config.yaml: |- + copy: + - from: /config/config.toml + to: /config-rw/config.toml + mode: + uid: 0 + gid: 0 + mode: "0o650" + secrets: + - /db-secrets + - /system-secrets +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-storage.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25047e1b86d93d180bfc876aa6647ad15b64e675 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework-storage.yaml @@ -0,0 +1,31 @@ +{{- if .Values.euler_copilot.framework.enabled -}} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: framework-semantics + namespace: {{ .Release.Namespace }} +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + capacity: + storage: {{ default "10Gi" .Values.storage.frameworkSemantics.size }} + accessModes: + - ReadWriteOnce + hostPath: + path: {{ default "/var/lib/eulercopilot" .Values.storage.frameworkSemantics.path }} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: framework-semantics-claim + namespace: {{ .Release.Namespace }} +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "5Gi" .Values.storage.frameworkSemantics.size }} + volumeName: framework-semantics +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework.yaml new file mode 100644 index 0000000000000000000000000000000000000000..034779540664714efb37287101bb70688ed2a394 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/framework/framework.yaml @@ -0,0 +1,120 @@ +{{- if .Values.euler_copilot.framework.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: framework-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.euler_copilot.framework.service.type }} + selector: + app: framework + ports: + - name: framework + port: 8002 + targetPort: 8002 + nodePort: {{ default nil .Values.euler_copilot.framework.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: framework-deploy + namespace: {{ .Release.Namespace }} + labels: + app: framework +spec: + selector: + matchLabels: + app: framework + template: + metadata: + annotations: + checksum/secret: {{ include (print $.Template.BasePath "/framework/framework-config.yaml") . 
| sha256sum }} + labels: + app: framework + spec: + automountServiceAccountToken: false + containers: + - name: framework + image: {{ .Values.euler_copilot.framework.image | default (printf "%s/neocopilot/euler-copilot-framework:0.9.6-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 8002 + protocol: TCP + livenessProbe: + httpGet: + path: /health_check + port: 8002 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + - name: CONFIG + value: "/app/config/config.toml" + volumeMounts: + - mountPath: /app/config + name: framework-shared + - mountPath: /tmp + name: framework-tmp-volume + - mountPath: /app/static + name: web-static + - mountPath: /app/data + name: framework-semantics-vl + securityContext: + readOnlyRootFilesystem: {{ default false .Values.euler_copilot.framework.readOnly }} + resources: + requests: + cpu: 0.2 + memory: 512Mi + limits: + {{ toYaml .Values.euler_copilot.framework.resourceLimits | nindent 14 }} + initContainers: + - name: framework-copy + image: {{ .Values.euler_copilot.secretInject.image | default (printf "%s/neocopilot/secret_inject:dev-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + command: + - python3 + - ./main.py + - --config + - config.yaml + - --copy + volumeMounts: + - mountPath: /config/config.toml + name: framework-config + subPath: config.toml + - mountPath: /app/config.yaml + name: framework-config + subPath: copy-config.yaml + - mountPath: /config-rw + name: framework-shared + - mountPath: /db-secrets + name: database-secrets + - mountPath: /system-secrets + name: system-secrets + volumes: + - name: framework-config + configMap: + name: framework-config + - name: framework-semantics-vl + persistentVolumeClaim: + claimName: framework-semantics-claim + - name: database-secrets + secret: + secretName: euler-copilot-database + - name: system-secrets + secret: + secretName: euler-copilot-system + - name: web-static + persistentVolumeClaim: + claimName: web-static + - name: framework-tmp-volume + emptyDir: + medium: Memory + - name: framework-shared + emptyDir: + medium: Memory +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web-config.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc7ce1ba799bf6ebcc41faeafe202e2e2f02ce47 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web-config.yaml @@ -0,0 +1,10 @@ +{{- if .Values.euler_copilot.rag_web.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: rag-web-config + namespace: {{ .Release.Namespace }} +data: + .env: |- + DATA_CHAIN_BACEND_URL=http://rag-service.{{ .Release.Namespace }}.svc.cluster.local:9988 +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd9daf5379327bef3f0a470785bebf3ab3835a08 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/rag-web/rag-web.yaml @@ -0,0 +1,80 @@ +{{- 
if .Values.euler_copilot.rag_web.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: rag-web-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.euler_copilot.rag_web.service.type }} + selector: + app: rag-web + ports: + - port: 9888 + targetPort: 9888 + nodePort: {{ default nil .Values.euler_copilot.rag_web.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rag-web-deploy + namespace: {{ .Release.Namespace }} + labels: + app: rag-web +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: rag-web + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/rag-web/rag-web-config.yaml") . | sha256sum }} + labels: + app: rag-web + spec: + automountServiceAccountToken: false + containers: + - name: rag-web + image: {{ .Values.euler_copilot.rag_web.image | default (printf "%s/neocopilot/data_chain_web:0.9.6-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 9888 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: 9888 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + volumeMounts: + - mountPath: /config + name: rag-web-config-volume + - mountPath: /var/lib/nginx/tmp + name: rag-web-tmp + - mountPath: /opt/.env + name: rag-web-env-volume + subPath: .env + resources: + requests: + cpu: 0.05 + memory: 64Mi + limits: + {{ toYaml .Values.euler_copilot.rag_web.resourceLimits | nindent 14 }} + volumes: + - name: rag-web-config-volume + emptyDir: + medium: Memory + - name: rag-web-env-volume + configMap: + name: rag-web-config + - name: rag-web-tmp + emptyDir: + medium: Memory +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag-config.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ffc922d10808077cf1667d58d0dbb3c19c4fa96 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag-config.yaml @@ -0,0 +1,32 @@ +{{- if .Values.euler_copilot.rag.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: rag-config + namespace: {{ .Release.Namespace }} +data: + .env: |- +{{ tpl (.Files.Get "configs/rag/.env") . | indent 4}} + .env-sql: |- +{{ tpl (.Files.Get "configs/rag/.env-sql") . 
| indent 4}} + copy-config.yaml: |- + copy: + - from: /config/.env + to: /config-rw/.env + mode: + uid: 1001 + gid: 1001 + mode: "0o650" + secrets: + - /db-secrets + - /system-secrets + - from: /config/.env-sql + to: /config-rw/.env-sql + mode: + uid: 1001 + gid: 1001 + mode: "0o650" + secrets: + - /db-secrets + - /system-secrets +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag.yaml new file mode 100644 index 0000000000000000000000000000000000000000..761623ef715b91cc70508aaa2a42bb4d75e824a1 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/rag/rag.yaml @@ -0,0 +1,130 @@ +{{- if .Values.euler_copilot.rag.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: rag-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "ClusterIP" .Values.euler_copilot.rag.service.type }} + selector: + app: rag + ports: + - name: rag + port: 9988 + targetPort: 9988 + nodePort: {{ default nil .Values.euler_copilot.rag.service.nodePort }} + - name: rag-sql + port: 9015 + targetPort: 9015 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rag-deploy + namespace: {{ .Release.Namespace }} + labels: + app: rag +spec: + replicas: {{ default 1 .Values.globals.replicaCount }} + selector: + matchLabels: + app: rag + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/rag/rag-config.yaml") . | sha256sum }} + labels: + app: rag + spec: + automountServiceAccountToken: false + securityContext: + fsGroup: 1001 + containers: + - name: rag + image: {{ .Values.euler_copilot.rag.image | default (printf "%s/neocopilot/data_chain_back_end:0.9.6-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + securityContext: + readOnlyRootFilesystem: {{ default false .Values.euler_copilot.framework.readOnly }} + capabilities: + drop: + - ALL + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + ports: + - containerPort: 9988 + protocol: TCP + livenessProbe: + httpGet: + path: /health_check + port: 9988 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + volumeMounts: + - mountPath: /rag-service/data_chain/common/.env + name: rag-shared + subPath: .env + - mountPath: /rag-service/chat2db/common/.env + name: rag-shared + subPath: .env-sql + resources: + requests: + cpu: 0.25 + memory: 512Mi + limits: + {{ toYaml .Values.euler_copilot.rag.resourceLimits | nindent 14 }} + initContainers: + - name: rag-copy-secret + image: {{ .Values.euler_copilot.secretInject.image | default (printf "%s/neocopilot/secret_inject:dev-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + command: + - python3 + - ./main.py + - --config + - config.yaml + - --copy + securityContext: + capabilities: + drop: + - ALL + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + volumeMounts: + - mountPath: /config/.env + name: rag-config-vl + subPath: .env + - mountPath: /config/.env-sql + name: rag-config-vl + subPath: .env-sql + - mountPath: /app/config.yaml + name: rag-config-vl + subPath: copy-config.yaml + - mountPath: /config-rw 
+ name: rag-shared + - mountPath: /db-secrets + name: database-secret + - mountPath: /system-secrets + name: system-secret + volumes: + - name: rag-config-vl + configMap: + name: rag-config + - name: database-secret + secret: + secretName: euler-copilot-database + - name: system-secret + secret: + secretName: euler-copilot-system + - name: rag-shared + emptyDir: + medium: Memory +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/secrets.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/secrets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e7faa7234dd4f3c6a239b2ddb8eb7b98d43498d --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/secrets.yaml @@ -0,0 +1,36 @@ +{{- $systemSecret := (lookup "v1" "Secret" .Release.Namespace "euler-copilot-system") -}} +{{- if $systemSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: euler-copilot-system + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + jwtKey: {{ index $systemSecret.data.jwtKey | b64dec }} + halfKey1: {{ index $systemSecret.data.halfKey1 | b64dec }} + halfKey2: {{ index $systemSecret.data.halfKey2 | b64dec }} + halfKey3: {{ index $systemSecret.data.halfKey3 | b64dec }} + csrfKey: {{ index $systemSecret.data.csrfKey | b64dec }} + clientId: {{ .Values.login.client.id }} + clientSecret: {{ .Values.login.client.secret }} +{{- else -}} +apiVersion: v1 +kind: Secret +metadata: + name: euler-copilot-system + namespace: {{ .Release.Namespace }} + annotations: + helm.sh/resource-policy: keep +type: Opaque +stringData: + jwtKey: {{ randAlphaNum 32 | b64enc }} + halfKey1: {{ randAlphaNum 32 }} + halfKey2: {{ randAlphaNum 32 }} + halfKey3: {{ randAlphaNum 32 }} + csrfKey: {{ randAlphaNum 32 | b64enc}} + clientId: {{ .Values.login.client.id }} + clientSecret: {{ .Values.login.client.secret }} +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-config.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1793023e13f014cac11ac2939106f14688620c8b --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-config.yaml @@ -0,0 +1,11 @@ +{{- if .Values.euler_copilot.web.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: web-config + namespace: {{ .Release.Namespace }} +data: + .env: |- + RAG_WEB_URL=http://rag-web-service.{{ .Release.Namespace }}.svc.cluster.local:9888 + FRAMEWORK_URL=http://framework-service.{{ .Release.Namespace }}.svc.cluster.local:8002 +{{- end -}} \ No newline at end of file diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-storage.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-storage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a3d7c11e48e325a7c08d52dd87a230c9d797665e --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web-storage.yaml @@ -0,0 +1,15 @@ +{{- if .Values.euler_copilot.web.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: web-static + namespace: {{ .Release.Namespace }} +spec: + storageClassName: {{ default "local-path" .Values.globals.storageClass }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default "10Gi" .Values.storage.webAsset.size }} + volumeName: web-static +{{- end -}} \ No newline at 
end of file diff --git a/script/mindspore-intelligence/chart/euler_copilot/templates/web/web.yaml b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c20c045dd686f0c1607467f5c680821afe7e7994 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/templates/web/web.yaml @@ -0,0 +1,87 @@ +{{- if .Values.euler_copilot.web.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: web-service + namespace: {{ .Release.Namespace }} +spec: + type: {{ default "NodePort" .Values.euler_copilot.web.service.type }} + selector: + app: web + ports: + - port: 8080 + targetPort: 8080 + nodePort: {{ default 30080 .Values.euler_copilot.web.service.nodePort }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: web-deploy + namespace: {{ .Release.Namespace }} + labels: + app: web +spec: + selector: + matchLabels: + app: web + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/web/web-config.yaml") . | sha256sum }} + labels: + app: web + spec: + automountServiceAccountToken: false + containers: + - name: web + image: {{ .Values.euler_copilot.web.image | default (printf "%s/neocopilot/euler-copilot-web:0.9.6-%s" (.Values.globals.imageRegistry | default "hub.oepkgs.net") (.Values.globals.arch | default "x86")) }} + imagePullPolicy: {{ default "IfNotPresent" .Values.globals.imagePullPolicy }} + ports: + - containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: 8080 + scheme: HTTP + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 90 + env: + - name: TZ + value: "Asia/Shanghai" + volumeMounts: + - mountPath: /config + name: web-config-volume + - mountPath: /var/lib/nginx/tmp + name: web-tmp + - mountPath: /opt/.env + name: web-env-volume + subPath: .env + - mountPath: /usr/share/nginx/html/static + name: web-static + resources: + requests: + cpu: 0.05 + memory: 64Mi + limits: + {{ toYaml .Values.euler_copilot.web.resourceLimits | nindent 14 }} + securityContext: + readOnlyRootFilesystem: {{ default false .Values.euler_copilot.web.readOnly }} + restartPolicy: Always + volumes: + - name: web-static + persistentVolumeClaim: + claimName: web-static + - name: web-config-volume + emptyDir: + medium: Memory + - name: web-env-volume + configMap: + name: web-config + - name: web-tmp + emptyDir: + medium: Memory +{{- end -}} diff --git a/script/mindspore-intelligence/chart/euler_copilot/values.yaml b/script/mindspore-intelligence/chart/euler_copilot/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3722b771c5bb34f4f894de69e0418dd2498de57 --- /dev/null +++ b/script/mindspore-intelligence/chart/euler_copilot/values.yaml @@ -0,0 +1,155 @@ +# 全局设置 +globals: + # 节点架构:默认是x86 + # [必填] 节点设置:["x86", "arm"] + arch: + # 镜像拉取策略, 默认为IfNotPresent + imagePullPolicy: + # 存储类;默认为local-path + storageClass: + +# 模型设置 +models: + # 用于问答的大模型;需要为OpenAI兼容接口 + answer: + # [必填] 接口URL(请根据 API 提供商文档确定是否需要带上“v1”后缀) + endpoint: + # [必填] 接口API Key;默认置空 + key: + # [必填] 模型名称 + name: + # [必填] 模型最大上下文数;建议>=8192 + ctxLength: 8192 + # 模型最大输出长度,建议>=2048 + maxTokens: 2048 + # 用于Function Call的模型;建议使用特定推理框架 + functionCall: + # 推理框架类型,默认为ollama + # 可用的框架类型:["vllm", "sglang", "ollama", "openai"] + backend: + # [必填] 模型地址;请根据 API 提供商文档确定是否需要带上“v1”后缀 + # 选择不填则与问答模型一致 + endpoint: + # API Key;不填则与问答模型一致 + key: + # 模型名称;不填则与问答模型一致 + name: + # 模型最大上下文数;不填则与问答模型一致 + ctxLength: + # 模型最大输出长度;不填则与问答模型一致 + maxTokens: + # 
用于数据向量化(Embedding)的模型 + embedding: + # 推理框架类型,默认为openai + # [必填] Embedding接口类型:["openai", "mindie"] + type: + # [必填] Embedding URL(需要带上“v1”后缀) + endpoint: + # [必填] Embedding 模型API Key + key: + # [必填] Embedding 模型名称 + name: + +# 登录设置 +login: + # 客户端ID设置,仅在type为authhub时有效 + client: + # [必填] 客户端ID + id: + # [必填] 客户端密钥 + secret: + +#域名设置 +domain: + # [必填] EulerCopilot的web前端url;默认为http://127.0.0.1:30080 + euler_copilot: + # [必填] authhub的web前端url;默认为http://127.0.0.1:30081 + authhub: + +# 存储设置 +storage: + # 语义接口 + frameworkSemantics: + # 语义接口地址 + path: + # 语义接口存储大小,默认为5GB + size: + # 共享存储 + webAsset: + # 前后端共享存储大小,默认为10GB + size: + +euler_copilot: + # 配置文件工具 + secretInject: + # 镜像设置;默认为hub.oepkgs.net/neocopilot/secret_inject:dev-x86 + # 镜像标签:["dev-x86", "dev-arm"] + image: + + framework: + # [必填] 是否部署Framework后端框架服务 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/euler-copilot-framework:0.9.6-x86 + # 镜像标签:["0.9.6-x86", "0.9.6-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePorts + type: + # 当类型为nodePort时,填写主机的端口号 + nodePort: + + web: + # [必填] 是否部署Web前端用户界面 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/euler-copilot-web:0.9.6-x86 + # 镜像标签:["0.9.6-x86", "0.9.6-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: NodePort + # 当类型为NodePort时,填写主机的端口号 + nodePort: 30080 + + rag_web: + # [必填] 是否部署RAG Web前端用户界面 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/data_chain_web:0.9.6-x86 + # 镜像标签:["0.9.6-x86", "0.9.6-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: + + rag: + # [必填] 是否部署RAG后端服务 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/data_chain_back_end:0.9.6-x86 + # 镜像标签:["0.9.6-x86", "0.9.6-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: diff --git a/script/mindspore-intelligence/scripts/0-one-click-deploy/one-click-deploy.sh b/script/mindspore-intelligence/scripts/0-one-click-deploy/one-click-deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..9e7ff827bf6d051fab9833a2c131c26e9f1451ed --- /dev/null +++ b/script/mindspore-intelligence/scripts/0-one-click-deploy/one-click-deploy.sh @@ -0,0 +1,293 @@ +#!/bin/bash + +# 增强颜色定义 +RESET='\033[0m' +BOLD='\033[1m' +RED='\033[38;5;196m' +GREEN='\033[38;5;46m' +YELLOW='\033[38;5;226m' +BLUE='\033[38;5;45m' +MAGENTA='\033[38;5;201m' +CYAN='\033[38;5;51m' +WHITE='\033[38;5;255m' +BG_RED='\033[48;5;196m' +BG_GREEN='\033[48;5;46m' +BG_BLUE='\033[48;5;45m' +DIM='\033[2m' + +# 进度条宽度 +PROGRESS_WIDTH=50 +NAMESPACE="euler-copilot" +TIMEOUT=300 # 最大等待时间(秒) +INTERVAL=10 # 检查间隔(秒) + +# 全局变量声明 +authhub_address="" +eulercopilot_address="" + +# 解析命令行参数 +parse_arguments() { + while [[ $# -gt 0 ]]; do + case "$1" in + --eulercopilot_address) + if [ -n "$2" ]; then + eulercopilot_address="$2" + shift 2 + else + echo -e "${RED}错误: --eulercopilot_address 需要提供一个值${RESET}" >&2 + exit 1 + fi + ;; + --authhub_address) + if [ -n "$2" ]; then + authhub_address="$2" + shift 2 + else + echo -e "${RED}错误: --authhub_address 需要提供一个值${RESET}" >&2 + exit 1 + fi + ;; + *) + echo -e "${RED}未知选项: $1${RESET}" >&2 + exit 1 + ;; + esac + done +} + +# 提示用户输入必要参数 +prompt_for_addresses() { + # 如果未通过命令行参数提供eulercopilot_address,则提示用户输入 + if [ -z "$eulercopilot_address" ]; then 
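+        # 交互式读取 EulerCopilot 访问地址,并循环校验直到输入非空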
+ echo -e "${YELLOW}未提供 EulerCopilot 访问地址${RESET}" + read -p "$(echo -e "${CYAN}请输入 EulerCopilot 访问地址 (格式如: http://myhost:30080): ${RESET}")" eulercopilot_address + + # 验证输入是否为空 + while [ -z "$eulercopilot_address" ]; do + echo -e "${RED}错误: EulerCopilot 访问地址不能为空${RESET}" + read -p "$(echo -e "${CYAN}请输入 EulerCopilot 访问地址 (格式如: http://myhost:30080): ${RESET}")" eulercopilot_address + done + fi + + # 如果未通过命令行参数提供authhub_address,则提示用户输入 + if [ -z "$authhub_address" ]; then + echo -e "${YELLOW}未提供 Authhub 访问地址${RESET}" + read -p "$(echo -e "${CYAN}请输入 Authhub 访问地址 (格式如: http://myhost:30081): ${RESET}")" authhub_address + + # 验证输入是否为空 + while [ -z "$authhub_address" ]; do + echo -e "${RED}错误: Authhub 访问地址不能为空${RESET}" + read -p "$(echo -e "${CYAN}请输入 Authhub 访问地址 (格式如: http://myhost:30081): ${RESET}")" authhub_address + done + fi +} + +# 带颜色输出的进度条函数 +colorful_progress() { + local current=$1 + local total=$2 + local progress=$((current*100/total)) + local completed=$((PROGRESS_WIDTH*current/total)) + local remaining=$((PROGRESS_WIDTH-completed)) + + printf "\r${BOLD}${BLUE}⟦${RESET}" + printf "${BG_BLUE}${WHITE}%${completed}s${RESET}" | tr ' ' '▌' + printf "${DIM}${BLUE}%${remaining}s${RESET}" | tr ' ' '·' + printf "${BOLD}${BLUE}⟧${RESET} ${GREEN}%3d%%${RESET} ${CYAN}[%d/%d]${RESET}" \ + $progress $current $total +} + +# 打印装饰线 +print_separator() { + echo -e "${BLUE}${BOLD}$(printf '━%.0s' $(seq 1 $(tput cols)))${RESET}" +} + +# 打印步骤标题 +print_step_title() { + echo -e "\n${BG_BLUE}${WHITE}${BOLD} 步骤 $1 ${RESET} ${MAGENTA}${BOLD}$2${RESET}" + echo -e "${DIM}${BLUE}$(printf '━%.0s' $(seq 1 $(tput cols)))${RESET}" +} + +# 获取主脚本绝对路径并切换到所在目录 +MAIN_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +cd "$MAIN_DIR" || exit 1 + +run_script_with_check() { + local script_path=$1 + local script_name=$2 + local step_number=$3 + local auto_input=${4:-false} + shift 4 + local extra_args=("$@") # 使用数组来存储额外参数 + + # 前置检查:脚本是否存在 + if [ ! 
-f "$script_path" ]; then + echo -e "\n${BOLD}${RED}✗ 致命错误:${RESET}${YELLOW}${script_name}${RESET}${RED} 不存在 (路径: ${CYAN}${script_path}${RED})${RESET}" >&2 + exit 1 + fi + + print_step_title $step_number "$script_name" + + # 获取绝对路径和执行目录 + local script_abs_path=$(realpath "$script_path") + local script_dir=$(dirname "$script_abs_path") + local script_base=$(basename "$script_abs_path") + + echo -e "${DIM}${BLUE}🠖 脚本绝对路径:${YELLOW}${script_abs_path}${RESET}" + echo -e "${DIM}${BLUE}🠖 执行工作目录:${YELLOW}${script_dir}${RESET}" + echo -e "${DIM}${BLUE}🠖 额外参数:${YELLOW}${extra_args[*]}${RESET}" + echo -e "${DIM}${BLUE}🠖 开始执行时间:${YELLOW}$(date +'%Y-%m-%d %H:%M:%S')${RESET}" + + # 创建临时日志文件 + local log_file=$(mktemp) + echo -e "${DIM}${BLUE}🠖 临时日志文件:${YELLOW}${log_file}${RESET}" + + # 执行脚本(带自动输入处理和实时日志输出) + local exit_code=0 + if $auto_input; then + (cd "$script_dir" && yes "" | bash "./$script_base" "${extra_args[@]}" 2>&1 | tee "$log_file") + else + (cd "$script_dir" && bash "./$script_base" "${extra_args[@]}" 2>&1 | tee "$log_file") + fi + exit_code=${PIPESTATUS[0]} + + # 处理执行结果 + if [ $exit_code -eq 0 ]; then + echo -e "\n${BOLD}${GREEN}✓ ${script_name} 执行成功!${RESET}" + echo -e "${DIM}${CYAN}$(printf '%.0s─' $(seq 1 $(tput cols)))${RESET}" + echo -e "${DIM}${CYAN}操作日志:${RESET}" + cat "$log_file" | sed -e "s/^/${DIM}${CYAN} 🠖 ${RESET}/" + echo -e "${DIM}${CYAN}$(printf '%.0s─' $(seq 1 $(tput cols)))${RESET}" + else + echo -e "\n${BOLD}${RED}✗ ${script_name} 执行失败!${RESET}" >&2 + echo -e "${DIM}${RED}$(printf '%.0s─' $(seq 1 $(tput cols)))${RESET}" >&2 + echo -e "${DIM}${RED}错误日志:${RESET}" >&2 + cat "$log_file" | sed -e "s/^/${DIM}${RED} ✗ ${RESET}/" >&2 + echo -e "${DIM}${RED}$(printf '%.0s─' $(seq 1 $(tput cols)))${RESET}" >&2 + rm "$log_file" + exit 1 + fi + + rm "$log_file" + return $exit_code +} + +# 卸载所有组件 +uninstall_all() { + echo -e "\n${CYAN}▸ 开始卸载所有Helm Release...${RESET}" + local RELEASES + RELEASES=$(helm list -n $NAMESPACE --short 2>/dev/null || true) + + if [ -n "$RELEASES" ]; then + echo -e "${YELLOW}找到以下Helm Release:${RESET}" + echo "$RELEASES" | awk '{print " ➤ "$0}' + for release in $RELEASES; do + echo -e "${BLUE}正在删除: ${release}${RESET}" + helm uninstall "$release" -n $NAMESPACE || echo -e "${RED}删除失败,继续执行...${RESET}" + done + else + echo -e "${YELLOW}未找到需要清理的Helm Release${RESET}" + fi + + echo -e "\n${CYAN}▸ 清理持久化存储...${RESET}" + local pvc_list + pvc_list=$(kubectl get pvc -n $NAMESPACE -o name 2>/dev/null || true) + + if [ -n "$pvc_list" ]; then + echo -e "${YELLOW}找到以下PVC资源:${RESET}" + echo "$pvc_list" | awk '{print " ➤ "$0}' + echo "$pvc_list" | xargs -n 1 kubectl delete -n $NAMESPACE || echo -e "${RED}删除失败,继续执行...${RESET}" + else + echo -e "${YELLOW}未找到需要清理的PVC${RESET}" + fi + + echo -e "\n${CYAN}▸ 清理Secret资源...${RESET}" + local secret_list + secret_list=$(kubectl get secret -n $NAMESPACE -o name 2>/dev/null || true) + + if [ -n "$secret_list" ]; then + echo -e "${YELLOW}找到以下Secret资源:${RESET}" + echo "$secret_list" | awk '{print " ➤ "$0}' + echo "$secret_list" | xargs -n 1 kubectl delete -n $NAMESPACE || echo -e "${RED}删除失败,继续执行...${RESET}" + else + echo -e "${YELLOW}未找到需要清理的Secret${RESET}" + fi + + echo -e "\n${BG_GREEN}${WHITE}${BOLD} ✓ 完成 ${RESET} ${GREEN}所有资源已清理完成${RESET}" +} + +# 主界面显示 +show_header() { + clear + echo -e "\n${BOLD}${MAGENTA}$(printf '✧%.0s' $(seq 1 $(tput cols)))${RESET}" + echo -e "${BOLD}${WHITE} Euler Copilot 一键部署系统 ${RESET}" + echo -e "${BOLD}${MAGENTA}$(printf '✧%.0s' $(seq 1 $(tput cols)))${RESET}" + echo -e "${CYAN}◈ 
主工作目录:${YELLOW}${MAIN_DIR}${RESET}" + echo -e "${CYAN}◈ EulerCopilot地址:${YELLOW}${eulercopilot_address:-未设置}${RESET}" + echo -e "${CYAN}◈ Authhub地址:${YELLOW}${authhub_address:-未设置}${RESET}\n" +} +# 修改后的start_deployment函数中的步骤配置 +start_deployment() { + local total_steps=8 + local current_step=1 + + # 步骤配置(脚本路径 脚本名称 自动输入 额外参数数组) + local steps=( + "../1-check-env/check_env.sh 环境检查 false" + "_conditional_tools_step 基础工具安装(k3s+helm) true" + "../3-install-ollama/install_ollama.sh Ollama部署 true" + "../4-deploy-deepseek/deploy_deepseek.sh Deepseek模型部署 false" + "../5-deploy-embedding/deploy-embedding.sh Embedding服务部署 false" + "../6-install-databases/install_databases.sh 数据库集群部署 false" + "../7-install-authhub/install_authhub.sh Authhub部署 true --authhub_address ${authhub_address}" + "_conditional_eulercopilot_step EulerCopilot部署 true" + ) + + for step in "${steps[@]}"; do + local script_path=$(echo "$step" | awk '{print $1}') + local script_name=$(echo "$step" | awk '{print $2}') + local auto_input=$(echo "$step" | awk '{print $3}') + local extra_args=$(echo "$step" | awk '{for(i=4;i<=NF;i++) printf $i" "}') + + # 特殊步骤处理 + if [[ "$script_path" == "_conditional_tools_step" ]]; then + handle_tools_step $current_step + elif [[ "$script_path" == "_conditional_eulercopilot_step" ]]; then + sleep 60 + handle_eulercopilot_step $current_step + else + run_script_with_check "$script_path" "$script_name" $current_step $auto_input $extra_args + fi + + colorful_progress $current_step $total_steps + ((current_step++)) + done +} + +# 处理工具安装步骤 +handle_tools_step() { + local current_step=$1 + if command -v k3s >/dev/null 2>&1 && command -v helm >/dev/null 2>&1; then + echo -e "${CYAN}🠖 检测到已安装 k3s 和 helm,执行环境清理...${RESET}" + uninstall_all + else + run_script_with_check "../2-install-tools/install_tools.sh" "基础工具安装" $current_step true + fi +} + +handle_eulercopilot_step() { + local current_step=$1 + local extra_args=() + + # 构建额外参数数组 + [ -n "$authhub_address" ] && extra_args+=(--authhub_address "$authhub_address") + [ -n "$eulercopilot_address" ] && extra_args+=(--eulercopilot_address "$eulercopilot_address") + + run_script_with_check "../8-install-EulerCopilot/install_eulercopilot.sh" "EulerCopilot部署" $current_step true "${extra_args[@]}" +} + +# 主执行流程 +parse_arguments "$@" +prompt_for_addresses +show_header +start_deployment diff --git a/script/mindspore-intelligence/scripts/1-check-env/check_env.sh b/script/mindspore-intelligence/scripts/1-check-env/check_env.sh new file mode 100755 index 0000000000000000000000000000000000000000..4bc9c47f97e9f728a62200843112521a3ccd6057 --- /dev/null +++ b/script/mindspore-intelligence/scripts/1-check-env/check_env.sh @@ -0,0 +1,249 @@ +#!/bin/bash +# 颜色定义 +COLOR_INFO='\033[34m' # 蓝色信息 +COLOR_SUCCESS='\033[32m' # 绿色成功 +COLOR_ERROR='\033[31m' # 红色错误 +COLOR_WARNING='\033[33m' # 黄色警告 +COLOR_RESET='\033[0m' # 重置颜色 + +# 全局模式标记 +OFFLINE_MODE=false + +function check_user { + if [[ $(id -u) -ne 0 ]]; then + echo -e "${COLOR_ERROR}[Error] 请以root权限运行该脚本!${COLOR_RESET}" + return 1 + fi + return 0 +} + +function check_version { + local current_version_id="$1" + local supported_versions=("${@:2}") + + echo -e "${COLOR_INFO}[Info] 当前操作系统版本为:$current_version_id${COLOR_RESET}" + for version_id in "${supported_versions[@]}"; do + if [[ "$current_version_id" == "$version_id" ]]; then + echo -e "${COLOR_SUCCESS}[Success] 操作系统满足兼容性要求${COLOR_RESET}" + return 0 + fi + done + + echo -e "${COLOR_ERROR}[Error] 操作系统不满足兼容性要求,脚本将退出${COLOR_RESET}" + return 1 +} + +function check_os_version { + local id=$(grep 
'^ID=' /etc/os-release | cut -d= -f2 | tr -d '"') + local version=$(grep -E "^VERSION_ID=" /etc/os-release | cut -d '"' -f 2) + + echo -e "${COLOR_INFO}[Info] 当前发行版为:$id${COLOR_RESET}" + + case $id in + "openEuler"|"bclinux") + local supported_versions=("22.03" "22.09" "23.03" "23.09" "24.03") + check_version "$version" "${supported_versions[@]}" + ;; + "InLinux") + local supported_versions=("23.12") + check_version "$version" "${supported_versions[@]}" + ;; + "FusionOS") + local supported_versions=("23") + check_version "$version" "${supported_versions[@]}" + ;; + "uos") + local supported_versions=("20") + check_version "$version" "${supported_versions[@]}" + ;; + "HopeOS") + local supported_versions=("V22") + check_version "$version" "${supported_versions[@]}" + ;; + "kylin") + local supported_versions=("V10") + check_version "$version" "${supported_versions[@]}" + ;; + *) + echo -e "${COLOR_ERROR}[Error] 发行版不受支持,脚本将退出${COLOR_RESET}" + return 1 + ;; + esac + return $? +} + +function check_hostname { + local current_hostname=$(cat /etc/hostname) + if [[ -z "$current_hostname" ]]; then + echo -e "${COLOR_ERROR}[Error] 未设置主机名,自动设置为localhost${COLOR_RESET}" + set_hostname "localhost" + return $? + else + echo -e "${COLOR_INFO}[Info] 当前主机名为:$current_hostname${COLOR_RESET}" + echo -e "${COLOR_SUCCESS}[Success] 主机名已设置${COLOR_RESET}" + return 0 + fi +} + +function set_hostname { + if ! command -v hostnamectl &> /dev/null; then + echo "$1" > /etc/hostname + echo -e "${COLOR_SUCCESS}[Success] 手动设置主机名成功${COLOR_RESET}" + return 0 + fi + + if hostnamectl set-hostname "$1"; then + echo -e "${COLOR_SUCCESS}[Success] 主机名设置成功${COLOR_RESET}" + return 0 + else + echo -e "${COLOR_ERROR}[Error] 主机名设置失败${COLOR_RESET}" + return 1 + fi +} + +function check_dns { + echo -e "${COLOR_INFO}[Info] 检查DNS设置${COLOR_RESET}" + if grep -q "^nameserver" /etc/resolv.conf; then + echo -e "${COLOR_SUCCESS}[Success] DNS已配置${COLOR_RESET}" + return 0 + fi + + if $OFFLINE_MODE; then + echo -e "${COLOR_WARNING}[Warning] 离线模式:请手动配置内部DNS服务器${COLOR_RESET}" + return 0 + else + echo -e "${COLOR_ERROR}[Error] DNS未配置,自动设置为8.8.8.8${COLOR_RESET}" + set_dns "8.8.8.8" + return $? 
+ fi +} + +function set_dns { + if systemctl is-active --quiet NetworkManager; then + local net_ic=$(nmcli -t -f NAME con show --active | head -n 1) + if [[ -z "$net_ic" ]]; then + echo -e "${COLOR_ERROR}[Error] 未找到活跃网络连接${COLOR_RESET}" + return 1 + fi + + if nmcli con mod "$net_ic" ipv4.dns "$1" && nmcli con mod "$net_ic" ipv4.ignore-auto-dns yes; then + nmcli con down "$net_ic" && nmcli con up "$net_ic" + echo -e "${COLOR_SUCCESS}[Success] DNS设置成功${COLOR_RESET}" + return 0 + else + echo -e "${COLOR_ERROR}[Error] DNS设置失败${COLOR_RESET}" + return 1 + fi + else + cp /etc/resolv.conf /etc/resolv.conf.bak + echo "nameserver $1" >> /etc/resolv.conf + echo -e "${COLOR_SUCCESS}[Success] 手动设置DNS成功${COLOR_RESET}" + return 0 + fi +} + +function check_ram { + local RAM_THRESHOLD=16000 + local current_mem=$(free -m | awk '/Mem/{print $2}') + + echo -e "${COLOR_INFO}[Info] 当前内存:$current_mem MB${COLOR_RESET}" + if (( current_mem < RAM_THRESHOLD )); then + echo -e "${COLOR_ERROR}[Error] 内存不足 ${RAM_THRESHOLD} MB${COLOR_RESET}" + return 1 + fi + echo -e "${COLOR_SUCCESS}[Success] 内存满足要求${COLOR_RESET}" + return 0 +} + +check_disk_space() { + local DIR="$1" + local THRESHOLD="$2" + + local USAGE=$(df --output=pcent "$DIR" | tail -n 1 | sed 's/%//g' | tr -d ' ') + + if [ "$USAGE" -ge "$THRESHOLD" ]; then + echo -e "${COLOR_WARNING}[Warning] $DIR 的磁盘使用率已达到 ${USAGE}%,超过阈值 ${THRESHOLD}%${COLOR_RESET}" + return 1 + else + echo -e "${COLOR_INFO}[Info] $DIR 的磁盘使用率为 ${USAGE}%,低于阈值 ${THRESHOLD}%${COLOR_RESET}" + return 0 + fi +} + +function check_network { + echo -e "${COLOR_INFO}[Info] 检查网络连接...${COLOR_RESET}" + + # 使用TCP检查代替curl + if timeout 5 bash -c 'cat < /dev/null > /dev/tcp/www.baidu.com/80' 2>/dev/null; then + echo -e "${COLOR_SUCCESS}[Success] 网络连接正常${COLOR_RESET}" + return 0 + else + echo -e "${COLOR_ERROR}[Error] 无法访问外部网络${COLOR_RESET}" + return 1 + fi +} + +function check_selinux { + sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config + echo -e "${COLOR_SUCCESS}[Success] SELinux配置已禁用${COLOR_RESET}" + setenforce 0 &>/dev/null + echo -e "${COLOR_SUCCESS}[Success] SELinux已临时禁用${COLOR_RESET}" + return 0 +} + +function check_firewall { + systemctl disable --now firewalld &>/dev/null + echo -e "${COLOR_SUCCESS}[Success] 防火墙已关闭并禁用${COLOR_RESET}" + return 0 +} + +function prepare_offline { + echo -e "${COLOR_INFO}[Info] 准备离线部署环境..." + mkdir -p /home/eulercopilot/images + mkdir -p /home/eulercopilot/tools + mkdir -p /home/eulercopilot/models + echo -e "1. 请确保已上传离线安装镜像至/home/eulercopilot/images" + echo -e "2. 请确认本地软件仓库已配置" + echo -e "3. 所有工具包提前下载到本地目录/home/eulercopilot/tools" + echo -e "4. 所有模型文件提前下载到本地目录/home/eulercopilot/models${COLOR_RESET}" +} + +function main { + check_user || return 1 + check_os_version || return 1 + check_hostname || return 1 + + # 网络检查与模式判断 + if check_network; then + OFFLINE_MODE=false + else + OFFLINE_MODE=true + echo -e "${COLOR_WARNING}[Warning] 切换到离线部署模式${COLOR_RESET}" + prepare_offline + fi + + check_dns || return 1 + check_ram || return 1 + check_disk_space "/" 70 + + if [ $? 
-eq 1 ]; then + echo -e "${COLOR_WARNING}[Warning] 需要清理磁盘空间!${COLOR_RESET}" + else + echo -e "${COLOR_SUCCESS}[Success] 磁盘空间正常${COLOR_RESET}" + fi + + check_selinux || return 1 + check_firewall || return 1 + + # 最终部署提示 + echo -e "\n${COLOR_SUCCESS}#####################################" + if $OFFLINE_MODE; then + echo -e "# 环境检查完成,准备离线部署 #" + else + echo -e "# 环境检查完成,准备在线部署 #" + fi + echo -e "#####################################${COLOR_RESET}" + return 0 +} + +main diff --git a/script/mindspore-intelligence/scripts/2-install-tools/install_tools.sh b/script/mindspore-intelligence/scripts/2-install-tools/install_tools.sh new file mode 100755 index 0000000000000000000000000000000000000000..e763220756869301710a339e0e1c49f737aac296 --- /dev/null +++ b/script/mindspore-intelligence/scripts/2-install-tools/install_tools.sh @@ -0,0 +1,394 @@ +#!/bin/bash + +GITHUB_MIRROR="https://gh-proxy.com" +ARCH=$(uname -m) +TOOLS_DIR="/home/eulercopilot/tools" +eulercopilot_version=0.9.6 + +SCRIPT_PATH="$( + cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 + pwd +)/$(basename "${BASH_SOURCE[0]}")" + +IMPORT_SCRIPT="$( + canonical_path=$(readlink -f "$SCRIPT_PATH" 2>/dev/null || echo "$SCRIPT_PATH") + dirname "$(dirname "$canonical_path")" +)" + + +# 函数:显示帮助信息 +function help { + echo -e "用法:bash install_tools.sh [选项]" + echo -e "选项:" + echo -e " --mirror cn 使用国内镜像加速" + echo -e " --k3s-version VERSION 指定k3s版本(默认:v1.30.2+k3s1)" + echo -e " --helm-version VERSION 指定helm版本(默认:v3.15.0)" + echo -e " -h, --help 显示帮助信息" + echo -e "示例:" + echo -e " bash install_tools.sh # 使用默认设置安装" + echo -e " bash install_tools.sh --mirror cn # 使用国内镜像" + echo -e " bash install_tools.sh --k3s-version v1.30.1+k3s1 --helm-version v3.15.0" + echo -e "离线安装说明:" + echo -e "1. 将k3s二进制文件重命名为 k3s 或 k3s-arm64 并放在 $TOOLS_DIR" + echo -e "2. 将k3s镜像包重命名为 k3s-airgap-images-<架构>.tar.zst 放在 $TOOLS_DIR" + echo -e "3. 将helm包重命名为 helm-<版本>-linux-<架构>.tar.gz 放在 $TOOLS_DIR" +} + +# 解析命令行参数 +MIRROR="" +K3S_VERSION="" +HELM_VERSION="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --mirror) + MIRROR="$2" + shift 2 + ;; + --k3s-version) + K3S_VERSION="$2" + shift 2 + ;; + --helm-version) + HELM_VERSION="$2" + shift 2 + ;; + cn) + MIRROR="cn" + shift + ;; + -h|--help) + help + exit 0 + ;; + *) + echo "未知参数: $1" + help + exit 1 + ;; + esac +done + +# 增强型网络检查 +function check_network { + echo -e "[Info] 正在检查网络连接..." + if curl --retry 3 --retry-delay 2 --connect-timeout 5 -Is https://github.com | head -n 1 | grep 200 >/dev/null; then + echo -e "\033[32m[OK] 网络连接正常\033[0m" + return 0 + else + echo -e "\033[33m[Warn] 无网络连接,切换到离线模式\033[0m" + return 1 + fi +} + +function check_user { + if [[ $(id -u) -ne 0 ]]; then + echo -e "\033[31m[Error]请以root权限运行该脚本!\033[0m" + exit 1 + fi +} + +function check_arch { + case $ARCH in + x86_64) ARCH=amd64 ;; + aarch64) ARCH=arm64 ;; + *) + echo -e "\033[31m[Error]当前CPU架构不受支持\033[0m" + return 1 + ;; + esac + return 0 +} + +install_basic_tools() { + # 安装基础工具 + echo "Installing tar, vim, curl, wget..." + yum install -y tar vim curl wget python3 + + # 检查 pip 是否已安装 + if ! command -v pip3 &> /dev/null; then + echo -e "pip could not be found, installing python3-pip..." + yum install -y python3-pip + else + echo -e "pip is already installed." + fi + + echo "Installing requests ruamel.yaml with pip3..." + if ! 
pip3 install \ + --disable-pip-version-check \ + --retries 3 \ + --timeout 60 \ + --trusted-host mirrors.tools.huawei.com \ + -i http://mirrors.tools.huawei.com/pypi/simple \ + ruamel.yaml requests; then + echo -e "[ERROR] Failed to install ruamel.yaml and requests via pip" >&2 + fi + echo "All basic tools have been installed." + return 0 +} + +function check_local_k3s_files { + local version="${1:-v1.30.2+k3s1}" + local k3s_version="$version" + + # 自动补全v前缀 + if [[ $k3s_version != v* ]]; then + k3s_version="v$k3s_version" + fi + + local image_name="k3s-airgap-images-$ARCH.tar.zst" + local bin_name="k3s" + [[ $ARCH == "arm64" ]] && bin_name="k3s-arm64" + + # 检查本地文件是否存在 + if [[ -f "$TOOLS_DIR/$bin_name" && -f "$TOOLS_DIR/$image_name" ]]; then + echo -e "\033[32m[Info] 检测到本地K3s安装文件,将使用本地文件安装\033[0m" + return 0 + else + echo -e "\033[33m[Info] 本地K3s安装文件不完整,将尝试在线下载\033[0m" + return 1 + fi +} + +function install_k3s { + local version="${1:-v1.30.2+k3s1}" + local use_mirror="$2" + + # 自动补全v前缀 + if [[ $version != v* ]]; then + version="v$version" + fi + local k3s_version="$version" + + local image_name="k3s-airgap-images-$ARCH.tar.zst" + local bin_name="k3s" + [[ $ARCH == "arm64" ]] && bin_name="k3s-arm64" + + # 首先检查本地文件是否存在 + if check_local_k3s_files "$version"; then + # 使用本地文件安装 + echo -e "\033[33m[Info] 进入离线安装K3s模式\033[0m" + + echo -e "[Info] 使用本地包安装..." + cp "$TOOLS_DIR/$bin_name" /usr/local/bin/k3s + chmod +x /usr/local/bin/k3s + + mkdir -p /var/lib/rancher/k3s/agent/images + cp "$TOOLS_DIR/$image_name" "/var/lib/rancher/k3s/agent/images/$image_name" + + # 离线安装脚本 + local local_install_script="$TOOLS_DIR/k3s-install.sh" + if [[ -f "$local_install_script" ]]; then + echo -e "\033[33m[Info] 使用本地安装脚本:$local_install_script\033[0m" + chmod +x "$local_install_script" + if INSTALL_K3S_SKIP_DOWNLOAD=true "$local_install_script"; then + echo -e "\033[32m[Success] K3s安装完成\033[0m" + return 0 + else + echo -e "\033[31m[Error] 本地安装失败\033[0m" + return 1 + fi + else + echo -e "\033[31m[Error] 缺少本地安装脚本:$local_install_script\033[0m" + echo -e "请预先下载并保存到指定目录:" + echo -e "在线模式:curl -sfL https://get.k3s.io -o $local_install_script" + echo -e "国内镜像:curl -sfL https://rancher-mirror.rancher.cn/k3s/k3s-install.sh -o $local_install_script" + return 1 + fi + else + # 本地文件不存在,检查网络 + if check_network; then + echo -e "\033[32m[Info] 开始在线安装K3s\033[0m" + + # 在线下载安装 + local k3s_bin_url="$GITHUB_MIRROR/https://github.com/k3s-io/k3s/releases/download/$k3s_version/$bin_name" + local k3s_image_url="$GITHUB_MIRROR/https://github.com/k3s-io/k3s/releases/download/$k3s_version/$image_name" + + echo -e "[Info] 下载K3s二进制文件..." + if ! curl -L "$k3s_bin_url" -o /usr/local/bin/k3s; then + echo -e "\033[31m[Error] 二进制文件下载失败\033[0m" + return 1 + fi + chmod +x /usr/local/bin/k3s + + echo -e "[Info] 下载依赖镜像..." + mkdir -p /var/lib/rancher/k3s/agent/images + if ! curl -L "$k3s_image_url" -o "/var/lib/rancher/k3s/agent/images/$image_name"; then + echo -e "\033[33m[Warn] 镜像下载失败,可能影响离线能力\033[0m" + fi + + local install_source="https://get.k3s.io" + [[ $use_mirror == "cn" ]] && install_source="https://rancher-mirror.rancher.cn/k3s/k3s-install.sh" + + echo -e "\033[32m[Info] 使用在线安装脚本\033[0m" + if ! curl -sfL "$install_source" | INSTALL_K3S_SKIP_DOWNLOAD=true sh -; then + echo -e "\033[31m[Error] 在线安装失败\033[0m" + return 1 + fi + else + # 既没有本地文件,也没有网络连接 + echo -e "\033[31m[Error] 无法安装K3s:\033[0m" + echo -e "1. 本地缺少必要的安装文件" + echo -e "2. 
网络不可用,无法下载安装文件" + echo -e "请执行以下操作之一:" + echo -e "- 确保网络连接正常后重试" + echo -e "- 或预先将以下文件放置在 $TOOLS_DIR 目录:" + echo -e " - $bin_name" + echo -e " - $image_name" + echo -e " - k3s-install.sh (可选)" + return 1 + fi + fi +} + +function check_local_helm_file { + local version="${1:-v3.15.0}" + local helm_version="$version" + + # 自动补全v前缀 + if [[ $helm_version != v* ]]; then + helm_version="v$helm_version" + fi + + local file_name="helm-${helm_version}-linux-${ARCH}.tar.gz" + + # 检查本地文件是否存在 + if [[ -f "$TOOLS_DIR/$file_name" ]]; then + echo -e "\033[32m[Info] 检测到本地Helm安装文件,将使用本地文件安装\033[0m" + return 0 + else + echo -e "\033[33m[Info] 本地Helm安装文件不存在,将尝试在线下载\033[0m" + return 1 + fi +} + +function install_helm { + local version="${1:-v3.15.0}" + local use_mirror="$2" + + # 自动补全v前缀 + if [[ $version != v* ]]; then + version="v$version" + fi + local helm_version="$version" + + local file_name="helm-${helm_version}-linux-${ARCH}.tar.gz" + + # 首先检查本地文件是否存在 + if check_local_helm_file "$version"; then + echo -e "\033[33m[Info] 进入离线安装Helm模式\033[0m" + echo -e "[Info] 使用本地包安装..." + cp "$TOOLS_DIR/$file_name" . + else + # 本地文件不存在,检查网络 + if check_network; then + echo -e "\033[32m[Info] 开始在线安装Helm\033[0m" + + local base_url="https://get.helm.sh" + if [[ $use_mirror == "cn" ]]; then + local helm_version_without_v="${helm_version#v}" + base_url="https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts/helm/${helm_version_without_v}" + fi + + echo -e "[Info] 下载Helm..." + if ! curl -L "$base_url/$file_name" -o "$file_name"; then + echo -e "\033[31m[Error] 下载失败\033[0m" + return 1 + fi + else + # 既没有本地文件,也没有网络连接 + echo -e "\033[31m[Error] 无法安装Helm:\033[0m" + echo -e "1. 本地缺少必要的安装文件" + echo -e "2. 网络不可用,无法下载安装文件" + echo -e "请执行以下操作之一:" + echo -e "- 确保网络连接正常后重试" + echo -e "- 或预先将以下文件放置在 $TOOLS_DIR 目录:" + echo -e " - $file_name" + return 1 + fi + fi + + echo -e "[Info] 解压安装..." + tar -zxvf "$file_name" --strip-components 1 -C /usr/local/bin "linux-$ARCH/helm" + chmod +x /usr/local/bin/helm + rm -f "$file_name" + + echo -e "\033[32m[Success] Helm安装完成\033[0m" + return 0 +} + + +function check_k3s_status() { + local STATUS=$(systemctl is-active k3s) + + if [ "$STATUS" = "active" ]; then + echo -e "[Info] k3s 服务当前处于运行状态(active)。" + else + echo -e "[Info] k3s 服务当前不是运行状态(active),它的状态是: $STATUS。尝试启动服务..." + # 尝试启动k3s服务 + systemctl start k3s.service + + # 再次检查服务状态 + STATUS=$(systemctl is-active k3s.service) + if [ "$STATUS" = "active" ]; then + echo -e "[Info] k3s 服务已成功启动并正在运行。" + else + echo -e "\033[31m[Error] 无法启动 k3s 服务,请检查日志或配置\033[0m" + fi + fi +} + +check_hub_connection() { + if curl -sSf http://hub.oepkgs.net >/dev/null 2>&1; then + echo -e "[Info] 镜像站连接正常" + return 0 + else + echo -e "[Error] 镜像站连接失败" + return 1 + fi +} + +function main { + # 创建工具目录 + mkdir -p "$TOOLS_DIR" + + check_user + check_arch || exit 1 + install_basic_tools + + local use_mirror="$MIRROR" + local k3s_version="${K3S_VERSION:-v1.30.2+k3s1}" + local helm_version="${HELM_VERSION:-v3.15.0}" + + # 安装K3s(如果尚未安装) + if ! command -v k3s &> /dev/null; then + install_k3s "$k3s_version" "$use_mirror" || exit 1 + else + echo -e "[Info] K3s 已经安装,跳过安装步骤" + fi + + # 安装Helm(如果尚未安装) + if ! 
command -v helm &> /dev/null; then + install_helm "$helm_version" "$use_mirror" || exit 1 + else + echo -e "[Info] Helm 已经安装,跳过安装步骤" + fi + mkdir -p ~/.kube + ln -sf /etc/rancher/k3s/k3s.yaml ~/.kube/config + check_k3s_status + + # 优先检查网络 + if check_hub_connection; then + echo -e "\033[32m[Info] 在线环境,跳过镜像导入\033[0m" + else + echo -e "\033[33m[Info] 离线环境,开始导入本地镜像,请确保本地目录已存在所有镜像文件\033[0m" + bash "$IMPORT_SCRIPT/9-other-script/import_images.sh" -v "$eulercopilot_version" + fi + + echo -e "\n\033[32m=== 全部工具安装完成 ===\033[0m" + echo -e "K3s 版本:$(k3s --version | head -n1)" + echo -e "Helm 版本:$(helm version --short)" +} + +# 执行主函数 +main diff --git a/script/mindspore-intelligence/scripts/3-install-ollama/install_ollama.sh b/script/mindspore-intelligence/scripts/3-install-ollama/install_ollama.sh new file mode 100755 index 0000000000000000000000000000000000000000..c0b0c244878be5ca2857bbf55e568e549a150bc6 --- /dev/null +++ b/script/mindspore-intelligence/scripts/3-install-ollama/install_ollama.sh @@ -0,0 +1,338 @@ +#!/bin/bash + +MAGENTA='\e[35m' +CYAN='\e[36m' +BLUE='\e[34m' +GREEN='\e[32m' +YELLOW='\e[33m' +RED='\e[31m' +RESET='\e[0m' + +# 初始化全局变量 +OS_ID="" +ARCH="" +OLLAMA_BIN_PATH="/usr/bin/ollama" +OLLAMA_LIB_DIR="/usr/lib/ollama" +OLLAMA_DATA_DIR="/var/lib/ollama" +SERVICE_FILE="/etc/systemd/system/ollama.service" +LOCAL_DIR="/home/eulercopilot/tools" +LOCAL_TGZ="ollama-linux-${ARCH}.tgz" + +# 带时间戳的输出函数 +log() { + local level=$1 + shift + local color + case "$level" in + "INFO") color=${BLUE} ;; + "SUCCESS") color=${GREEN} ;; + "WARNING") color=${YELLOW} ;; + "ERROR") color=${RED} ;; + *) color=${RESET} ;; + esac + echo -e "${color}[$(date '+%Y-%m-%d %H:%M:%S')] $level: $*${RESET}" +} + +# 网络连接检查 +check_network() { + local install_url=$(get_ollama_url) + local domain=$(echo "$install_url" | awk -F/ '{print $3}') + local test_url="http://$domain" + + log "INFO" "检查网络连接 ($domain)..." + if curl --silent --head --fail --connect-timeout 5 --max-time 10 "$test_url" >/dev/null 2>&1; then + log "INFO" "网络连接正常" + return 0 + else + log "WARNING" "无法连接互联网" + return 1 + fi +} + +# 操作系统检测 +detect_os() { + log "INFO" "步骤1/8:检测操作系统和架构..." + if [ -f /etc/os-release ]; then + . /etc/os-release + OS_ID="${ID}" + log "INFO" "检测到操作系统: ${PRETTY_NAME}" + else + log "ERROR" "无法检测操作系统类型" + exit 1 + fi + + ARCH=$(uname -m) + case "$ARCH" in + x86_64) ARCH="amd64" ;; + aarch64) ARCH="arm64" ;; + armv7l) ARCH="armv7" ;; + *) log "ERROR" "不支持的架构: $ARCH"; exit 1 ;; + esac + LOCAL_TGZ="ollama-linux-${ARCH}.tgz" + log "INFO" "系统架构: $ARCH" +} + +# 安装系统依赖 +install_dependencies() { + log "INFO" "步骤2/8:安装系统依赖..." + local deps=(curl wget tar gzip jq) + + case "$OS_ID" in + ubuntu|debian) + if ! apt-get update; then + log "ERROR" "APT源更新失败" + exit 1 + fi + if ! DEBIAN_FRONTEND=noninteractive apt-get install -y "${deps[@]}"; then + log "ERROR" "APT依赖安装失败" + exit 1 + fi + ;; + centos|rhel|fedora|openEuler|kylin|uos) + if ! yum install -y "${deps[@]}"; then + log "ERROR" "YUM依赖安装失败" + exit 1 + fi + ;; + *) + log "ERROR" "不支持的发行版: $OS_ID" + exit 1 + ;; + esac + log "SUCCESS" "系统依赖安装完成" +} + +# 获取Ollama下载地址 +get_ollama_url() { + echo "https://repo.oepkgs.net/openEuler/rpm/openEuler-22.03-LTS/contrib/eulercopilot/tools/$ARCH/ollama-linux-$ARCH.tgz" +} + +install_ollama() { + log "INFO" "步骤3/8:安装Ollama核心..." 
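+    # 根据当前架构计算 Ollama 下载地址与临时下载文件路径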
+ local install_url=$(get_ollama_url) + local tmp_file="/tmp/ollama-${ARCH}.tgz" + # 增强清理逻辑 + if [ -x "$OLLAMA_BIN_PATH" ] || [ -x "/usr/local/bin/ollama" ]; then + log "WARNING" "发现已存在的Ollama安装,版本: $($OLLAMA_BIN_PATH --version)" + read -p "是否重新安装?[y/N] " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + log "WARNING" "发现已存在的Ollama安装,正在清理..." + systemctl stop ollama 2>/dev/null || true + systemctl disable ollama 2>/dev/null || true + rm -rf ${SERVICE_FILE} 2>/dev/null + rm $(which ollama) 2>/dev/null + rm -rf ${OLLAMA_LIB_DIR} 2>/dev/null + rm -rf ${OLLAMA_DATA_DIR} 2>/dev/null + rm -rf /run/ollama 2>/dev/null + userdel ollama 2>/dev/null || true + groupdel ollama 2>/dev/null || true + else + return 0 + fi + fi + # 增强安装包处理 + local actual_tgz_path="" + if [ -f "${LOCAL_DIR}/${LOCAL_TGZ}" ]; then + log "INFO" "使用本地安装包: ${LOCAL_DIR}/${LOCAL_TGZ}" + actual_tgz_path="${LOCAL_DIR}/${LOCAL_TGZ}" + else + if ! check_network; then + log "ERROR" "网络不可用且未找到本地安装包" + log "INFO" "请预先下载${LOCAL_TGZ}并放置${LOCAL_DIR}" + exit 1 + fi + log "INFO" "下载安装包: ${install_url}" + if ! wget --show-progress -q -O "${tmp_file}" "${install_url}"; then + log "ERROR" "下载失败,退出码: $?" + exit 1 + fi + actual_tgz_path="${tmp_file}" + fi + + log "INFO" "解压文件到系统目录/usr..." + if ! tar -xzvf "$actual_tgz_path" -C /usr/; then + log "ERROR" "解压失败,可能原因:\n1.文件损坏\n2.磁盘空间不足\n3.权限问题" + exit 1 + fi + + chmod +x "$OLLAMA_BIN_PATH" + if [ ! -x "$OLLAMA_BIN_PATH" ]; then + log "ERROR" "安装后验证失败:可执行文件不存在" + exit 1 + fi + log "SUCCESS" "Ollama核心安装完成,版本: $($OLLAMA_BIN_PATH --version || echo '未知')" + # 新增:创建兼容性符号链接 + if [ ! -L "/usr/local/bin/ollama" ]; then + ln -sf "$OLLAMA_BIN_PATH" "/usr/local/bin/ollama" + log "INFO" "已创建符号链接:/usr/local/bin/ollama → $OLLAMA_BIN_PATH" + fi + + # 设置库路径 + echo "${OLLAMA_LIB_DIR}" > /etc/ld.so.conf.d/ollama.conf + ldconfig +} + +fix_user() { + log "INFO" "步骤4/8: 修复用户配置..." + + # 终止所有使用ollama用户的进程 + if pgrep -u ollama >/dev/null; then + log "WARNING" "发现正在运行的ollama进程,正在终止..." + pkill -9 -u ollama || true + sleep 2 + if pgrep -u ollama >/dev/null; then + log "ERROR" "无法终止ollama用户进程" + exit 1 + fi + fi + + # 清理旧用户 + if id ollama &>/dev/null; then + # 检查用户是否被锁定 + if passwd -S ollama | grep -q 'L'; then + log "INFO" "发现被锁定的ollama用户,正在解锁并设置随机密码..." + random_pass=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | head -c 16) + usermod -p "$(openssl passwd -1 "$random_pass")" ollama + fi + + # 删除用户及其主目录 + if ! userdel -r ollama; then + log "WARNING" "无法删除ollama用户,尝试强制删除..." + if ! userdel -f -r ollama; then + log "ERROR" "强制删除用户失败,尝试手动清理..." + sed -i '/^ollama:/d' /etc/passwd /etc/shadow /etc/group + rm -rf /var/lib/ollama + log "WARNING" "已手动清理ollama用户信息" + fi + fi + log "INFO" "已删除旧ollama用户" + fi + + # 检查组是否存在 + if getent group ollama >/dev/null; then + log "INFO" "ollama组已存在,将使用现有组" + existing_group=true + else + existing_group=false + fi + + # 创建系统用户 + if ! useradd -r -g ollama -d /var/lib/ollama -s /bin/false ollama; then + log "ERROR" "用户创建失败,尝试手动创建..." + + # 如果组不存在则创建 + if ! $existing_group; then + if ! groupadd -r ollama; then + log "ERROR" "无法创建ollama组" + exit 1 + fi + fi + + # 再次尝试创建用户 + if ! useradd -r -g ollama -d /var/lib/ollama -s /bin/false ollama; then + log "ERROR" "手动创建用户失败,请检查以下内容:" + log "ERROR" "1. /etc/passwd 和 /etc/group 文件是否可写" + log "ERROR" "2. 系统中是否存在冲突的用户/组" + log "ERROR" "3. 
系统用户限制(/etc/login.defs)" + exit 1 + fi + fi + + # 创建目录结构 + mkdir -p /var/lib/ollama/.ollama/{models,bin} + chown -R ollama:ollama /var/lib/ollama + chmod -R 755 /var/lib/ollama + log "SUCCESS" "用户配置修复完成" +} + +fix_service() { + log "INFO" "步骤5/8:配置系统服务..." + cat > "${SERVICE_FILE}" </dev/null; then + log "ERROR" "Ollama未正确安装" + exit 1 + fi + if ! ollama list &>/dev/null; then + log "ERROR" "服务连接失败,请检查:\n1.服务状态: systemctl status ollama\n2.端口监听: ss -tuln | grep 11434" + exit 1 + fi + + log "SUCCESS" "验证通过,您可以执行以下操作:\n ollama list # 查看模型列表\n ollama run llama2 # 运行示例模型" +} + +### 主执行流程 ### +main() { + if [[ $EUID -ne 0 ]]; then + log "ERROR" "请使用sudo运行此脚本" + exit 1 + fi + + detect_os + install_dependencies + echo -e "${MAGENTA}=== 开始Ollama安装 ===${RESET}" + install_ollama + fix_user + fix_service + restart_service + if final_check; then + echo -e "${MAGENTA}=== Ollama安装成功 ===${RESET}" + else + echo -e "${MAGENTA}=== Ollama安装失败 ===${RESET}" + fi +} + +main diff --git a/script/mindspore-intelligence/scripts/4-deploy-deepseek/deploy_deepseek.sh b/script/mindspore-intelligence/scripts/4-deploy-deepseek/deploy_deepseek.sh new file mode 100755 index 0000000000000000000000000000000000000000..5ce4bd7b0dd724d8d61b6e64d9da989d996979e7 --- /dev/null +++ b/script/mindspore-intelligence/scripts/4-deploy-deepseek/deploy_deepseek.sh @@ -0,0 +1,224 @@ +#!/bin/bash +set -euo pipefail + +# 颜色定义 +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # 重置颜色 + +# 全局变量 +readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +readonly MINDSPORE_DEEPSEEK_DIR="${SCRIPT_DIR}/../../../mindspore-deepseek" +readonly CONFIG_FILE="${MINDSPORE_DEEPSEEK_DIR}/config.yaml" +readonly YAML_EXTRACTOR="${SCRIPT_DIR}/yaml_extractor.py" +readonly TIMEOUT_DURATION=45 + +# 从配置文件中获取参数 +function get_config_value() { + local key="$1" + if [ ! -f "$YAML_EXTRACTOR" ]; then + echo -e "${RED}错误: YAML解析脚本不存在: $YAML_EXTRACTOR${NC}" + return 1 + fi + python3 "$YAML_EXTRACTOR" -f "$CONFIG_FILE" -k "$key" +} + +# 检查命令是否存在 +function check_command() { + local cmd=$1 + if ! command -v "$cmd" &> /dev/null; then + echo -e "${RED}命令未安装: $cmd${NC}" + return 1 + fi + echo -e "${GREEN}命令已安装: $cmd${NC}" + return 0 +} + +# 检查目录是否存在 +function check_directory() { + local dir=$1 + if [ ! -d "$dir" ]; then + echo -e "${RED}目录不存在: $dir${NC}" + return 1 + fi + echo -e "${GREEN}目录存在: $dir${NC}" + return 0 +} + +# 检查文件是否存在 +function check_file() { + local file=$1 + if [ ! -f "$file" ]; then + echo -e "${RED}文件不存在: $file${NC}" + return 1 + fi + echo -e "${GREEN}文件存在: $file${NC}" + return 0 +} + +# 安装deepseek +function install_deepseek() { + local install_dir=$1 + echo -e "${YELLOW}执行: oedp run install -p $install_dir${NC}" + + if oedp run install -p "$install_dir"; then + echo -e "${GREEN}deepseek安装成功${NC}" + return 0 + else + echo -e "${RED}deepseek安装失败${NC}" + return 1 + fi +} + +# 验证部署结果 +function verify_deployment() { + echo -e "${BLUE}步骤4/4:验证部署结果...${NC}" + + # 从配置文件获取参数 + local IP=$(get_config_value "all.children.masters.hosts.master1.ansible_host") + local PORT=$(get_config_value "all.vars.llm_port") + local MODEL_NAME=$(get_config_value "all.vars.model_path") + + # 验证参数获取 + if [ -z "$IP" ] || [ -z "$PORT" ] || [ -z "$MODEL_NAME" ]; then + echo -e "${RED}从配置文件获取参数失败${NC}" + echo -e "${YELLOW}请检查以下配置项:" + echo "1. all.children.masters.hosts.master1.ansible_host" + echo "2. all.vars.llm_port" + echo "3. 
all.vars.model_path" + return 1 + fi + + echo -e "${GREEN}获取配置参数成功:" + echo "IP: $IP" + echo "PORT: $PORT" + echo "MODEL_NAME: $MODEL_NAME${NC}" + + # 检查jq命令 + if ! command -v jq &> /dev/null; then + echo -e "${YELLOW}注意:jq未安装,响应解析可能受限${NC}" + fi + + local retries=3 + local interval=5 + local attempt=1 + + echo -e "${YELLOW}执行API测试(超时时间${TIMEOUT_DURATION}秒)...${NC}" + + while [ $attempt -le $retries ]; do + echo -e "${BLUE}尝试 $attempt: 发送测试请求...${NC}" + + response=$(timeout ${TIMEOUT_DURATION} curl -s http://$IP:$PORT/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer sk-123456" \ + -d '{ + "model": "'"${MODEL_NAME}"'", + "messages": [ + {"role": "system", "content": "你是一个AI助手"}, + {"role": "user", "content": "你好,请说一首中文古诗"} + ], + "stream": false + }' 2>&1) + + if [[ $? -eq 0 ]] && [[ -n "$response" ]]; then + echo -e "${GREEN}测试响应成功,收到有效输出:" + if command -v jq &> /dev/null && jq -e .choices[0].message.content <<< "$response" &> /dev/null; then + jq .choices[0].message.content <<< "$response" + else + echo "$response" + fi + return 0 + else + echo -e "${YELLOW}请求未得到有效响应,重试中...${NC}" + ((attempt++)) + sleep $interval + fi + done + + echo -e "${RED}验证失败:经过 ${retries} 次重试仍未收到有效响应${NC}" + return 1 +} + +# 主函数 +function main() { + echo -e "${BLUE}=== 开始安装流程 ===${NC}" + + echo $SCRIPT_DIR + echo $MINDSPORE_DEEPSEEK_DIR + + # 步骤0:检查必要文件 + echo -e "${BLUE}步骤0/4:检查必要文件...${NC}" + check_file "$YAML_EXTRACTOR" || exit 1 + check_file "$CONFIG_FILE" || exit 1 + + # 步骤1:检查oedp工具 + echo -e "${BLUE}步骤1/4:检查oedp工具...${NC}" + check_command "oedp" || { + echo -e "${YELLOW}可能原因:" + echo "1. 未安装oedp工具" + echo "2. PATH环境变量未包含oedp路径" + echo -e "3. 工具安装不完整${NC}" + exit 1 + } + + # 步骤2:检查mindspore-deepseek目录 + echo -e "${BLUE}步骤2/4:检查mindspore-deepseek目录...${NC}" + check_directory "$MINDSPORE_DEEPSEEK_DIR" || { + echo -e "${YELLOW}可能原因:" + echo "1. 项目未正确克隆" + echo "2. 当前工作目录错误" + echo -e "3. 目录路径配置错误${NC}" + exit 1 + } + + # 步骤3:安装deepseek + echo -e "${BLUE}步骤3/4:安装deepseek...${NC}" + install_deepseek "$MINDSPORE_DEEPSEEK_DIR" || { + echo -e "${YELLOW}可能原因:" + echo "1. 安装脚本执行失败" + echo "2. 依赖项缺失" + echo -e "3. 权限不足${NC}" + exit 1 + } + + # 步骤4:验证部署结果 + verify_deployment || { + echo -e "${YELLOW}可能原因:" + echo "1. 服务未启动" + echo "2. 配置参数错误" + echo "3. 模型未正确加载" + echo "4. 网络连接问题" + echo -e "5. 
API请求格式错误${NC}" + exit 1 + } + + echo -e "${GREEN}=== 所有步骤已完成 ===${NC}" + + # 使用说明 + local IP=$(get_config_value "all.children.masters.hosts.master1.ansible_host") + local PORT=$(get_config_value "all.vars.llm_port") + local MODEL_NAME=$(get_config_value "all.vars.model_path") + + echo -e "${YELLOW}使用说明:${NC}" + echo -e "${BLUE}API访问示例:${NC}" + cat < Union[Any, None]: + """ + 递归获取嵌套字典中的值 + + Args: + data: 字典数据 + keys: 键路径列表 + + Returns: + 找到的值,如果找不到则返回 None + """ + if not keys: + return data + + key = keys[0] + if key in data: + if len(keys) == 1: + return data[key] + elif isinstance(data[key], dict): + return get_nested_value(data[key], keys[1:]) + else: + # 如果还有剩余键但当前值不是字典,说明路径错误 + return None + return None + +def main(): + # 设置命令行参数 + parser = argparse.ArgumentParser(description='从YAML文件中提取键值') + parser.add_argument('-f', '--file', required=True, help='YAML文件路径') + parser.add_argument('-k', '--key', required=True, + help='点分隔的键路径(例如:all.vars.image_name)') + parser.add_argument('-y', '--yaml', action='store_true', + help='以YAML格式输出复杂结构') + parser.add_argument('-q', '--quiet', action='store_true', + help='仅输出值,不输出额外信息') + + args = parser.parse_args() + + try: + # 读取YAML文件 + with open(args.file, 'r') as f: + yaml_data = yaml.safe_load(f) + + # 分割键路径 + key_path = args.key.split('.') + + # 获取值 + value = get_nested_value(yaml_data, key_path) + + if value is None: + if not args.quiet: + print(f"错误: 键路径 '{args.key}' 未找到", file=sys.stderr) + sys.exit(1) + + # 输出结果 + if args.yaml and isinstance(value, (dict, list)): + # 复杂结构以YAML格式输出 + print(yaml.dump(value, default_flow_style=False, sort_keys=False).strip()) + elif isinstance(value, (dict, list)): + # 复杂结构默认以JSON格式输出 + print(yaml.dump(value, default_flow_style=False, sort_keys=False).strip()) + else: + # 简单值直接输出 + print(value) + + except FileNotFoundError: + if not args.quiet: + print(f"错误: 文件 '{args.file}' 不存在", file=sys.stderr) + sys.exit(1) + except yaml.YAMLError as e: + if not args.quiet: + print(f"YAML解析错误: {str(e)}", file=sys.stderr) + sys.exit(1) + except Exception as e: + if not args.quiet: + print(f"错误: {str(e)}", file=sys.stderr) + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/script/mindspore-intelligence/scripts/5-deploy-embedding/deploy-embedding.sh b/script/mindspore-intelligence/scripts/5-deploy-embedding/deploy-embedding.sh new file mode 100755 index 0000000000000000000000000000000000000000..11365b8a7126ab3544aa7781da9b3fdf587abe36 --- /dev/null +++ b/script/mindspore-intelligence/scripts/5-deploy-embedding/deploy-embedding.sh @@ -0,0 +1,254 @@ +#!/bin/bash +set -euo pipefail + +# 颜色定义 +RED='\e[31m' +GREEN='\e[32m' +YELLOW='\e[33m' +BLUE='\e[34m' +NC='\e[0m' # 重置颜色 + +# 配置参数 +readonly MODEL_NAME="bge-m3" +readonly MODEL_URL="https://modelscope.cn/models/gpustack/bge-m3-GGUF/resolve/master/bge-m3-Q4_K_M.gguf" +readonly MODEL_FILE="bge-m3-Q4_K_M.gguf" +readonly MODELLEFILE="Modelfile" +readonly TIMEOUT_DURATION=45 +readonly MODEL_DIR="/home/eulercopilot/models" + +# 初始化工作目录 +readonly WORK_DIR=$(pwd) +mkdir -p "$MODEL_DIR" + +# 网络检查函数(保持不变) +check_network() { + echo -e "${BLUE}步骤1/5:检查网络连接...${NC}" + local test_url="https://modelscope.cn" + if curl --silent --head --fail --max-time 5 "$test_url" >/dev/null 2>&1; then + echo -e "${GREEN}[SUCCESS] 网络连接正常${NC}" + return 0 + else + echo -e "${YELLOW}[WARNING] 无法连接网络,切换到离线模式${NC}" + return 1 + fi +} + +# 服务检查(保持不变) +check_service() { + echo -e "${BLUE}步骤2/5:检查服务状态...${NC}" + if ! 
systemctl is-active --quiet ollama; then + echo -e "${RED}[ERROR] Ollama服务未运行${NC}" + echo -e "${YELLOW}可能原因:" + echo "1. 服务未安装" + echo "2. 系统未启用服务" + echo -e "请先执行ollama-install.sh安装服务${NC}" + exit 1 + fi +} + +show_progress() { + local pid=$1 + local cols=$(tput cols) + local bar_width=$((cols - 20)) + + while kill -0 "$pid" 2>/dev/null; do + local current_size=$(du -b "${MODEL_DIR}/${MODEL_FILE}" 2>/dev/null | awk '{print $1}') + local percent=$((current_size * 100 / EXPECTED_SIZE)) + [ $percent -gt 100 ] && percent=100 + + local filled=$((percent * bar_width / 100)) + local empty=$((bar_width - filled)) + + printf "\r[%-*s] %3d%%" "$bar_width" "$(printf '%0.s=' {1..$filled})$(printf '%0.s ' {1..$empty})" "$percent" + sleep 1 + done +} + +detect_gpu() { + echo -e "${BLUE}检测GPU设备...${NC}" + + # 检查是否存在NVIDIA GPU硬件 + if lspci | grep -i nvidia || [ -c /dev/nvidia0 ]; then + echo -e "${GREEN}检测到NVIDIA GPU设备${NC}" + + # 检查NVIDIA驱动是否安装 + if command -v nvidia-smi &> /dev/null; then + echo -e "${GREEN}NVIDIA驱动已安装${NC}" + return 0 + else + echo -e "${YELLOW}检测到GPU但未安装NVIDIA驱动,将使用CPU模式${NC}" + return 1 + fi + else + echo -e "${YELLOW}未检测到GPU设备,将使用CPU模式${NC}" + return 1 + fi +} + +handle_model() { + echo -e "${BLUE}步骤3/5:处理模型文件...${NC}" + local model_path="${MODEL_DIR}/${MODEL_FILE}" + + # 检查本地模型 + if [[ -f "$model_path" ]]; then + echo -e "${GREEN}检测到本地模型文件 ${model_path}${NC}" + return 0 + fi + + # 需要下载时检查网络 + if ! check_network; then + echo -e "${RED}[ERROR] 无法下载模型:网络连接不可用${NC}" + echo -e "${YELLOW}解决方案:" + echo -e "1. 请检查网络连接" + echo -e "2. 可以手动将模型文件放置到:${MODEL_DIR}" + exit 1 + fi + + # 在线下载模式 + echo -e "${YELLOW}开始在线下载模型...${NC}" + echo -e "${YELLOW}下载地址:${MODEL_URL}${NC}" + + + # 创建临时文件记录wget输出 + local wget_output=$(mktemp) + + # 执行下载并显示动态进度条 + ( + wget --tries=3 --content-disposition -O "$model_path" "$MODEL_URL" --progress=dot:binary 2>&1 | \ + while IFS= read -r line; do + # 提取百分比 + if [[ "$line" =~ ([0-9]{1,3})% ]]; then + local percent=${BASH_REMATCH[1]} + # 计算进度条长度(基于终端宽度-20) + local cols=$(tput cols) + local bar_width=$((cols - 20)) + local filled=$((percent * bar_width / 100)) + local empty=$((bar_width - filled)) + + # 构建进度条 + local progress_bar=$(printf "%${filled}s" | tr ' ' '=') + local remaining_bar=$(printf "%${empty}s" | tr ' ' ' ') + + # 显示进度(使用回车覆盖) + printf "\r[%s%s] %3d%%" "$progress_bar" "$remaining_bar" "$percent" + fi + done + echo # 换行 + ) | tee "$wget_output" + + # 检查下载结果 + if grep -q "100%" "$wget_output"; then + echo -e "${GREEN}[SUCCESS] 模型下载完成(文件大小:$(du -h "$model_path" | awk '{print $1}'))${NC}" + echo -e "${GREEN}存储路径:${model_path}${NC}" + rm -f "$wget_output" + else + echo -e "${RED}[ERROR] 模型下载失败${NC}" + echo -e "${YELLOW}可能原因:" + echo "1. URL已失效(当前URL: $MODEL_URL)" + echo "2. 网络连接问题" + echo -e "3. 
磁盘空间不足(当前剩余:$(df -h ${MODEL_DIR} | awk 'NR==2 {print $4}'))${NC}" + rm -f "$wget_output" + exit 1 + fi + +} + +create_modelfile() { + echo -e "${BLUE}步骤4/5:创建模型配置...${NC}" + + # GPU参数配置 + local gpu_param="" + if detect_gpu; then + gpu_param="PARAMETER num_gpu -1" + echo -e "${GREEN}已启用GPU加速模式${NC}" + else + echo -e "${YELLOW}使用CPU模式运行${NC}" + gpu_param="PARAMETER num_gpu 0" + fi + + cat > "${WORK_DIR}/${MODELLEFILE}" </dev/null 2>&1 + pwd +)/$(basename "${BASH_SOURCE[0]}")" + +CHART_DIR="$( + canonical_path=$(readlink -f "$SCRIPT_PATH" 2>/dev/null || echo "$SCRIPT_PATH") + dirname "$(dirname "$(dirname "$canonical_path")")" +)/chart" + +# 获取系统架构 +get_architecture() { + arch=$(uname -m) + case "$arch" in + x86_64) + arch="x86" + ;; + aarch64) + arch="arm" + ;; + *) + echo -e "${RED}错误:不支持的架构 $arch${NC}" + exit 1 + ;; + esac + echo -e "${GREEN}检测到系统架构:$(uname -m)${NC}" +} + +create_namespace() { + echo -e "${BLUE}==> 检查命名空间 euler-copilot...${NC}" + if ! kubectl get namespace euler-copilot &> /dev/null; then + kubectl create namespace euler-copilot || { + echo -e "${RED}命名空间创建失败!${NC}" + return 1 + } + echo -e "${GREEN}命名空间创建成功${NC}" + else + echo -e "${YELLOW}命名空间已存在,跳过创建${NC}" + fi +} + +uninstall_databases() { + echo -e "${BLUE}==> 清理现有资源...${NC}" + + local helm_releases + helm_releases=$(helm list -n euler-copilot -q --filter '^databases' 2>/dev/null || true) + + if [ -n "$helm_releases" ]; then + echo -e "${YELLOW}找到以下Helm Release,开始清理...${NC}" + while IFS= read -r release; do + echo -e "${BLUE}正在删除Helm Release: ${release}${NC}" + if ! helm uninstall "$release" -n euler-copilot --wait --timeout 2m; then + echo -e "${RED}错误:删除Helm Release ${release} 失败!${NC}" >&2 + return 1 + fi + done <<< "$helm_releases" + else + echo -e "${YELLOW}未找到需要清理的Helm Release${NC}" + fi + + # 修改重点:仅筛选特定PVC名称 + local pvc_list + pvc_list=$(kubectl get pvc -n euler-copilot -o jsonpath='{.items[*].metadata.name}' 2>/dev/null \ + | tr ' ' '\n' \ + | grep -E '^(opengauss-storage|mongo-storage|minio-storage)$' || true) # 精确匹配三个指定名称 + + if [ -n "$pvc_list" ]; then + echo -e "${YELLOW}找到以下PVC,开始清理...${NC}" + while IFS= read -r pvc; do + echo -e "${BLUE}正在删除PVC: $pvc${NC}" + if ! kubectl delete pvc "$pvc" -n euler-copilot --force --grace-period=0; then + echo -e "${RED}错误:删除PVC $pvc 失败!${NC}" >&2 + return 1 + fi + done <<< "$pvc_list" + else + echo -e "${YELLOW}未找到需要清理的PVC${NC}" + fi + + # 新增:删除 euler-copilot-database Secret + local secret_name="euler-copilot-database" + if kubectl get secret "$secret_name" -n euler-copilot &>/dev/null; then + echo -e "${YELLOW}找到Secret: ${secret_name},开始清理...${NC}" + if ! kubectl delete secret "$secret_name" -n euler-copilot; then + echo -e "${RED}错误:删除Secret ${secret_name} 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的Secret: ${secret_name}${NC}" + fi + + echo -e "${BLUE}等待资源清理完成(10秒)...${NC}" + sleep 10 + + echo -e "${GREEN}资源清理完成${NC}" +} + +helm_install() { + echo -e "${BLUE}==> 进入部署目录...${NC}" + [ ! 
-d "$CHART_DIR" ] && { + echo -e "${RED}错误:部署目录不存在 $CHART_DIR${NC}" + return 1 + } + cd "$CHART_DIR" + + echo -e "${BLUE}正在安装 databases...${NC}" + helm upgrade --install databases --set globals.arch=$arch -n euler-copilot ./databases || { + echo -e "${RED}Helm 安装 databases 失败!${NC}" + return 1 + } +} + +check_pods_status() { + echo -e "${BLUE}==> 等待初始化就绪(30秒)...${NC}" + sleep 30 + + local timeout=300 + local start_time=$(date +%s) + + echo -e "${BLUE}开始监控Pod状态(总超时时间300秒)...${NC}" + + while true; do + local current_time=$(date +%s) + local elapsed=$((current_time - start_time)) + + if [ $elapsed -gt $timeout ]; then + echo -e "${RED}错误:部署超时!${NC}" + kubectl get pods -n euler-copilot + return 1 + fi + + local not_running=$(kubectl get pods -n euler-copilot -o jsonpath='{range .items[*]}{.metadata.name} {.status.phase}{"\n"}{end}' | grep -v "Running") + + if [ -z "$not_running" ]; then + echo -e "${GREEN}所有Pod已正常运行!${NC}" + kubectl get pods -n euler-copilot + return 0 + else + echo "等待Pod就绪(已等待 ${elapsed} 秒)..." + echo "当前异常Pod:" + echo "$not_running" | awk '{print " - " $1 " (" $2 ")"}' + sleep 10 + fi + done +} + +main() { + get_architecture + create_namespace + uninstall_databases + helm_install + check_pods_status + + echo -e "\n${GREEN}=========================" + echo "数据库部署完成!" + echo -e "=========================${NC}" +} + +trap 'echo -e "${RED}操作被中断!${NC}"; exit 1' INT +main "$@" diff --git a/script/mindspore-intelligence/scripts/7-install-authhub/install_authhub.sh b/script/mindspore-intelligence/scripts/7-install-authhub/install_authhub.sh new file mode 100755 index 0000000000000000000000000000000000000000..5c8d26a7c70efcdb024606303c845aef613863ea --- /dev/null +++ b/script/mindspore-intelligence/scripts/7-install-authhub/install_authhub.sh @@ -0,0 +1,240 @@ +#!/bin/bash + +set -eo pipefail + +RED='\033[31m' +GREEN='\033[32m' +YELLOW='\033[33m' +BLUE='\033[34m' +NC='\033[0m' + +SCRIPT_PATH="$( + cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 + pwd +)/$(basename "${BASH_SOURCE[0]}")" + +CHART_DIR="$( + canonical_path=$(readlink -f "$SCRIPT_PATH" 2>/dev/null || echo "$SCRIPT_PATH") + dirname "$(dirname "$(dirname "$canonical_path")")" +)/chart" + +# 打印帮助信息 +print_help() { + echo -e "${GREEN}用法: $0 [选项]" + echo -e "选项:" + echo -e " --help 显示帮助信息" + echo -e " --authhub_address <地址> 指定Authhub的访问地址(例如:http://myhost:30081)" + echo -e "" + echo -e "示例:" + echo -e " $0 --authhub_address http://myhost:30081${NC}" + exit 0 +} + +# 获取系统架构 +get_architecture() { + local arch=$(uname -m) + case "$arch" in + x86_64) + arch="x86" + ;; + aarch64) + arch="arm" + ;; + *) + echo -e "${RED}错误:不支持的架构 $arch${NC}" >&2 + return 1 + ;; + esac + echo -e "${GREEN}检测到系统架构:$(uname -m)${NC}" >&2 + echo "$arch" +} + +create_namespace() { + echo -e "${BLUE}==> 检查命名空间 euler-copilot...${NC}" + if ! 
kubectl get namespace euler-copilot &> /dev/null; then + kubectl create namespace euler-copilot || { + echo -e "${RED}命名空间创建失败!${NC}" + return 1 + } + echo -e "${GREEN}命名空间创建成功${NC}" + else + echo -e "${YELLOW}命名空间已存在,跳过创建${NC}" + fi +} + +uninstall_authhub() { + echo -e "${BLUE}==> 清理现有资源...${NC}" + + local RELEASES + RELEASES=$(helm list -n euler-copilot --short | grep authhub || true) + + if [ -n "$RELEASES" ]; then + echo -e "${YELLOW}找到以下Helm Release,开始清理...${NC}" + for release in $RELEASES; do + echo -e "${BLUE}正在删除Helm Release: ${release}${NC}" + helm uninstall "$release" -n euler-copilot || echo -e "${RED}删除Helm Release失败,继续执行...${NC}" + done + else + echo -e "${YELLOW}未找到需要清理的Helm Release${NC}" + fi + + local pvc_name + pvc_name=$(kubectl get pvc -n euler-copilot | grep 'mysql-pvc' 2>/dev/null || true) + + if [ -n "$pvc_name" ]; then + echo -e "${YELLOW}找到以下PVC,开始清理...${NC}" + kubectl delete pvc mysql-pvc -n euler-copilot --force --grace-period=0 || echo -e "${RED}PVC删除失败,继续执行...${NC}" + else + echo -e "${YELLOW}未找到需要清理的PVC${NC}" + fi + + # 新增:删除 authhub-secret + local authhub_secret="authhub-secret" + if kubectl get secret "$authhub_secret" -n euler-copilot &>/dev/null; then + echo -e "${YELLOW}找到Secret: ${authhub_secret},开始清理...${NC}" + if ! kubectl delete secret "$authhub_secret" -n euler-copilot; then + echo -e "${RED}错误:删除Secret ${authhub_secret} 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的Secret: ${authhub_secret}${NC}" + fi + + echo -e "${GREEN}资源清理完成${NC}" +} + +get_authhub_address() { + local default_address="http://127.0.0.1:30081" + + echo -e "${BLUE}请输入 Authhub 的访问地址(IP或域名,直接回车使用默认值 ${default_address}):${NC}" + read -p "Authhub 地址: " authhub_address + + # 处理空输入情况 + if [[ -z "$authhub_address" ]]; then + authhub_address="$default_address" + echo -e "${GREEN}使用默认地址:${authhub_address}${NC}" + else + echo -e "${GREEN}输入地址:${authhub_address}${NC}" + fi + + return 0 +} + +helm_install() { + local arch="$1" + echo -e "${BLUE}==> 进入部署目录...${NC}" + [ ! -d "${CHART_DIR}" ] && { + echo -e "${RED}错误:部署目录不存在 ${CHART_DIR} ${NC}" + return 1 + } + cd "${CHART_DIR}" + + echo -e "${BLUE}正在安装 authhub...${NC}" + helm upgrade --install authhub -n euler-copilot ./authhub \ + --set globals.arch="$arch" \ + --set domain.authhub="${authhub_address}" || { + echo -e "${RED}Helm 安装 authhub 失败!${NC}" + return 1 + } +} + +check_pods_status() { + echo -e "${BLUE}==> 等待初始化就绪(30秒)...${NC}" >&2 + sleep 30 + + local timeout=300 + local start_time=$(date +%s) + + echo -e "${BLUE}开始监控Pod状态(总超时时间300秒)...${NC}" >&2 + + while true; do + local current_time=$(date +%s) + local elapsed=$((current_time - start_time)) + + if [ $elapsed -gt $timeout ]; then + echo -e "${YELLOW}警告:部署超时!请检查以下资源:${NC}" >&2 + kubectl get pods -n euler-copilot -o wide + echo -e "\n${YELLOW}建议检查:${NC}" + echo "1. 查看未就绪Pod的日志: kubectl logs -n euler-copilot " + echo "2. 检查PVC状态: kubectl get pvc -n euler-copilot" + echo "3. 检查Service状态: kubectl get svc -n euler-copilot" + return 1 + fi + + local not_running=$(kubectl get pods -n euler-copilot -o jsonpath='{range .items[*]}{.metadata.name} {.status.phase} {.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' \ + | awk '$2 != "Running" || $3 != "True" {print $1 " " $2}') + + if [ -z "$not_running" ]; then + echo -e "${GREEN}所有Pod已正常运行!${NC}" >&2 + kubectl get pods -n euler-copilot -o wide + return 0 + else + echo "等待Pod就绪(已等待 ${elapsed} 秒)..." 
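+            # 下方仅列出 phase 不为 Running 或 Ready 条件不为 True 的 Pod,便于定位启动缓慢的组件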
+ echo "当前未就绪Pod:" + echo "$not_running" | awk '{print " - " $1 " (" $2 ")"}' + sleep 10 + fi + done +} + +deploy() { + local arch + arch=$(get_architecture) || exit 1 + create_namespace || exit 1 + uninstall_authhub || exit 1 + + # 如果未通过参数提供地址,则提示用户输入 + if [ -z "$authhub_address" ]; then + echo -e "${YELLOW}未提供 --authhub_address 参数,需要手动输入地址${NC}" + get_authhub_address || exit 1 + else + echo -e "${GREEN}使用参数指定的Authhub地址:$authhub_address${NC}" + fi + + helm_install "$arch" || exit 1 + check_pods_status || { + echo -e "${RED}部署失败:Pod状态检查未通过!${NC}" + exit 1 + } + + echo -e "\n${GREEN}=========================" + echo -e "Authhub 部署完成!" + echo -e "查看pod状态:kubectl get pod -n euler-copilot" + echo -e "Authhub登录地址为: $authhub_address" + echo -e "默认账号密码: administrator/changeme" + echo -e "=========================${NC}" +} + +# 解析命令行参数 +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --help) + print_help + exit 0 + ;; + --authhub_address) + if [ -n "$2" ]; then + authhub_address="$2" + shift 2 + else + echo -e "${RED}错误:--authhub_address 需要提供一个参数${NC}" >&2 + exit 1 + fi + ;; + *) + echo -e "${RED}未知参数: $1${NC}" >&2 + print_help + exit 1 + ;; + esac + done +} + +main() { + parse_args "$@" + deploy +} + +trap 'echo -e "${RED}操作被中断!${NC}"; exit 1' INT +main "$@" diff --git a/script/mindspore-intelligence/scripts/8-install-EulerCopilot/install_eulercopilot.sh b/script/mindspore-intelligence/scripts/8-install-EulerCopilot/install_eulercopilot.sh new file mode 100755 index 0000000000000000000000000000000000000000..8fe150b1a7e68158d83db8012e349b7121bf6fd5 --- /dev/null +++ b/script/mindspore-intelligence/scripts/8-install-EulerCopilot/install_eulercopilot.sh @@ -0,0 +1,508 @@ +#!/bin/bash + +set -eo pipefail + +# 颜色定义 +RED='\e[31m' +GREEN='\e[32m' +YELLOW='\e[33m' +BLUE='\e[34m' +NC='\e[0m' # 恢复默认颜色 + +NAMESPACE="euler-copilot" +PLUGINS_DIR="/var/lib/eulercopilot" + +# 全局变量声明 +internal_ip="" +client_id="" +client_secret="" +eulercopilot_address="" +authhub_address="" +MINDSPORE_DEEPSEEK_DIR="/home/llm_solution-master/script/mindspore-deepseek" +CONFIG_FILE="${MINDSPORE_DEEPSEEK_DIR}/config.yaml" + +SCRIPT_PATH="$( + cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 + pwd +)/$(basename "${BASH_SOURCE[0]}")" + +DEPLOY_DIR="$( + canonical_path=$(readlink -f "$SCRIPT_PATH" 2>/dev/null || echo "$SCRIPT_PATH") + dirname "$(dirname "$(dirname "$canonical_path")")" +)" + +YAML_EXTRACTOR="${DEPLOY_DIR}/scripts/4-deploy-deepseek/yaml_extractor.py" + +# 从配置文件中获取参数 +function get_config_value() { + local key="$1" + python3 "$YAML_EXTRACTOR" -f "$CONFIG_FILE" -k "$key" +} +# 显示帮助信息 +show_help() { + echo -e "${GREEN}用法: $0 [选项]" + echo -e "选项:" + echo -e " --help 显示此帮助信息" + echo -e " --eulercopilot_address 指定EulerCopilot前端访问URL" + echo -e " --authhub_address 指定Authhub前端访问URL" + echo -e "" + echo -e "示例:" + echo -e " $0 --eulercopilot_address http://myhost:30080 --authhub_address http://myhost:30081${NC}" + exit 0 +} + +# 解析命令行参数 +parse_arguments() { + while [[ $# -gt 0 ]]; do + case "$1" in + --help) + show_help + ;; + --eulercopilot_address) + if [ -n "$2" ]; then + eulercopilot_address="$2" + shift + else + echo -e "${RED}错误: --eulercopilot_address 需要提供一个值${NC}" >&2 + exit 1 + fi + ;; + --authhub_address) + if [ -n "$2" ]; then + authhub_address="$2" + shift + else + echo -e "${RED}错误: --authhub_address 需要提供一个值${NC}" >&2 + exit 1 + fi + ;; + *) + echo -e "${RED}未知选项: $1${NC}" >&2 + show_help + exit 1 + ;; + esac + shift + done +} + + +# 安装成功信息显示函数 +show_success_message() { + local arch=$1 + + echo -e 
"\n${GREEN}==================================================${NC}" + echo -e "${GREEN} EulerCopilot 部署完成! ${NC}" + echo -e "${GREEN}==================================================${NC}" + + echo -e "${YELLOW}访问信息:${NC}" + echo -e "EulerCopilot UI: ${eulercopilot_address}" + echo -e "AuthHub 管理界面: ${authhub_address}" + + echo -e "\n${YELLOW}系统信息:${NC}" + echo -e "内网IP: ${host}" + echo -e "系统架构: $(uname -m) (识别为: ${arch})" + echo -e "插件目录: ${PLUGINS_DIR}" + echo -e "Chart目录: ${DEPLOY_DIR}/chart/" + + echo -e "${BLUE}操作指南:${NC}" + echo -e "1. 查看集群状态: kubectl get all -n $NAMESPACE" + echo -e "2. 查看实时日志: kubectl logs -n $NAMESPACE -f deployment/$NAMESPACE" + echo -e "3. 查看POD状态:kubectl get pods -n $NAMESPACE" +} + +# 获取系统架构 +get_architecture() { + local arch=$(uname -m) + case "$arch" in + x86_64) arch="x86" ;; + aarch64) arch="arm" ;; + *) + echo -e "${RED}错误:不支持的架构 $arch${NC}" >&2 + return 1 + ;; + esac + echo -e "${GREEN}检测到系统架构:${arch} (原始标识: $(uname -m))${NC}" >&2 + echo "$arch" +} + +# 自动检测业务网口 +get_network_ip() { + echo -e "${BLUE}自动检测业务网络接口 IP 地址...${NC}" >&2 + local timeout=20 + local start_time=$(date +%s) + local interface="" + local host="" + + # 查找可用的网络接口 + while [ $(( $(date +%s) - start_time )) -lt $timeout ]; do + # 获取所有非虚拟接口(排除 lo, docker, veth 等) + interfaces=$(ip -o link show | awk -F': ' '{print $2}' | grep -vE '^lo$|docker|veth|br-|virbr|tun') + + for intf in $interfaces; do + # 检查接口状态是否为 UP + if ip link show "$intf" | grep -q 'state UP'; then + # 获取 IPv4 地址 + ip_addr=$(ip addr show "$intf" | grep -w inet | awk '{print $2}' | cut -d'/' -f1) + if [ -n "$ip_addr" ]; then + interface=$intf + host=$ip_addr + break 2 # 跳出两层循环 + fi + fi + done + sleep 1 + done + + if [ -z "$interface" ]; then + echo -e "${RED}错误:未找到可用的业务网络接口${NC}" >&2 + exit 1 + fi + + echo -e "${GREEN}使用网络接口:${interface},IP 地址:${host}${NC}" >&2 + echo "$host" +} + +get_address_input() { + # 如果命令行参数已经提供了地址,则直接使用,不进行交互式输入 + if [ -n "$eulercopilot_address" ] && [ -n "$authhub_address" ]; then + echo -e "${GREEN}使用命令行参数配置:" + echo "EulerCopilot地址: $eulercopilot_address" + echo "Authhub地址: $authhub_address" + return + fi + + # 从环境变量读取或使用默认值 + eulercopilot_address=${EULERCOPILOT_ADDRESS:-"http://127.0.0.1:30080"} + authhub_address=${AUTHHUB_ADDRESS:-"http://127.0.0.1:30081"} + + # 非交互模式直接使用默认值 + if [ -t 0 ]; then # 仅在交互式终端显示提示 + echo -e "${BLUE}请输入 EulerCopilot 前端访问URL(默认:$eulercopilot_address):${NC}" + read -p "> " input_euler + [ -n "$input_euler" ] && eulercopilot_address=$input_euler + + echo -e "${BLUE}请输入 Authhub 前端访问URL(默认:$authhub_address):${NC}" + read -p "> " input_auth + [ -n "$input_auth" ] && authhub_address=$input_auth + fi + + echo -e "${GREEN}使用配置:" + echo "EulerCopilot地址: $eulercopilot_address" + echo "Authhub地址: $authhub_address" +} + +get_client_info_auto() { + # 获取用户输入地址 + get_address_input + # 创建临时文件 + local temp_file + temp_file=$(mktemp) + + # 直接调用Python脚本并传递域名参数 + python3 "${DEPLOY_DIR}/scripts/9-other-script/get_client_id_and_secret.py" "${eulercopilot_address}" > "$temp_file" 2>&1 + + # 检查Python脚本执行结果 + if [ $? 
-ne 0 ]; then + echo -e "${RED}错误:Python脚本执行失败${NC}" + cat "$temp_file" + rm -f "$temp_file" + return 1 + fi + + # 提取凭证信息 + client_id=$(grep "client_id: " "$temp_file" | awk '{print $2}') + client_secret=$(grep "client_secret: " "$temp_file" | awk '{print $2}') + rm -f "$temp_file" + + # 验证结果 + if [ -z "$client_id" ] || [ -z "$client_secret" ]; then + echo -e "${RED}错误:无法获取有效的客户端凭证${NC}" >&2 + return 1 + fi + + # 输出结果 + echo -e "${GREEN}==============================${NC}" + echo -e "${GREEN}Client ID: ${client_id}${NC}" + echo -e "${GREEN}Client Secret: ${client_secret}${NC}" + echo -e "${GREEN}==============================${NC}" +} + +get_client_info_manual() { + # 非交互模式直接使用默认值 + if [ -t 0 ]; then # 仅在交互式终端显示提示 + echo -e "${BLUE}请输入 Client ID: 域名(端点信息:Client ID): ${NC}" + read -p "> " input_id + [ -n "$input_id" ] && client_id=$input_id + + echo -e "${BLUE}请输入 Client Secret: 域名(端点信息:Client Secret):${NC}" + read -p "> " input_secret + [ -n "$input_secret" ] && client_secret=$input_secret + fi + + # 统一验证域名格式 + echo -e "${GREEN}使用配置:" + echo "Client ID: $client_id" + echo "Client Secret: $client_secret" +} + +check_directories() { + echo -e "${BLUE}检查语义接口目录是否存在...${NC}" >&2 + + # 定义父目录和子目录列表 + local REQUIRED_OWNER="root:root" + + # 检查并创建父目录 + if [ -d "${PLUGINS_DIR}" ]; then + echo -e "${GREEN}目录已存在:${PLUGINS_DIR}${NC}" >&2 + # 检查当前权限 + local current_owner=$(stat -c "%u:%g" "${PLUGINS_DIR}" 2>/dev/null) + if [ "$current_owner" != "$REQUIRED_OWNER" ]; then + echo -e "${YELLOW}当前目录权限: ${current_owner},正在修改为 ${REQUIRED_OWNER}...${NC}" >&2 + if chown root:root "${PLUGINS_DIR}"; then + echo -e "${GREEN}目录权限已成功修改为 ${REQUIRED_OWNER}${NC}" >&2 + else + echo -e "${RED}错误:无法修改目录权限到 ${REQUIRED_OWNER}${NC}" >&2 + exit 1 + fi + else + echo -e "${GREEN}目录权限正确(${REQUIRED_OWNER})${NC}" >&2 + fi + else + if mkdir -p "${PLUGINS_DIR}"; then + echo -e "${GREEN}目录已创建:${PLUGINS_DIR}${NC}" >&2 + chown root:root "${PLUGINS_DIR}" # 设置父目录所有者 + else + echo -e "${RED}错误:无法创建目录 ${PLUGINS_DIR}${NC}" >&2 + exit 1 + fi + fi +} + +uninstall_eulercopilot() { + echo -e "${YELLOW}检查是否存在已部署的 EulerCopilot...${NC}" >&2 + + # 删除 Helm Release: euler-copilot + if helm list -n euler-copilot --short | grep -q '^euler-copilot$'; then + echo -e "${GREEN}找到Helm Release: euler-copilot,开始清理...${NC}" + if ! helm uninstall euler-copilot -n euler-copilot; then + echo -e "${RED}错误:删除Helm Release euler-copilot 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的Helm Release: euler-copilot${NC}" + fi + + # 删除 PVC: framework-semantics-claim 和 web-static + local pvc_names=("framework-semantics-claim" "web-static") + for pvc_name in "${pvc_names[@]}"; do + if kubectl get pvc "$pvc_name" -n euler-copilot &>/dev/null; then + echo -e "${GREEN}找到PVC: ${pvc_name},开始清理...${NC}" + if ! kubectl delete pvc "$pvc_name" -n euler-copilot --force --grace-period=0; then + echo -e "${RED}错误:删除PVC ${pvc_name} 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的PVC: ${pvc_name}${NC}" + fi + done + + # 删除 Secret: euler-copilot-system + local secret_name="euler-copilot-system" + if kubectl get secret "$secret_name" -n euler-copilot &>/dev/null; then + echo -e "${GREEN}找到Secret: ${secret_name},开始清理...${NC}" + if ! 
kubectl delete secret "$secret_name" -n euler-copilot; then + echo -e "${RED}错误:删除Secret ${secret_name} 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的Secret: ${secret_name}${NC}" + fi + + echo -e "${GREEN}资源清理完成${NC}" +} + +modify_yaml() { + local internal_host=$1 + local preserve_models=$2 # 新增参数,指示是否保留模型配置 + # 从配置文件获取参数 + local host=$(get_config_value "all.children.masters.hosts.master1.ansible_host") + local port=$(get_config_value "all.vars.llm_port") + local model_name=$(get_config_value "all.vars.model_path") + # 验证参数获取 + if [ -z "$host" ] || [ -z "$port" ] || [ -z "$model_name" ]; then + echo -e "${RED}从配置文件获取参数失败${NC}" + echo -e "${YELLOW}请检查以下配置项:" + echo "1. all.children.masters.hosts.master1.ansible_host" + echo "2. all.vars.llm_port" + echo "3. all.vars.model_path" + return 1 + fi + + echo -e "${GREEN}获取配置参数成功:" + echo "host: $host" + echo "port: $port" + echo "model_name: $model_name${NC}" + + echo -e "${BLUE}开始修改YAML配置文件...${NC}" >&2 + # 构建参数数组 + local set_args=() + + # 添加其他必填参数 + set_args+=( + "--set" "globals.arch=$arch" + "--set" "login.client.id=${client_id}" + "--set" "login.client.secret=${client_secret}" + "--set" "domain.euler_copilot=${eulercopilot_address}" + "--set" "domain.authhub=${authhub_address}" + ) + + # 如果不需要保留模型配置,则添加模型相关的参数 + if [[ "$preserve_models" != [Yy]* ]]; then + set_args+=( + "--set" "models.answer.endpoint=http://$host:$port/v1" + "--set" "models.answer.name=${model_name}" + "--set" "models.answer.key=sk-123456" + "--set" "models.functionCall.backend=openai" + "--set" "models.functionCall.endpoint=http://$host:$port" + "--set" "models.embedding.type=openai" + "--set" "models.embedding.endpoint=http://$internal_host:11434/v1" + "--set" "models.embedding.key=sk-123456" + "--set" "models.embedding.name=bge-m3:latest" + ) + fi + + # 调用Python脚本,传递所有参数 + python3 "${DEPLOY_DIR}/scripts/9-other-script/modify_eulercopilot_yaml.py" \ + "${DEPLOY_DIR}/chart/euler_copilot/values.yaml" \ + "${DEPLOY_DIR}/chart/euler_copilot/values.yaml" \ + "${set_args[@]}" || { + echo -e "${RED}错误:YAML文件修改失败${NC}" >&2 + exit 1 + } + echo -e "${GREEN}YAML文件修改成功!${NC}" >&2 +} + +# 检查目录 +enter_chart_directory() { + echo -e "${BLUE}进入Chart目录...${NC}" >&2 + cd "${DEPLOY_DIR}/chart/" || { + echo -e "${RED}错误:无法进入Chart目录 ${DEPLOY_DIR}/chart/${NC}" >&2 + exit 1 + } +} + +pre_install_checks() { + # 检查kubectl和helm是否可用 + command -v kubectl >/dev/null 2>&1 || error_exit "kubectl未安装" + command -v helm >/dev/null 2>&1 || error_exit "helm未安装" + + # 检查Kubernetes集群连接 + kubectl cluster-info >/dev/null 2>&1 || error_exit "无法连接到Kubernetes集群" + + # 检查必要的存储类 + kubectl get storageclasses >/dev/null 2>&1 || error_exit "无法获取存储类信息" +} + +# 执行安装 +execute_helm_install() { + echo -e "${BLUE}开始部署EulerCopilot(架构: $arch)...${NC}" >&2 + + enter_chart_directory + helm upgrade --install $NAMESPACE -n $NAMESPACE ./euler_copilot --create-namespace || { + echo -e "${RED}Helm 安装 EulerCopilot 失败!${NC}" >&2 + exit 1 + } + echo -e "${GREEN}Helm安装 EulerCopilot 成功!${NC}" >&2 +} + +# 检查pod状态 +check_pods_status() { + echo -e "${BLUE}==> 等待初始化就绪(30秒)...${NC}" >&2 + sleep 30 + + local timeout=300 + local start_time=$(date +%s) + + echo -e "${BLUE}开始监控Pod状态(总超时时间300秒)...${NC}" >&2 + + while true; do + local current_time=$(date +%s) + local elapsed=$((current_time - start_time)) + + if [ $elapsed -gt $timeout ]; then + echo -e "${YELLOW}警告:部署超时!请检查以下资源:${NC}" >&2 + kubectl get pods -n $NAMESPACE -o wide + echo -e "\n${YELLOW}建议检查:${NC}" + echo "1. 查看未就绪Pod的日志: kubectl logs -n $NAMESPACE " + echo "2. 
检查PVC状态: kubectl get pvc -n $NAMESPACE" + echo "3. 检查Service状态: kubectl get svc -n $NAMESPACE" + return 1 + fi + + local not_running=$(kubectl get pods -n $NAMESPACE -o jsonpath='{range .items[*]}{.metadata.name} {.status.phase} {.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' \ + | awk '$2 != "Running" || $3 != "True" {print $1 " " $2}') + + if [ -z "$not_running" ]; then + echo -e "${GREEN}所有Pod已正常运行!${NC}" >&2 + kubectl get pods -n $NAMESPACE -o wide + return 0 + else + echo "等待Pod就绪(已等待 ${elapsed} 秒)..." + echo "当前未就绪Pod:" + echo "$not_running" | awk '{print " - " $1 " (" $2 ")"}' + sleep 10 + fi + done +} + +# 修改main函数 +main() { + parse_arguments "$@" + + pre_install_checks + + local arch internal_host + + arch=$(get_architecture) || exit 1 + internal_host=$(get_network_ip) || exit 1 + + uninstall_eulercopilot + + if ! get_client_info_auto; then + get_client_info_manual + fi + + check_directories + + # 交互式提示优化 + if [ -t 0 ]; then + echo -e "${YELLOW}是否保留现有的模型配置?${NC}" + echo -e " ${BLUE}Y) 保留现有配置${NC}" + echo -e " ${BLUE}n) 使用默认配置${NC}" + while true; do + read -p "请选择(Y/N): " input_preserve + case "${input_preserve:-Y}" in + [YyNn]) preserve_models=${input_preserve:-Y}; break ;; + *) echo -e "${RED}无效输入,请选择Y或n${NC}" ;; + esac + done + else + preserve_models="N" + fi + + echo -e "${BLUE}开始修改YAML配置...${NC}" + modify_yaml $internal_host $preserve_models + + echo -e "${BLUE}开始Helm安装...${NC}" + execute_helm_install + + if check_pods_status; then + echo -e "${GREEN}所有组件已就绪!${NC}" + show_success_message "$arch" + else + echo -e "${YELLOW}部分组件尚未就绪,建议进行排查!${NC}" + fi +} + +main "$@" diff --git a/script/mindspore-intelligence/scripts/9-other-script/download_file.sh b/script/mindspore-intelligence/scripts/9-other-script/download_file.sh new file mode 100755 index 0000000000000000000000000000000000000000..8bd11575e61fe0eef73270618cf44cabe30c715e --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/download_file.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +download_and_extract_files() { + local BASE_URL=$1 + local TARGET_DIR="/home/EulerCopilot/models" + shift + local FILES=("$@") + + yum -y install tar wget + + if [ ! -d "${TARGET_DIR}" ]; then + echo "Creating directory ${TARGET_DIR}..." + mkdir -p "${TARGET_DIR}" + fi + + for FILE in "${FILES[@]}"; do + FULL_URL="${BASE_URL}${FILE}" + + if [ ! -f "${FILE}" ]; then + echo "Downloading ${FULL_URL}..." + wget -O "${FILE}" "${FULL_URL}" + if [ $? -ne 0 ]; then + echo "Failed to download ${FILE}." + continue + fi + else + echo "${FILE} already exists, skipping download." + fi + + echo "Extracting ${FILE} to ${TARGET_DIR}..." + if [[ "${FILE}" == *.tar.gz ]]; then + if ! tar -xzvf "${FILE}" -C "${TARGET_DIR}" 2>&1 | grep -q 'Error is not recoverable'; then + echo "${FILE} extracted successfully." + rm "${FILE}" + else + echo "Failed to extract ${FILE}: it may be corrupt or not a tar.gz file." 
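+                # Remove the corrupt archive so a later run of this script re-downloads it instead of re-using the bad file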
+ rm "${FILE}" + fi + else + echo "Unsupported file format: ${FILE}" + continue + fi + done +} + +BASE_URL="https://repo.oepkgs.net/openEuler/rpm/openEuler-22.03-LTS/contrib/EulerCopilot/" +FILES=("bge-mixed-model.tar.gz" "text2vec-base-chinese-paraphrase.tar.gz" "bge-reranker-large.tar.gz") + +download_and_extract_files "${BASE_URL}" "${FILES[@]}" diff --git a/script/mindspore-intelligence/scripts/9-other-script/get_client_id_and_secret.py b/script/mindspore-intelligence/scripts/9-other-script/get_client_id_and_secret.py new file mode 100755 index 0000000000000000000000000000000000000000..901856c69130b30699b01e88c95a29523cd7e006 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/get_client_id_and_secret.py @@ -0,0 +1,172 @@ +""" +获取认证信息 +""" +import json +import sys +import requests +import urllib3 +import subprocess +import argparse + +urllib3.disable_warnings() + +def get_service_cluster_ip(namespace, service_name): + cmd = ["kubectl", "get", "service", service_name, "-n", namespace, "-o", "json"] + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + if result.returncode != 0: + error_msg = result.stderr.decode().strip() + print(f"获取服务信息失败: [命名空间: {namespace}] [服务名: {service_name}]") + print(f"Kubectl错误详情: {error_msg}") + + if "NotFound" in error_msg: + print("→ 请检查:") + print(f" 1. 服务是否部署完成(kubectl get pods -n {namespace})") + print(f" 2. 服务名称是否拼写正确") + print(f" 3. 是否在正确的Kubernetes上下文环境中") + sys.exit(1) + + service_info = json.loads(result.stdout.decode()) + return service_info['spec'].get('clusterIP', 'No Cluster IP found') + +def get_user_token(authhub_web_url, username="administrator", password="changeme"): + url = authhub_web_url + "/oauth2/manager-login" + response = requests.post( + url, + json={"password": password, "username": username}, + headers={"Content-Type": "application/json"}, + verify=False, + timeout=10 + ) + response.raise_for_status() + return response.json()["data"]["user_token"] + +def find_existing_app(authhub_web_url, user_token, client_name): + response = requests.get( + authhub_web_url + "/oauth2/applications", + headers={"Authorization": user_token, "Content-Type": "application/json"}, + timeout=10 + ) + response.raise_for_status() + apps_data = response.json() + + for app in apps_data["data"]["applications"]: + client_metadata = app.get("client_metadata") or {} + if isinstance(client_metadata, str): + try: + client_metadata = json.loads(client_metadata) + except json.JSONDecodeError: + client_metadata = {} + + candidate_names = [ + client_metadata.get("client_name"), + app.get("client_name"), + app.get("client_info", {}).get("client_name") + ] + + if any(str(name).lower() == client_name.lower() for name in candidate_names if name): + return app["client_info"]["client_id"] + return None + +def register_or_update_app(authhub_web_url, user_token, client_name, client_url, redirect_urls): + client_id = find_existing_app(authhub_web_url, user_token, client_name) + + if client_id: + # 更新现有应用 + print(f"发现已存在应用 [名称: {client_name}], 正在更新...") + url = f"{authhub_web_url}/oauth2/applications/{client_id}" + response = requests.put( + url, + json={ + "client_uri": client_url, + "redirect_uris": redirect_urls, + "register_callback_uris": [], + "logout_callback_uris": [], + "skip_authorization": True, + "scope": ["email", "phone", "username", "openid", "offline_access"], + "grant_types": ["authorization_code"], + "response_types": ["code"], + "token_endpoint_auth_method": "none" + }, + headers={"Authorization": user_token, 
"Content-Type": "application/json"}, + verify=False + ) + response.raise_for_status() + return response.json()["data"] + else: + # 注册新应用 + print(f"未找到已存在应用 [名称: {client_name}], 正在注册新应用...") + response = requests.post( + authhub_web_url + "/oauth2/applications/register", + json={ + "client_name": client_name, + "client_uri": client_url, + "redirect_uris": redirect_urls, + "register_callback_uris": [], + "logout_callback_uris": [], + "skip_authorization": True, + "scope": ["email", "phone", "username", "openid", "offline_access"], + "grant_types": ["authorization_code"], + "response_types": ["code"], + "token_endpoint_auth_method": "none" + }, + headers={"Authorization": user_token, "Content-Type": "application/json"}, + verify=False + ) + response.raise_for_status() + return response.json()["data"] + +def get_client_secret(authhub_web_url, user_token, client_id): + response = requests.get( + f"{authhub_web_url}/oauth2/applications/{client_id}", + headers={"Authorization": user_token, "Content-Type": "application/json"}, + timeout=10 + ) + response.raise_for_status() + app_data = response.json() + return { + "client_id": app_data["data"]["client_info"]["client_id"], + "client_secret": app_data["data"]["client_info"]["client_secret"] + } + +if __name__ == "__main__": + # 解析命令行参数 + parser = argparse.ArgumentParser() + parser.add_argument("eulercopilot_address", help="EulerCopilot前端地址(默认:http://172.0.0.1:30080)") + args = parser.parse_args() + + # 获取服务信息 + namespace = "euler-copilot" + service_name = "authhub-web-service" + print(f"正在查询服务信息: [命名空间: {namespace}] [服务名: {service_name}]") + cluster_ip = get_service_cluster_ip(namespace, service_name) + authhub_web_url = f"http://{cluster_ip}:8000" + + # 生成固定URL + client_url = f"{args.eulercopilot_address}" + redirect_urls = [f"{args.eulercopilot_address}/api/auth/login"] + client_name = "EulerCopilot" # 设置固定默认值 + + # 认证流程 + try: + print("\n正在获取用户令牌...") + user_token = get_user_token(authhub_web_url) + print("✓ 用户令牌获取成功") + + print(f"\n正在处理应用 [名称: {client_name}]...") + app_info = register_or_update_app(authhub_web_url, user_token, client_name, client_url, redirect_urls) + print("✓ 应用处理成功") + + print(f"\n正在查询客户端凭证 [ID: {app_info['client_info']['client_id']}]...") + client_info = get_client_secret(authhub_web_url, user_token, app_info["client_info"]["client_id"]) + + print("\n✓ 认证信息获取成功:") + print(f"client_id: {client_info['client_id']}") + print(f"client_secret: {client_info['client_secret']}") + + except requests.exceptions.HTTPError as e: + print(f"\nHTTP 错误: {e.response.status_code} - {e.response.text}") + sys.exit(1) + except Exception as e: + print(f"\n错误: {str(e)}") + sys.exit(1) diff --git a/script/mindspore-intelligence/scripts/9-other-script/get_log.sh b/script/mindspore-intelligence/scripts/9-other-script/get_log.sh new file mode 100755 index 0000000000000000000000000000000000000000..3b02f96368f0fc4ab3ae347a222ba9cb130b8466 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/get_log.sh @@ -0,0 +1,40 @@ +#!/bin/bash +function help { + echo -e "用法:./get_log.sh [命名空间] [日志时间]"; + echo -e "示例:./get_log.sh euler-copilot 1h"; +} + + +function main { + echo -e "[Info]开始收集各Pod日志"; + time=$(date -u +"%s"); + echo -e "[Info]当前命名空间:$1,当前时间戳:$time" + filename="logs_$1_$time"; + + mkdir $filename; + echo $time > $filename/timestamp; + + echo "[Info]开始收集日志"; + kubectl -n $1 events > $filename/events.log; + + pod_names=$(kubectl -n $1 get pods -o name); + while IFS= read -r line || [[ -n $line ]]; do + mkdir -p $filename/$line; + 
kubectl -n $1 describe $line > $filename/$line/details.log; + kubectl -n $1 logs --previous --since $2 --all-containers=true --ignore-errors=true $line > $filename/$line/previous.log; + kubectl -n $1 logs --since $2 --all-containers=true --ignore-errors=true $line > $filename/$line/current.log; + done < <(printf '%s' "$pod_names"); + + tar -czf $filename.tar.gz $filename/; + rm -rf $filename; + + echo -e "[Info]收集日志结束,请将$filename.tar.gz提供给我们进行分析"; +} + + +if [[ $# -lt 2 ]]; then + help +else + main $1 $2; +fi + \ No newline at end of file diff --git a/script/mindspore-intelligence/scripts/9-other-script/import_images.sh b/script/mindspore-intelligence/scripts/9-other-script/import_images.sh new file mode 100755 index 0000000000000000000000000000000000000000..e5b97706a7045166bd4856bd87f4589913d1a208 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/import_images.sh @@ -0,0 +1,258 @@ +#!/bin/bash + +# 颜色定义 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +MAGENTA='\033[0;35m' +CYAN='\033[0;36m' +NC='\033[0m' # 恢复默认颜色 + +# 默认配置 +DEFAULT_VERSION="0.9.5" +IMAGE_BASE_DIR="/home/eulercopilot/images" + +# 显示帮助信息 +show_help() { + echo -e "${YELLOW}使用说明:${NC}" + echo -e " $0 [选项] [参数]" + echo -e "${YELLOW}选项:${NC}" + echo -e " -v, --version <版本>\t指定镜像版本 (默认: ${DEFAULT_VERSION})" + echo -e " -i, --import <文件>\t导入单个镜像文件" + echo -e " -d, --delete <镜像>\t删除指定镜像 (格式: repo/image:tag)" + echo -e " --delete-all\t\t删除所有neocopilot镜像" + echo -e " -h, --help\t\t显示帮助信息" + echo -e "${YELLOW}示例:${NC}" + echo -e " $0 -v 0.9.4\t\t导入0.9.4版本的镜像" + echo -e " $0 -i $IMAGE_BASE_DIR/$DEFAULT_VERSION/myimage.tar\t导入单个镜像文件" + echo -e " $0 -d hub.oepkgs.net/neocopilot/authhub:0.9.3-arm\t删除指定镜像" + echo -e " $0 --delete-all\t删除所有neocopilot镜像" + exit 0 +} + +# 参数解析 +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + -v|--version) + eulercopilot_version="$2" + shift 2 + ;; + -i|--import) + single_image_file="$2" + shift 2 + ;; + -d|--delete) + image_to_delete="$2" + shift 2 + ;; + --delete-all) + delete_all_images=true + shift + ;; + -h|--help) + show_help + ;; + *) + echo -e "${RED}未知参数: $1${NC}" + show_help + exit 1 + ;; + esac + done +} + +# 系统架构检测 +detect_architecture() { + ARCH=$(uname -m) + case $ARCH in + x86_64) + ARCH_SUFFIX="x86" + ;; + aarch64|armv*) + ARCH_SUFFIX="arm" + ;; + *) + echo -e "${RED}不支持的架构: $ARCH${NC}" + exit 1 + ;; + esac +} + +# 删除指定镜像 +delete_image() { + local image=$1 + echo -e "${YELLOW}正在删除镜像: $image${NC}" + if sudo k3s ctr -n=k8s.io images rm "$image"; then + echo -e "${GREEN} 删除成功${NC}" + else + echo -e "${RED} 删除失败${NC}" + fi +} + +# 删除所有neocopilot镜像 +delete_all_neocopilot_images() { + echo -e "${YELLOW}正在删除所有neocopilot镜像...${NC}" + sudo k3s crictl images | grep neocopilot | awk '{print $1":"$2}' | while read -r image; do + delete_image "$image" + done +} + +# 导入单个镜像文件 +import_single_image() { + local file=$1 + echo -e "${CYAN}正在导入单个镜像: $file${NC}" + + if [[ ! -f "$file" ]]; then + echo -e "${RED}错误: 文件不存在 $file${NC}" + exit 1 + fi + + if sudo k3s ctr -n=k8s.io images import "$file"; then + echo -e "${GREEN} 导入成功${NC}" + else + echo -e "${RED} 导入失败${NC}" + exit 1 + fi +} + +# 批量导入镜像 +batch_import_images() { + local version=$1 + local image_dir="$IMAGE_BASE_DIR/$version" + + if [[ ! 
-d "$image_dir" ]]; then + echo -e "${RED}错误:镜像目录不存在 $image_dir${NC}" + exit 1 + fi + + echo -e "${CYAN}正在扫描目录: $image_dir${NC}" + echo -e "${CYAN}找到以下TAR文件:${NC}" + ls -1 "$image_dir"/*.tar + + local success_count=0 + local fail_count=0 + + for tar_file in "$image_dir"/*.tar; do + [[ -f "$tar_file" ]] || continue + + echo -e "\n${BLUE}正在导入 $tar_file...${NC}" + + if sudo k3s ctr -n=k8s.io images import "$tar_file"; then + ((success_count++)) + echo -e "${GREEN} 导入成功${NC}" + else + ((fail_count++)) + echo -e "${RED} 导入失败${NC}" + fi + done + + echo -e "\n${CYAN}导入结果:${NC}" + echo -e "${GREEN}成功: $success_count 个${NC}" + echo -e "${RED}失败: $fail_count 个${NC}" + + return $fail_count +} + +# 镜像完整性检查 +check_images() { + local version=$1 + local missing_count=0 + + # 基础镜像列表(使用版本变量) + local base_images=( + "hub.oepkgs.net/neocopilot/euler-copilot-framework:${version}-arm" + "hub.oepkgs.net/neocopilot/euler-copilot-web:${version}-arm" + "hub.oepkgs.net/neocopilot/data_chain_back_end:${version}-arm" + "hub.oepkgs.net/neocopilot/data_chain_web:${version}-arm" + "hub.oepkgs.net/neocopilot/authhub:0.9.3-arm" + "hub.oepkgs.net/neocopilot/authhub-web:0.9.3-arm" + "hub.oepkgs.net/neocopilot/opengauss:latest-arm" + "hub.oepkgs.net/neocopilot/redis:7.4-alpine-arm" + "hub.oepkgs.net/neocopilot/mysql:8-arm" + "hub.oepkgs.net/neocopilot/minio:empty-arm" + "hub.oepkgs.net/neocopilot/mongo:7.0.16-arm" + "hub.oepkgs.net/neocopilot/secret_inject:dev-arm" + ) + + # 根据架构调整预期镜像标签 + local expected_images=() + for image in "${base_images[@]}"; do + if [[ "$ARCH_SUFFIX" == "x86" ]]; then + expected_image="${image/-arm/-x86}" + else + expected_image="$image" + fi + expected_images+=("$expected_image") + done + + echo -e "\n${MAGENTA}开始镜像完整性检查:${NC}" + for image in "${expected_images[@]}"; do + if sudo k3s ctr -n=k8s.io images ls | grep -q "$image"; then + echo -e "${GREEN}[存在] $image${NC}" + else + echo -e "${RED}[缺失] $image${NC}" + ((missing_count++)) + fi + done + + echo -e "\n${MAGENTA}使用crictl检查镜像:${NC}" + sudo k3s crictl images | grep neocopilot + + if [[ $missing_count -gt 0 ]]; then + echo -e "\n${RED}警告:缺少 $missing_count 个必需镜像${NC}" + return 1 + else + echo -e "\n${GREEN}所有必需镜像已就绪${NC}" + return 0 + fi +} + +# 主函数 +main() { + parse_args "$@" + detect_architecture + + # 处理删除操作 + if [[ -n "$image_to_delete" ]]; then + delete_image "$image_to_delete" + exit 0 + fi + + if [[ "$delete_all_images" == true ]]; then + delete_all_neocopilot_images + exit 0 + fi + + # 处理单个镜像导入 + if [[ -n "$single_image_file" ]]; then + import_single_image "$single_image_file" + exit 0 + fi + + # 默认批量导入模式 + local version=${eulercopilot_version:-$DEFAULT_VERSION} + + echo -e "${YELLOW}==============================${NC}" + echo -e "${CYAN}架构检测\t: ${ARCH_SUFFIX}${NC}" + echo -e "${CYAN}目标版本\t: ${version}${NC}" + echo -e "${CYAN}镜像目录\t: ${IMAGE_BASE_DIR}/${version}${NC}" + echo -e "${YELLOW}==============================${NC}" + + batch_import_images "$version" + import_result=$? 
+ + if [[ $import_result -eq 0 ]]; then + check_images "$version" || exit 1 + else + echo -e "${RED}存在导入失败的镜像,跳过完整性检查${NC}" + exit 1 + fi + + echo -e "${GREEN}系统准备就绪,所有镜像可用${NC}" +} + +# 执行主函数 +main "$@" +exit 0 diff --git a/script/mindspore-intelligence/scripts/9-other-script/install_oidc_eulercopilot.sh b/script/mindspore-intelligence/scripts/9-other-script/install_oidc_eulercopilot.sh new file mode 100755 index 0000000000000000000000000000000000000000..f098aedb893f7c42e2d64ac9c6c1722eab94cbf2 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/install_oidc_eulercopilot.sh @@ -0,0 +1,268 @@ +#!/bin/bash + +# 颜色定义 +RED='\e[31m' +GREEN='\e[32m' +YELLOW='\e[33m' +BLUE='\e[34m' +NC='\e[0m' # 恢复默认颜色 + +set -eo pipefail + +# 颜色定义 +RED='\e[31m' +GREEN='\e[32m' +YELLOW='\e[33m' +BLUE='\e[34m' +NC='\e[0m' # 恢复默认颜色 + +PLUGINS_DIR="/home/eulercopilot/semantics" +SCRIPT_PATH="$( + cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 + pwd +)/$(basename "${BASH_SOURCE[0]}")" + +DEPLOY_DIR="$( + canonical_path=$(readlink -f "$SCRIPT_PATH" 2>/dev/null || echo "$SCRIPT_PATH") + dirname "$(dirname "$(dirname "$canonical_path")")" +)" + + +get_eth0_ip() { + echo -e "${BLUE}获取 eth0 网络接口 IP 地址...${NC}" + local timeout=20 + local start_time=$(date +%s) + local interface="eth0" + + # 检查 eth0 是否存在,并等待其变为可用状态 + while [ $(( $(date +%s) - start_time )) -lt $timeout ]; do + if ip link show "$interface" > /dev/null 2>&1; then + break + else + sleep 1 + fi + done + + if ! ip link show "$interface" > /dev/null 2>&1; then + echo -e "${RED}错误:未找到网络接口 ${interface}${NC}" + exit 1 + fi + + # 获取 IP 地址 + host=$(ip addr show "$interface" | grep -w inet | awk '{print $2}' | cut -d'/' -f1) + + if [[ -z "$host" ]]; then + echo -e "${RED}错误:未能从接口 ${interface} 获取 IP 地址${NC}" + exit 1 + fi + + echo -e "${GREEN}使用网络接口:${interface},IP 地址:${host}${NC}" +} + +get_user_input() { + echo -e "${BLUE}请输入 OAuth 客户端配置:${NC}" + read -p "Client ID: " client_id + read -s -p "Client Secret: " client_secret + + echo + + # 检查必填字段 + if [[ -z "$client_id" || -z "$client_secret" ]]; then + echo -e "${RED}错误:Client Secret 存在空行,请重新输入${NC}" + read -s -p "Client Secret: " client_secret + echo + echo -e "${GREEN}Client Secret 已正确输入${NC}" + fi + + # 处理Copilot域名 + echo -e "${BLUE}请输入 EulerCopilot 域名(直接回车使用默认值 www.eulercopilot.local):${NC}" + read -p "EulerCopilot 的前端域名: " eulercopilot_domain + + if [[ -z "$eulercopilot_domain" ]]; then + eulercopilot_domain="www.eulercopilot.local" + echo -e "${GREEN}使用默认域名:${eulercopilot_domain}${NC}" + else + if ! [[ "${eulercopilot_domain}" =~ ^([a-zA-Z0-9-]+\.)*[a-zA-Z0-9-]+\.[a-zA-Z]{2,}$ ]]; then + echo -e "${RED}错误:输入的EulerCopilot域名格式不正确${NC}" + exit 1 + fi + echo -e "${GREEN}输入域名:${eulercopilot_domain}${NC}" + fi + + # 处理Authhub域名 + echo -e "${BLUE}请输入 Authhub 的域名配置(直接回车使用默认值 authhub.eulercopilot.local):${NC}" + read -p "Authhub 的前端域名: " authhub_domain + if [[ -z "$authhub_domain" ]]; then + authhub_domain="authhub.eulercopilot.local" + echo -e "${GREEN}使用默认域名:${authhub_domain}${NC}" + else + if ! 
[[ "${authhub_domain}" =~ ^([a-zA-Z0-9-]+\.)*[a-zA-Z0-9-]+\.[a-zA-Z]{2,}$ ]]; then + echo -e "${RED}错误:输入的AuthHub域名格式不正确${NC}" + exit 1 + fi + echo -e "${GREEN}输入域名:${authhub_domain}${NC}" + fi + +} + +# 检查必要目录 +check_directories() { + echo -e "${BLUE}检查语义接口目录是否存在...${NC}" + if [ -d "${PLUGINS_DIR}" ]; then + echo -e "${GREEN}目录已存在:${PLUGINS_DIR}${NC}" + else + if mkdir -p "${PLUGINS_DIR}"; then + echo -e "${GREEN}目录已创建:${PLUGINS_DIR}${NC}" + else + echo -e "${RED}错误:无法创建目录 ${PLUGINS_DIR}${NC}" + exit 1 + fi + fi +} + +# 安装前检查并删除已有部署 +check_and_delete_existing_deployment() { + echo -e "${YELLOW}检查是否存在已部署的euler-copilot...${NC}" + if helm list -n euler-copilot --short | grep -q "^euler-copilot$"; then + echo -e "${YELLOW}发现已存在的euler-copilot部署,正在删除...${NC}" + helm uninstall -n euler-copilot euler-copilot + + if [ $? -ne 0 ]; then + echo -e "${RED}错误:删除旧版euler-copilot失败${NC}" + exit 1 + fi + + echo -e "${YELLOW}等待旧部署清理完成(10秒)...${NC}" + sleep 10 + else + echo -e "${GREEN}未找到已存在的euler-copilot部署,继续安装...${NC}" + fi +} + +# 修改YAML配置文件的方法 +modify_yaml() { + echo -e "${BLUE}开始修改YAML配置文件...${NC}" + cd + python3 ${SCRIPTS_DIR}/8-install-EulerCopilot/modify_eulercopilot_yaml.py \ + "${CHART_DIR}/euler_copilot/values.yaml" \ + "${CHART_DIR}/euler_copilot/values.yaml" \ + --set "models.answer.url=http://120.46.78.178:8000" \ + --set "models.answer.key=sk-EulerCopilot1bT1WtG2ssG92pvOPTkpT3BlbkFJVruTv8oUe" \ + --set "models.answer.name=Qwen2.5-32B-Instruct-GPTQ-Int4" \ + --set "models.answer.ctx_length=8192" \ + --set "models.answer.max_tokens=2048" \ + --set "models.embedding.url=https://192.168.50.4:8001/embedding/v1" \ + --set "models.embedding.key=sk-123456" \ + --set "models.embedding.name=bge-m3" \ + --set "login.type=oidc" \ + --set "login.client.id=623c3c2f1eca5ad5fca6c58a" \ + --set "login.client.secret=5d07c65f44fa1beb08b36f90af314aef" \ + --set "login.oidc.token_url=https://omapi.test.osinfra.cn/oneid/oidc/token" \ + --set "login.oidc.user_url=https://omapi.test.osinfra.cn/oneid/oidc/user" \ + --set "login.oidc.redirect=https://omapi.test.osinfra.cn/oneid/oidc/authorize?client_id=623c3c2f1eca5ad5fca6c58a&redirect_uri=https://qa-robot-openeuler.test.osinfra.cn/api/auth/login&scope=openid+profile+email+phone+offline_access&complementation=phone&access_type=offline&response_type=code" \ + --set "domain.euler_copilot=qa-robot-eulercopilot.test.osinfra.cn" \ + + if [ $? -ne 0 ]; then + echo -e "${RED}错误:YAML文件修改失败${NC}" + exit 1 + fi + echo -e "${GREEN}YAML文件修改成功!${NC}" +} + +# 进入Chart目录的方法 +enter_chart_directory() { + echo -e "${BLUE}进入Chart目录...${NC}" + cd "${DEPLOY_DIR}/chart/" || { + echo -e "${RED}错误:无法进入Chart目录 ${DEPLOY_DIR}/chart/${NC}" + exit 1 + } +} + +# 执行Helm安装的方法 +execute_helm_install() { + echo -e "${BLUE}开始部署EulerCopilot...${NC}" + helm install -n euler-copilot euler-copilot ./euler_copilot + + if [ $? 
-ne 0 ]; then + echo -e "${RED}错误:Helm安装失败${NC}" + exit 1 + fi + echo -e "${GREEN}Helm安装EulerCopilot成功!${NC}" +} + +check_pods_status() { + echo -e "${BLUE}==> 等待初始化就绪(30秒)...${NC}" + sleep 30 + + local timeout=100 + local start_time=$(date +%s) + + echo -e "${BLUE}开始监控Pod状态(总超时时间300秒)...${NC}" + echo -e "${BLUE}镜像拉取中...${NC}" + + while true; do + local current_time=$(date +%s) + local elapsed=$((current_time - start_time)) + + # 超时处理逻辑 + if [ $elapsed -gt $timeout ]; then + echo -e "${YELLOW}警告:部署超时!请检查以下Pod状态:${NC}" + kubectl get pods -n euler-copilot + echo -e "${YELLOW}注意:部分Pod可能仍在启动中,可稍后手动检查${NC}" + return 1 + fi + + # 检查所有Pod状态 + local not_running=$( + kubectl get pods -n euler-copilot -o jsonpath='{range .items[*]}{.metadata.name} {.status.phase} {.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' \ + | awk '$2 != "Running" || $3 != "True" {print $1 " " $2}' + ) + + if [ -z "$not_running" ]; then + echo -e "${GREEN}所有Pod已正常运行!${NC}" + kubectl get pods -n euler-copilot + return 0 + else + echo "等待Pod就绪(已等待 ${elapsed} 秒)..." + echo "当前未启动Pod:" + echo "$not_running" | awk '{print " - " $1 " (" $2 ")"}' + sleep 10 + fi + done +} + +# 主函数执行各个步骤 +main() { + get_eth0_ip + get_user_input + check_directories + check_and_delete_existing_deployment + modify_yaml + enter_chart_directory + execute_helm_install + + # Pod状态检查并处理结果 + if check_pods_status; then + echo -e "${GREEN}所有组件已就绪!${NC}" + else + echo -e "${YELLOW}注意:部分组件尚未就绪,可稍后手动检查${NC}" + fi + + # 最终部署信息输出 + echo -e "\n${GREEN}==================================================${NC}" + echo -e "${GREEN} EulerCopilot 部署完成! ${NC}" + echo -e "${GREEN}==================================================${NC}" + echo -e "${YELLOW}EulerCopilot访问地址:\thttps://${eulercopilot_domain}${NC}" + echo -e "${YELLOW}AuthHub管理地址:\thttps://${authhub_domain}${NC}" + echo -e "${YELLOW}插件目录:\t\t${PLUGINS_DIR}${NC}" + echo -e "${YELLOW}Chart目录:\t${DEPLOY_DIR}/chart/${NC}" + echo + echo -e "${BLUE}温馨提示:" + echo -e "${BLUE}1. 请确保域名已正确解析到集群Ingress地址${NC}" + echo -e "${BLUE}2. 首次拉取RAG镜像可能需要约1-3分钟,POD会稍后自动启动${NC}" + echo -e "${BLUE}3. 查看实时状态:kubectl get pods -n euler-copilot${NC}" + echo -e "${BLUE}4. 查看镜像:k3s crictl images${NC}" +} + +# 调用主函数 +main diff --git a/script/mindspore-intelligence/scripts/9-other-script/modify_eulercopilot_yaml.py b/script/mindspore-intelligence/scripts/9-other-script/modify_eulercopilot_yaml.py new file mode 100755 index 0000000000000000000000000000000000000000..4f7ac2479b679f0b21b85c30790198cdc00e76a7 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/modify_eulercopilot_yaml.py @@ -0,0 +1,120 @@ +import sys +import argparse +from typing import Union + +# 版本标记和依赖检测 +try: + from ruamel.yaml import YAML + from ruamel.yaml.comments import CommentedMap + USING_RUAMEL = True +except ImportError: + try: + import yaml # PyYAML 回退 + USING_RUAMEL = False + except ImportError as e: + sys.stderr.write("错误:需要 YAML 处理库\n") + sys.stderr.write("请选择以下方式之一安装:\n") + sys.stderr.write("1. (推荐) ruamel.yaml: pip install ruamel.yaml\n") + sys.stderr.write("2. 
PyYAML: pip install PyYAML\n") + sys.exit(1) + +def parse_value(value: str) -> Union[str, int, float, bool]: + """智能转换值的类型""" + value = value.strip() + lower_val = value.lower() + + if lower_val in {'true', 'false'}: + return lower_val == 'true' + if lower_val in {'null', 'none'}: + return None + + try: + return int(value) + except ValueError: + try: + return float(value) + except ValueError: + pass + + # 处理引号包裹的字符串 + if len(value) > 1 and value[0] == value[-1] and value[0] in {'"', "'"}: + return value[1:-1] + + return value + +def set_nested_value(data: dict, key_path: str, value: str) -> None: + """递归设置嵌套字典的值""" + keys = key_path.split('.') + current = data + + try: + for key in keys[:-1]: + # 自动创建不存在的层级 + if key not in current: + current[key] = CommentedMap() if USING_RUAMEL else {} + current = current[key] + + final_key = keys[-1] + current[final_key] = parse_value(value) + except TypeError as e: + raise ValueError(f"路径 {key_path} 中存在非字典类型的中间节点") from e + +def main(): + parser = argparse.ArgumentParser( + description='YAML 配置文件修改工具', + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument('input', help='输入YAML文件路径') + parser.add_argument('output', help='输出YAML文件路径') + parser.add_argument('--set', + action='append', + required=True, + help='格式: path.to.key=value (可多次使用)', + metavar='KEY_PATH=VALUE') + + args = parser.parse_args() + + # 初始化 YAML 处理器 + if USING_RUAMEL: + yaml_processor = YAML() + yaml_processor.preserve_quotes = True + yaml_processor.indent(mapping=2, sequence=4, offset=2) + else: + yaml_processor = yaml # 使用 PyYAML 模块 + + # 读取文件(修正后的部分) + try: + with open(args.input, 'r') as f: # 确保这行正确闭合 + if USING_RUAMEL: + data = yaml_processor.load(f) + else: + data = yaml.safe_load(f) + except Exception as e: + raise SystemExit(f"读取文件失败: {str(e)}") + + # 处理修改参数 + for item in args.set: + if '=' not in item: + raise ValueError(f"无效格式: {item},应使用 KEY_PATH=VALUE 格式") + + key_path, value = item.split('=', 1) + if not key_path: + raise ValueError("键路径不能为空") + + try: + set_nested_value(data, key_path, value) + except Exception as e: + raise SystemExit(f"设置 {key_path} 时出错: {str(e)}") + + # 写入文件 + try: + with open(args.output, 'w') as f: + if USING_RUAMEL: + yaml_processor.dump(data, f) + else: + yaml.dump(data, f, default_flow_style=False, indent=2) + except Exception as e: + raise SystemExit(f"写入文件失败: {str(e)}") + +if __name__ == '__main__': + main() diff --git a/script/mindspore-intelligence/scripts/9-other-script/prepare_docker.sh b/script/mindspore-intelligence/scripts/9-other-script/prepare_docker.sh new file mode 100755 index 0000000000000000000000000000000000000000..2ae2e11812dd68b15dea872fa177363f8681f1d1 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/prepare_docker.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +function stop_docker { + echo -e "[Info]检查是否已安装Docker"; + if ! [[ -x $(command -v docker) ]]; then + echo -e "[Info]未安装Docker"; + return 0; + fi + + echo -e "\033[33m[Warning]即将停止Docker服务,确定继续吗?\033[0m"; + read -p "(Y/n): " choice; + case $choice in + [Yy]) + systemctl stop docker + if [[ $? 
-ne 0 ]]; then + echo -e "\033[31m[Error]停止Docker服务错误,中止运行\033[0m" + return 1 + else + echo -e "\033[32m[Success]停止Docker服务成功\033[0m" + fi + ;; + [Nn]) + echo -e "\033[31m[Error]操作取消\033[0m" + return 1 + ;; + *) + echo -e "\033[31m[Error]无效输入,操作取消\033[0m" + return 1 + ;; + esac + + echo -e "\033[33m[Warning]即将尝试卸载旧版本Docker,确定继续吗?\033[0m"; + read -p "(Y/n): " choice2; + case $choice2 in + [Yy]) + yum remove -y docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine + if [[ $? -ne 0 ]]; then + echo -e "\033[31m[Error]Docker旧版本卸载失败\033[0m" + return 1 + else + echo -e "\033[32m[Success]Docker旧版本已卸载\033[0m" + fi + ;; + [Nn]) + echo -e "\033[31m[Error]操作取消\033[0m" + return 1 + ;; + *) + echo -e "\033[31m[Error]无效输入,操作取消\033[0m" + return 1 + ;; + esac + return 0; +} + +function setup_docker_repo { + echo -e "[Info]设置Docker RPM Repo"; + basearch=$(arch) + cat > /etc/yum.repos.d/docker-ce.repo <<-EOF +[docker-ce-stable] +name=Docker CE Stable - \$basearch +baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/9/\$basearch/stable +enabled=1 +gpgcheck=1 +gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/gpg +EOF + echo -e "[Info]更新yum软件包列表"; + yum makecache + if [[ $? -ne 0 ]]; then + echo -e "\033[31m[Error]更新yum软件包列表失败\033[0m"; + return 1; + else + echo -e "\033[32m[Success]yum软件包列表更新成功\033[0m"; + fi + return 0; +} + +function install_docker { + echo -e "[Info]安装Docker"; + yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; + if [[ $? -ne 0 ]]; then + echo -e "\033[31m[Error]安装Docker失败\033[0m"; + return 1; + else + echo -e "\033[32m[Success]安装Docker成功\033[0m"; + fi + systemctl enable docker; + + echo -e "[Info]设置DockerHub镜像"; + if ! [[ -d "/etc/docker" ]]; then + mkdir /etc/docker; + fi + + if [[ -f "/etc/docker/daemon.json" ]]; then + echo -e "\033[31m[Error]daemon.json已存在,请手动配置DockerHub镜像\033[0m"; + else + cat > /etc/docker/daemon.json <<-EOF +{ + "registry-mirrors": [ + "https://docker.anyhub.us.kg", + "https://docker.1panel.live", + "https://dockerhub.icu", + "https://docker.ckyl.me", + "https://docker.awsl9527.cn", + "https://dhub.kubesre.xyz", + "https://gg3gwnry.mirror.aliyuncs.com" + ] +} +EOF + fi + systemctl restart docker; + if [[ $? -ne 0 ]]; then + echo -e "\033[31m[Error]Docker启动失败\033[0m"; + return 1; + else + echo -e "\033[32m[Success]Docker启动成功\033[0m"; + return 0; + fi +} + +function login_docker { + echo -e "[Info]登录Docker私仓"; + read -p "仓库地址:" url; + read -p "用户名:" username; + read -p "密码:" password; + + docker login -u $username -p $password $url; + if [[ $? -ne 0 ]]; then + echo -e "\033[31m[Error]Docker登录失败\033[0m"; + return 1; + else + echo -e "\033[32m[Success]Docker登录成功\033[0m"; + return 0; + fi +} + +function main { + echo -e "[Info]正在更新Docker"; + + stop_docker; + if [[ $? -ne 0 ]]; then + return 1; + fi + + setup_docker_repo; + if [[ $? -ne 0 ]]; then + return 1; + fi + + install_docker; + if [[ $? -ne 0 ]]; then + return 1; + fi + + login_docker; + if [[ $? 
-ne 0 ]]; then + return 1; + fi + + return 0; +} + +main diff --git a/script/mindspore-intelligence/scripts/9-other-script/save_images.sh b/script/mindspore-intelligence/scripts/9-other-script/save_images.sh new file mode 100755 index 0000000000000000000000000000000000000000..81dabf80267b3746c469a9e571e208fda581b324 --- /dev/null +++ b/script/mindspore-intelligence/scripts/9-other-script/save_images.sh @@ -0,0 +1,187 @@ +#!/bin/bash + +# 颜色定义 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # 恢复默认颜色 + +# 默认配置 +eulercopilot_version="0.9.6" +ARCH_SUFFIX="" +OUTPUT_DIR="/home/eulercopilot/images/${eulercopilot_version}" + +# 显示帮助信息 +show_help() { + echo -e "${YELLOW}使用说明:${NC}" + echo -e " $0 [选项]" + echo -e "" + echo -e "${YELLOW}选项:${NC}" + echo -e " --help 显示此帮助信息" + echo -e " --version <版本> 指定 EulerCopilot 版本 (默认: ${eulercopilot_version})" + echo -e " --arch <架构> 指定系统架构 (arm/x86, 默认自动检测)" + echo -e "" + echo -e "${YELLOW}示例:${NC}" + echo -e " $0 --version ${eulercopilot_version} --arch arm" + echo -e " $0 --help" + exit 0 +} + +# 解析命令行参数 +while [[ $# -gt 0 ]]; do + case "$1" in + --help) + show_help + ;; + --version) + if [ -n "$2" ]; then + eulercopilot_version="$2" + OUTPUT_DIR="/home/eulercopilot/images/${eulercopilot_version}" + shift + else + echo -e "${RED}错误: --version 需要指定一个版本号${NC}" + exit 1 + fi + ;; + --arch) + if [ -n "$2" ]; then + case "$2" in + arm|x86) + ARCH_SUFFIX="$2" + ;; + *) + echo -e "${RED}错误: 不支持的架构 '$2',必须是 arm 或 x86${NC}" + exit 1 + ;; + esac + shift + else + echo -e "${RED}错误: --arch 需要指定一个架构 (arm/x86)${NC}" + exit 1 + fi + ;; + *) + echo -e "${RED}未知参数: $1${NC}" + show_help + ;; + esac + shift +done + +# 自动检测架构(如果未通过参数指定) +if [ -z "$ARCH_SUFFIX" ]; then + ARCH=$(uname -m) + case $ARCH in + x86_64) + ARCH_SUFFIX="x86" + ;; + aarch64|armv*) + ARCH_SUFFIX="arm" + ;; + *) + echo -e "${RED}不支持的架构: $ARCH${NC}" + exit 1 + ;; + esac +fi + +mkdir -p "$OUTPUT_DIR" + +# 镜像列表(使用版本变量) +BASE_IMAGES=( + "hub.oepkgs.net/neocopilot/euler-copilot-framework:${eulercopilot_version}-arm" + "hub.oepkgs.net/neocopilot/euler-copilot-web:${eulercopilot_version}-arm" + "hub.oepkgs.net/neocopilot/data_chain_back_end:${eulercopilot_version}-arm" + "hub.oepkgs.net/neocopilot/data_chain_web:${eulercopilot_version}-arm" + "hub.oepkgs.net/neocopilot/authhub:0.9.3-arm" + "hub.oepkgs.net/neocopilot/authhub-web:0.9.3-arm" + "hub.oepkgs.net/neocopilot/opengauss:latest-arm" + "hub.oepkgs.net/neocopilot/redis:7.4-alpine-arm" + "hub.oepkgs.net/neocopilot/mysql:8-arm" + "hub.oepkgs.net/neocopilot/minio:empty-arm" + "hub.oepkgs.net/neocopilot/mongo:7.0.16-arm" + "hub.oepkgs.net/neocopilot/secret_inject:dev-arm" +) + +# 预定义文件名列表(与BASE_IMAGES顺序严格对应) +FILE_NAMES=( + "euler-copilot-framework.tar" + "euler-copilot-web.tar" + "data_chain_back_end.tar" + "data_chain_web.tar" + "authhub.tar" + "authhub-web.tar" + "opengauss.tar" + "redis.tar" + "mysql.tar" + "minio.tar" + "mongo.tar" + "secret_inject.tar" +) + +# 校验列表一致性 +if [ ${#BASE_IMAGES[@]} -ne ${#FILE_NAMES[@]} ]; then + echo -e "${RED}错误:镜像列表与文件名列表数量不匹配${NC}" + exit 1 +fi + +# 初始化计数器 +total=${#BASE_IMAGES[@]} +success=0 +fail=0 + +# 镜像处理函数 +process_image() { + local raw_image=$1 + local filename=$2 + + # 调整架构标签 + local adjusted_image="${raw_image/-arm/-${ARCH_SUFFIX}}" + local output_path="${OUTPUT_DIR}/${filename}" + + echo -e "\n${BLUE}正在处理:${adjusted_image}${NC}" + + # 拉取镜像 + if ! 
docker pull "$adjusted_image"; then + echo -e "${RED}拉取失败:${adjusted_image}${NC}" + return 1 + fi + + # 保存镜像 + if docker save -o "$output_path" "$adjusted_image"; then + echo -e "${GREEN}镜像已保存到:${output_path}${NC}" + return 0 + else + echo -e "${RED}保存失败:${output_path}${NC}" + return 1 + fi +} + +# 打印执行信息 +echo -e "${BLUE}==============================${NC}" +echo -e "${YELLOW}架构\t: ${ARCH_SUFFIX}${NC}" +echo -e "${YELLOW}版本\t: ${eulercopilot_version}${NC}" +echo -e "${YELLOW}存储目录\t: ${OUTPUT_DIR}${NC}" +echo -e "${YELLOW}镜像数量\t: ${total}${NC}" +echo -e "${BLUE}==============================${NC}" + +# 遍历处理所有镜像 +for index in "${!BASE_IMAGES[@]}"; do + if process_image "${BASE_IMAGES[$index]}" "${FILE_NAMES[$index]}"; then + ((success++)) + else + ((fail++)) + fi +done + +# 输出最终结果 +echo -e "\n${BLUE}==============================${NC}" +echo -e "${GREEN}操作完成!${NC}" +echo -e "${BLUE}==============================${NC}" +echo -e "${GREEN}成功\t: ${success} 个${NC}" +echo -e "${RED}失败\t: ${fail} 个${NC}" +echo -e "${BLUE}==============================${NC}" + +# 返回状态码 +exit $((fail > 0 ? 1 : 0)) diff --git a/script/mindspore-intelligence/scripts/deploy.sh b/script/mindspore-intelligence/scripts/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..1b4d69ceeb404832455fd792d2a39dbb05f2e94b --- /dev/null +++ b/script/mindspore-intelligence/scripts/deploy.sh @@ -0,0 +1,331 @@ +#!/bin/bash + + +# 顶层菜单 +show_top_menu() { + clear + echo "==============================" + echo " 主部署菜单 " + echo "==============================" + echo "0) 一键自动部署" + echo "1) 手动分步部署" + echo "2) 重启服务" + echo "3) 卸载所有组件并清除数据" + echo "4) 退出程序" + echo "==============================" + echo -n "请输入选项编号(0-3): " +} + +# 安装选项菜单(手动部署子菜单) +show_sub_menu() { + clear + echo "==============================" + echo " 手动分步部署菜单 " + echo "==============================" + echo "1) 执行环境检查脚本" + echo "2) 安装k3s和helm" + echo "3) 安装Ollama" + echo "4) 部署Deepseek模型" + echo "5) 部署Embedding模型" + echo "6) 安装数据库" + echo "7) 安装AuthHub" + echo "8) 安装EulerCopilot" + echo "9) 返回主菜单" + echo "==============================" + echo -n "请输入选项编号(0-9): " +} + +show_restart_menu() { + clear + echo "==============================" + echo " 服务重启菜单 " + echo "==============================" + echo "可重启的服务列表:" + echo "1) authhub-backend" + echo "2) authhub" + echo "3) framework" + echo "4) minio" + echo "5) mongo" + echo "6) mysql" + echo "7) opengauss" + echo "8) rag" + echo "9) rag-web" + echo "10) redis" + echo "11) web" + echo "12) 返回主菜单" + echo "==============================" + echo -n "请输入要重启的服务编号(1-12): " +} + + +# 带错误检查的脚本执行函数 +run_script_with_check() { + local script_path=$1 + local script_name=$2 + + echo "--------------------------------------------------" + echo "开始执行:$script_name" + "$script_path" || { + echo -e "\n\033[31m$script_name 执行失败!\033[0m" + exit 1 + } + echo -e "\n\033[32m$script_name 执行成功!\033[0m" + echo "--------------------------------------------------" +} + +# 执行子菜单对应脚本 +run_sub_script() { + case $1 in + 1) + run_script_with_check "./1-check-env/check_env.sh" "环境检查脚本" + ;; + 2) + run_script_with_check "./2-install-tools/install_tools.sh" "k3s和helm安装脚本" + ;; + 3) + run_script_with_check "./3-install-ollama/install_ollama.sh" "Ollama安装脚本" + ;; + 4) + run_script_with_check "./4-deploy-deepseek/deploy_deepseek.sh" "Deepseek部署脚本" + ;; + 5) + run_script_with_check "./5-deploy-embedding/deploy-embedding.sh" "Embedding部署脚本" + ;; + 6) + run_script_with_check "./6-install-databases/install_databases.sh" "数据库安装脚本" + ;; + 7) + 
run_script_with_check "./7-install-authhub/install_authhub.sh" "AuthHub安装脚本" + ;; + 8) + run_script_with_check "./8-install-EulerCopilot/install_eulercopilot.sh" "EulerCopilot安装脚本" + ;; + 9) + echo "正在返回主菜单..." + echo "按任意键继续..." + read -r -n 1 -s + return 2 # 特殊返回码表示返回上级菜单 + ;; + *) + echo -e "\033[31m无效的选项,请输入0-9之间的数字\033[0m" + return 1 + ;; + esac + return 0 +} + +# 卸载所有组件 +uninstall_all() { + echo -e "\033[31m警告:此操作将永久删除所有组件和数据!\033[0m" + read -p "确认要继续吗?(y/n) " confirm + + if [[ $confirm != "y" && $confirm != "Y" ]]; then + echo "取消卸载操作" + return + fi + + # 设置超时时间(单位:秒) + local HELM_TIMEOUT=300 + local PVC_DELETE_TIMEOUT=120 + local FORCE_DELETE=false + + echo "开始卸载所有Helm Release..." + local RELEASES + RELEASES=$(helm list -n euler-copilot --short) + + # 删除所有关联的Helm Release + if [ -n "$RELEASES" ]; then + echo -e "${YELLOW}找到以下Helm Release,开始清理...${NC}" + for release in $RELEASES; do + echo -e "${BLUE}正在删除Helm Release: ${release}${NC}" + if ! helm uninstall "$release" -n euler-copilot \ + --wait \ + --timeout ${HELM_TIMEOUT}s \ + --no-hooks; then + echo -e "${RED}警告:Helm Release ${release} 删除异常,尝试强制删除...${NC}" + FORCE_DELETE=true + helm uninstall "$release" -n euler-copilot \ + --timeout 10s \ + --no-hooks \ + --force || true + fi + done + else + echo -e "${YELLOW}未找到需要清理的Helm Release${NC}" + fi + + # 等待资源释放 + sleep 10 + + # 获取所有PVC列表 + local pvc_list + pvc_list=$(kubectl get pvc -n euler-copilot -o name 2>/dev/null) + + # 删除PVC(带重试机制) + if [ -n "$pvc_list" ]; then + echo -e "${YELLOW}找到以下PVC,开始清理...${NC}" + local start_time=$(date +%s) + local end_time=$((start_time + PVC_DELETE_TIMEOUT)) + + for pvc in $pvc_list; do + while : ; do + # 尝试正常删除 + if kubectl delete $pvc -n euler-copilot --timeout=30s 2>/dev/null; then + break + fi + + # 检查是否超时 + if [ $(date +%s) -ge $end_time ]; then + echo -e "${RED}错误:PVC删除超时,尝试强制清理...${NC}" + + # 移除Finalizer强制删除 + kubectl patch $pvc -n euler-copilot \ + --type json \ + --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' 2>/dev/null || true + + # 强制删除 + kubectl delete $pvc -n euler-copilot \ + --force \ + --grace-period=0 2>/dev/null && break || true + + # 最终确认 + if ! kubectl get $pvc -n euler-copilot &>/dev/null; then + break + fi + echo -e "${RED}严重错误:无法删除PVC ${pvc}${NC}" >&2 + return 1 + fi + + # 等待后重试 + sleep 5 + echo -e "${YELLOW}重试删除PVC: ${pvc}...${NC}" + done + done + else + echo -e "${YELLOW}未找到需要清理的PVC${NC}" + fi + + # 删除指定的 Secrets + local secret_list=("authhub-secret" "euler-copilot-database" "euler-copilot-system") + for secret in "${secret_list[@]}"; do + if kubectl get secret "$secret" -n euler-copilot &>/dev/null; then + echo -e "${YELLOW}找到Secret: ${secret},开始清理...${NC}" + if ! kubectl delete secret "$secret" -n euler-copilot; then + echo -e "${RED}错误:删除Secret ${secret} 失败!${NC}" >&2 + return 1 + fi + else + echo -e "${YELLOW}未找到需要清理的Secret: ${secret}${NC}" + fi + done + + # 最终清理检查 + echo -e "${YELLOW}执行最终资源检查...${NC}" + kubectl delete all --all -n euler-copilot --timeout=30s 2>/dev/null || true + + echo -e "${GREEN}资源清理完成${NC}" + echo -e "\033[32m所有组件和数据已成功清除\033[0m" +} + +# 手动部署子菜单循环 +manual_deployment_loop() { + while true; do + show_sub_menu + read -r sub_choice + run_sub_script "$sub_choice" + retval=$? + + if [ $retval -eq 2 ]; then # 返回主菜单 + break + elif [ $retval -eq 0 ]; then + echo "按任意键继续..." 
+ read -r -n 1 -s + fi + done +} + +restart_pod() { + local service="$1" + if [[ -z "$service" ]]; then + echo -e "${RED}错误:请输入服务名称${NC}" + return 1 + fi + + local deployment="${service}-deploy" + echo -e "${BLUE}正在验证部署是否存在...${NC}" + if ! kubectl get deployment "$deployment" -n euler-copilot &> /dev/null; then + echo -e "${RED}错误:在 euler-copilot 命名空间中找不到部署 $deployment${NC}" + return 1 + fi + + echo -e "${YELLOW}正在重启部署 $deployment ...${NC}" + if kubectl rollout restart deployment/"$deployment" -n euler-copilot; then + echo -e "${GREEN}成功触发滚动重启!${NC}" + echo -e "可以使用以下命令查看状态:\nkubectl rollout status deployment/$deployment -n euler-copilot" + return 0 + else + echo -e "${RED}重启部署 $deployment 失败!${NC}" + return 1 + fi +} + +# 主程序循环改进 +while true; do + show_top_menu + read -r main_choice + + case $main_choice in + 0) + run_script_with_check "./0-one-click-deploy/one-click-deploy.sh" "一键自动部署" + echo "按任意键继续..." + read -r -n 1 -s + ;; + 1) + manual_deployment_loop + ;; + 2) + while true; do + show_restart_menu + read -r restart_choice + case $restart_choice in + 1) service="authhub-backend" ;; + 2) service="authhub" ;; + 3) service="framework" ;; + 4) service="minio" ;; + 5) service="mongo" ;; + 6) service="mysql" ;; + 7) service="opengauss" ;; + 8) service="rag" ;; + 9) service="rag-web" ;; + 10) service="redis" ;; + 11) service="web" ;; + 12) break ;; + *) + echo -e "${RED}无效的选项,请输入1-12之间的数字${NC}" + continue + ;; + esac + + if [[ -n "$service" ]]; then + restart_pod "$service" + echo "按任意键继续..." + read -r -n 1 -s + fi + done + ;; + + 3) + uninstall_all + echo "按任意键继续..." + read -r -n 1 -s + ;; + 4) + echo "退出部署系统" + exit 0 + ;; + *) + echo -e "${RED}无效的选项,请输入0-4之间的数字${NC}" + sleep 1 + ;; + esac +done
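
附:上述 `modify_eulercopilot_yaml.py` 的一个最小调用示意(其中的文件路径与 `--set` 键名仅为演示假设,实际键以 chart 目录中的 values.yaml 为准):

```bash
# 示意:按 KEY_PATH=VALUE 写入嵌套键并输出到新文件(路径、键名均为假设)
python3 scripts/9-other-script/modify_eulercopilot_yaml.py \
    values.yaml values.new.yaml \
    --set models.answer.endpoint=http://127.0.0.1:11434 \
    --set models.answer.enabled=true
```

脚本会逐条解析 `--set` 参数,自动创建不存在的中间层级;在安装了 ruamel.yaml 的环境下还会保留原文件中的注释与引号。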