diff --git a/oedp/build/oedp.spec b/oedp/build/oedp.spec
index 2110a7fbb2e7fa9e5d82389fb2f7bac5e174f45a..2fbe6be4916312153b9c2c61ca05b923853e88d0 100644
--- a/oedp/build/oedp.spec
+++ b/oedp/build/oedp.spec
@@ -8,7 +8,7 @@
 Source0: %{name}-%{version}.tar.gz
 BuildArch: noarch
 
-Requires: python3, ansible, python3-prettytable, tar
+Requires: python3, ansible, python3-prettytable, python3-netaddr, tar
 
 %description
 openEuler deploy tool
diff --git a/plugins/kubernetes-1.31.1/config.yaml b/plugins/kubernetes-1.31.1/config.yaml
index c93cfe904886c234718c988077b4bb42b197e9e8..738a7291e138d4e83aa9fc4962a507e73d5e6972 100644
--- a/plugins/kubernetes-1.31.1/config.yaml
+++ b/plugins/kubernetes-1.31.1/config.yaml
@@ -2,33 +2,64 @@
 all:
   children:
     masters:
       hosts:
+        # The number of master nodes must be odd
         HOST_IP1: # e.g. 192.168.10.1
           ansible_host: HOST_IP1 # e.g. 192.168.10.1
           ansible_port: 22
           ansible_user: root
           ansible_password: ""
           architecture: amd64 # e.g. [ amd64, arm64 ]
-          oeversion: 22.03-LTS # e.g. [ 22.03-LTS, 24.03-LTS ]
+          oeversion: 24.03-LTS # e.g. [ 22.03-LTS, 24.03-LTS ]
+          runtime: docker # e.g. [ docker, containerd ]
+        # HOST_IP2:
+        #   ansible_host: HOST_IP2
+        #   ansible_port: 22
+        #   ansible_user: root
+        #   ansible_password: ""
+        #   architecture: amd64
+        #   oeversion: 24.03-LTS
+        #   runtime: docker
+        # HOST_IP3:
+        #   ansible_host: HOST_IP3
+        #   ansible_port: 22
+        #   ansible_user: root
+        #   ansible_password: ""
+        #   architecture: amd64
+        #   oeversion: 24.03-LTS
+        #   runtime: docker
     workers:
       hosts:
-        HOST_IP2:
-          ansible_host: HOST_IP2
+        HOST_IP4:
+          ansible_host: HOST_IP4
           ansible_port: 22
           ansible_user: root
           ansible_password: ""
           architecture: amd64
-          oeversion: 22.03-LTS
-        HOST_IP3:
-          ansible_host: HOST_IP3
+          oeversion: 24.03-LTS
+          runtime: docker
+        HOST_IP5:
+          ansible_host: HOST_IP5
           ansible_port: 22
           ansible_user: root
           ansible_password: ""
           architecture: amd64
-          oeversion: 22.03-LTS
+          oeversion: 24.03-LTS
+          runtime: docker
+    new-workers: # the new-workers group must not be removed
+      hosts:
   vars:
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+    init_cluster_force: "true" # e.g. [ "true", "false" ] force cluster initialization
+    remove_master_no_schedule_taints: "true"
     service_cidr: 10.96.0.0/16 # service CIDR
     pod_cidr: 10.244.0.0/16 # pod IP CIDR
     certs_expired: 3650 # certificate validity period (days)
+    # lb_kube_apiserver_ip:
+    lb_kube_apiserver_port: 8443
    has_deployed_containerd: "false" # e.g. [ "true", "false" ] whether containerd is already deployed
-    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+    # The following parameters must stay consistent with hosts.ini used during the build; ignore them if not applicable
+    kubernetes_version: 1.31.1
+    calico_version: 3.28.2
+    pause_image: "registry.k8s.io/pause:3.10"
diff --git a/plugins/kubernetes-1.31.1/doc/readme.md b/plugins/kubernetes-1.31.1/doc/readme.md
index 59ad0b57dd434e72a84576a6b15af92abaaacfc3..b692f099bd8bf570fd2642f075837bb3444d7478 100644
--- a/plugins/kubernetes-1.31.1/doc/readme.md
+++ b/plugins/kubernetes-1.31.1/doc/readme.md
@@ -1,21 +1,23 @@
+# kubernetes-1.31.1 Plugin Usage Guide
+
+**The kubernetes deployment capability of this plugin is provided by [@Ultraji](https://gitee.com/ultraji)**
 
-# kubernetes one-click deployment example
+## kubernetes-1.31.1 one-click deployment example
 
-Prepare three 2U4G virtual machines (with layer-3 network connectivity) running any openEuler 24.03 or 22.03 release, with the goal of deploying a k8s cluster of 1 master and 2 workers.
+Prepare the required nodes (with layer-3 network connectivity) running any openEuler 24.03 or 22.03 LTS release, each with more than 2 CPUs and 4 GB of RAM.
 
-On any node, download and install oedp, the oeDeploy command-line tool.
+On any node, download and install the oeDeploy command-line tool `oedp (version >= 1.1.0-2)`.
 
 ````bash
-wget https://repo.oepkgs.net/openEuler/rpm/openEuler-24.03-LTS/contrib/oedp/noarch/oedp-1.0.2-1.oe2503.noarch.rpm
-yum install -y oedp-1.0.2-1.oe2503.noarch.rpm
+wget https://repo.oepkgs.net/openEuler/rpm/openEuler-24.03-LTS/contrib/oedp/noarch/oedp-1.1.0-2.oe2403sp2.noarch.rpm
+yum install -y oedp-1.1.0-2.oe2403sp2.noarch.rpm
 ````
 
-Run the following commands to fetch and extract the plugin package, making sure the `kubernetes-1.31.1` directory appears under the current directory.
+Fetch the `kubernetes-1.31.1` plugin and initialize it.
 
 ```shell
 wget https://repo.oepkgs.net/openEuler/rpm/openEuler-24.03-LTS/contrib/oedp/plugins/kubernetes-1.31.1.tar.gz
-tar -zxvf kubernetes-1.31.1.tar.gz
+oedp init kubernetes-1.31.1.tar.gz
 ```
 
 Run the `info` command to view the plugin details:
@@ -35,39 +37,52 @@
 all:
   children:
     masters:
       hosts:
-        172.27.76.114: # master node IP
-          ansible_host: 172.27.76.114 # master node IP
+        192.168.10.1:
+          ansible_host: 192.168.10.1
           ansible_port: 22
           ansible_user: root
-          ansible_password:
-          architecture: amd64 # amd64 or arm64
-          oeversion: 24.03-LTS # 22.03-LTS or 24.03-LTS
+          ansible_password: "xxxxxxxx"
+          architecture: amd64 # e.g. [ amd64, arm64 ]
+          oeversion: 24.03-LTS # e.g. [ 22.03-LTS, 24.03-LTS ]
+          runtime: docker # e.g. [ docker, containerd ]
     workers:
       hosts:
-        172.27.70.60: # worker node IP
-          ansible_host: 172.27.70.60 # worker node IP
+        192.168.10.2:
+          ansible_host: 192.168.10.2
           ansible_port: 22
           ansible_user: root
-          ansible_password:
+          ansible_password: "xxxxxxxx"
           architecture: amd64
           oeversion: 24.03-LTS
-        172.27.72.90:
-          ansible_host: 172.27.72.90
+          runtime: docker
+        192.168.10.3:
+          ansible_host: 192.168.10.3
           ansible_port: 22
           ansible_user: root
-          ansible_password:
+          ansible_password: "xxxxxxxx"
           architecture: amd64
           oeversion: 24.03-LTS
+          runtime: docker
+    new-workers: # the new-workers group must not be removed
+      hosts:
   vars:
-      init_cluster_force: "true"
-      service_cidr: 10.96.0.0/16
-      pod_cidr: 10.244.0.0/16
-      certs_expired: 3650
-      has_deployed_containerd: "false"
-      ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
-````
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+    init_cluster_force: "true" # e.g. [ "true", "false" ] force cluster initialization
+    remove_master_no_schedule_taints: "true"
+    service_cidr: 10.96.0.0/16 # service CIDR
+    pod_cidr: 10.244.0.0/16 # pod IP CIDR
+    certs_expired: 3650 # certificate validity period (days)
+    # lb_kube_apiserver_ip:
+    lb_kube_apiserver_port: 8443
+    has_deployed_containerd: "false" # e.g. [ "true", "false" ] whether containerd is already deployed
+
+    # The following parameters must stay consistent with hosts.ini used during the build; ignore them if not applicable
+    kubernetes_version: 1.31.1
+    calico_version: 3.28.2
+    pause_image: "registry.k8s.io/pause:3.10"
 
-> Note: make sure the nodes can reach each other over SSH. Both password and key login are supported; with key login, no password needs to be configured.
+````
 
 Run the following command to start the automated deployment:
@@ -81,4 +96,58 @@ oedp run install -p kubernetes-1.31.1
-oedp run delete -p kubernetes-1.31.1
+oedp run uninstall -p kubernetes-1.31.1
 ```
-> The -p option indicates the directory extracted from the archive. If you enter the kubernetes-1.31.1 plugin root directory, the -p option can be omitted when running oedp.
+> The -p option specifies the directory of the oeDeploy project.
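+
+The plugin also exposes an `add-workers` action (declared in `main.yaml`, backed by `add-node.yml`). A minimal scale-out flow — sketched here on the assumption that the new node is SSH-reachable like the others — is: describe it under `new-workers` in `config.yaml`, run the action, and move the entry under `workers` once the playbook completes.
+
+```bash
+# after describing the new node under new-workers in config.yaml:
+oedp run add-workers -p kubernetes-1.31.1
+# verify on the first master that the new worker has registered and is Ready
+kubectl get nodes -o wide
+```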
+
+## Plugin Build Guide
+
+1. Environment preparation
+
+On an openEuler 22.03/24.03 LTS release, install the dependency packages `tar` and `docker` (version > 20.x).
+
+> Tip: Newer docker versions can be obtained from the upstream community. docker-25.0.3 is available from the openEuler 24.09 repo: temporarily switch the yum source to openEuler 24.09, install the newer docker, then restore the source.
+
+2. Running the build
+
+Adjust the relevant version numbers in the build configuration file `workspace/hosts.ini` according to your needs.
+
+Trigger the build:
+
+```bash
+cd oeDeploy/plugins/kubernetes-1.31.1
+sudo ./workspace/build
+```
+
+> Tip: If a file download fails, check whether the network resource is reachable according to the error log, or configure a proxy in the script.
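+
+The version and mirror variables at the top of `workspace/build` all use the `${VAR:-default}` pattern, so they can also be overridden from the environment instead of editing the script; a sketch (`sudo -E` asks sudo to preserve the exported variables):
+
+```bash
+export DOCKER_VERSION=27.3.1
+export GITHUB_PROXY="https://gh.idayer.com/"
+sudo -E ./workspace/build
+```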
+
+3. Packaging the plugin
+
+After the build, the `kubernetes-1.31.1` directory is already a complete oeDeploy project and can be deployed with the `oedp` command-line tool.
+
+The `tmp` directory is only used to resume interrupted downloads; after a successful build it is no longer needed and can be deleted (or backed up to another directory).
+
+```bash
+rm -rf workspace/tmp
+```
+
+Pack the `kubernetes-1.31.1` directory into a `tar.gz` archive to finish packaging the oeDeploy plugin.
+
+```bash
+cd ..
+tar zcvf kubernetes-1.31.1.tar.gz kubernetes-1.31.1/
+```
+
+## Plugin Maintenance Tips
+
+- The original command line is `ansible-playbook -i ../hosts-all.ini -i hosts.ini -e @variables.yml init-k8s.yml`.
+  Every configuration item in `hosts.ini` that takes part in deployment must be reflected in `config.yaml`.
+
+- The `ansible` control node (the `oedp` package) depends on `python3-netaddr`.
+
+- The following dependency packages must be installed before the `Copy all rpm packages` step of `workspace/roles/prepare/base/tasks/main.yml`:
+  `python3-libselinux`, `python3-netaddr`, `tar`, `socat`, `conntrack-tools`,
+  `libnetfilter_cttimeout`, `libnetfilter_cthelper`, `libnetfilter_queue`,
+  `ebtables`, `iptables`
+
+- Limited by the `ansible` version, every `ansible_ssh_host` in the original scripts must be replaced with `ansible_host`.
+
+- The final step of the original `workspace/build`, which packs everything into `k8s-install.tar.gz`, is not needed and can be removed.
diff --git a/plugins/kubernetes-1.31.1/main.yaml b/plugins/kubernetes-1.31.1/main.yaml
index 44225c93b7d8f617ca186e5758903e484aace062..8a2090848f63564199c24092d1d06b0aa5b1bf4a 100644
--- a/plugins/kubernetes-1.31.1/main.yaml
+++ b/plugins/kubernetes-1.31.1/main.yaml
@@ -9,7 +9,14 @@ action:
         playbook: init-k8s.yml
         vars: variables.yml
         scope: all
-  delete:
+  add-workers:
+    description: add new workers
+    tasks:
+      - name: add new workers
+        playbook: add-node.yml
+        vars: variables.yml
+        scope: all
+  uninstall:
     description: delete kubernetes
     tasks:
       - name: delete kubernetes
diff --git a/plugins/kubernetes-1.31.1/workspace/add-node.yml b/plugins/kubernetes-1.31.1/workspace/add-node.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3f7cdcafa5abc200d08a698886f29d3a253ced57
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/add-node.yml
@@ -0,0 +1,25 @@
+- hosts:
+  - masters
+  - workers
+  - new-workers
+  roles:
+  - prepare/nameserver
+
+- hosts:
+  - new-workers
+  roles:
+  - prepare/base
+  - { role: prepare/containerd, when: "runtime == 'containerd'" }
+  - { role: prepare/docker, when: "runtime == 'docker'" }
+  - prepare/kubernetes
+  - prepare/images
+
+- hosts:
+  - new-workers
+  roles:
+  - worker
+
+- hosts:
+  - new-workers
+  roles:
+  - plugins/calico
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/build.sh b/plugins/kubernetes-1.31.1/workspace/build
old mode 100644
new mode 100755
similarity index 43%
rename from plugins/kubernetes-1.31.1/workspace/build.sh
rename to plugins/kubernetes-1.31.1/workspace/build
index 63f4154fdbeb537a7e2c292ab63f3e20416a9a78..abe7f8b25df8e061e789d3d073549ac6a163a3e7
--- a/plugins/kubernetes-1.31.1/workspace/build.sh
+++ b/plugins/kubernetes-1.31.1/workspace/build
@@ -1,28 +1,26 @@
 #!/bin/bash
-# -*- coding: utf-8 -*-
-# Copyright (c) 2024 Huawei Technologies Co., Ltd.
-# oeDeploy is licensed under the Mulan PSL v2.
-# You can use this software according to the terms and conditions of the Mulan PSL v2.
-# You may obtain a copy of Mulan PSL v2 at:
-#     http://license.coscl.org.cn/MulanPSL2
-# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
-# PURPOSE.
-# See the Mulan PSL v2 for more details.
-# Create: 2024-12-23
-# ======================================================================================================================
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
 
 set -e
 
+SUPPORTED_OE_VERSION=( "22.03-LTS" "24.03-LTS" )
+
 KUBERNETES_VERSION=${KUBERNETES_VERSION:-"1.31.1"}
 CONTAINERD_VERSION=${CONTAINERD_VERSION:-"1.7.22"}
+DOCKER_VERSION=${DOCKER_VERSION:-"27.3.1"}
+CRI_DOCKER_VERSION=${CRI_DOCKER_VERSION:-"0.3.15"}
 CALICO_VERSION=${CALICO_VERSION:-"3.28.2"}
 DOCKER_PROXY=${DOCKER_PROXY:-"m.daocloud.io/"}
+# Official URL: https://repo.openeuler.org
+OE_REPO_URL=${OE_REPO_URL:-"https://mirrors.aliyun.com/openeuler"}
+# Official URL: https://download.docker.com/linux/static/stable
+DOCKER_DOWNLOAD_URL=${DOCKER_DOWNLOAD_URL:-"https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable"}
 # Official URL: https://dl.k8s.io  mirror reference: https://github.com/DaoCloud/public-binary-files-mirror
 DL_K8S_IO_URL=${DL_K8S_IO_URL:-"https://files.m.daocloud.io/dl.k8s.io"}
-# GitHub proxies: https://gh-proxy.test.osinfra.cn// https://gh.idayer.com/
-GITHUB_PROXY=${GITHUB_PROXY:-"https://gh-proxy.test.osinfra.cn/"}
+# GitHub proxies: https://gh-proxy.test.osinfra.cn/ https://gh.idayer.com/ https://ghproxy.cn/
+GITHUB_PROXY=${GITHUB_PROXY:-"https://ghproxy.cn/"}
 
 CURRENT_PATH=$(cd `dirname $0/`;pwd)
 
@@ -38,10 +36,87 @@ case "$HOST_ARCH" in
         ;;
 esac
 
+_rename_arch() {
+    case "$1" in
+        amd64)
+            echo "x86_64"
+            ;;
+        arm64)
+            echo "aarch64"
+            ;;
+    esac
+}
+
+prepare_repo_meta() {
+    local arch=$(_rename_arch $1)
+    local tmp_dir="$CURRENT_PATH/tmp"
+
+    mkdir -p $tmp_dir
+    pushd $tmp_dir
+    for oe_version in "${SUPPORTED_OE_VERSION[@]}"; do
+        # get -primary.xml, for example: 3a11516e8e5eae0dbdbd86555d51052eb6d4f66f350221d93c13c8c84e8c88c1-primary.xml
+        [ ! -e openEuler-$oe_version-$arch-repomd.xml ] && wget --no-check-certificate $OE_REPO_URL/openEuler-$oe_version/everything/$arch/repodata/repomd.xml -O openEuler-$oe_version-$arch-repomd.xml
+        compressed_primary_xml=`cat openEuler-$oe_version-$arch-repomd.xml | grep "[A-Za-z0-9]-primary.xml.*" | awk -F'repodata/' '{print $2}' | awk -F'"' '{print $1}'`
+        primary_xml=`echo "$compressed_primary_xml" | awk '{sub(/-primary.xml.zst$|-primary.xml.gz$/, "-primary.xml"); print}'`
+        if [ ! -e $primary_xml ]; then
+            wget --no-check-certificate $OE_REPO_URL/openEuler-$oe_version/everything/$arch/repodata/$compressed_primary_xml
+            zstd -df $compressed_primary_xml || gunzip $compressed_primary_xml
+        fi
+
+        cp $primary_xml openEuler-$oe_version-$arch-primary.xml
+    done
+    popd
+
+}
+
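+# Resolve a package name to its rpm file name via the repo's primary.xml metadata;
+# the "Packages/$package-[0-9]" pattern keeps e.g. "tar" from also matching "tar-devel".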
+_search_rpm() {
+    local oe_version=$1
+    local arch=$2
+    local package=$3
+    cat $CURRENT_PATH/tmp/openEuler-$oe_version-$arch-primary.xml | grep "Packages/$package-[0-9].*.rpm" | awk -F'Packages/' '{print $2}' | awk -F'"' '{print $1}'
+}
+
+prepare_rpm_packages() {
+    local arch=$(_rename_arch $1)
+
+    packages=(
+        python3-libselinux
+        tar
+        socat
+        conntrack-tools
+        libnetfilter_cttimeout
+        libnetfilter_cthelper
+        libnetfilter_queue
+    )
+
+    for oe_version in "${SUPPORTED_OE_VERSION[@]}"; do
+        local tmp_dir="$CURRENT_PATH/tmp/$oe_version/$arch"
+        local dest_dir="$CURRENT_PATH/roles/prepare/base/files/$oe_version/$arch"
+
+        mkdir -p $tmp_dir
+        mkdir -p $dest_dir
+
+        pushd $tmp_dir
+        for package in "${packages[@]}"; do
+            rpm_package=$(_search_rpm "$oe_version" "$arch" "$package" )
+
+            [ ! -e $rpm_package ] && wget --no-check-certificate "$OE_REPO_URL/openEuler-$oe_version/everything/$arch/Packages/$rpm_package"
+            cp $rpm_package $dest_dir
+        done
+
+        popd
+    done
+
+}
+
 _download_and_save_image() {
     local image=$1
     local path=$2
     local arch=$3
+
+    tar_name=`echo "$image" | awk -F'/' '{print $NF}' | awk -F':' '{print $1}'`
+    [ -e "$path/$tar_name.tar" ] && return 0
+
     if [[ -n "$DOCKER_PROXY" ]]; then
         docker pull --platform linux/$arch $DOCKER_PROXY$image
         docker tag $DOCKER_PROXY$image $image
@@ -49,20 +124,17 @@ _download_and_save_image() {
         docker pull --platform linux/$arch $image
     fi
 
-    tar_name=`echo "$image" | awk -F'/' '{print $NF}' | awk -F':' '{print $1}'`
-    docker save -o "$path/$tar_name".tar $image
+    docker save -o "$path/$tar_name.tar" $image
 }
 
-prepare_kubernetes_binaries() {
+prepare_containerd_binaries() {
     local arch=$1
 
     local tmp_dir="$CURRENT_PATH/tmp/$arch"
     local ctd_dir="$CURRENT_PATH/roles/prepare/containerd/files/$arch"
-    local k8s_dir="$CURRENT_PATH/roles/prepare/kubernetes/files/$arch"
 
     mkdir -p $tmp_dir
     mkdir -p $ctd_dir
-    mkdir -p $k8s_dir
 
     pushd $tmp_dir
 
@@ -74,8 +146,46 @@
     [ ! -e runc.$arch ] && wget --no-check-certificate "$GITHUB_PROXY"https://github.com/opencontainers/runc/releases/download/v1.1.15/runc.$arch
     cp runc.$arch $ctd_dir/runc
 
-    local crictl_pkg="crictl-v1.31.1-linux-$arch.tar.gz"
-    [ ! -e $crictl_pkg ] && wget --no-check-certificate "$GITHUB_PROXY"https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.31.1/$crictl_pkg
+    popd
+}
+
+prepare_docker_binaries() {
+    local arch=$1
+
+    local tmp_dir="$CURRENT_PATH/tmp/$arch"
+    local docker_dir="$CURRENT_PATH/roles/prepare/docker/files/$arch"
+
+    mkdir -p $tmp_dir
+    mkdir -p $docker_dir
+
+    pushd $tmp_dir
+
+    local docker_pkg="docker-$DOCKER_VERSION.tgz"
+    [ ! -e $docker_pkg ] && wget --no-check-certificate "$DOCKER_DOWNLOAD_URL"/$(_rename_arch $arch)/$docker_pkg
+    tar xzvf $docker_pkg && echo "Extracted $docker_pkg successfully" || { rm -f $docker_pkg; echo "Download $docker_pkg failed"; exit 1; }
+    cp $tmp_dir/docker/* $docker_dir
+
+    local cri_docker_pkg="cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz"
+    [ ! -e $cri_docker_pkg ] && wget --no-check-certificate "$GITHUB_PROXY"https://github.com/Mirantis/cri-dockerd/releases/download/v$CRI_DOCKER_VERSION/$cri_docker_pkg
+    tar xzvf $cri_docker_pkg && echo "Extracted $cri_docker_pkg successfully" || { rm -f $cri_docker_pkg; echo "Download $cri_docker_pkg failed"; exit 1; }
+    cp $tmp_dir/cri-dockerd/* $docker_dir
+
+    popd
+}
+
+prepare_kubernetes_binaries() {
+    local arch=$1
+
+    local tmp_dir="$CURRENT_PATH/tmp/$arch"
+    local k8s_dir="$CURRENT_PATH/roles/prepare/kubernetes/files/$arch"
+
+    mkdir -p $tmp_dir
+    mkdir -p $k8s_dir
+
+    pushd $tmp_dir
+
+    local crictl_pkg="crictl-v$KUBERNETES_VERSION-linux-$arch.tar.gz"
+    [ ! -e $crictl_pkg ] && wget --no-check-certificate "$GITHUB_PROXY"https://github.com/kubernetes-sigs/cri-tools/releases/download/v$KUBERNETES_VERSION/$crictl_pkg
     tar Cxzvf $k8s_dir/ $crictl_pkg && echo "Extracted $crictl_pkg successfully" || { rm -f $crictl_pkg; echo "Download crictl failed"; exit 1; }
 
     local kubernetes_download_url="$DL_K8S_IO_URL/v$KUBERNETES_VERSION/bin/linux/$arch"
@@ -117,8 +227,8 @@ prepare_plugins_calico() {
     if [ ${#CALICO_IMAGES[@]} -eq 0 ]; then
         if [ ! -e tmp/calico-v$CALICO_VERSION.yaml ]; then
             wget --no-check-certificate "$GITHUB_PROXY"https://raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/calico.yaml -O tmp/calico-v$CALICO_VERSION.yaml
-            cp tmp/calico-v$CALICO_VERSION.yaml roles/plugins/calico/files/calico-v$CALICO_VERSION.yaml
         fi
+        cp tmp/calico-v$CALICO_VERSION.yaml roles/plugins/calico/files/calico-v$CALICO_VERSION.yaml
 
         export CALICO_IMAGES=(`grep "image:" tmp/calico-v$CALICO_VERSION.yaml | uniq | awk '{print $2}'`)
 
@@ -135,21 +245,48 @@
 }
 
+prepare_plugins_ingress() {
+    local arch=$1
+    local dest_dir="roles/plugins/ingress/files/$arch"
+    mkdir -p $dest_dir
+
+    if [ ${#INGRESS_IMAGES[@]} -eq 0 ]; then
+        local ingress=$CURRENT_PATH/roles/plugins/ingress/templates/ingress-nginx.yaml.j2
+
+        export INGRESS_IMAGES=(`grep "image:" $ingress | uniq | awk '{print $2}'`)
+
+        if [ ${#INGRESS_IMAGES[@]} -eq 0 ]; then
+            echo "Get ingress images failed, please check $ingress file content."
+            exit 1
+        fi
+    fi
+
+    for image in "${INGRESS_IMAGES[@]}"; do
+        _download_and_save_image $image $dest_dir $arch
+    done
+}
+
 pushd $CURRENT_PATH
 
 declare -a KUBERNETES_IMAGES=()
 declare -a CALICO_IMAGES=()
+declare -a INGRESS_IMAGES=()
 
 for arch in "${SUPPORTED_ARCH[@]}"; do
+    prepare_repo_meta $arch
+    prepare_rpm_packages $arch
+    prepare_containerd_binaries $arch
+    prepare_docker_binaries $arch
     prepare_kubernetes_binaries $arch
     prepare_kubernetes_images $arch
     prepare_plugins_calico $arch
+    prepare_plugins_ingress $arch
 done
 
-sed -i "s|^kubernetes_version:.*|kubernetes_version: $KUBERNETES_VERSION|" variables.yml
-sed -i "s|^pause_image:.*|pause_image: $PAUSE_IMAGE|" variables.yml
-sed -i "s|^calico_version:.*|calico_version: $CALICO_VERSION|" variables.yml
-
-# needed: ansible roles init-k8s.yml delete-k8s.yml clean-k8s.yml variables.yml build.sh
+sed -i "s|^kubernetes_version=.*|kubernetes_version=\"${KUBERNETES_VERSION}\"|g" hosts.ini
+sed -i "s|^pause_image=.*|pause_image=\"${PAUSE_IMAGE}\"|g" hosts.ini
+sed -i "s|^calico_version=.*|calico_version=\"${CALICO_VERSION}\"|g" hosts.ini
+sed -i "s|^containerd_version=.*|containerd_version=\"${CONTAINERD_VERSION}\"|g" hosts.ini
+sed -i "s|^docker_version=.*|docker_version=\"${DOCKER_VERSION}\"|g" hosts.ini
 
-popd
\ No newline at end of file
+popd
diff --git a/plugins/kubernetes-1.31.1/workspace/hosts.ini b/plugins/kubernetes-1.31.1/workspace/hosts.ini
new file mode 100644
index 0000000000000000000000000000000000000000..e198cd14e615abf7e0af083ed0d69f3c55dce571
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/hosts.ini
@@ -0,0 +1,49 @@
+[masters]
+; k8s master nodes (one or more; three or more are recommended for high availability)
+ebs-master-1
+
+[ingress]
+; node where k8s ingress-nginx is deployed
+ebs-master-1
+
+[workers]
+; k8s worker nodes
+ebs-master-2
+xt1
+at1
+
+[new-workers]
+; list nodes newly joining the cluster here; after the playbook finishes, move them under [workers]
+# xt-2
+
+[all:vars]
+; true: try to initialize the cluster regardless of whether it was created successfully before
+init_cluster_force="true"
+; true: remove the NoSchedule taint from masters so that workload containers can be scheduled onto them
+remove_master_no_schedule_taints="true"
+; plan the container CIDRs carefully!!! They should not conflict with the VM network
+service_cidr="10.96.0.0/16"
+pod_cidr="10.244.0.0/16"
+; the root CA certificate is valid for 10 years, so the certificate lifetime must not exceed 10 years
+certs_expired="3650"
+; load-balancer port, effective when there is more than one master node
+; lb_kube_apiserver_ip=""
+lb_kube_apiserver_port="8443"
+
+; once the offline package has been built, the following parameters must not be changed
+; kubernetes version
+kubernetes_version="1.31.1"
+; network plugin: calico
+calico_version="3.28.2"
+; ingress plugin
+ingress_version="1.12.0"
+; supported container runtime versions
+containerd_version="1.7.22"
+docker_version="27.3.1"
+pause_image="registry.k8s.io/pause:3.10"
+; ingress-related parameters
+ingress_type="NodePort"
+ingress_http_node_port="30080"
+ingress_https_node_port="30443"
+ingress_http_container_port="10080"
+ingress_https_container_port="10443"
diff --git a/plugins/kubernetes-1.31.1/workspace/init-k8s.yml b/plugins/kubernetes-1.31.1/workspace/init-k8s.yml
index 531ae13f0455c246a0aa16936523b88a89c0a770..1011787a30ca4563d0e98860375e0f4ba9dc7945 100644
--- a/plugins/kubernetes-1.31.1/workspace/init-k8s.yml
+++ b/plugins/kubernetes-1.31.1/workspace/init-k8s.yml
@@ -1,26 +1,30 @@
-- hosts:
-  - masters
-  - workers
-  roles:
-  - prepare/base
-  - prepare/containerd
-  - prepare/kubernetes
-  - prepare/images
-
-- hosts: masters
-  roles:
-  - master
-
-- hosts: workers
-  roles:
-  - worker
-
-- hosts:
-  - masters
-  - workers
-  roles:
-  - plugins/calico
-
-- hosts: workers
-  roles:
-  - plugins/rpcbind
\ No newline at end of file
+- hosts:
+  - masters
+  - workers
+  roles:
+  - prepare/base
+  - prepare/nameserver
+  - { role: prepare/containerd, when: "runtime == 'containerd'" }
+  - { role: prepare/docker, when: "runtime == 'docker'" }
+  - prepare/kubernetes
+  - prepare/images
+
+- hosts: masters
+  roles:
+  - { role: loadbalancer, when: "groups['masters'] | length > 1" }
+  - master
+
+- hosts: workers
+  roles:
+  - worker
+
+- hosts:
+  - masters
+  - workers
+  roles:
+  - plugins/calico
+
+#- hosts: ingress
+#  roles:
+#    - plugins/ingress
+
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/delete/plugins/rpcbind/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/delete/plugins/rpcbind/tasks/main.yml
index e7bf9cfeb7784199c20f7f37145f05f4bfc184cd..9ae205853918146c0d068806dbdaed1e42755aaf 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/delete/plugins/rpcbind/tasks/main.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/delete/plugins/rpcbind/tasks/main.yml
@@ -1,6 +1,7 @@
 - name: Disable rpcbind service
   shell: >
     systemctl disable --now rpcbind
+  ignore_errors: true
 
 - name: Uninstall packages
   yum:
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/defaults/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c90bae9e2e2411e676c734837ba6767bb2b2f0fe
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/defaults/main.yml
@@ -0,0 +1,6 @@
+# privilege escalation
+ansible_become: true
+
+# haproxy load-balancing algorithm; common options are:
+# "roundrobin", "leastconn", "source", "uri"
+haproxy_balance_alg: "roundrobin"
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7eef84b04806f9ba1f415003ecea4623951f68c1
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/tasks/main.yml
@@ -0,0 +1,29 @@
+- name: Install haproxy
+  yum:
+    name: haproxy
+    state: present
+
+- name: Create haproxy directory
+  file: name=/etc/haproxy state=directory
+
+- name: Configure haproxy
+  template:
+    src: haproxy/haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+
+- name: Systemctl daemon-reload
+  systemd:
+    daemon_reload: yes
+
+- name: Restart and enable haproxy service
+  service:
+    name: haproxy
+    state: restarted
+    enabled: yes
+
+- name: Wait for haproxy to be running
+  shell: "systemctl status haproxy.service|grep Active"
+  register: haproxy_status
+  until: '"running" in haproxy_status.stdout'
+  retries: 8
+  delay: 2
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/templates/haproxy/haproxy.cfg.j2 b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/templates/haproxy/haproxy.cfg.j2
new file mode 100644
index 0000000000000000000000000000000000000000..39092c2bd6da903aa0a7ca7b2e9e473deb2d16dc
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/loadbalancer/templates/haproxy/haproxy.cfg.j2
@@ -0,0 +1,23 @@
+global
+    log /dev/log local1 warning
+    chroot /var/lib/haproxy
+    user haproxy
+    group haproxy
+    daemon
+
+defaults
+    log global
+    timeout connect 5s
+    timeout client 10m
+    timeout server 10m
+
+listen kube-master
+    bind :{{ lb_kube_apiserver_port }}
+    mode tcp
+    option tcplog
+    option dontlognull
+    option dontlog-normal
+    balance {{ haproxy_balance_alg }}
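+    # health-check each apiserver endpoint over TLS without verifying its certificate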
+{% for host in groups['masters'] %}
+    server {{ host }} {% if hostvars[host]['ansible_host'] is defined %}{{ hostvars[host]['ansible_host'] }}{% else %}{{ host }}{% endif %}:6443 check check-ssl verify none
+{% endfor %}
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/README.md b/plugins/kubernetes-1.31.1/workspace/roles/master/README.md
deleted file mode 100644
index 2a9ff0b171750656a0a8849408feb7f182819f14..0000000000000000000000000000000000000000
--- a/plugins/kubernetes-1.31.1/workspace/roles/master/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# kubeadm
-
-1. How to generate the kubeadm configuration files
-
-```shell
-kubeadm config print init-defaults > kubeadm-init.yaml
-kubeadm config print join-defaults > kubeadm-join.yaml
-```
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/defaults/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/master/defaults/main.yml
index e2760eb3879de78ce66b74f617d4b6dd3f4d2d0b..83637a92b1d61d635be3ddc0e9d22afd2d76e235 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/master/defaults/main.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/master/defaults/main.yml
@@ -1 +1,10 @@
-CURRENT_HOST_IP: "{{ inventory_hostname }}"
\ No newline at end of file
+CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_host'] }}{% else %}{{ inventory_hostname }}{% endif %}"
+
+CONTROL_PLANE_ENDPOINT: >-
+  {% if lb_kube_apiserver_ip is not defined %}
+  {{ CURRENT_HOST_IP | trim }}:{{ lb_kube_apiserver_port | trim }}
+  {% else %}
+  {{ lb_kube_apiserver_ip | trim }}:{{ lb_kube_apiserver_port | trim }}
+  {% endif %}
+
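+# The first usable address of the service CIDR (e.g. 10.96.0.1 for 10.96.0.0/16);
+# the ipaddr filter is why python3-netaddr is required on the ansible control node.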
+KUBERNETES_SERVICE_IP: "{{ service_cidr | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/main.yml
index 0f1976459c13ae9424ce9c6865b5ef4839e9b35d..1b630182319096d725e1c776920c398ae8eed19a 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/main.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/main.yml
@@ -1,9 +1,23 @@
+- name: Prepare kubernetes directory
+  file: name=/etc/kubernetes state=directory
+
 - name: Read kubelet.conf file stat info
   stat:
     path: /etc/kubernetes/kubelet.conf
   register: stat_kubelet_conf
 
-- include_tasks: master-init.yml
-  when:
-    - init_cluster_force == "true" or not stat_kubelet_conf.stat.exists
-    - inventory_hostname == groups['masters'][0]
\ No newline at end of file
+- block:
+    - include_tasks: master-init.yml
+      when:
+        - inventory_hostname == groups['masters'][0]
+
+    - include_tasks: master-join.yml
+      when:
+        - inventory_hostname != groups['masters'][0]
+        - inventory_hostname in groups['masters']
+  when: init_cluster_force == "true" or not stat_kubelet_conf.stat.exists
+
+- name: Remove control-plane NoSchedule taint
+  shell: "kubectl taint node {{ inventory_hostname }} node-role.kubernetes.io/control-plane:NoSchedule-"
+  when:
+    - remove_master_no_schedule_taints == "true"
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-init.yml b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-init.yml
index 837847e9aca2e9a32d39e8bd93897cdb01ab99f3..a73b81cc60551b2f0a3b5955af64dcd97085324f 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-init.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-init.yml
@@ -3,8 +3,14 @@
   when:
     - init_cluster_force == "true"
 
-- name: Prepare kubernetes directory
-  file: name=/etc/kubernetes state=directory
+- block:
+    - name: Generate certificateKey
+      command: kubeadm certs certificate-key
+      register: generate_certificate_key_command
+
+    - name: Record certificateKey
+      set_fact:
+        certificate_key: "{{ generate_certificate_key_command.stdout }}"
 
 - name: Create kubeadm configuration file
   template:
@@ -14,20 +20,21 @@
     mode: 0644
 
 - name: Initial the first master node
-  shell: kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml
+  command: kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --upload-certs
 
-- name: Create kubeconfig directory
-  file: name={{ ansible_env.HOME | default('/root') }}/.kube state=directory
+- block:
+    - name: Create kubeconfig directory
+      file: name={{ ansible_env.HOME | default('/root') }}/.kube state=directory
 
-- name: Master node copy kubeconfig file to .kube directory
-  copy:
-    src: /etc/kubernetes/admin.conf
-    dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
-    mode: 0600
-    remote_src: yes
+    - name: Copy kubeconfig file to .kube directory
+      copy:
+        src: /etc/kubernetes/admin.conf
+        dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+        mode: 0600
+        remote_src: yes
 
-- name: Set the permissions for the kubeconfig file
-  file:
-    path: "{{ ansible_env.PWD | default('/root') }}/.kube/config"
-    owner: "{{ ansible_env.SUDO_USER | default('root') }}"
-    mode: '0600'
\ No newline at end of file
+    - name: Set the permissions for the kubeconfig file
+      file:
+        path: "{{ ansible_env.PWD | default('/root') }}/.kube/config"
+        owner: "{{ ansible_env.SUDO_USER | default('root') }}"
+        mode: '0600'
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-join.yml b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-join.yml
new file mode 100644
index 0000000000000000000000000000000000000000..337ed29a9a0f9aa2ec6ab69f524a215590a4a8cf
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/master/tasks/master-join.yml
@@ -0,0 +1,42 @@
+- name: Reset first if init_cluster_force == "true"
+  shell: kubeadm reset -f
+  when:
+    - init_cluster_force == "true"
+
+- block:
+    - name: Read kubeadm-config.yaml content
+      shell: cat /etc/kubernetes/kubeadm-config.yaml | grep certificateKey | awk '{print $2}'
+      register: certificate_key_line
+
+    - name: Get certificateKey
+      set_fact:
+        certificate_key: "{{ certificate_key_line.stdout }}"
+
+    - name: Record kubeadm join command
+      command: "kubeadm token create --print-join-command --certificate-key {{ certificate_key }}"
+      register: kubeadm_master_join_command
+      until: '"kubeadm join" in kubeadm_master_join_command.stdout'
+      retries: 5
+      delay: 5
+  delegate_to: "{{ groups['masters'][0] }}"
+
+- name: Join other master nodes
+  shell: "{{ kubeadm_master_join_command.stdout }}"
+  when: init_cluster_force == "true" or not stat_kubelet_conf.stat.exists
+
+- block:
+    - name: Create kubeconfig directory
+      file: name={{ ansible_env.HOME | default('/root') }}/.kube state=directory
+
+    - name: Copy kubeconfig file to .kube directory
+      copy:
+        src: /etc/kubernetes/admin.conf
+        dest: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
+        mode: 0600
+        remote_src: yes
+
+    - name: Set the permissions for the kubeconfig file
+      file:
+        path: "{{ ansible_env.PWD | default('/root') }}/.kube/config"
+        owner: "{{ ansible_env.SUDO_USER | default('root') }}"
+        mode: '0600'
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/master/templates/kubeadm-init.yaml.j2 b/plugins/kubernetes-1.31.1/workspace/roles/master/templates/kubeadm-init.yaml.j2
index 58af4db1e1053dc5747b1ae764d06f64f9c71261..91e1f51507960aa27b9c7bb08f6bef3c71bf6257 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/master/templates/kubeadm-init.yaml.j2
+++ b/plugins/kubernetes-1.31.1/workspace/roles/master/templates/kubeadm-init.yaml.j2
@@ -8,11 +8,11 @@ bootstrapTokens:
   - signing
   - authentication
 kind: InitConfiguration
+certificateKey: {{ certificate_key }}
 localAPIEndpoint:
   advertiseAddress: {{ CURRENT_HOST_IP }}
   bindPort: 6443
 nodeRegistration:
-  criSocket: unix:///var/run/containerd/containerd.sock
   imagePullPolicy: IfNotPresent
   imagePullSerial: true
   name: {{ inventory_hostname }}
@@ -26,12 +26,31 @@ timeouts:
   tlsBootstrap: 5m0s
   upgradeManifests: 5m0s
 ---
-apiServer: {}
+apiServer:
+  certSANs:
+    - localhost
+    - kubernetes
+    - kubernetes.default
+    - kubernetes.default.svc
+    - kubernetes.default.svc.cluster
+    - kubernetes.default.svc.cluster.local
+    - 127.0.0.1
+    - 0:0:0:0:0:0:0:1
+    - {{ KUBERNETES_SERVICE_IP }}
+{% if lb_kube_apiserver_ip is defined %}
+    - {{ lb_kube_apiserver_ip | trim }}
+{% endif %}
+{% for host in (groups['masters'] | default([])) | unique %}
+    - {{ hostvars[host]['ansible_host'] }}
+{% endfor %}
 apiVersion: kubeadm.k8s.io/v1beta4
 caCertificateValidityPeriod: 87600h0m0s
 certificateValidityPeriod: "{{certs_expired|int * 24}}h0m0s"
 certificatesDir: /etc/kubernetes/pki
 clusterName: kubernetes
+{% if groups['masters'] | length > 1 %}
+controlPlaneEndpoint: {{ CONTROL_PLANE_ENDPOINT }}
+{% endif %}
 controllerManager: {}
 dns: {}
 encryptionAlgorithm: RSA-2048
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/README.md b/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/README.md
deleted file mode 100644
index 50ab8a09e876accc8a030d22038f9c7d1a4a75db..0000000000000000000000000000000000000000
--- a/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# calico
-
-How to install: [https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico)
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/tasks/main.yml
index daeb07e892ac6e10455633928b6358cddeb9f249..188990fd78c96dbbe4ad7417232ad1b283596033 100644
--- a/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/tasks/main.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/plugins/calico/tasks/main.yml
@@ -16,6 +16,15 @@
     - cni.tar
     - kube-controllers.tar
     - node.tar
+  when: runtime == "containerd"
+
+- name: Load calico images on all nodes
+  command: "docker load -i {{ images_dir }}/plugin/calico/{{ item }}"
+  with_items:
+    - cni.tar
+    - kube-controllers.tar
+    - node.tar
+  when: runtime == "docker"
 
 - name: Create calico directory
   file: name=/etc/kubernetes/plugins/calico state=directory
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..543aafb74d6357772f4f0c512c6fb3fa5e9ecbbd
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/tasks/main.yml
@@ -0,0 +1,51 @@
+- name: Label ingress node
+  shell: |
+    k8s_node_name=$(kubectl get nodes -o wide | grep "{{ hostvars[item]['ansible_host'] }} " | cut -d' ' -f1)
+    kubectl label node $k8s_node_name ingress-nginx="true"
+  with_items: "{{ groups['ingress'][0] }}"
+  delegate_to: "{{ groups['masters'][0] }}"
+
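+# the ingress images are staged and loaded only on the first host of the [ingress] group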
+- name: Prepare images directories
+  file: name={{ images_dir }}/plugin/ingress state=directory
+
+- name: Copy ingress images to master nodes
+  copy:
+    src: "{{ architecture }}/{{ item }}"
+    dest: "{{ images_dir }}/plugin/ingress"
+  with_items:
+    - controller.tar
+    - kube-webhook-certgen.tar
+  when: inventory_hostname == groups['ingress'][0]
+
+- name: Load ingress images on master nodes
+  command: "ctr -n k8s.io images import {{ images_dir }}/plugin/ingress/{{ item }}"
+  with_items:
+    - controller.tar
+    - kube-webhook-certgen.tar
+  when:
+    - runtime == "containerd"
+    - inventory_hostname == groups['ingress'][0]
+
+- name: Load ingress images on master nodes
+  command: "docker load -i {{ images_dir }}/plugin/ingress/{{ item }}"
+  with_items:
+    - controller.tar
+    - kube-webhook-certgen.tar
+  when:
+    - runtime == "docker"
+    - inventory_hostname == groups['ingress'][0]
+
+- name: Create ingress directory
+  file: name=/etc/kubernetes/plugins/ingress state=directory
+  when: inventory_hostname == groups['ingress'][0]
+
+- name: Create ingress yaml file
+  template:
+    src: ingress-nginx.yaml.j2
+    dest: "/etc/kubernetes/plugins/ingress/ingress-nginx.yaml"
+    mode: "0644"
+  when: inventory_hostname == groups['ingress'][0]
+
+- name: Apply ingress plugin
+  shell: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/plugins/ingress/
+  delegate_to: "{{ groups['masters'][0] }}"
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/templates/ingress-nginx.yaml.j2 b/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/templates/ingress-nginx.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4fae9e61db23b5f9a081217c549478e7b6885f95
--- /dev/null
+++ b/plugins/kubernetes-1.31.1/workspace/roles/plugins/ingress/templates/ingress-nginx.yaml.j2
@@ -0,0 +1,672 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  name: ingress-nginx
+---
+apiVersion: v1
+automountServiceAccountToken: true
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: v1
+automountServiceAccountToken: true
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx
+  namespace: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - pods
+  - secrets
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resourceNames:
+  - ingress-nginx-leader
+  resources:
+  - leases
+  verbs:
+  - get
+  - update
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - list
+  - watch
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - endpoints
+  - nodes
+  - pods
+  - secrets
+  - namespaces
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - list
+  - watch
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+rules:
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-nginx
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx
+  namespace: ingress-nginx
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-nginx-admission
+subjects:
+- kind: ServiceAccount
+  name: ingress-nginx-admission
+  namespace: ingress-nginx
+---
+apiVersion: v1
+data: null
+kind: ConfigMap
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  externalTrafficPolicy: Local
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - appProtocol: http
+    name: http
+    port: {{ingress_http_container_port}}
+    nodePort: {{ingress_http_node_port}}
+    protocol: TCP
+    targetPort: http
+  - appProtocol: https
+    name: https
+    port: {{ingress_https_container_port}}
+    nodePort: {{ingress_https_node_port}}
+    protocol: TCP
+    targetPort: https
+  selector:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  type: {{ingress_type}}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-controller-admission
+  namespace: ingress-nginx
+spec:
+  ports:
+  - appProtocol: https
+    name: https-webhook
+    port: 443
+    targetPort: webhook
+  selector:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  minReadySeconds: 0
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: controller
+      app.kubernetes.io/instance: ingress-nginx
+      app.kubernetes.io/name: ingress-nginx
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: controller
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/part-of: ingress-nginx
+        app.kubernetes.io/version: 1.12.0
+    spec:
+      containers:
+      - args:
+        - /nginx-ingress-controller
+        - --http-port={{ingress_http_container_port}}
+        - --https-port={{ingress_https_container_port}}
+        - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
+        - --election-id=ingress-nginx-leader
+        - --controller-class=k8s.io/ingress-nginx
+        - --ingress-class=nginx
+        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+        - --validating-webhook=:8443
+        - --validating-webhook-certificate=/usr/local/certificates/cert
+        - --validating-webhook-key=/usr/local/certificates/key
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: LD_PRELOAD
+          value: /usr/local/lib/libmimalloc.so
+        image: registry.k8s.io/ingress-nginx/controller:v1.12.0
+        imagePullPolicy: IfNotPresent
+        lifecycle:
+          preStop:
+            exec:
+              command:
+              - /wait-shutdown
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        name: controller
+        ports:
+        - containerPort: {{ingress_http_container_port}}
+          name: http
+          protocol: TCP
+        - containerPort: {{ingress_https_container_port}}
+          name: https
+          protocol: TCP
+        - containerPort: 8443
+          name: webhook
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /healthz
+            port: 10254
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 90Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - ALL
+          readOnlyRootFilesystem: false
+          runAsGroup: 82
+          runAsNonRoot: true
+          runAsUser: 101
+          seccompProfile:
+            type: RuntimeDefault
+        volumeMounts:
+        - mountPath: /usr/local/certificates/
+          name: webhook-cert
+          readOnly: true
+      dnsPolicy: ClusterFirst
+      nodeSelector:
+        kubernetes.io/os: linux
+        ingress-nginx: "true"
+      serviceAccountName: ingress-nginx
+      terminationGracePeriodSeconds: 300
+      volumes:
+      - name: webhook-cert
+        secret:
+          secretName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission-create
+  namespace: ingress-nginx
+spec:
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: admission-webhook
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/part-of: ingress-nginx
+        app.kubernetes.io/version: 1.12.0
+      name: ingress-nginx-admission-create
+    spec:
+      containers:
+      - args:
+        - create
+        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
+        - --namespace=$(POD_NAMESPACE)
+        - --secret-name=ingress-nginx-admission
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
+        imagePullPolicy: IfNotPresent
+        name: create
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          readOnlyRootFilesystem: true
+          runAsGroup: 65532
+          runAsNonRoot: true
+          runAsUser: 65532
+          seccompProfile:
+            type: RuntimeDefault
+      nodeSelector:
+        kubernetes.io/os: linux
+        ingress-nginx: "true"
+      restartPolicy: OnFailure
+      serviceAccountName: ingress-nginx-admission
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission-patch
+  namespace: ingress-nginx
+spec:
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/component: admission-webhook
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/part-of: ingress-nginx
+        app.kubernetes.io/version: 1.12.0
+      name: ingress-nginx-admission-patch
+    spec:
+      containers:
+      - args:
+        - patch
+        - --webhook-name=ingress-nginx-admission
+        - --namespace=$(POD_NAMESPACE)
+        - --patch-mutating=false
+        - --secret-name=ingress-nginx-admission
+        - --patch-failure-policy=Fail
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0
+        imagePullPolicy: IfNotPresent
+        name: patch
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          readOnlyRootFilesystem: true
+          runAsGroup: 65532
+          runAsNonRoot: true
+          runAsUser: 65532
+          seccompProfile:
+            type: RuntimeDefault
+      nodeSelector:
+        kubernetes.io/os: linux
+        ingress-nginx: "true"
+      restartPolicy: OnFailure
+      serviceAccountName: ingress-nginx-admission
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: nginx
+spec:
+  controller: k8s.io/ingress-nginx
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  labels:
+    app.kubernetes.io/component: admission-webhook
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/version: 1.12.0
+  name: ingress-nginx-admission
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: ingress-nginx-controller-admission
+      namespace: ingress-nginx
+      path: /networking/v1/ingresses
+      port: 443
+  failurePolicy: Fail
+  matchPolicy: Equivalent
+  name: validate.nginx.ingress.kubernetes.io
+  rules:
+  - apiGroups:
+    - networking.k8s.io
+    apiVersions:
+    - v1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - ingresses
+  sideEffects: None
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/plugins/rpcbind/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/plugins/rpcbind/tasks/main.yml
deleted file mode 100644
index 284ff57622b93852fa7249becc1b8d9f0a79d9b7..0000000000000000000000000000000000000000
--- a/plugins/kubernetes-1.31.1/workspace/roles/plugins/rpcbind/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: Install packages
-  yum:
-    name:
-      - nfs-utils
-      - rpcbind
-    state: present
-
-- name: Start rpcbind service
-  shell: >
-    systemctl enable --now rpcbind
\ No newline at end of file
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/defaults/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/defaults/main.yml
old mode 100644
new mode 100755
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/tasks/main.yml
old mode 100644
new mode 100755
index f3c1199504a4482e5e80f34a142a67746330feac..c82bbe90957a9c69e31650fcd54423aab1636949
--- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/tasks/main.yml
+++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/base/tasks/main.yml
@@ -1,99 +1,116 @@
-- name: Check architecture
-  set_fact:
-    arch: "{{ ansible_architecture }}"
-
-- name: Prepare rpm directory
-  file: name=/k8s-install/rpm/ state=directory
-
-- name: Install base package
-  yum:
-    name:
-      - python3-libselinux
-      - tar
-      - socat
-      - conntrack-tools
-      - libnetfilter_cttimeout
-      - libnetfilter_cthelper
-      - libnetfilter_queue
-    state: present
-
-- name: Disable swap
-  shell: >
-    swapoff -a && sysctl -w vm.swappiness=0;
-    sed -i '/swap/s/^/#/' /etc/fstab
-  ignore_errors: true
-
-- name: Status firewalld
-  shell: >
-    systemctl status firewalld | grep active || echo "not be found"
-  register: firewalld_already_installed
-
-- name: Disable firewalld
-  service:
-    name: firewalld
-    state: stopped
-    enabled: no
-  when: '"active" in firewalld_already_installed.stdout'
-
-- name: Load kernel module
-  modprobe:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - sunrpc
-    - ip_vs
-    - ip_vs_rr
-    - ip_vs_sh
-    - ip_vs_wrr
-    - br_netfilter
-    - nf_conntrack
-  ignore_errors: true
-
-- name: Setup systemd-modules-load config
-  template:
-    src: 10-k8s-modules.conf.j2
-    dest: /etc/modules-load.d/10-k8s-modules.conf
-
-- name: Restart systemd-modules-load
-  service:
-    name: systemd-modules-load
-    state: restarted
-    enabled: yes
-
-- name: Temp stop selinux
-  shell: setenforce 0
-  failed_when: false
-
-- name: Read selinux config file stat info
-  stat:
-    path: /etc/selinux/config
-  register: stat_selinux_config
-
-- name: Disable selinux
-  template:
-    src: selinux-config.j2
-    dest: "/etc/selinux/config"
-    owner: root
-    mode: 0644
-  when: not stat_selinux_config.stat.exists
-
-- name: Disable selinux
-  shell: >
-    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
-  when: stat_selinux_config.stat.exists
-
-- name: Delete the default system parameters
-  shell: >
-    sed -i '/^net.ipv4.ip_forward=/d' /etc/sysctl.conf;
-    sed -i '/^net.ipv6.bindv6only=/d' /etc/sysctl.conf;
-    sed -i '/^vm.max_map_count=/d' /etc/sysctl.conf;
-    sed -i '/^net.ipv4.ip_local_port_range/d' /etc/sysctl.conf
-
-- name: Setup system parameters for kubernetes
-  template:
-    src: 99-sysctl-k8s.conf.j2
-    dest: /etc/sysctl.d/99-sysctl-k8s.conf
-
-- name: Effective system parameters
-  shell: "sysctl -p /etc/sysctl.d/99-sysctl-k8s.conf"
-  ignore_errors: true
\ No newline at end of file
+- name: Check architecture
+  set_fact:
+    arch: "{{ ansible_architecture }}"
+
+- name: Prepare rpm directory
+  file: name={{ rpm_dir }} state=directory
+
+- name: Install base package
+  yum:
+    name:
+      - python3-libselinux
+      - python3-netaddr
+      - tar
+      - socat
+      - conntrack-tools
+      - libnetfilter_cttimeout
+      - libnetfilter_cthelper
+      - libnetfilter_queue
+      - ebtables
+      - iptables
+    state: present
+
+- name: Copy all rpm packages to rpm directory
+  copy:
+    src: "{{ item }}"
+    dest: "{{ rpm_dir }}"
+    mode: "0644"
+  with_fileglob: "{{ oeversion }}/{{ arch }}/*.rpm"
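+
+# offline fallback: install the bundled rpms without touching any remote repo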
+- name: Install base package
+  shell: yum localinstall -y --disablerepo=\* "{{ rpm_dir }}"/*.rpm --skip-broken
+  ignore_errors: true
+
+- name: Disable swap
+  shell: >
+    swapoff -a && sysctl -w vm.swappiness=0;
+    sed -i '/swap/s/^/#/' /etc/fstab
+  ignore_errors: true
+
+- name: Status firewalld
+  shell: >
+    systemctl status firewalld | grep active || echo "not be found"
+  register: firewalld_already_installed
+
+- name: Disable firewalld
+  service:
+    name: firewalld
+    state: stopped
+    enabled: no
+  when: '"active" in firewalld_already_installed.stdout'
+
+- name: Load kernel module
+  modprobe:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - sunrpc
+    - ip_vs
+    - ip_vs_rr
+    - ip_vs_sh
+    - ip_vs_wrr
+    - br_netfilter
+    - nf_conntrack
+  ignore_errors: true
+
+- name: Setup systemd-modules-load config
+  template:
+    src: 10-k8s-modules.conf.j2
+    dest: /etc/modules-load.d/10-k8s-modules.conf
+
+- name: Restart systemd-modules-load
+  service:
+    name: systemd-modules-load
+    state: restarted
+    enabled: yes
+
+- name: Temp stop selinux
+  shell: setenforce 0
+  failed_when: false
+
+- name: Read selinux config file stat info
+  stat:
+    path: /etc/selinux/config
+  register: stat_selinux_config
+
+- name: Disable selinux
+  template:
+    src: selinux-config.j2
+    dest: "/etc/selinux/config"
+    owner: root
+    mode: 0644
+  when: not stat_selinux_config.stat.exists
+
+- name: Disable selinux
+  shell: >
+    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
+  when: stat_selinux_config.stat.exists
+
+- name: Delete the default system parameters
+  shell: >
+    sed -i '/^net.ipv4.ip_forward=/d' /etc/sysctl.conf;
+    sed -i '/^net.ipv6.bindv6only=/d' /etc/sysctl.conf;
+    sed -i '/^vm.max_map_count=/d' /etc/sysctl.conf;
+    sed -i '/^net.ipv4.ip_local_port_range/d' /etc/sysctl.conf
+
+- name: Setup system parameters for kubernetes
+  template:
+    src: 99-sysctl-k8s.conf.j2
+    dest: /etc/sysctl.d/99-sysctl-k8s.conf
+
+- name: Effective system parameters
+  shell: "sysctl -p /etc/sysctl.d/99-sysctl-k8s.conf"
+  ignore_errors: true
+
+- name: Setup hostname
+  shell: "hostnamectl set-hostname {{ inventory_hostname }}"
diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/containerd/templates/containerd.service.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/containerd/templates/containerd.service.j2
old mode 100644
new mode 100755
index 61e71436c2839cba232b4aaa4a2d61446bda5e9e..40714fe6aa646e70e26e6802097e6eb67449df4e
--- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/containerd/templates/containerd.service.j2
+++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/containerd/templates/containerd.service.j2
@@ -1,41 +1,41 @@
-# Copyright The containerd Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[Unit]
-Description=containerd container runtime
-Documentation=https://containerd.io
-After=network.target local-fs.target dbus.service
-
-[Service]
-ExecStartPre=-/sbin/modprobe overlay
-ExecStart=/usr/local/bin/containerd
-
-Type=notify
-Delegate=yes
-KillMode=process
-Restart=always
-RestartSec=5
-
-# Having non-zero Limit*s causes performance problems due to accounting overhead
-# in the kernel. We recommend using cgroups to do container-local accounting.
-LimitNPROC=infinity
-LimitCORE=infinity
-
-# Comment TasksMax if your systemd version does not supports it.
-# Only systemd 226 and above support this version.
-TasksMax=infinity -OOMScoreAdjust=-999 - -[Install] +# Copyright The containerd Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[Unit] +Description=containerd container runtime +Documentation=https://containerd.io +After=network.target local-fs.target dbus.service + +[Service] +ExecStartPre=-/sbin/modprobe overlay +ExecStart=/usr/local/bin/containerd + +Type=notify +Delegate=yes +KillMode=process +Restart=always +RestartSec=5 + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +OOMScoreAdjust=-999 + +[Install] WantedBy=multi-user.target \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..c1cfd0e25a40d0a065521d8bc52b52a4f0aa4c01 --- /dev/null +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/tasks/main.yml @@ -0,0 +1,42 @@ +- name: Copy docker binaries to /usr/local/bin + copy: + src: "{{ architecture }}/{{ item }}" + dest: /usr/local/bin/ + mode: "0755" + with_items: + - docker-init + - ctr + - containerd + - containerd-shim-runc-v2 + - dockerd + - docker-proxy + - docker + - runc + - cri-dockerd + +- name: Create docker systemd unit file + template: src=docker.service.j2 dest=/etc/systemd/system/docker.service + +- name: Enable and start docker service + shell: systemctl daemon-reload && systemctl enable --now docker + +- name: Wait for docker to be running + shell: "systemctl status docker | grep Active" + register: docker_status + until: '"running" in docker_status.stdout' + retries: 8 + delay: 2 + +- name: Create docker group and add current user + shell: | + groupadd -f docker  # -f: do not fail if the group already exists (keeps reruns idempotent) + usermod -aG docker $USER + +- name: Create cri-docker.service systemd unit file + template: src=cri-docker.service.j2 dest=/etc/systemd/system/cri-docker.service + +- name: Create cri-docker.socket systemd unit file + template: src=cri-docker.socket.j2 dest=/etc/systemd/system/cri-docker.socket + +- name: Enable and start cri-docker service + shell: systemctl daemon-reload && systemctl enable --now cri-docker.socket \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.service.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.service.j2 new file mode 100644 index 0000000000000000000000000000000000000000..24c8bdffe97035f8de8c39718e94927f733167bf --- /dev/null +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.service.j2 @@ -0,0 +1,39 @@ +[Unit] +Description=CRI Interface for Docker Application Container Engine +Documentation=https://docs.mirantis.com +After=network-online.target firewalld.service 
docker.service +Wants=network-online.target +Requires=cri-docker.socket + +[Service] +Type=notify +ExecStart=/usr/local/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image={{ pause_image }} +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always + +# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. +# Both the old, and new location are accepted by systemd 229 and up, so using the old location +# to make them work for either version of systemd. +StartLimitBurst=3 + +# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. +# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make +# this option work for either version of systemd. +StartLimitInterval=60s + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this option. +TasksMax=infinity +Delegate=yes +KillMode=process + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.socket.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.socket.j2 new file mode 100644 index 0000000000000000000000000000000000000000..cda71aa51922f3a4f03447d14fbaa78783205961 --- /dev/null +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/cri-docker.socket.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=CRI Docker Socket for the API +PartOf=cri-docker.service + +[Socket] +ListenStream=%t/cri-dockerd.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/docker.service.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/docker.service.j2 new file mode 100644 index 0000000000000000000000000000000000000000..725dc8ca19bf51fcde459fd14f1f7f0e8c02c982 --- /dev/null +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/docker/templates/docker.service.j2 @@ -0,0 +1,33 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network-online.target firewalld.service +Wants=network-online.target + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issue still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/local/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=1048576 +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this option. 
+#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/defaults/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/defaults/main.yml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/tasks/main.yml index 44159ea0b23303e2cbaf60dda35167c303dbac07..4cf2cdbf35379a48d6f50fd4ef21b37921e47261 100644 --- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/tasks/main.yml +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/images/tasks/main.yml @@ -27,6 +27,15 @@ - pause.tar - kube-proxy.tar - coredns.tar + when: runtime == "containerd" + +- name: Load kubernetes images on all nodes (docker) + command: "docker load -i {{ images_dir }}/kubernetes/{{ item }}" + with_items: + - pause.tar + - kube-proxy.tar + - coredns.tar + when: runtime == "docker" - name: Load control-plane images on masters command: "ctr -n k8s.io images import {{ images_dir }}/kubernetes/{{ item }}" @@ -35,4 +44,17 @@ - kube-controller-manager.tar - kube-scheduler.tar - etcd.tar - when: inventory_hostname in groups['masters'] \ No newline at end of file + when: + - inventory_hostname in groups['masters'] + - runtime == "containerd" + +- name: Load control-plane images on masters (docker) + command: "docker load -i {{ images_dir }}/kubernetes/{{ item }}" + with_items: + - kube-apiserver.tar + - kube-controller-manager.tar + - kube-scheduler.tar + - etcd.tar + when: + - inventory_hostname in groups['masters'] + - runtime == "docker" diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/defaults/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/defaults/main.yml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/tasks/main.yml old mode 100644 new mode 100755 index c53e9b2bf47f16be88a1aade7bf429185fc75ddc..8ba3be0c28f63dd7c44ccdd0b452c60f02445b52 --- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/tasks/main.yml +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/tasks/main.yml @@ -1,25 +1,25 @@ -- name: Copy the kubernetes to /usr/bin - copy: - src: "{{ architecture }}/{{ item }}" - dest: /usr/bin/ - mode: "0755" - with_items: - - kubeadm - - kubelet - - kubectl - - crictl - -- name: Create kubelet systemd unit file - template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service - -- name: Create kubelet service directory - file: - name: /etc/systemd/system/kubelet.service.d - state: directory - -- name: Create kubeadm config file for kubelet - template: src=10-kubeadm.conf.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -- name: Start kubelet service - shell: > +- name: Copy kubernetes binaries to /usr/bin + copy: + src: "{{ architecture }}/{{ item }}" + dest: /usr/bin/ + mode: "0755" + with_items: + - 
kubeadm + - kubelet + - kubectl + - crictl + +- name: Create kubelet systemd unit file + template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service + +- name: Create kubelet service directory + file: + name: /etc/systemd/system/kubelet.service.d + state: directory + +- name: Create kubeadm config file for kubelet + template: src=10-kubeadm.conf.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +- name: Start kubelet service + shell: > systemctl daemon-reload && systemctl enable --now kubelet \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/10-kubeadm.conf.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/10-kubeadm.conf.j2 old mode 100644 new mode 100755 index d5c1096676396975b2de730a03fa201b9433ac7e..d7b5ad09b2a12848416e2c5f8a8f0a213a4e9bdb --- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/10-kubeadm.conf.j2 +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/10-kubeadm.conf.j2 @@ -1,11 +1,11 @@ -# Note: This dropin only works with kubeadm and kubelet v1.11+ -[Service] -Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" -Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" -# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically -EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env -# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use -# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. -EnvironmentFile=-/etc/default/kubelet -ExecStart= +# Note: This dropin only works with kubeadm and kubelet v1.11+ +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. 
+EnvironmentFile=-/etc/default/kubelet +ExecStart= ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/kubelet.service.j2 b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/kubelet.service.j2 old mode 100644 new mode 100755 index 63a599f66492695263d629b9a5bb2dcca584fe7f..23ce03d14977cacc1fd6a04850cbc2a29a507867 --- a/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/kubelet.service.j2 +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/kubernetes/templates/kubelet.service.j2 @@ -1,14 +1,14 @@ -[Unit] -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ -Wants=network-online.target -After=network-online.target - -[Service] -ExecStart=/usr/bin/kubelet -Restart=always -StartLimitInterval=0 -RestartSec=10 - -[Install] +[Unit] +Description=kubelet: The Kubernetes Node Agent +Documentation=https://kubernetes.io/docs/home/ +Wants=network-online.target +After=network-online.target + +[Service] +ExecStart=/usr/bin/kubelet +Restart=always +StartLimitInterval=0 +RestartSec=10 + +[Install] WantedBy=multi-user.target \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/roles/prepare/nameserver/tasks/main.yml b/plugins/kubernetes-1.31.1/workspace/roles/prepare/nameserver/tasks/main.yml new file mode 100644 index 0000000000000000000000000000000000000000..6157f3e8f77d2df70016922e16b0e8fcb36d75dd --- /dev/null +++ b/plugins/kubernetes-1.31.1/workspace/roles/prepare/nameserver/tasks/main.yml @@ -0,0 +1,9 @@ +- name: Insert hosts record + lineinfile: + path: "/etc/hosts" + regexp: "{{ item | regex_escape() }}" # escape dots so an IP-style name only matches itself + line: "{{ hostvars[item]['ansible_host'] }} {{ item }}" + with_items: + - "{{ groups['masters'] }}" + - "{{ groups['workers'] }}" + - "{{ groups['new-workers'] }}" \ No newline at end of file diff --git a/plugins/kubernetes-1.31.1/workspace/variables.yml b/plugins/kubernetes-1.31.1/workspace/variables.yml index bf76000d3d8a611cdab64180d93e8d54d7613427..0a9100d5ff5c7f1b1cab0716fb2fcb69c1e121a4 100644 --- a/plugins/kubernetes-1.31.1/workspace/variables.yml +++ b/plugins/kubernetes-1.31.1/workspace/variables.yml @@ -1,4 +1,2 @@ -images_dir: "/k8s-install/images" -kubernetes_version: 1.31.1 -pause_image: registry.k8s.io/pause:3.10 -calico_version: 3.28.2 \ No newline at end of file +images_dir: "/cbs/k8s-install/images" +rpm_dir: "/cbs/k8s-install/rpm" \ No newline at end of file
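
A quick sanity check for the docker-runtime path introduced above, once the cluster is deployed. This is a minimal sketch, assuming systemd's `%t` specifier resolves to `/run` (so the socket declared in `cri-docker.socket.j2` lands at `/run/cri-dockerd.sock`) and that it is run on a master node where `kubectl` is already configured.

```bash
# Both units are created and enabled by roles/prepare/docker/tasks/main.yml.
systemctl is-active docker cri-docker.socket

# cri-dockerd should answer CRI requests on the socket from cri-docker.socket.j2;
# a permission error here usually points at the docker group set on the socket.
crictl --runtime-endpoint unix:///run/cri-dockerd.sock ps -a

# On a master node: every host prepared by these roles should reach Ready.
kubectl get nodes -o wide
```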