diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9a1b33edebd686436b745d750080195a6bbf4afa Binary files /dev/null and b/.DS_Store differ diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/.DS_Store" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/.DS_Store" new file mode 100644 index 0000000000000000000000000000000000000000..389169cdbb0e21de75b5f1a07a91e29493559350 Binary files /dev/null and "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/.DS_Store" differ diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Dockerfile" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Dockerfile" new file mode 100644 index 0000000000000000000000000000000000000000..929bf35ad84a6b882986cb033239a30a79e35ec0 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Dockerfile" @@ -0,0 +1,83 @@ +FROM debian:buster-slim + +LABEL maintainer="NGINX Docker Maintainers " + +# Define NGINX versions for NGINX Plus and NGINX Plus modules +# Uncomment this block and the versioned nginxPackages block in the main RUN +# instruction to install a specific release +# ENV NGINX_VERSION 24 +# ENV NJS_VERSION 0.6.1 +# ENV PKG_RELEASE 1~buster + +# Download certificate and key from the customer portal (https://account.f5.com) +# and copy to the build context +COPY nginx-repo.crt /etc/ssl/nginx/ +COPY nginx-repo.key /etc/ssl/nginx/ + +# Add Aliyun sources and set Vim nginx config highlight +COPY sources.list /etc/apt/sources.list.d/aliyun.list +COPY vim /root/.vim/ + +RUN set -x \ +# Create nginx user/group first, to be consistent throughout Docker variants + && chmod +r /etc/ssl/nginx/nginx-repo.crt \ + && chmod +r /etc/ssl/nginx/nginx-repo.key \ + && addgroup --system --gid 101 nginx \ + && adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid 101 nginx \ + && apt-get update \ + && 
apt-get install -y vim \ + && apt-get install --no-install-recommends --no-install-suggests -y ca-certificates gnupg1 \ + && \ + NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \ + found=''; \ + for server in \ + ha.pool.sks-keyservers.net \ + hkp://keyserver.ubuntu.com:80 \ + hkp://p80.pool.sks-keyservers.net:80 \ + pgp.mit.edu \ + ; do \ + echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ + apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ + done; \ + test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ + apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ +# Install the latest release of NGINX Plus and/or NGINX Plus modules +# Uncomment individual modules if necessary +# Use versioned packages over defaults to specify a release + && nginxPackages=" \ + nginx-plus \ + # nginx-plus=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-xslt \ + # nginx-plus-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-geoip \ + # nginx-plus-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-image-filter \ + # nginx-plus-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-perl \ + # nginx-plus-module-perl=${NGINX_VERSION}-${PKG_RELEASE} \ + # nginx-plus-module-njs \ + # nginx-plus-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \ + " \ + && echo "Acquire::https::pkgs.nginx.com::Verify-Peer \"true\";" >> /etc/apt/apt.conf.d/90nginx \ + && echo "Acquire::https::pkgs.nginx.com::Verify-Host \"true\";" >> /etc/apt/apt.conf.d/90nginx \ + && echo "Acquire::https::pkgs.nginx.com::SslCert \"/etc/ssl/nginx/nginx-repo.crt\";" >> /etc/apt/apt.conf.d/90nginx \ + && echo "Acquire::https::pkgs.nginx.com::SslKey \"/etc/ssl/nginx/nginx-repo.key\";" >> /etc/apt/apt.conf.d/90nginx \ + && printf "deb https://pkgs.nginx.com/plus/debian buster nginx-plus\n" > 
/etc/apt/sources.list.d/nginx-plus.list \ + && apt-get update \ + && apt-get install --no-install-recommends --no-install-suggests -y \ + $nginxPackages \ + gettext-base \ + curl \ + && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx-plus.list \ + && rm -rf /etc/apt/apt.conf.d/90nginx /etc/ssl/nginx + +# Forward request logs to Docker log collector +RUN ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +# Port 80 for web, 8000 for API, 9000 for runtime state sharing +EXPOSE 80 8000 + +STOPSIGNAL SIGTERM + +CMD ["nginx", "-g", "daemon off;"] diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/README.md" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/README.md" index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5484746f56e4ff4d0f16e7d14379ceab8d4a6767 100644 --- "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/README.md" +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/README.md" @@ -0,0 +1,168 @@ +# Lab环境介绍 + +本次演示和实验采用Docker环境,学员可根据自己实际情况准备实验环境。本环境中包含以下组件: + +## NGINX Plus反向代理服务器 +后面简称反代,可通过*Dockerfile*生成容器镜像,容器暴露端口如下: +- Web服务:8080:80 +- API:8000:8000 + +容器启动后,登录容器,在/etc/nginx/conf.d/目录下创建 *api-8000.conf* ,执行 `nginx -s reload` ,在实验客户端访问 http://x.x.x.x:8080 确认可以看到NGINX欢迎页面,访问 http://x.x.x.x:8000 ,确认可以打开Dashboard。x.x.x.x为Docker服务器的IP,也即反代的IP,下同。 + +## Web Servers +后面简称Web,可直接使用和反代相同的N+镜像,不对外暴露端口,使用Docker网桥和N+反代通信 + +## 测试客户端 +请预装以下软件,并且为了方便测试,建议在客户端hosts配置测试所用域名。 +- curl +- wrk +- 浏览器 + +--- + +# 实验步骤 + +NGINX安装目录为/etc/nginx/,所有实验配置文件均保存在/etc/nginx/conf.d/目录下。 + +请修改实验客户端*hosts*文件,将实验相关域名解析到反代的IP,反代IP设定为x.x.x.x。 + +## Lab1 变量 +1. 在反代中创建 *variable.conf* 文件,执行 `nginx -s reload` ,观察配置文件是否能被加载。 + +2. 访问http://variable.will.com:8080/noset 和 http://variable.will.com:8080/set ,观察返回的页面信息。 + +3. 删除 *variable.conf* 中第4行的注释符 + ``` + set $f5 "is the best ADC!"; + ``` + 重复第1、2步。 + +4. 
在 *variable.conf* 中第11行增加注释符 + ``` + #set $f5 "NGINX is a part of F5"; + ``` + 重复第1步。 + +## Lab2 蓝绿部署 +1. 在反代中创建 *bluegreen.conf* 文件,upstream *blue* 和 *green* 分别修改为Web1和Web2的容器IP,执行 `nginx -s reload`。 +2. 在实验客户端执行以下命令,观察输出,应该为空。 + ``` + curl http://x.x.x.x:8000/api/6/http/keyvals/blue-green + ``` +3. 访问 http://bluegreen.will.com:8080 ,应该访问到Web1。 +4. 在实验客户端执行以下命令。 + ``` + curl -X POST -d '{"environment": "green"}' http://x.x.x.x:8000/api/6/http/keyvals/blue-green + ``` +5. 访问 http://bluegreen.will.com:8080 ,应该访问到Web2。 +6. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"environment": "blue"}' http://x.x.x.x:8000/api/6/http/keyvals/blue-green + ``` +7. 访问 http://bluegreen.will.com:8080 ,应该访问到Web1。 + +## Lab3 AB Test +1. 在反代中创建 *abtest.conf* 文件,upstream *abtest_a* 和 *abtest_b* 分别修改为Web1和Web2的容器IP,执行 `nginx -s reload`。 +2. 在实验客户端通过wrk打压力,参考命令 `wrk -c200 -t8 -d300s "http://abtest.will.com:8080"` ,打开Dashboard,观察upstream,所有的request应该都打向abtest_a。 +3. 在实验客户端执行以下命令。 + ``` + curl -X POST -d '{"bpercentage": 5}' http://x.x.x.x:8000/api/6/http/keyvals/abtest + ``` +4. 通过Dashboard,观察upstream,应该有5%左右的request打向abtest_b。 +5. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"bpercentage": 20}' http://x.x.x.x:8000/api/6/http/keyvals/abtest + ``` +6. 通过Dashboard,观察upstream,应该有20%左右的request打向abtest_b。 +7. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"bpercentage": 50}' http://x.x.x.x:8000/api/6/http/keyvals/abtest + ``` +8. 通过Dashboard,观察upstream,应该有50%左右的request打向abtest_b。 +9. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"bpercentage": 100}' http://x.x.x.x:8000/api/6/http/keyvals/abtest + ``` +10. 通过Dashboard,观察upstream,应该所有的request都打向abtest_b。 + +## Lab4 灰度发布 +1. 在反代中创建 *canary.conf* 文件,upstream *v1_only* 、 *v2_only* 、 *all* 分别修改为Web1和Web2的容器IP,执行 `nginx -s reload`。 +2. 在实验客户端多次用curl模拟各种浏览器访问,应该在Web1和Web2之间轮询。 + ``` + curl -A 'Chrome' http://canary.will.com:8080 + ``` +3. 在实验客户端执行以下命令。 + ``` + curl -X POST -d '{"stage": "b"}' http://x.x.x.x:8000/api/6/http/keyvals/canary + ``` +4. 
在实验客户端多次用curl模拟各种浏览器访问,应该只会访问到Web1。 + ``` + curl -A 'Chrome' http://canary.will.com:8080 + ``` +5. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"stage": "c"}' http://x.x.x.x:8000/api/6/http/keyvals/canary + ``` +6. 在实验客户端多次用curl模拟Chrome浏览器访问,应该只会访问到Web2。 + ``` + curl -A 'Chrome' http://canary.will.com:8080 + ``` +7. 在实验客户端多次用curl模拟Firefox浏览器访问,应该只会访问到Web1。 + ``` + curl -A 'Firefox' http://canary.will.com:8080 + ``` +8. 在实验客户端模拟其它浏览器访问,例如Safari,应该也只会访问到Web1。 +9. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"stage": "d"}' http://x.x.x.x:8000/api/6/http/keyvals/canary + ``` +10. 在实验客户端多次用curl模拟各种浏览器访问,应该只会访问到Web2。 +11. 在实验客户端执行以下命令。 + ``` + curl -X PATCH -d '{"stage": "a"}' http://x.x.x.x:8000/api/6/http/keyvals/canary + ``` +12. 在实验客户端多次用curl模拟各种浏览器访问,应该在Web1和Web2之间轮询。 + +## Lab5 动态限流 +1. 在反代中创建 *ratelimit.conf* 文件,upstream *ratelimit* 分别修改为Web1和Web2的容器IP,执行 `nginx -s reload`。 +2. 在实验客户端通过wrk打压力,参考命令 `wrk -c200 -t8 -d300s "http://ratelimit.will.com:8080"` ,打开Dashboard观察,此时应该是正常分发流量,没有任何限速。 +3. 在实验客户端执行以下命令,打开限速200rps的开关。 + ``` + curl -X POST -d '{"ratelimit_200": 1}' http://x.x.x.x:8000/api/6/http/keyvals/kv_ratelimit + ``` +4. 打压力的同时观察Dashboard,应该被限制在了200rps左右,同时观察delay的效果。 +5. 在实验客户端执行以下命令,打开限速100rps的开关。 + ``` + curl -X POST -d '{"ratelimit_100": 1}' http://x.x.x.x:8000/api/6/http/keyvals/kv_ratelimit + ``` +6. 打压力的同时观察Dashboard,应该被限制在了100rps左右,同时观察nodelay的效果。 + +## Lab6 熔断 +1. 在反代中创建 *circuitbreaker.conf* 文件,upstream *circuitbreaker* 分别修改为Web1和Web2的容器IP,执行 `nginx -s reload`。 +2. 打开Dashboard,观察健康检查的情况。 +3. 在实验客户端通过wrk打压力,参考命令 `wrk -c200 -t8 -d300s "http://circuit.will.com:8080"` ,打开Dashboard观察,此时应该是正常分发流量。 +4. 登录Web2容器,修改 */etc/nginx/conf.d/default.conf* 文件,将以下部分中return的值做任意改变,执行 `nginx -s reload`。 + ``` + location /health { + default_type application/json; + return 200 '{"deadlocks":{"healthy":true},"Disk":{"healthy":false},"Memory":{"healthy":true}}'; + } + ``` +5. 观察Dashboard,Web2应该被健康检查标记为down,流量全部打向Web1。 +6. 
登录Web2容器,将 */etc/nginx/conf.d/default.conf* 文件修改为正常值,执行 `nginx -s reload`。 + ``` + location /health { + default_type application/json; + return 200 '{"deadlocks":{"healthy":true},"Disk":{"healthy":true},"Memory":{"healthy":true}}'; + } + ``` +7. 观察Dashboard,Web2连续三次健康检查通过后应该被重新标记为up,并且开始逐步接收流量,最终和Web1达到1:1。 +8. 删除反代 *circuitbreaker.conf* 文件中的slow_start参数,执行 `nginx -s reload`。 + ``` + upstream circuitbreaker { + server 172.17.0.3:80; + server 172.17.0.4:80; + zone circuitbreaker 64k; + } + ``` +9. 重复4至7步,观察没有slow_start时候的效果。 diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/.DS_Store" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/.DS_Store" new file mode 100644 index 0000000000000000000000000000000000000000..b944d300f516cc5fd04ece8bbab24d1be89bec13 Binary files /dev/null and "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/.DS_Store" differ diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/abtest.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/abtest.conf" new file mode 100644 index 0000000000000000000000000000000000000000..0aabb2ac2eb635d2d62668aa07eb14b60c3c52d6 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/abtest.conf" @@ -0,0 +1,44 @@ +keyval_zone zone=abtest:64k; +keyval bpercentage $keyval_bpercentage zone=abtest; + +split_clients $remote_addr$remote_port $abtest_5 { + 5% abtest_b; + * abtest_a; +} + +split_clients $remote_addr$remote_port $abtest_20 { + 20% abtest_b; + * abtest_a; +} + +split_clients $remote_addr$remote_port $abtest_50 { + 50% abtest_b; + * abtest_a; +} + +map $keyval_bpercentage $abtest_upstream { + 0 abtest_a; + 5 $abtest_5; + 20 $abtest_20; + 50 $abtest_50; + 100 abtest_b; + default abtest_a; +} + +upstream abtest_a { + zone abtest_a 32k; + server 172.17.0.3:80; +} + +upstream abtest_b { + zone abtest_b 32k; + server 
172.17.0.4:80; +} + +server { + server_name abtest.will.com; + status_zone abtest.will.com; + location / { + proxy_pass http://$abtest_upstream; + } +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/api-8000.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/api-8000.conf" new file mode 100644 index 0000000000000000000000000000000000000000..66234ae37fb41ccd5ec8d708e8f96d8914ad5752 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/api-8000.conf" @@ -0,0 +1,26 @@ +server { + listen 8000; + + access_log /var/log/nginx/api_access.log main; + + location / { + rewrite / /dashboard.html redirect; + } + + location /api { + api write=on; + # directives limiting access to the API + } + + location = /dashboard.html { + root /usr/share/nginx/html; + } + + # Redirect requests made to the pre-NGINX Plus API dashboard + location = /status.html { + return 301 /dashboard.html; + } + location /swagger-ui { + root /usr/share/nginx/html; + } +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/bluegreen.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/bluegreen.conf" new file mode 100644 index 0000000000000000000000000000000000000000..8aada4b19ce8a43d724a4ad3aed5335952e9b8b9 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/bluegreen.conf" @@ -0,0 +1,22 @@ +keyval_zone zone=blue-green:64k; +keyval environment $keyval_bluegreen zone=blue-green; + +map $keyval_bluegreen $bluegreen_upstream { + ~.+ $keyval_bluegreen; + default blue; +} + +upstream blue { + server 172.17.0.3:80; +} + +upstream green { + server 172.17.0.4:80; +} + +server { + server_name bluegreen.will.com; + location / { + proxy_pass http://$bluegreen_upstream; + } +} \ No newline at end of file diff --git "a/5 
NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/canary.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/canary.conf" new file mode 100644 index 0000000000000000000000000000000000000000..75037515fc7a6b3df37d72ec0abdc700ee822195 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/canary.conf" @@ -0,0 +1,36 @@ +keyval_zone zone=canary:64k; +keyval stage $keyval_stage zone=canary; + +map $keyval_stage $canary_upstream { + a all; + b v1_only; + c $canary_release; + d v2_only; + default all; +} + +map $http_user_agent $canary_release { + ~*firefox v1_only; + ~*chrome v2_only; + default v1_only; +} + +upstream all { + server 172.17.0.3:80; + server 172.17.0.4:80; +} + +upstream v1_only { + server 172.17.0.3:80; +} + +upstream v2_only { + server 172.17.0.4:80; +} + +server { + server_name canary.will.com; + location / { + proxy_pass http://$canary_upstream; + } +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/circuitbreaker.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/circuitbreaker.conf" new file mode 100644 index 0000000000000000000000000000000000000000..c66cfeb4b2c747a4a5b3eafd018ed039b8ead239 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/circuitbreaker.conf" @@ -0,0 +1,26 @@ +upstream circuitbreaker { + server 172.17.0.3:80 slow_start=15s; + server 172.17.0.4:80 slow_start=15s; + zone circuitbreaker 64k; +} + +server { + server_name circuit.will.com; + status_zone circuit.will.com; + + location @health-check { + internal; + proxy_pass http://circuitbreaker; + health_check uri=/health match=conditions fails=1 passes=3 interval=3s; + } + + location / { + proxy_pass http://circuitbreaker; + } +} + +match conditions { + status 200-399; + header Content-Type ~ "application/json"; + body ~ 
'{"deadlocks":{"healthy":true},"Disk":{"healthy":true},"Memory":{"healthy":true}}'; +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/ratelimit.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/ratelimit.conf" new file mode 100644 index 0000000000000000000000000000000000000000..6c40cec8061e8fb71cec2fb1223369105e6e3d8d --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/ratelimit.conf" @@ -0,0 +1,40 @@ +#limit_conn_zone $binary_remote_addr zone=limitconnperip:1m; +#limit_conn_zone $server_name zone=limitconnperserver:1m; + +limit_req_zone $ratelimit_100 zone=req_100:1m rate=100r/s; +limit_req_zone $ratelimit_200 zone=req_200:1m rate=200r/s; + +keyval_zone zone=kv_ratelimit:64k; +keyval ratelimit_100 $enablelimit_100 zone=kv_ratelimit; +keyval ratelimit_200 $enablelimit_200 zone=kv_ratelimit; + +map $enablelimit_100 $ratelimit_100 { + default ""; + 0 ""; + 1 $binary_remote_addr; +} + +map $enablelimit_200 $ratelimit_200 { + default ""; + 0 ""; + 1 $binary_remote_addr; +} + +upstream ratelimit { + zone ratelimit 32k; + server 172.17.0.3:80; + server 172.17.0.4:80; +} + +server { + server_name ratelimit.will.com; + status_zone ratelimit.will.com; + #limit_conn limitconnperip 10; + #limit_conn limitconnperserver 100; + limit_req zone=req_100 burst=100 nodelay; + limit_req zone=req_200 burst=200; + limit_req_status 444; + location / { + proxy_pass http://ratelimit; + } +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/variable.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/variable.conf" new file mode 100644 index 0000000000000000000000000000000000000000..783858a14b1be62b7706666d4da6531b09570b18 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Reverse Proxy/conf.d/variable.conf" @@ 
-0,0 +1,14 @@ +server { + server_name variable.will.com; + + #set $f5 "is the best ADC!"; + + location /noset { + return 200 "F5: $f5 \n"; + } + + location /set { + set $f5 "NGINX is a part of F5"; + return 200 "F5: $f5 \n"; + } +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web1/conf.d/default.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web1/conf.d/default.conf" new file mode 100644 index 0000000000000000000000000000000000000000..0bf632510cecb81dcf34983fbe4b8b7392ff8939 --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web1/conf.d/default.conf" @@ -0,0 +1,66 @@ +server { + listen 80 default_server; + server_name localhost; + + #charset koi8-r; + #access_log /var/log/nginx/host.access.log main; + + location /health { + default_type application/json; + return 200 '{"deadlocks":{"healthy":true},"Disk":{"healthy":true},"Memory":{"healthy":true}}'; + } + + location / { + #root /usr/share/nginx/html; + #index index.html index.htm; + return 200 "This is Web Server 1. 
\n"; + } + + #error_page 404 /404.html; + + # redirect server error pages to the static page /50x.html + # + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + + # proxy the PHP scripts to Apache listening on 127.0.0.1:80 + # + #location ~ \.php$ { + # proxy_pass http://127.0.0.1; + #} + + # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 + # + #location ~ \.php$ { + # root html; + # fastcgi_pass 127.0.0.1:9000; + # fastcgi_index index.php; + # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; + # include fastcgi_params; + #} + + # deny access to .htaccess files, if Apache's document root + # concurs with nginx's one + # + #location ~ /\.ht { + # deny all; + #} + + # enable /api/ location with appropriate access control in order + # to make use of NGINX Plus API + # + #location /api/ { + # api write=on; + # allow 127.0.0.1; + # deny all; + #} + + # enable NGINX Plus Dashboard; requires /api/ location to be + # enabled and appropriate access control for remote access + # + #location = /dashboard.html { + # root /usr/share/nginx/html; + #} +} \ No newline at end of file diff --git "a/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web2/conf.d/default.conf" "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web2/conf.d/default.conf" new file mode 100644 index 0000000000000000000000000000000000000000..b277470bd0b5e182e4a63ff58c15564d4139485f --- /dev/null +++ "b/5 NGINX\346\225\217\346\215\267\351\205\215\347\275\256/Web2/conf.d/default.conf" @@ -0,0 +1,66 @@ +server { + listen 80 default_server; + server_name localhost; + + #charset koi8-r; + #access_log /var/log/nginx/host.access.log main; + + location /health { + default_type application/json; + return 200 '{"deadlocks":{"healthy":true},"Disk":{"healthy":true},"Memory":{"healthy":true}}'; + } + + location / { + #root /usr/share/nginx/html; + #index index.html index.htm; + return 200 "This is Web Server 2. 
\n"; + } + + #error_page 404 /404.html; + + # redirect server error pages to the static page /50x.html + # + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + + # proxy the PHP scripts to Apache listening on 127.0.0.1:80 + # + #location ~ \.php$ { + # proxy_pass http://127.0.0.1; + #} + + # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 + # + #location ~ \.php$ { + # root html; + # fastcgi_pass 127.0.0.1:9000; + # fastcgi_index index.php; + # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; + # include fastcgi_params; + #} + + # deny access to .htaccess files, if Apache's document root + # concurs with nginx's one + # + #location ~ /\.ht { + # deny all; + #} + + # enable /api/ location with appropriate access control in order + # to make use of NGINX Plus API + # + #location /api/ { + # api write=on; + # allow 127.0.0.1; + # deny all; + #} + + # enable NGINX Plus Dashboard; requires /api/ location to be + # enabled and appropriate access control for remote access + # + #location = /dashboard.html { + # root /usr/share/nginx/html; + #} +} \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/.DS_Store b/8 NGINX-Ingress-Controller/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..6cb3e5959689800b2b9dfcea42c85add424ac2c5 Binary files /dev/null and b/8 NGINX-Ingress-Controller/.DS_Store differ diff --git a/8 NGINX-Ingress-Controller/0-deployment/README.md b/8 NGINX-Ingress-Controller/0-deployment/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b4e18684b958ea7bf3c6383111ad13899a6027be --- /dev/null +++ b/8 NGINX-Ingress-Controller/0-deployment/README.md @@ -0,0 +1,13 @@ +# KIC部署 + +这是演示环境使用的deployment文件,供参考。关于KIC的启动命令行参数,请参阅: +(https://docs.nginx.com/nginx-ingress-controller/configuration/global-configuration/command-line-arguments/) + +本文件使用了hostnetwork方式对外暴露服务,暴露端口如下: +- http服务:80:80 +- https服务:443:443 +- 
Dashboard:8080:8080 + +部署完成后,可以通过http/https服务暴露端口进行测试,应该能看到nginx版本页面,通过`http://’服务IP:8080‘/dashboard.html`应能看到NGINX Plus Dashboard,其中*'服务IP:8080’*是你对外暴露的KIC Dashboard的IP和端口。 + +![Dashboard](https://images.gitee.com/uploads/images/2021/0916/170542_2e484244_9655660.png "dashboard.png") \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/0-deployment/nginx-plus-ingress-hostnetwork.yaml b/8 NGINX-Ingress-Controller/0-deployment/nginx-plus-ingress-hostnetwork.yaml new file mode 100644 index 0000000000000000000000000000000000000000..365097d87d45370c4812ea371d2f684ac778388b --- /dev/null +++ b/8 NGINX-Ingress-Controller/0-deployment/nginx-plus-ingress-hostnetwork.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + replicas: 1 + selector: + matchLabels: + app: nginx-ingress + template: + metadata: + labels: + app: nginx-ingress + #annotations: + #prometheus.io/scrape: "true" + #prometheus.io/port: "9113" + #prometheus.io/scheme: http + spec: + serviceAccountName: nginx-ingress + hostNetwork: true + nodeSelector: + name: master + containers: + - image: docker-registry.nginx.com/nginx-ic-nap/nginx-plus-ingress:1.12.0 + imagePullPolicy: Never + name: nginx-plus-ingress + ports: + - name: http + containerPort: 80 + - name: https + containerPort: 443 + - name: readiness-port + containerPort: 8081 + - name: dashboard + containerPort: 8080 + #- name: prometheus + # containerPort: 9113 + readinessProbe: + httpGet: + path: /nginx-ready + port: readiness-port + periodSeconds: 1 + securityContext: + allowPrivilegeEscalation: true + runAsUser: 101 #nginx + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + args: + - -nginx-plus + - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config + - 
-default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret + - -enable-app-protect + #- -v=3 # Enables extensive logging. Useful for troubleshooting. + - -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-prometheus-metrics + - -global-configuration=$(POD_NAMESPACE)/nginx-configuration + - -ingress-class=nginx-plus + - -enable-tls-passthrough + - -nginx-status-allow-cidrs=10.1.10.0/24 + - -enable-preview-policies + - -health-status \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/1-basic-ingress/README.md b/8 NGINX-Ingress-Controller/1-basic-ingress/README.md new file mode 100644 index 0000000000000000000000000000000000000000..987b1aaedbb1a02668f6529ebc1b2c541381bcdd --- /dev/null +++ b/8 NGINX-Ingress-Controller/1-basic-ingress/README.md @@ -0,0 +1,68 @@ +# Basic Ingress + +基本的Ingress体验,根据host和uri进行7层应用路由,同时进行tls加密。 + +## Step 1 - 部署cafe应用 + +部署cafe应用相应的pod并发布service: +``` +$ kubectl create -f cafe.yaml +``` + +## Step 2 - 部署证书密钥和Ingress + +1. 使用secret资源创建证书和密钥: + ``` + $ kubectl create -f cafe-secret.yaml + ``` +2. 创建基本的Ingress资源: + ``` + $ kubectl create -f cafe-ingress.yaml + ``` + +## Step 3 - 验证基本Ingress功能 + +1. 使用浏览器访问`cafe.example.com/coffee`,能够显示访问到coffee pods,多次访问应该在两个pod之间负载均衡。 + + **请留意** 浏览器不需要特意指定https,应用会自动跳转到https进行访问,你可以通过curl命令观察到Ingress做了重定向。 + ``` + $ curl -i cafe.example.com/coffee + HTTP/1.1 301 Moved Permanently + Server: nginx/1.19.10 + Date: Thu, 16 Sep 2021 03:35:35 GMT + Content-Type: text/html + Content-Length: 170 + Connection: keep-alive + Location: https://cafe.example.com:443/coffee + + + 301 Moved Permanently + +

301 Moved Permanently

+
nginx/1.19.10
+ + + ``` + +2. 使用浏览器访问`cafe.example.com/tea`,能够显示访问到tea pods,多次访问应该在两个pod之间负载均衡。 + +## Step 4 - 部署带会话保持的Ingress + +1. 删除基本Ingress: + ``` + $ kubectl delete -f cafe-ingress.yaml + ``` +2. 创建带会话保持的Ingress资源: + ``` + $ kubectl create -f cafe-ingress-with-session-persistence.yaml + ``` + +## Step 5 - 验证带会话保持的Ingress + +1. 浏览器打开开发者工具,访问`cafe.example.com/coffee`,能够显示访问到coffee pod,多次访问也只会访问到同一个pod,通过开发者工具查看cookie,能看到Ingress插入的会话保持cookie。 + +2. 浏览器打开开发者工具,访问`cafe.example.com/tea`,能够显示访问到tea pod,多次访问也只会访问到同一个pod,通过开发者工具查看cookie,能看到Ingress插入的会话保持cookie。 + +3. 对比coffee和tea cookie的不同,留意过期时间。 + +**暂时保留本实验创建的资源,下一个实验会继续使用** \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress-with-session-persistence.yaml b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress-with-session-persistence.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8ea6a0cdf1badb951146afc5cfda3fe92ea4635 --- /dev/null +++ b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress-with-session-persistence.yaml @@ -0,0 +1,24 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: cafe-ingress + annotations: + nginx.com/sticky-cookie-services: "serviceName=coffee-svc srv_id expires=1h path=/coffee;serviceName=tea-svc srv_id expires=2h path=/tea" +spec: + ingressClassName: nginx-plus + tls: + - hosts: + - cafe.example.com + secretName: cafe-secret + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + backend: + serviceName: tea-svc + servicePort: 80 + - path: /coffee + backend: + serviceName: coffee-svc + servicePort: 80 diff --git a/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress.yaml b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b4d38a32398eead925ce92ef825cab739c8ed44 --- /dev/null +++ b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: 
+ name: cafe-ingress +spec: + ingressClassName: nginx-plus + tls: + - hosts: + - cafe.example.com + secretName: cafe-secret + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + backend: + serviceName: tea-svc + servicePort: 80 + - path: /coffee + backend: + serviceName: coffee-svc + servicePort: 80 diff --git a/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-secret.yaml b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-secret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e94493b121037da7284ce7e241a5da5250f3a114 --- /dev/null +++ b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cafe-secret +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0
NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WX
J6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/1-basic-ingress/cafe.yaml b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fcfd71148bf6eb41d7c18563495589149123bcb --- /dev/null +++ b/8 NGINX-Ingress-Controller/1-basic-ingress/cafe.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee +spec: + replicas: 2 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-svc +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea +spec: + replicas: 2 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc + labels: +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: tea diff --git a/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/README.md b/8 
NGINX-Ingress-Controller/10-ts-tls-passthrough/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c199c00bc2ed387793a3f5385751ab844f825ca3 --- /dev/null +++ b/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/README.md @@ -0,0 +1,32 @@ +# TLS Passthrough + +本实验通过KIC的CRD之一Transport Server,实现应用全栈加密的情况下根据SNI进行host路由。 + +## Step 1 - 部署Secure App + +部署secure app: +``` +$ kubectl apply -f secure-app.yaml +``` +这是一个https应用,可以在k8s上通过service ip验证https访问。 + +## Step 2 - 部署ts + +创建TransportServer,实现TLS Passthrough,如果https访问的SNI是*app.example.com*,则路由到secure app: + + ``` + $ kubectl apply -f transport-server-passthrough.yaml + ``` + +## Step 3 - 验证 + +1. 使用curl命令或浏览器访问`https://app.example.com/`能够正常访问。 +2. 使用curl命令或浏览器访问其它域名,只会收到404。 + +## Step 4 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/secure-app.yaml b/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/secure-app.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49bf748e83abbf82f929212de380ce36abe84eab --- /dev/null +++ b/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/secure-app.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: secure-app +spec: + replicas: 1 + selector: + matchLabels: + app: secure-app + template: + metadata: + labels: + app: secure-app + spec: + containers: + - name: secure-app + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8443 + volumeMounts: + - name: secret + mountPath: /etc/nginx/ssl + readOnly: true + - name: config-volume + mountPath: /etc/nginx/conf.d + volumes: + - name: secret + secret: + secretName: app-tls-secret + - name: config-volume + configMap: + name: secure-config +--- +apiVersion: v1 +kind: Service +metadata: + name: secure-app +spec: + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + name: https + selector: + app: secure-app +--- +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: secure-config +data: + app.conf: |- + server { + listen 8443 ssl; + + server_name app.example.com; + + ssl_certificate /etc/nginx/ssl/tls.crt; + ssl_certificate_key /etc/nginx/ssl/tls.key; + + default_type text/plain; + + location / { + return 200 "hello from pod $hostname\n"; + } + } +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-tls-secret +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ3Q0NRQ3EzQWxhdnJiaWpqQU5CZ2txaGtpRzl3MEJBUXNGQURCTU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhGakFVQmdOVkJBY01EVk5oYmlCR2NtRnVZMmx6WTI4eEdEQVdCZ05WQkFNTQpEMkZ3Y0M1bGVHRnRjR3hsTG1OdmJUQWVGdzB5TURBek1qTXlNekl3TkROYUZ3MHlNekF6TWpNeU16SXdORE5hCk1Fd3hDekFKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKRFFURVdNQlFHQTFVRUJ3d05VMkZ1SUVaeVlXNWoKYVhOamJ6RVlNQllHQTFVRUF3d1BZWEJ3TG1WNFlXMXdiR1V1WTI5dE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTJCRXhZR1JPRkhoN2VPMVlxeCtWRHMzRzMrVEhyTEZULzdEUFFEQlkza3pDCi9oZlprWCt3OW1NNkQ1RU9uK2lpVlNhUWlQMm1aNFA3N29pR0dmd3JrNjJ0eEQ5cHphODM5NC9aSjF5Q0dXZ1QKK2NWUEVZbkxjQktzSTRMcktJZ21oWVIwUjNzWWRjR1JkSXJWUFZlNUVUQlk1Z1U0RGhhMDZOUEIraitmK0krWgphWGIvMlRBekJhNHozMWpIQzg2amVQeTFMdklGazFiY3I2cSsxRGR5eklxcWxkRDYvU3Q4Q2t3cDlOaDFCUGFhCktZZ1ZVd010UVBib2s1cFFmbVMrdDg4NHdSM0dTTEU4VkxRbzgyYnJhNUR3emhIamlzOTlJRGhzbUt0U3lWOXMKaWNJbXp5dHBnSXlhTS9zWEhRQU9KbVFJblFteWgyekd1WFhTQ0lkRGtRSURBUUFCTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQ0tsVkhOZ1k5VHZLaW9Xb0tvdllCdnNRMmYrcmFOOEJwdWNDcnRvRm15NUczcGIzU2lPTndaCkF2cnhtSm4vR3lsa3JKTHBpQVA1eUNBNGI2Y2lYMnRGa3pQRmhJVFZKRTVBeDlpaEF2WWZwTUFSdWVqM29HN2UKd0xwQk1iUnlGbHJYV29NWUVBMGxsV0JueHRQQXZYS2Y4SVZGYTRSSDhzV1JJSDB4M2hFdjVtQ3VUZjJTRTg0QwpiNnNjS3Z3MW9CQU5VWGxXRVZVYTFmei9rWWZBa1lrdHZyV2JUcTZTWGxodXRJYWY4WEYzSUMrL2x1b3gzZThMCjBBcEFQVE5sZ0JwOTkvcXMrOG9PMWthSmQ1TmV6TnlJeXhSdUtJMzlDWkxuQm9OYmkzdlFYY1NzRCtYU2lYT0cKcEVnTjNtci8xRms4OVZMSENhTnkyKzBqMjZ0eWpiclcKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRRFlFVEZnWkU0VWVIdDQKN1Zpckg1VU96Y2JmNU1lc3NWUC9zTTlBTUZqZVRNTCtGOW1SZjdEMll6b1BrUTZmNktKVkpwQ0kvYVpuZy92dQppSVlaL0N1VHJhM0VQMm5OcnpmM2o5a25YSUlaYUJQNXhVOFJpY3R3RXF3amd1c29pQ2FGaEhSSGV4aDF3WkYwCml0VTlWN2tSTUZqbUJUZ09GclRvMDhINlA1LzRqNWxwZHYvWk1ETUZyalBmV01jTHpxTjQvTFV1OGdXVFZ0eXYKcXI3VU4zTE1pcXFWMFByOUszd0tUQ24wMkhVRTlwb3BpQlZUQXkxQTl1aVRtbEIrWkw2M3p6akJIY1pJc1R4VQp0Q2p6WnV0cmtQRE9FZU9LejMwZ09HeVlxMUxKWDJ5SndpYlBLMm1Bakpveit4Y2RBQTRtWkFpZENiS0hiTWE1CmRkSUloME9SQWdNQkFBRUNnZ0VCQUxYaW16ODZrT1A0bkhBcTFPYVEyb2l3dndhQTczbTNlUytZSm84eFk4NFcKcmxyNXRzUWR5dGxPcEhTd05yQjBSQnNNTU1XeFNPQ0JJWlltUlVVZ200cGd2Uk9rRWl2OG9VOThQMkE4SnFTKwprWHBFRjVCNi84K2pXRmM0Z1Q4SWhlMEZtR0VJQllvelhYL08wejBsV0h4WXg2MHluWUoycU9vS1FKT3A5YjlsCmpiUVBkaC9mN2ErRWF0RzZNUFlrNG5xSEY3a0FzcmNsRXo2SGUvaEx6NmRkSTJ1N2RMRjB6QlN0QjM5WDFRZysKZ1JzTittOXg1S1FVTXYxMktvajdLc2hEelozOG5hSjd5bDgycGhBV1lGZzBOZHlzRlBRbmt0WmlNSUxOblFjNwpOeUt0cHNQaUxIRE9ha05hdEZLU2lOaUJrUk1lY1ZUMlJNMzMzUG54bFVFQ2dZRUEvYTY5MEEralU4VFJNbVZyCk4vRnlYWkxYa1c5b2NxVjBRbTA0TDMrSExybFNCTlRWSzk2U1pVT203VjViTzIxNmd4S2dJK3IwYm5kdE5GTUQKLzFncDhsdlJNcUlIeGZTeUo4SHpsSzViT0lnaUpxRGhzK3BKWTZmLytIVzZ1QkZyN3NGS3lxbVlIQlA0SC9BdApsT3lLeEVjMHFXazFlT2tCMWNNSGx0WDRwemtDZ1lFQTJncDhDVDVYWjNMSWRQN2M1SHpDS1YwczBYS1hGNmYyCkxzclhPVlZaTmJCN1NIS1NsOTBIU2VWVGx3czdqSnNxcC9yWFY2aHF0eUdEaTg4aTFZekthcEF6dXl3b0U3TnEKMUJpd2ZYSURQeTlPNUdGNXFYNXFUeENzSWNIcmo2Z21XMEZVQWhoS1lQcDRxd1JMdzFMZkJsd3U1VmhuN3I3ego0SkZBTEFpdlp4a0NnWUJicnpuKzVvZjdFSmtqQTdDYWlYTHlDczVLUzkrTi8rcGl6NktNMkNSOWFKRVNHZkhwClp3bTErNXRyRXIwYVgxajE0bGRxWTlKdjBrM3ZxVWs2a2h5bThUUk1mbThjeG5GVkdTMzF3SVpMaWpmOWlndkkKd0paQnBFaEkvaE83enVBWmJGYWhwR1hMVUJSUFJyalNxQ01IQ1UwcEpWTWtIZUtCNVhqcXRPNm5VUUtCZ0NJUAp6VHlzYm44TW9XQVZpSEJ4Uk91dFVKa1BxNmJZYUU3N0JSQkIwd1BlSkFRM1VjdERqaVh2RzFYWFBXQkR4VEFrCnNZdFNGZ214eEprTXJNWnJqaHVEbDNFLy9xckZOb1VYcmtxS2l4Tk4wcWMreXdDOWJPSVpHcXJUWG5jOHIzRkcKRFZlZWI5QWlrTU0ya3BkYTFOaHJnaS8xMVphb1lmVE0vQmRrNi9IUkFv
R0JBSnFzTmFZYzE2clVzYzAzUEwybApXUGNzRnZxZGI3SEJyakVSRkhFdzQ0Vkt2MVlxK0ZWYnNNN1FTQVZ1V1llcGxGQUpDYzcrSEt1YjRsa1hRM1RkCndSajJLK2pOUzJtUXp1Y2hOQnlBZ1hXVnYveHhMZEE3NnpuWmJYdjl5cXhnTVVjTVZwZGRuSkxVZm9QVVZ1dTcKS0tlVVU3TTNIblRKUStrcldtbUxraUlSCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/ts-app.yaml b/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/ts-app.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94e48d506bdd0cf8c5bd5b78c1c21c1892ffd08a --- /dev/null +++ b/8 NGINX-Ingress-Controller/10-ts-tls-passthrough/ts-app.yaml @@ -0,0 +1,15 @@ +apiVersion: k8s.nginx.org/v1alpha1 +kind: TransportServer +metadata: + name: secure-app +spec: + listener: + name: tls-passthrough + protocol: TLS_PASSTHROUGH + host: app.example.com + upstreams: + - name: secure-app + service: secure-app + port: 8443 + action: + pass: secure-app \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/2-configmap/README.md b/8 NGINX-Ingress-Controller/2-configmap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..41eda9db762fd79bdc2b70b46fc392ab3ca908bb --- /dev/null +++ b/8 NGINX-Ingress-Controller/2-configmap/README.md @@ -0,0 +1,90 @@ +# ConfigMap + +通过ConfigMap修改KIC的配置。 + +## Step 1 - 查看当前配置 + +执行以下命令查看当前的ConfigMap,Data部分应该为空,也即所有配置都是默认值: +``` +$ kubectl describe configmap nginx-config -n nginx-ingress +Name: nginx-config +Namespace: nginx-ingress +Labels: +Annotations: +Data +==== +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Updated 29m nginx-ingress-controller Configuration from nginx-ingress/nginx-config was updated +``` + +## Step 2 - 部署新的ConfigMap + +1. 部署新的configmap文件,关闭ssl redirect: + ``` + $ kubectl apply -f cm-nossl.yaml + ``` +2. 
查看更新后的ConfigMap,观察Data部分: + ``` + $ kubectl describe configmap nginx-config -n nginx-ingress + Name: nginx-config + Namespace: nginx-ingress + Labels: + Annotations: + Data + ==== + ssl-redirect: + ---- + False + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Updated 5s (x2 over 30m) nginx-ingress-controller Configuration from nginx-ingress/nginx-config was updated + ``` + +## Step 3 - 验证效果 + +1. 使用curl命令访问`cafe.example.com/coffee`,能够显示访问到coffee pods,而不再是重定向。 + + ``` + $ curl -i cafe.example.com/coffee + HTTP/1.1 200 OK + Server: nginx/1.19.10 + Date: Thu, 16 Sep 2021 04:03:12 GMT + Content-Type: text/plain + Content-Length: 160 + Connection: keep-alive + Expires: Thu, 16 Sep 2021 04:03:11 GMT + Cache-Control: no-cache + + Server address: 10.244.0.188:80 + Server name: coffee-755d68dd75-45bkg + Date: 16/Sep/2021:04:03:12 +0000 + URI: /coffee + Request ID: 52c63289216ff755b6d56ae126a0e973 + ``` + +2. 使用curl命令直接访问`https://cafe.example.com/coffee`,依然能通过https进行加密访问。 + ``` + $ curl -k https://cafe.example.com/coffee + Server address: 10.244.0.187:80 + Server name: coffee-755d68dd75-h4dzn + Date: 16/Sep/2021:04:08:51 +0000 + URI: /coffee + Request ID: 2d2c9a05a23fe106ffbbc946f12c96ba + ``` + +3. 可以通过`/tea`进行重复验证,效果是一样的。 + +## Step 4 - 还原实验环境 + +1. 重新应用空的configmap文件,还原默认配置: + ``` + $ kubectl apply -f cm.yaml + ``` + +2. 
返回basic-ingress文件夹,清除所有资源: + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/2-configmap/cm-nossl.yaml b/8 NGINX-Ingress-Controller/2-configmap/cm-nossl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e7617176c5be2b5c5c9bfa0c7f49ab4b57b44bcd --- /dev/null +++ b/8 NGINX-Ingress-Controller/2-configmap/cm-nossl.yaml @@ -0,0 +1,7 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-config + namespace: nginx-ingress +data: + ssl-redirect: "False" diff --git a/8 NGINX-Ingress-Controller/2-configmap/cm.yaml b/8 NGINX-Ingress-Controller/2-configmap/cm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc905439b62a6c870a47a38b701c825787faa5e5 --- /dev/null +++ b/8 NGINX-Ingress-Controller/2-configmap/cm.yaml @@ -0,0 +1,6 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-config + namespace: nginx-ingress +data: \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/README.md b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b1221379dcbc8309274b2ce40eca598ee15e4018 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/README.md @@ -0,0 +1,40 @@ +# Cross-Namespace Ingress + +实验通过Mergeable Ingress的方式实现跨Namespace的Ingress服务发布 + +## Step 1 - 创建分布于不同namespace下的cafe服务 + +``` +$ kubectl create -f cafe-with-ns.yaml +``` + +## Step 2 - 部署证书密钥和Ingress + +1. 使用secret资源创建证书和密钥: + ``` + $ kubectl create -f cafe-secret.yaml + ``` +2. 创建默认namespace的master ingress资源: + ``` + $ kubectl create -f cafe-master.yaml + ``` +3. 创建coffee namespace的minion ingress资源: + ``` + $ kubectl create -f coffee-minion.yaml + ``` +4. 
创建tea namespace的minion ingress资源: + ``` + $ kubectl create -f tea-minion.yaml + ``` + +## Step 3 - 验证效果 + +使用curl命令或浏览器访问`cafe.example.com/coffee`和`cafe.example.com/tea`,能够分别显示访问到coffee pods和tea pods,证明可实现跨namespace的服务发布。 + +## Step 4 - 还原实验环境 + +清除所有资源,**执行两遍**: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-master.yaml b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-master.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92d6ee46f0582bd1fc930cd859972a7c9954a0b0 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-master.yaml @@ -0,0 +1,14 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: cafe-ingress-master + annotations: + nginx.org/mergeable-ingress-type: "master" +spec: + ingressClassName: nginx-plus + tls: + - hosts: + - cafe.example.com + secretName: cafe-secret + rules: + - host: cafe.example.com diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-secret.yaml b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-secret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e94493b121037da7284ce7e241a5da5250f3a114 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cafe-secret +type: kubernetes.io/tls +data: + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8y
VWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-with-ns.yaml b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-with-ns.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba37ea7452c15d287b528ed41f17ebda70b99a90 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/cafe-with-ns.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: tea +--- +apiVersion: v1 +kind: Namespace +metadata: + name: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee + namespace: coffee +spec: + replicas: 2 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-svc + namespace: coffee +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea + namespace: tea +spec: + replicas: 2 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc + namespace: tea +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: tea diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/coffee-minion.yaml b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/coffee-minion.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..aee15fde42df4b8b34878b861814ec910c5f5cf6 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/coffee-minion.yaml @@ -0,0 +1,17 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: cafe-ingress-coffee-minion + namespace: coffee + annotations: + nginx.org/mergeable-ingress-type: "minion" +spec: + ingressClassName: nginx-plus + rules: + - host: cafe.example.com + http: + paths: + - path: /coffee + backend: + serviceName: coffee-svc + servicePort: 80 diff --git a/8 NGINX-Ingress-Controller/3-ingress-cross-ns/tea-minion.yaml b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/tea-minion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..001dbd1de91e75ecc2bbd6ad8230ee58bc9bb325 --- /dev/null +++ b/8 NGINX-Ingress-Controller/3-ingress-cross-ns/tea-minion.yaml @@ -0,0 +1,17 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: cafe-ingress-tea-minion + namespace: tea + annotations: + nginx.org/mergeable-ingress-type: "minion" +spec: + ingressClassName: nginx-plus + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + backend: + serviceName: tea-svc + servicePort: 80 diff --git a/8 NGINX-Ingress-Controller/4-vs-health/README.md b/8 NGINX-Ingress-Controller/4-vs-health/README.md new file mode 100644 index 0000000000000000000000000000000000000000..daeaf686a9622883470d12b9e36b5b6c0dbdc821 --- /dev/null +++ b/8 NGINX-Ingress-Controller/4-vs-health/README.md @@ -0,0 +1,45 @@ +# 带主动健康检查的Virtual Server资源 + +实验通过KIC的CRD资源之一Virtual Server,简称VS,进行7层服务发布,同时启用主动式健康检查。 + +## Step 1 - 创建cafe服务 + +``` +$ kubectl create -f cafe.yaml +``` + +## Step 2 - 部署证书密钥和VS + +1. 使用secret资源创建证书和密钥: + ``` + $ kubectl create -f cafe-secret.yaml + ``` +2. 创建vs资源: + ``` + $ kubectl create -f vs-health-check.yaml + ``` + +## Step 3 - 验证效果 + +1. 使用curl命令或浏览器访问`cafe.example.com/coffee`和`cafe.example.com/tea`,能够分别显示访问到coffee pods和tea pods。 +2. 
打开Dashboard,观察upstream,特别留意tea的health monitor部分。 +3. 使用`kubectl exec -it *tea_pod_name* -- sh`进入任意一个tea pod。 + 修改`/etc/nginx/conf.d/hello-plain-text.conf`文件,增加一个`return 403;`指令。 + ``` + ... + return 403; + #return 200 'Server address...'; + ... + ``` + 执行`ngins -s reload`,使配置生效。 +4. 观察Dashboard中tea的health monitor部分的变化,一段时间后,其中一个pod应该被标记为down。 +5. 在第3步的tea pod中,撤销之前做的操作,执行`ngins -s reload`,使配置生效。 +6. 观察Dashboard中tea的health monitor部分的变化,一段时间后,被标记为down的pod应该重新up。 + +## Step 4 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/4-vs-health/cafe-secret.yaml b/8 NGINX-Ingress-Controller/4-vs-health/cafe-secret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e94493b121037da7284ce7e241a5da5250f3a114 --- /dev/null +++ b/8 NGINX-Ingress-Controller/4-vs-health/cafe-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cafe-secret +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaH
ZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidU
I4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/4-vs-health/cafe.yaml b/8 NGINX-Ingress-Controller/4-vs-health/cafe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fcfd71148bf6eb41d7c18563495589149123bcb --- /dev/null +++ b/8 NGINX-Ingress-Controller/4-vs-health/cafe.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee +spec: + replicas: 2 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-svc +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: coffee +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea +spec: + replicas: 2 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc + labels: +spec: + ports: + - port: 80 + 
targetPort: 80 + protocol: TCP + name: http + selector: + app: tea diff --git a/8 NGINX-Ingress-Controller/4-vs-health/vs-health-check.yaml b/8 NGINX-Ingress-Controller/4-vs-health/vs-health-check.yaml new file mode 100644 index 0000000000000000000000000000000000000000..202dcde4caf1c37c0a10c6103d2597ce0973e9c2 --- /dev/null +++ b/8 NGINX-Ingress-Controller/4-vs-health/vs-health-check.yaml @@ -0,0 +1,31 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: cafe-health-check +spec: + ingressClassName: nginx-plus + host: cafe.example.com + tls: + secret: cafe-secret + upstreams: + - name: tea + service: tea-svc + port: 80 + healthCheck: + enable: true + path: /health + interval: 5s + fails: 3 + passes: 3 + port: 80 + statusMatch: "200" + - name: coffee + service: coffee-svc + port: 80 + routes: + - path: /tea + action: + pass: tea + - path: /coffee + action: + pass: coffee diff --git a/8 NGINX-Ingress-Controller/5-vs-split/README.md b/8 NGINX-Ingress-Controller/5-vs-split/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d1bbb8a28eadc2fa521492d822f341e54b353d4c --- /dev/null +++ b/8 NGINX-Ingress-Controller/5-vs-split/README.md @@ -0,0 +1,34 @@ +# 通过VS实现AB测试 + +实验通过VS的split功能实现AB测试,将指定比例的流量分发到不同的后台服务。 + +## Step 1 - 创建cafe服务 + +``` +$ kubectl create -f cafe-with-ns.yaml +``` + +## Step 2 - 部署vs + +创建vs资源: + + ``` + $ kubectl create -f vs-split.yaml + ``` + +## Step 3 - 验证效果 + +1. 使用curl命令或浏览器访问`cafe.example.com/coffee`能够显示访问到coffee v1 pods和coffee v2 pods。 +2. 使用wrk打一定的压力,参考命令: + ``` + wrk -c100 -t2 -d30s http://cafe.example.com/coffee + ``` +3. 
打开Dashboard,观察upstream中coffee v1和v2的request数量,应该大致在9:1。 + +## Step 4 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/5-vs-split/cafe.yaml b/8 NGINX-Ingress-Controller/5-vs-split/cafe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1fd9fd578a8f32aa278e1a77ea46090aac8e3971 --- /dev/null +++ b/8 NGINX-Ingress-Controller/5-vs-split/cafe.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee-v1 +spec: + replicas: 2 + selector: + matchLabels: + app: coffee-v1 + template: + metadata: + labels: + app: coffee-v1 + spec: + containers: + - name: coffee-v1 + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-v1-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee-v1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee-v2 +spec: + replicas: 2 + selector: + matchLabels: + app: coffee-v2 + template: + metadata: + labels: + app: coffee-v2 + spec: + containers: + - name: coffee-v2 + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-v2-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee-v2 diff --git a/8 NGINX-Ingress-Controller/5-vs-split/vs-split.yaml b/8 NGINX-Ingress-Controller/5-vs-split/vs-split.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3807eb70fc2cc8e5292cd84b089293c2c4a4c2a1 --- /dev/null +++ b/8 NGINX-Ingress-Controller/5-vs-split/vs-split.yaml @@ -0,0 +1,23 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: cafe +spec: + ingressClassName: nginx-plus + host: cafe.example.com + upstreams: + - name: coffee-v1 + service: coffee-v1-svc + port: 80 + - name: 
coffee-v2 + service: coffee-v2-svc + port: 80 + routes: + - path: /coffee + splits: + - weight: 90 + action: + pass: coffee-v1 + - weight: 10 + action: + pass: coffee-v2 \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/6-vs-condition/README.md b/8 NGINX-Ingress-Controller/6-vs-condition/README.md new file mode 100644 index 0000000000000000000000000000000000000000..878944e7438ec2e444600d53c1b174874bc34c1b --- /dev/null +++ b/8 NGINX-Ingress-Controller/6-vs-condition/README.md @@ -0,0 +1,35 @@ +# 通过VS实现灰度发布 + +实验通过VS实现条件分发和灰度发布的功能。 + +## Step 1 - 创建cafe服务 + +``` +$ kubectl create -f cafe-with-ns.yaml +``` + +## Step 2 - 部署vs + +创建vs资源: + + ``` + $ kubectl create -f vs-condition.yaml + ``` + +## Step 3 - 验证效果 + +1. 使用普通curl命令访问`cafe.example.com/coffee`能够显示访问到coffee v1 pods。 +2. 使用curl命令携带名为*version*,值为*v2*的cookie访问`cafe.example.com/coffee`能够显示访问到coffee v2 pods。 + ``` + curl --cookie 'version=v2' http://cafe.example.com/coffee + ``` +3. 使用普通curl命令访问`cafe.example.com/tea`能够显示访问到tea pods。 +4. 
使用curl命令指定方法为POST访问`cafe.example.com/tea`能够显示访问到tea-post pods。 + +## Step 4 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/6-vs-condition/cafe.yaml b/8 NGINX-Ingress-Controller/6-vs-condition/cafe.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e0751f8143c00fffa8aa789611078fade73dc6fa --- /dev/null +++ b/8 NGINX-Ingress-Controller/6-vs-condition/cafe.yaml @@ -0,0 +1,133 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee-v1 +spec: + replicas: 1 + selector: + matchLabels: + app: coffee-v1 + template: + metadata: + labels: + app: coffee-v1 + spec: + containers: + - name: coffee-v1 + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-v1-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee-v1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee-v2 +spec: + replicas: 1 + selector: + matchLabels: + app: coffee-v2 + template: + metadata: + labels: + app: coffee-v2 + spec: + containers: + - name: coffee-v2 + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-v2-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee-v2 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea-post +spec: + replicas: 1 + selector: + matchLabels: + app: tea-post + template: + metadata: + labels: + app: tea-post + spec: + containers: + - name: tea-post + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-post-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: tea-post +--- +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: tea +spec: + replicas: 1 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: tea \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/6-vs-condition/vs-condition.yaml b/8 NGINX-Ingress-Controller/6-vs-condition/vs-condition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8813fe9fd9cf3ca7c5932e51db772b33d69ba3d7 --- /dev/null +++ b/8 NGINX-Ingress-Controller/6-vs-condition/vs-condition.yaml @@ -0,0 +1,38 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: cafe +spec: + host: cafe.example.com + upstreams: + - name: tea-post + service: tea-post-svc + port: 80 + - name: tea + service: tea-svc + port: 80 + - name: coffee-v1 + service: coffee-v1-svc + port: 80 + - name: coffee-v2 + service: coffee-v2-svc + port: 80 + routes: + - path: /tea + matches: + - conditions: + - variable: $request_method + value: POST + action: + pass: tea-post + action: + pass: tea + - path: /coffee + matches: + - conditions: + - cookie: version + value: v2 + action: + pass: coffee-v2 + action: + pass: coffee-v1 \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/README.md b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5beb28c639fdf0950db0ab79fcecd7896a2d8de0 --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/README.md @@ -0,0 +1,58 @@ +# Cross-Namespace Virtual Server + +实验通过Virtual Server加Virtual Server Route的方式实现跨Namespace的服务发布,同时实现反向代理的功能。 + +## Step 1 - 创建Namespace + +创建tea, coffee, 和 cafe namespaces: +``` +$ kubectl create -f namespaces.yaml +``` + +## Step 2 - 部署cafe应用 + +1. 
创建 tea deployment 和 service 在 tea namespace: + ``` + $ kubectl create -f tea.yaml + ``` +1. 创建 coffee deployment 和 service 在 coffee namespace: + ``` + $ kubectl create -f coffee.yaml + ``` + +## Step 3 - 部署vs和vsr + +1. 创建 VirtualServerRoute 在 tea namespace: + ``` + $ kubectl create -f tea-virtual-server-route.yaml + ``` +2. 创建 VirtualServerRoute 在 coffee namespace,其中包含了反向代理功能: + ``` + $ kubectl create -f coffee-virtual-server-route.yaml + ``` +3. 部署证书密钥在 cafe namespace: + ``` + $ kubectl create -f cafe-secret.yaml + ``` +4. 创建 VirtualServer 在 cafe namespace: + ``` + $ kubectl create -f cafe-virtual-server.yaml + ``` + +## Step 4 - 验证 + +1. 使用curl命令或浏览器访问`cafe.example.com/coffee`和`cafe.example.com/tea`,能够分别显示访问到coffee pods和tea pods,证明可实现跨namespace的服务发布。 +2. 使用curl命令访问`cafe.example.com/coffee`,能够看到KIC插入的Header *My-Header*和*IC-Nginx-Version*,以及KIC对URI做了改写,从`/coffee`被改写到了`/coffee/rewrite`。 + ``` + curl -i http://cafe.example.com/coffee + ``` +3. 使用普通curl命令访问`cafe.example.com/tea`能够显示访问到tea pods。 +4. 
使用curl命令指定方法为POST访问`cafe.example.com/tea`能够显示访问到tea-post pods。 + +## Step 5 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-secret.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-secret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81da195ae4879aaaeae5e8d3112d2c92c794cd25 --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cafe-secret + namespace: cafe +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY
0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJW
EV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-virtual-server.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-virtual-server.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df871b319a180067d759f6d12adee3d8e8b2385c --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/cafe-virtual-server.yaml @@ -0,0 +1,14 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: cafe + namespace: cafe +spec: + host: cafe.example.com + tls: + secret: cafe-secret + routes: + - path: /tea + route: tea/tea + - path: /coffee + route: coffee/coffee diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/coffee-virtual-server-route.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/coffee-virtual-server-route.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fa85eeb04fbf6f102690ca9fa2ca6a4f7c0e63c --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/coffee-virtual-server-route.yaml @@ -0,0 +1,29 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: coffee + namespace: coffee +spec: + host: cafe.example.com + upstreams: + - name: coffee + service: coffee-svc + port: 80 + subroutes: + - path: /coffee + action: + proxy: + upstream: coffee + requestHeaders: + pass: true + set: + - name: My-Header + value: F5-Best + responseHeaders: + add: + - name: My-Header + value: ${http_user_agent} + - name: IC-Nginx-Version + value: ${nginx_version} + always: true + rewritePath: /coffee/rewrite \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/coffee.yaml b/8 
NGINX-Ingress-Controller/7-vsr-cross-ns/coffee.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e1f81727159ad85f138698d9262dce20db9850c --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/coffee.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coffee + namespace: coffee +spec: + replicas: 1 + selector: + matchLabels: + app: coffee + template: + metadata: + labels: + app: coffee + spec: + containers: + - name: coffee + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: coffee-svc + namespace: coffee +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: coffee \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/namespaces.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/namespaces.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e7275f71d4dc3b5ef795a5b6b86dee902709dba --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/namespaces.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cafe +--- +apiVersion: v1 +kind: Namespace +metadata: + name: tea +--- +apiVersion: v1 +kind: Namespace +metadata: + name: coffee \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea-virtual-server-route.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea-virtual-server-route.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1e7966f9e65cef9ca4ee077e36cf1c7152c10d7 --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea-virtual-server-route.yaml @@ -0,0 +1,15 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServerRoute +metadata: + name: tea + namespace: tea +spec: + host: cafe.example.com + upstreams: + - name: tea + service: tea-svc + port: 80 + subroutes: + - path: /tea + action: + pass: tea \ No newline at end of file 
diff --git a/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea.yaml b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d19efc58c2cd43407f36042d513e913f4bd2174 --- /dev/null +++ b/8 NGINX-Ingress-Controller/7-vsr-cross-ns/tea.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tea + namespace: tea +spec: + replicas: 1 + selector: + matchLabels: + app: tea + template: + metadata: + labels: + app: tea + spec: + containers: + - name: tea + image: nginxdemos/nginx-hello:plain-text + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: tea-svc + namespace: tea +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: tea \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/8-policy-ratelimit/README.md b/8 NGINX-Ingress-Controller/8-policy-ratelimit/README.md new file mode 100644 index 0000000000000000000000000000000000000000..26c26081c53672cdce7bd97ac7c54f4cd3b532c3 --- /dev/null +++ b/8 NGINX-Ingress-Controller/8-policy-ratelimit/README.md @@ -0,0 +1,39 @@ +# Rate Limit + +本实验通过KIC的CRD之一Policy,实现速率限制。 + +## Step 1 - 部署webapp + +``` +$ kubectl apply -f webapp.yaml +``` + +## Step 2 - 部署Rate Limit Policy + +创建一个Policy,使用ratelimit把每个客户端的请求速率限制到100r/s: +``` +$ kubectl apply -f rate-limit.yaml +``` + +## Step 3 - 部署vs + +``` +$ kubectl apply -f vs-ratelimit.yaml +``` + +## Step 4 - 验证 + +1. 使用curl命令或浏览器访问`webapp.example.com/`能够正常访问。 +2. 使用wrk打一定的压力,参考命令: + ``` + wrk -c100 -t2 -d30s http://webapp.example.com + ``` +3. 
打开Dashboard,观察HTTP Zones页面,可以看到Limit Req中拦截的超限流量。观察upstream中的请求速率,应该稳定在100r/s左右。 + +## Step 5 - 还原实验环境 + +清除所有资源: + + ``` + $ kubectl delete -f ./ + ``` diff --git a/8 NGINX-Ingress-Controller/8-policy-ratelimit/rate-limit.yaml b/8 NGINX-Ingress-Controller/8-policy-ratelimit/rate-limit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfaec479864c4c1d2cfdee6285a0ad86118c6d1b --- /dev/null +++ b/8 NGINX-Ingress-Controller/8-policy-ratelimit/rate-limit.yaml @@ -0,0 +1,11 @@ +apiVersion: k8s.nginx.org/v1 +kind: Policy +metadata: + name: rate-limit-policy +spec: + rateLimit: + rate: 100r/s + burst: 50 + noDelay: true + key: ${binary_remote_addr} + zoneSize: 10M \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/8-policy-ratelimit/vs-ratelimit.yaml b/8 NGINX-Ingress-Controller/8-policy-ratelimit/vs-ratelimit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..59ecbda25ed313c54f1c883c5d40c535ff496078 --- /dev/null +++ b/8 NGINX-Ingress-Controller/8-policy-ratelimit/vs-ratelimit.yaml @@ -0,0 +1,16 @@ +apiVersion: k8s.nginx.org/v1 +kind: VirtualServer +metadata: + name: webapp +spec: + host: webapp.example.com + policies: + - name: rate-limit-policy + upstreams: + - name: webapp + service: webapp-svc + port: 80 + routes: + - path: / + action: + pass: webapp diff --git a/8 NGINX-Ingress-Controller/8-policy-ratelimit/webapp.yaml b/8 NGINX-Ingress-Controller/8-policy-ratelimit/webapp.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c480058d84569465ffe32ddb8681c63b0ec2bd0 --- /dev/null +++ b/8 NGINX-Ingress-Controller/8-policy-ratelimit/webapp.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: webapp +spec: + replicas: 1 + selector: + matchLabels: + app: webapp + template: + metadata: + labels: + app: webapp + spec: + containers: + - name: webapp + image: nginxdemos/nginx-hello:plain-text + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +--- 
+apiVersion: v1 +kind: Service +metadata: + name: webapp-svc +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app: webapp \ No newline at end of file diff --git a/8 NGINX-Ingress-Controller/9-app-protect/README.md b/8 NGINX-Ingress-Controller/9-app-protect/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a79e34aff5ed19de31887ebe8f25f6abdeeccdd1 --- /dev/null +++ b/8 NGINX-Ingress-Controller/9-app-protect/README.md @@ -0,0 +1,62 @@ +# App Protect WAF + +本实验通过KIC的CRD之一Policy,以及NGINX Plus的APP Protect模块,实现WAF功能。 + +## Step 1 - 部署webapp + +``` +$ kubectl apply -f webapp.yaml +``` + +## Step 2 - 部署App Protect Policy + +创建一个app protect policy,使用最基本的waf策略: +``` +$ kubectl apply -f ap-policy.yaml +``` + +## Step 3 - 部署App Protect Log Config + +创建一个app protect logconf,配置waf的日志格式: +``` +$ kubectl apply -f ap-logconf.yaml +``` + +## Step 4 - 部署syslog服务 + +``` +$ kubectl apply -f syslog.yaml +$ kubectl get service +``` +复制其中syslog-svc的ip。 + +## Step 5 - 部署Policy + +创建一个Policy资源,调用之前创建的waf策略和日志配置,其中的`logDest`填写刚才复制的syslog-svc的ip: +``` +$ kubectl apply -f policy-waf.yaml +``` + +## Step 6 - 部署vs + +``` +$ kubectl apply -f vs-waf.yaml +``` + +## Step 7 - 验证 + +1. 使用curl命令或浏览器访问`webapp.example.com/`能够正常访问。 +2. 使用curl命令或浏览器访问`webapp.example.com/