Install Ingress-Nginx
- Note the version compatibility between Ingress-Nginx and Kubernetes: each controller release supports a specific range of Kubernetes versions, listed in the project's README (see the quick check below).
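Before downloading a manifest, it helps to confirm what the cluster is actually running; a minimal check, not part of the original session:

# Show the server version and per-node kubelet versions; compare against the ingress-nginx compatibility table
kubectl version
kubectl get nodes -o wide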
# Download
[root@k8s-master1 ~]# cd /opt/kubernetes/
[root@k8s-master1 kubernetes]# wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.4/deploy/static/provider/cloud/deploy.yaml -O ingress-nginx-v1.9.4.yaml
# Replace the images: pull from the Aliyun mirror instead of registry.k8s.io,
# and drop the @sha256 digest pins, which may not match the mirrored copies
[root@k8s-master1 kubernetes]# grep image ingress-nginx-v1.9.4.yaml
image: registry.k8s.io/ingress-nginx/controller:v1.9.4@sha256:5b161f051d017e55d358435f295f5e9a297e66158f136321d9b04520ec6c48a3
imagePullPolicy: IfNotPresent
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0@sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80
imagePullPolicy: IfNotPresent
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0@sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80
imagePullPolicy: IfNotPresent
[root@k8s-master1 kubernetes]# sed -i 's/registry.k8s.io\/ingress-nginx\/controller/registry.cn-hangzhou.aliyuncs.com\/google_containers\/nginx-ingress-controller/g' ingress-nginx-v1.9.4.yaml
[root@k8s-master1 kubernetes]# sed -i 's/registry.k8s.io\/ingress-nginx/registry.cn-hangzhou.aliyuncs.com\/google_containers/g' ingress-nginx-v1.9.4.yaml
[root@k8s-master1 kubernetes]# sed -i 's/@sha256:5b161f051d017e55d358435f295f5e9a297e66158f136321d9b04520ec6c48a3//g' ingress-nginx-v1.9.4.yaml
[root@k8s-master1 kubernetes]# sed -i 's/@sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80//g' ingress-nginx-v1.9.4.yaml
[root@k8s-master1 kubernetes]# grep image ingress-nginx-v1.9.4.yaml
image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.9.4
imagePullPolicy: IfNotPresent
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v20231011-8b53cabe0
imagePullPolicy: IfNotPresent
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v20231011-8b53cabe0
imagePullPolicy: IfNotPresent
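If the nodes run containerd, the mirrored tags can be pre-pulled to confirm they resolve before applying the manifest; a sketch assuming containerd as the runtime, run directly on a node (not part of the original session):

# Pre-pull the mirrored images into containerd's k8s.io namespace; adjust for your runtime
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.9.4
ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v20231011-8b53cabe0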
# Install
[root@k8s-master1 kubernetes]# kubectl apply -f ingress-nginx-v1.9.4.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
serviceaccount/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
configmap/ingress-nginx-controller created
service/ingress-nginx-controller created
service/ingress-nginx-controller-admission created
deployment.apps/ingress-nginx-controller created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
ingressclass.networking.k8s.io/nginx created
networkpolicy.networking.k8s.io/ingress-nginx-admission created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
[root@k8s-master1 kubernetes]# kubectl get all -n ingress-nginx -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/ingress-nginx-admission-create-k7lbn 0/1 Completed 0 23s 10.244.8.205 k8s-node2.zhch.lan <none> <none>
pod/ingress-nginx-admission-patch-nx5wj 0/1 Completed 0 23s 10.244.8.203 k8s-node2.zhch.lan <none> <none>
pod/ingress-nginx-controller-6fd779f87f-dfnkv 1/1 Running 0 23s 10.244.37.253 k8s-node1.zhch.lan <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/ingress-nginx-controller LoadBalancer 10.108.29.171 <pending> 80:31734/TCP,443:32740/TCP 23s app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
service/ingress-nginx-controller-admission ClusterIP 10.103.114.33 <none> 443/TCP 23s app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
# The Type of service/ingress-nginx-controller is LoadBalancer, and its EXTERNAL-IP stays in the pending state.
# Kubernetes does not ship a network load balancer implementation (LoadBalancer-type Services) for bare-metal clusters.
# The network LB implementations Kubernetes does ship are all glue code that calls out to various IaaS platforms (GCP, AWS, Azure, etc.).
# If you are not running on a supported IaaS platform, LoadBalancers remain "pending" indefinitely after creation.
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/ingress-nginx-controller 1/1 1 1 23s controller registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.9.4 app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/ingress-nginx-controller-6fd779f87f 1 1 1 23s controller registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.9.4 app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx,pod-template-hash=6fd779f87f
NAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR
job.batch/ingress-nginx-admission-create 1/1 4s 23s create registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v20231011-8b53cabe0 controller-uid=45598a5e-0295-4651-aad2-0e94575005cc
job.batch/ingress-nginx-admission-patch 1/1 4s 23s patch registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v20231011-8b53cabe0 controller-uid=f36bdad5-c298-47cd-9849-e465da4c023c
[root@k8s-master1 kubernetes]# kubectl get ep -n ingress-nginx
NAME ENDPOINTS AGE
ingress-nginx-controller 10.244.37.253:443,10.244.37.253:80 31m
ingress-nginx-controller-admission 10.244.37.253:8443 31m
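Even while EXTERNAL-IP is pending, the controller is already reachable through the NodePorts shown above (80:31734 and 443:32740 here); a hypothetical spot check against a node IP from this cluster:

# Expect nginx's 404 status code for a request that matches no Ingress host
curl -s -o /dev/null -w '%{http_code}\n' http://192.168.99.204:31734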
MetalLB
- MetalLB resolves the problem of service/ingress-nginx-controller's EXTERNAL-IP staying pending.
- If kube-proxy runs in IPVS mode, its configuration must be changed to enable strict ARP.
[root@k8s-master1 kubernetes]# kubectl edit configmap -n kube-system kube-proxy
    strictARP: true   # under the ipvs section, change strictARP from false to true
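The same change can be scripted instead of edited by hand; this is the non-interactive variant suggested in the MetalLB documentation, followed by a kube-proxy restart so the setting takes effect (assumes a kubeadm-style kube-proxy DaemonSet):

# Flip strictARP to true in the kube-proxy ConfigMap and re-apply it
kubectl get configmap kube-proxy -n kube-system -o yaml | \
  sed -e "s/strictARP: false/strictARP: true/" | \
  kubectl apply -f - -n kube-system
# Restart kube-proxy so the new setting is picked up
kubectl rollout restart daemonset kube-proxy -n kube-system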
- Install MetalLB from the YAML manifest
[root@k8s-master1 ~]# cd /opt/kubernetes/
[root@k8s-master1 kubernetes]# wget https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
[root@k8s-master1 kubernetes]# kubectl apply -f metallb-native.yaml
namespace/metallb-system created
customresourcedefinition.apiextensions.k8s.io/addresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bfdprofiles.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgpadvertisements.metallb.io created
customresourcedefinition.apiextensions.k8s.io/bgppeers.metallb.io created
customresourcedefinition.apiextensions.k8s.io/communities.metallb.io created
customresourcedefinition.apiextensions.k8s.io/ipaddresspools.metallb.io created
customresourcedefinition.apiextensions.k8s.io/l2advertisements.metallb.io created
serviceaccount/controller created
serviceaccount/speaker created
role.rbac.authorization.k8s.io/controller created
role.rbac.authorization.k8s.io/pod-lister created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/controller created
rolebinding.rbac.authorization.k8s.io/pod-lister created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
configmap/metallb-excludel2 created
secret/webhook-server-cert created
service/webhook-service created
deployment.apps/controller created
daemonset.apps/speaker created
validatingwebhookconfiguration.admissionregistration.k8s.io/metallb-webhook-configuration created
[root@k8s-master1 kubernetes]# kubectl get all -n metallb-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/controller-78c7fc7f99-sfrx7 1/1 Running 0 70s 10.244.8.204 k8s-node2.zhch.lan <none> <none>
pod/speaker-56jgv 1/1 Running 0 70s 192.168.99.201 k8s-master1.zhch.lan <none> <none>
pod/speaker-5vpjl 1/1 Running 0 70s 192.168.99.204 k8s-node1.zhch.lan <none> <none>
pod/speaker-67gb7 1/1 Running 0 70s 192.168.99.205 k8s-node2.zhch.lan <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/webhook-service ClusterIP 10.97.150.53 <none> 443/TCP 70s component=controller
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
daemonset.apps/speaker 3 3 3 3 3 kubernetes.io/os=linux 70s speaker quay.io/metallb/speaker:v0.13.12 app=metallb,component=speaker
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/controller 1/1 1 1 70s controller quay.io/metallb/controller:v0.13.12 app=metallb,component=controller
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/controller-78c7fc7f99 1 1 1 70s controller quay.io/metallb/controller:v0.13.12 app=metallb,component=controller,pod-template-hash=78c7fc7f99
- Configuration
- MetalLB offers a Layer 2 mode and a BGP mode; configuring either one is enough.
- Because BGP mode places requirements on the router, Layer 2 mode is recommended for testing.
- Create an IPAddressPool
[root@k8s-master1 kubernetes]# kubectl api-versions | grep metallb
metallb.io/v1alpha1
metallb.io/v1beta1
metallb.io/v1beta2
[root@k8s-master1 kubernetes]# cat <<EOF > IPAddressPool.yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:                        # assignable IP addresses; multiple ranges may be listed, IPv4 and IPv6
  - 192.168.99.240-192.168.99.250
EOF
[root@k8s-master1 kubernetes]# kubectl apply -f IPAddressPool.yaml
ipaddresspool.metallb.io/first-pool created
- Create an L2Advertisement and associate it with the IPAddressPool
- L2 mode does not require the IP to be bound to a network interface on a worker node. It works by answering ARP requests on the local network, handing the node's MAC address to clients.
[root@k8s-master1 kubernetes]# cat <<EOF > L2Advertisement.yaml
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: assign-ip
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool                      # the IP address pool created in the previous step, referenced by name
EOF
[root@k8s-master1 kubernetes]# kubectl apply -f L2Advertisement.yaml
l2advertisement.metallb.io/assign-ip created
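To confirm both custom resources were accepted, a quick listing (not part of the original session):

kubectl get ipaddresspool,l2advertisement -n metallb-system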
- How it works
- Address allocation: when a LoadBalancer Service is created, MetalLB assigns it an IP address drawn from the pre-configured address pool; when the Service is deleted, the address returns to the pool.
- External announcement: once an IP is assigned, the network outside the cluster must learn that the address exists. MetalLB does this with standard network protocols: ARP, NDP, or BGP (see the sketch below).
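The Layer 2 announcement can be observed from any other host on the same subnet; a sketch assuming arping is installed and 192.168.99.240 has been assigned:

# The ARP reply should carry the MAC address of the node whose speaker won the election
arping -c 3 192.168.99.240
ip neigh show 192.168.99.240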
- Verify
[root@k8s-master1 kubernetes]# kubectl get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.108.29.171   192.168.99.240   80:31734/TCP,443:32740/TCP   14h
ingress-nginx-controller-admission   ClusterIP      10.103.114.33   <none>           443/TCP                      14h
[root@k8s-master1 kubernetes]# curl 192.168.99.240
<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx</center>
</body>
</html>
HTTPS
- Self-signed certificate
[zc@Nginx-1 ~]$ sudo find / -name openssl.cnf
/etc/pki/tls/openssl.cnf
/etc/ssl/openssl.cnf
/usr/lib/dracut/modules.d/01fips/openssl.cnf
[zc@Nginx-1 ~]$ mkdir cert
[zc@Nginx-1 ~]$ cp /etc/pki/tls/openssl.cnf cert/
[zc@Nginx-1 ~]$ openssl genrsa -out cert/zhch.lan.key 4096
[zc@Nginx-1 ~]$ openssl req -new -x509 -key cert/zhch.lan.key -out cert/zhch.lan.crt -days 36500 \
-subj "/C=CN/ST=Guangdong/L=Guangzhou/O=zhch/OU=zhch/CN=zhch.lan" \
-extensions SAN \
-config <(cat cert/openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS.1:zhch.lan,DNS.2:*.zhch.lan"))
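To double-check that the SAN extension actually made it into the certificate, it can be dumped back out (a verification step, not in the original session):

# Both zhch.lan and *.zhch.lan should appear under Subject Alternative Name
openssl x509 -in cert/zhch.lan.crt -noout -text | grep -A1 'Subject Alternative Name'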
- Create the Secret
[root@k8s-master1 kubernetes]# kubectl create secret tls zhch.lan --cert=/home/zc/cert/zhch.lan.crt --key=/home/zc/cert/zhch.lan.key
secret/zhch.lan created
[root@k8s-master1 kubernetes]# kubectl get secrets
NAME TYPE DATA AGE
zhch.lan kubernetes.io/tls 2 11s
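A kubernetes.io/tls Secret stores the PEM data base64-encoded under the fixed keys tls.crt and tls.key; one hypothetical way to inspect what was stored:

# Decode the certificate from the Secret and print its subject and expiry
kubectl get secret zhch.lan -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -enddate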
External Access via Ingress
- Prepare the Pods and the Service
[root@k8s-master1 test]# vim deploy-vito-web.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: svc-vito-web
spec:
  selector:            # route service traffic to all Pods matching this selector
    app: deploy-vito-tomcat
  type: ClusterIP
  ports:
  - port: 80           # port the Service exposes on the cluster
    targetPort: 8080   # port on the Pod
    protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-vito-web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: deploy-vito-tomcat
  template:
    metadata:
      labels:
        app: deploy-vito-tomcat
    spec:
      containers:
      - image: tomcat:10.1.17-jre21
        imagePullPolicy: IfNotPresent
        name: deploy-vito-web-c-tomcat
[root@k8s-master1 test]# kubectl create -f deploy-vito-web.yaml
service/svc-vito-web created
deployment.apps/deploy-vito-web created
[root@k8s-master1 test]# kubectl get po
NAME READY STATUS RESTARTS AGE
deploy-vito-web-6dccb6f6d9-klthq 1/1 Running 0 5m22s
deploy-vito-web-6dccb6f6d9-p6qwc 1/1 Running 0 5m22s
# kubectl exec -it podName -c containerName -n namespace -- command
[root@k8s-master1 test]# kubectl exec -it deploy-vito-web-6dccb6f6d9-klthq -- bash
root@deploy-vito-web-6dccb6f6d9-klthq:/usr/local/tomcat# cp -r webapps.dist/* webapps/
root@deploy-vito-web-6dccb6f6d9-klthq:/usr/local/tomcat# exit
exit
[root@k8s-master1 test]# kubectl exec -it deploy-vito-web-6dccb6f6d9-p6qwc -- bash
root@deploy-vito-web-6dccb6f6d9-p6qwc:/usr/local/tomcat# cp -r webapps.dist/* webapps/
root@deploy-vito-web-6dccb6f6d9-p6qwc:/usr/local/tomcat# exit
exit
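Before creating the Ingress, it is worth confirming the Service has endpoints and answers inside the cluster; a sketch using a throwaway curl pod (curlimages/curl is an assumption, any image with curl works):

# The Service should list both Tomcat pods as endpoints on port 8080
kubectl get ep svc-vito-web
# One-off in-cluster probe; expect HTTP 200 from Tomcat's default page
kubectl run tmp-curl --rm -it --restart=Never --image=curlimages/curl -- curl -s -o /dev/null -w '%{http_code}\n' http://svc-vito-web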
- Create the Ingress
[root@k8s-master1 test]# vim ingress-k8s-demo-nginx.zhch.lan.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-k8s-demo-nginx.zhch.lan
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - k8s-demo-nginx.zhch.lan
    secretName: zhch.lan
  rules:
  - host: k8s-demo-nginx.zhch.lan
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: svc-vito-web
            port:
              number: 80
[root@k8s-master1 test]# kubectl create -f ingress-k8s-demo-nginx.zhch.lan.yaml
ingress.networking.k8s.io/ingress-k8s-demo-nginx.zhch.lan created
[root@k8s-master1 test]# kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress-k8s-demo-nginx.zhch.lan nginx k8s-demo-nginx.zhch.lan 192.168.99.240 80, 443 6s
- Configure DNS
- Resolve the domain k8s-demo-nginx.zhch.lan to 192.168.99.240, the EXTERNAL-IP of service/ingress-nginx-controller (or pin the lookup just for testing, as shown below).
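If no DNS server is available, the lookup can be pinned for a single request with curl's --resolve option (a test shortcut, not in the original):

# Map the host name to the LoadBalancer IP for this request only
curl -k --resolve k8s-demo-nginx.zhch.lan:443:192.168.99.240 https://k8s-demo-nginx.zhch.lan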
- Verify
- Open https://k8s-demo-nginx.zhch.lan/ in a browser
- Or use curl -k https://k8s-demo-nginx.zhch.lan
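To confirm the controller serves the certificate from the zhch.lan Secret rather than its built-in fake certificate, the TLS handshake can be inspected with SNI set to the Ingress host (a verification sketch):

# The printed subject should be the self-signed zhch.lan certificate created earlier
openssl s_client -connect 192.168.99.240:443 -servername k8s-demo-nginx.zhch.lan </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer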