elasticsearch
elasticsearch.yaml:

apiVersion: v1
kind: Namespace
metadata:
  name: efk
---
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: efk
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  type: ClusterIP
  clusterIP: None
  ports:
    - port: 9200
      name: rest
    - port: 9300
      name: inter-node
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: efk
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - elasticsearch
                topologyKey: kubernetes.io/hostname
      containers:
        - name: elasticsearch
          image: docker.elastic.co/elasticsearch/elasticsearch:8.11.3
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          ports:
            - containerPort: 9200
              name: rest
              protocol: TCP
            - containerPort: 9300
              name: inter-node
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name
              value: k8s-logs
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: discovery.seed_hosts # so the nodes can discover each other and elect a master: host1:port,host2:port...; the default port is transport.profiles.default.port
              value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
            - name: cluster.initial_master_nodes # the set of master-eligible nodes
              value: "es-cluster-0,es-cluster-1,es-cluster-2"
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: "xpack.security.enabled"
              value: "false"
      initContainers:
        - name: fix-permissions
          image: busybox
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          securityContext:
            privileged: true
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
        - name: increase-vm-max-map
          image: busybox
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        - name: increase-fd-ulimit
          image: busybox
          # note: this ulimit only affects this init container's own shell; modern runtimes usually set a high nofile limit already
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: elasticsearch
      spec:
        accessModes: [ "ReadWriteOnce" ]
        #storageClassName: "managed-nfs-storage" # left commented out to use the cluster's default StorageClass
        resources:
          requests:
            storage: 2Gi
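A StatefulSet creates its pods strictly in order, so es-cluster-1 only starts once es-cluster-0 is Running and Ready; if the first pod is stuck Pending (usually a PVC that cannot bind), the rest never appear. A couple of commands to follow the rollout, assuming the manifest above is saved as elasticsearch.yaml:

kubectl apply -f elasticsearch.yaml
# blocks until all 3 replicas are ready; pods come up in order 0 -> 1 -> 2
kubectl rollout status statefulset/es-cluster -n efk
# follow the startup log of the first node while waiting
kubectl logs -f es-cluster-0 -n efk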
[root@k8s-master1 efk]# kubectl apply -f elasticsearch.yaml
namespace/efk created
service/elasticsearch created
statefulset.apps/es-cluster created
[root@k8s-master1 efk]# kubectl get all -n efk -o wide
NAME               READY   STATUS    RESTARTS   AGE     IP              NODE                 NOMINATED NODE   READINESS GATES
pod/es-cluster-0   1/1     Running   0          8m9s    10.244.37.218   k8s-node1.zhch.lan   <none>           <none>
pod/es-cluster-1   1/1     Running   0          7m17s   10.244.8.222    k8s-node2.zhch.lan   <none>           <none>
pod/es-cluster-2   1/1     Running   0          6m40s   10.244.37.221   k8s-node1.zhch.lan   <none>           <none>

NAME                    TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)             AGE    SELECTOR
service/elasticsearch   ClusterIP   None         <none>        9200/TCP,9300/TCP   8m9s   app=elasticsearch

NAME                          READY   AGE    CONTAINERS      IMAGES
statefulset.apps/es-cluster   3/3     8m9s   elasticsearch   docker.elastic.co/elasticsearch/elasticsearch:8.11.3

[root@k8s-master1 efk]# kubectl get pvc -n efk
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
data-es-cluster-0   Bound    pvc-750bb7c8-7f74-4c1e-9ef8-2aac00489e77   2Gi        RWO            managed-nfs-storage   8m51s
data-es-cluster-1   Bound    pvc-dca249a3-5eb5-4be3-ba78-bdd5654ba08a   2Gi        RWO            managed-nfs-storage   7m59s
data-es-cluster-2   Bound    pvc-98dc70dc-209a-4e9a-8935-eeac11eab52c   2Gi        RWO            managed-nfs-storage   7m22s

[root@k8s-master1 efk]# kubectl get pv -n efk
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS          REASON   AGE
pvc-750bb7c8-7f74-4c1e-9ef8-2aac00489e77   2Gi        RWO            Delete           Bound    efk/data-es-cluster-0   managed-nfs-storage            8m54s
pvc-98dc70dc-209a-4e9a-8935-eeac11eab52c   2Gi        RWO            Delete           Bound    efk/data-es-cluster-2   managed-nfs-storage            7m25s
pvc-dca249a3-5eb5-4be3-ba78-bdd5654ba08a   2Gi        RWO            Delete           Bound    efk/data-es-cluster-1   managed-nfs-storage            8m2s
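Because the Service is headless (clusterIP: None) it does no load balancing; instead each pod gets a stable DNS record of the form <pod>.<service>.<namespace>.svc.cluster.local, which is exactly what discovery.seed_hosts points at. To confirm the records resolve, a sketch (the dnsutils image here is just one commonly used debugging image):

kubectl get endpoints elasticsearch -n efk
kubectl run dnsutils --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 \
  -n efk --restart=Never --rm -it -- nslookup es-cluster-0.elasticsearch.efk.svc.cluster.local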
[root@k8s-master1 efk]# kubectl run --image yauritux/busybox-curl es-test -n efk --restart=Never --rm -it /bin/sh
If you don't see a command prompt, try pressing enter.
/home # curl http://elasticsearch:9200/_cluster/health/?pretty
{
  "cluster_name" : "k8s-logs",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}
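A green status with 3 nodes means discovery and master election worked. The _cat APIs, run from the same test pod, give a more readable per-node view; the row with * in the master column is the elected master:

curl http://elasticsearch:9200/_cat/nodes?v
curl http://elasticsearch:9200/_cat/health?v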
[root@k8s-master1 ~]# kubectl exec -it es-cluster-0 -c elasticsearch -n efk -- sh
Defaulted container "elasticsearch" out of: elasticsearch, fix-permissions (init), increase-vm-max-map (init), increase-fd-ulimit (init)
sh-5.0$ env
xpack.security.enabled=false
discovery.seed_hosts=es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch
node.name=es-cluster-0
cluster.name=k8s-logs
cluster.initial_master_nodes=es-cluster-0,es-cluster-1,es-cluster-2
HOSTNAME=es-cluster-0
ES_JAVA_OPTS=-Xms512m -Xmx512m
......
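The env listing only shows that the values were injected into the container; to confirm Elasticsearch actually applied them, ask the node itself from the same shell (a sketch; with security disabled, plain HTTP on localhost works):

# heap_max_in_bytes should be about 536870912 (512m), matching ES_JAVA_OPTS
curl -s localhost:9200/_nodes/_local/jvm?pretty | grep heap_max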
fluentd
fluentd.yaml:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
  labels:
    app: fluentd
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
    verbs:
      - get
      - list
      - watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: efk
  labels:
    app: fluentd
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: fluentd
    namespace: efk
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: efk
  labels:
    app: fluentd
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccountName: fluentd
      #tolerations:
      #- key: "" # tolerate all taints
      #  operator: "Exists"
      containers:
        - name: fluentd
          image: fluent/fluentd-kubernetes-daemonset:v1.16.3-debian-elasticsearch8-1.0
          env:
            - name: FLUENT_ELASTICSEARCH_HOST
              value: "elasticsearch.efk.svc.cluster.local"
            - name: FLUENT_ELASTICSEARCH_PORT
              value: "9200"
            - name: FLUENT_ELASTICSEARCH_SCHEME
              value: "http"
            - name: FLUENTD_SYSTEMD_CONF
              value: disable
          resources:
            limits:
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 200Mi
          volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
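One caveat: the /var/lib/docker/containers mount only matters on Docker-based nodes. On containerd or CRI-O clusters the kubelet writes logs under /var/log/pods (symlinked from /var/log/containers, already covered by the /var/log mount), but the lines use the CRI log format rather than Docker's JSON, so the image's tail parser has to be switched. The daemonset image exposes an environment variable for this; a sketch of the tweak, assuming your runtime is containerd:

# switch the image's tail parser from Docker's JSON format to the CRI format
kubectl set env daemonset/fluentd -n efk FLUENT_CONTAINER_TAIL_PARSER_TYPE=cri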
[root@k8s-master1 efk]# kubectl apply -f fluentd.yaml
clusterrole.rbac.authorization.k8s.io/fluentd created
serviceaccount/fluentd created
clusterrolebinding.rbac.authorization.k8s.io/fluentd created
daemonset.apps/fluentd created
[root@k8s-master1 efk]# kubectl get all -n efk -o wide
NAME                READY   STATUS    RESTARTS      AGE    IP              NODE                   NOMINATED NODE   READINESS GATES
pod/es-cluster-0    1/1     Running   1 (18m ago)   122m   10.244.37.250   k8s-node1.zhch.lan     <none>           <none>
pod/es-cluster-1    1/1     Running   1 (18m ago)   46m    10.244.37.251   k8s-node1.zhch.lan     <none>           <none>
pod/es-cluster-2    1/1     Running   1 (18m ago)   120m   10.244.37.252   k8s-node1.zhch.lan     <none>           <none>
pod/fluentd-r9b8j   1/1     Running   0             18s    10.244.37.254   k8s-node1.zhch.lan     <none>           <none>
pod/fluentd-v6c9z   1/1     Running   0             18s    10.244.8.235    k8s-node2.zhch.lan     <none>           <none>
pod/fluentd-wv5rp   1/1     Running   0             18s    10.244.21.27    k8s-master1.zhch.lan   <none>           <none>

NAME                    TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)             AGE    SELECTOR
service/elasticsearch   ClusterIP   None         <none>        9200/TCP,9300/TCP   122m   app=elasticsearch

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE   CONTAINERS   IMAGES                                                                   SELECTOR
daemonset.apps/fluentd   3         3         3       3            3           <none>          18s   fluentd      fluent/fluentd-kubernetes-daemonset:v1.16.3-debian-elasticsearch8-1.0   app=fluentd

NAME                          READY   AGE    CONTAINERS      IMAGES
statefulset.apps/es-cluster   3/3     122m   elasticsearch   docker.elastic.co/elasticsearch/elasticsearch:8.11.3
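With the image's default settings fluentd writes to Elasticsearch in logstash format, so date-stamped logstash-YYYY.MM.DD indices should appear within a minute or two. From the same busybox-curl test pod used earlier:

curl http://elasticsearch:9200/_cat/indices?v

If nothing shows up, kubectl logs daemonset/fluentd -n efk usually points at the cause (connection refused, parse errors, and so on).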
kibana
[root@k8s-master1 efk]# kubectl create secret tls zhch.lan -n efk \
--cert=/home/zc/cert/zhch.lan.crt \
--key=/home/zc/cert/zhch.lan.key
secret/zhch.lan created
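If there is no certificate for the domain yet, a self-signed one is enough for a lab setup. A minimal sketch with OpenSSL 1.1.1+ (paths match the ones used above; the wildcard SAN also covers kibana.zhch.lan):

openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout /home/zc/cert/zhch.lan.key -out /home/zc/cert/zhch.lan.crt \
  -subj "/CN=*.zhch.lan" \
  -addext "subjectAltName=DNS:zhch.lan,DNS:*.zhch.lan"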
[root@k8s-master1 ~]# kubectl get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.108.29.171   192.168.99.240   80:31734/TCP,443:32740/TCP   9d
ingress-nginx-controller-admission   ClusterIP      10.103.114.33   <none>           443/TCP                      9d
# Configure DNS so that kibana.zhch.lan resolves to 192.168.99.240
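Without a real DNS server, an entry in /etc/hosts on the client machine does the same job:

echo "192.168.99.240 kibana.zhch.lan" >> /etc/hosts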
kibana.yaml:

apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: efk
spec:
  selector:
    app: kibana
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: 5601
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
        - name: kibana
          image: docker.elastic.co/kibana/kibana:8.11.3
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          env:
            - name: ELASTICSEARCH_HOSTS # Kibana 7+ reads ELASTICSEARCH_HOSTS; the old ELASTICSEARCH_URL is ignored
              value: http://elasticsearch:9200
          ports:
            - containerPort: 5601
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-kibana
  namespace: efk
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - kibana.zhch.lan
      secretName: zhch.lan
  rules:
    - host: kibana.zhch.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana
                port:
                  number: 8080
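Note the env entry: Kibana 7+ reads ELASTICSEARCH_HOSTS (the pre-7.0 ELASTICSEARCH_URL is silently ignored, leaving Kibana pointed at localhost:9200). Once the pod is up, the status API is a quicker sanity check than opening the UI, for example through a port-forward from the workstation:

kubectl port-forward svc/kibana 8080:8080 -n efk &
# the overall level should read "available" once Kibana can reach Elasticsearch
curl -s http://localhost:8080/api/status | grep -o '"level":"[a-z]*"' | head -n 1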
[root@k8s-master1 efk]# kubectl apply -f kibana.yaml
service/kibana created
deployment.apps/kibana created
ingress.networking.k8s.io/ingress-kibana created
[root@k8s-master1 efk]# kubectl get all -n efk
NAME                          READY   STATUS    RESTARTS      AGE
pod/es-cluster-0              1/1     Running   1 (20m ago)   123m
pod/es-cluster-1              1/1     Running   1 (20m ago)   48m
pod/es-cluster-2              1/1     Running   1 (20m ago)   122m
pod/fluentd-r9b8j             1/1     Running   0             96s
pod/fluentd-v6c9z             1/1     Running   0             96s
pod/fluentd-wv5rp             1/1     Running   0             96s
pod/kibana-847b695974-nh6ng   1/1     Running   0             22s

NAME                    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
service/elasticsearch   ClusterIP   None            <none>        9200/TCP,9300/TCP   123m
service/kibana          ClusterIP   10.102.68.238   <none>        8080/TCP            22s

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/fluentd   3         3         3       3            3           <none>          96s

NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/kibana   1/1     1            1           22s

NAME                                DESIRED   CURRENT   READY   AGE
replicaset.apps/kibana-847b695974   1         1         1       22s

NAME                          READY   AGE
statefulset.apps/es-cluster   3/3     123m

[root@k8s-master1 efk]# kubectl get ing -n efk
NAME             CLASS   HOSTS             ADDRESS          PORTS     AGE
ingress-kibana   nginx   kibana.zhch.lan   192.168.99.240   80, 443   88s
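A final end-to-end check through the ingress (-k skips verification of the self-signed certificate):

curl -k -s -o /dev/null -w '%{http_code}\n' https://kibana.zhch.lan/
# expect a 2xx/3xx code once Kibana is ready

From there, open https://kibana.zhch.lan in a browser and create a data view matching logstash-* to browse the collected container logs in Discover.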