System initialization

Configure hostname resolution

[root@master-01 ~]# cat >>/etc/hosts <<EOF
10.166.33.120 master-01
10.166.33.121 master-02
10.166.33.122 master-03
EOF
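The same hosts entries are needed on master-02 and master-03 as well. Assuming root SSH access between the machines is already set up, one way to push them out is:

[root@master-01 ~]# scp /etc/hosts root@10.166.33.121:/etc/hosts
[root@master-01 ~]# scp /etc/hosts root@10.166.33.122:/etc/hosts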

Install the Base repository

[root@master-01 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

Install dependency packages

[root@master-01 ~]# yum install -y conntrack ntpdate ntp jq iptables curl sysstat libseccomp wget vim net-tools bash-completion

Configure the firewall

[root@master-01 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master-01 ~]# yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

Disable SELinux and swap

[root@master-01 ~]# swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
[root@master-01 ~]# setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
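To confirm both changes took effect (output will vary per host), a quick check could be:

[root@master-01 ~]# free -m | grep -i swap   # the Swap line should show 0 total
[root@master-01 ~]# getenforce               # Permissive now, Disabled after the next reboot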

Tune kernel parameters

[root@master-01 ~]# cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0        # avoid using swap; allow it only when the system is close to OOM
vm.overcommit_memory=1 # do not check whether enough physical memory is available
vm.panic_on_oom=0      # do not panic on OOM; let the OOM killer kill the most memory-hungry process
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
[root@master-01 ~]# cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
[root@master-01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
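The two net.bridge.bridge-nf-call-* keys only exist once the br_netfilter module is loaded (it is loaded again in the IPVS section below). If sysctl -p complains about them, load the module first and re-apply, for example:

[root@master-01 ~]# modprobe br_netfilter
[root@master-01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
[root@master-01 ~]# sysctl net.bridge.bridge-nf-call-iptables   # expect: ... = 1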

Configure system time (NTP)

[root@master-01 ~]# timedatectl set-timezone Asia/Shanghai
Master node configuration
[root@master-01 ~]# vim /etc/ntp.conf
restrict 127.0.0.1
restrict ::1
restrict 10.166.33.120 nomodify notrap nopeer noquery # IP of the current node
restrict 10.166.33.254 mask 255.255.255.0 nomodify notrap # gateway and netmask of the subnet the cluster lives on
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 127.127.1.0
fudge 127.127.1.0 stratum 10
Secondary node configuration
[root@master-02 ~]# vim /etc/ntp.conf
restrict 127.0.0.1
restrict ::1
restrict 10.166.33.121 nomodify notrap nopeer noquery
restrict 10.166.33.254 mask 255.255.255.0 nomodify notrap
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 10.166.33.120
fudge 10.166.33.120 stratum 10

[root@master-01 ~]# systemctl start ntpd
[root@master-01 ~]# systemctl enable ntpd
[root@master-01 ~]# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 5 l 30 64 1 0.000 0.000 0.000
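Start and enable ntpd the same way on master-02 and master-03; it can take a few polling intervals before they lock onto master-01. A quick check (the 10.166.33.120 line should eventually be marked with a leading '*'):

[root@master-02 ~]# systemctl start ntpd && systemctl enable ntpd
[root@master-02 ~]# ntpq -p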

Configure IPVS

[root@master-01 ~]# yum -y install ipvsadm ipset
[root@master-01 ~]# modprobe br_netfilter
[root@master-01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@master-01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
nf_conntrack 133095 3 ip_vs,xt_conntrack,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
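Note: on kernels 4.19 and newer the nf_conntrack_ipv4 module was merged into nf_conntrack, so on such systems (not the stock CentOS 7 3.10 kernel used here) the last modprobe line in ipvs.modules would need to read roughly:

modprobe -- nf_conntrack   # replaces nf_conntrack_ipv4 on kernel >= 4.19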

Deploy the cluster

Install Docker

[root@master-01 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
[root@master-01 ~]# sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@master-01 ~]# yum install -y docker-ce
[root@master-01 ~]# mkdir /etc/docker
[root@master-01 ~]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://djbk5ums.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}
[root@master-01 ~]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
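Since daemon.json switches Docker to the systemd cgroup driver (which is what the kubelet will use here), it is worth confirming the setting took effect; the expected output is roughly:

[root@master-01 ~]# docker info 2>/dev/null | grep -i "cgroup driver"
 Cgroup Driver: systemd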

Install keepalived

[root@master-01 ~]# yum install -y keepalived
[root@master-01 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    script_user root root
}
vrrp_script check {
    script "/etc/keepalived/check.sh"
    interval 2
    timeout 1
    rise 1
    fall 3
    user root root
}
vrrp_instance master {
    state MASTER
    interface eth0
    virtual_router_id 120
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456789
    }
    track_script {
        check
    }
    virtual_ipaddress {
        10.166.33.125
    }
    unicast_src_ip 10.166.33.120
    unicast_peer {
        10.166.33.121
        10.166.33.122
    }
}
[root@master-02 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    script_user root
    router_id control-plane
    vrrp_garp_master_delay 2
    vrrp_garp_master_repeat 3
    vrrp_garp_master_refresh 30
    vrrp_garp_interval 0.001
    vrrp_gna_interval 0.000001
    vrrp_no_swap
    checker_no_swap
}
vrrp_script check {
    script "/etc/keepalived/check.sh"
    interval 2
    timeout 1
    rise 1
    fall 3
    user root root
}
vrrp_instance master {
    state BACKUP
    interface eth0
    virtual_router_id 120
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456789
    }
    track_script {
        check
    }
    virtual_ipaddress {
        10.166.33.125
    }
    unicast_src_ip 10.166.33.121
    unicast_peer {
        10.166.33.120
        10.166.33.122
    }
}
[root@master-03 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    script_user root
    router_id control-plane
    vrrp_garp_master_delay 2
    vrrp_garp_master_repeat 3
    vrrp_garp_master_refresh 30
    vrrp_garp_interval 0.001
    vrrp_gna_interval 0.000001
    vrrp_no_swap
    checker_no_swap
}
vrrp_script check {
    script "/etc/keepalived/check.sh"
    interval 2
    timeout 1
    rise 1
    fall 3
    user root root
}
vrrp_instance master {
    state BACKUP
    interface eth0
    virtual_router_id 120
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456789
    }
    track_script {
        check
    }
    virtual_ipaddress {
        10.166.33.125
    }
    unicast_src_ip 10.166.33.122
    unicast_peer {
        10.166.33.120
        10.166.33.121
    }
}
[root@master-01 ~]# cat /etc/keepalived/check.sh
#!/usr/bin/env bash
ports="6443" # 集群没部署之前可以设置22端口
for x in $ports; do
</dev/tcp/127.0.0.1/$x
if [[ $? -eq 0 ]]; then
echo "probe $x successfully"
else
echo "probe $x failed"
exit 1
fi
done
[root@master-01 ~]# chmod +x /etc/keepalived/check.sh
[root@master-01 ~]# systemctl start keepalived.service
[root@master-01 ~]# systemctl enable keepalived.service
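keepalived has to be installed, started and enabled on all three masters with their respective configurations. Once it is running, the VIP should be held by the node with the highest priority (master-01 here); a quick way to confirm is the following, which should print something like "inet 10.166.33.125/32 scope global eth0":

[root@master-01 ~]# ip addr show eth0 | grep 10.166.33.125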

Install kubeadm

[root@master-01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@master-01 ~]# yum install kubelet-1.19.3 kubeadm-1.19.3 kubectl-1.19.3 -y
[root@master-01 ~]# systemctl enable kubelet
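A quick sanity check that the pinned 1.19.3 packages were installed (and not a newer version pulled in by the repository) could be:

[root@master-01 ~]# kubeadm version -o short
v1.19.3
[root@master-01 ~]# kubelet --version
Kubernetes v1.19.3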

Initialize the master

Initialize the configuration file

[root@master-01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
[root@master-01 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.166.33.120 # IP of the current master
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master-01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: "10.166.33.125:6443" # preferably the load balancer IP+port; here it is the keepalived VIP
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # switch to the Aliyun image mirror
kind: ClusterConfiguration
kubernetesVersion: v1.19.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
--- # enable IPVS mode for kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
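Optionally, the control-plane images can be pulled ahead of time with the same config file; this makes the actual init faster and surfaces registry problems early:

[root@master-01 ~]# kubeadm config images pull --config kubeadm-config.yaml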

Initialize the node

[root@master-01 ~]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

kubeadm join 10.166.33.125:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b4ee43acb3a77e9d83ccf144b02eda021226846eff9e54dc6f664abbf3b6bf72 \
--control-plane --certificate-key efa140f2ca6cf179426cea52e5093f345e3a5a25ce96dd550f3ebd984134f200

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.166.33.125:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b4ee43acb3a77e9d83ccf144b02eda021226846eff9e54dc6f664abbf3b6bf72

[root@master-01 ~]# mkdir -p $HOME/.kube
[root@master-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

Install the network plugin

[root@master-01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@master-01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-01 Ready master 3m v1.19.3
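The node only turns Ready once the flannel and CoreDNS pods on it are running; if it stays NotReady, listing the kube-system pods usually narrows the problem down, for example:

[root@master-01 ~]# kubectl get pods -n kube-system -o wide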

Join the remaining masters to the cluster

[root@master-02 ~]# kubeadm join 10.166.33.125:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b4ee43acb3a77e9d83ccf144b02eda021226846eff9e54dc6f664abbf3b6bf72 \
--control-plane --certificate-key efa140f2ca6cf179426cea52e5093f345e3a5a25ce96dd550f3ebd984134f200
[root@master-02 ~]# mkdir -p $HOME/.kube
[root@master-02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master-02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-01 Ready master 12m v1.19.3
master-02 Ready master 9m21s v1.19.3
master-03 Ready master 8m26s v1.19.3
[root@master-02 ~]# kubectl get po -nkube-system
NAME READY STATUS RESTARTS AGE
coredns-6d56c8448f-865wm 1/1 Running 0 15m
coredns-6d56c8448f-xlm9h 1/1 Running 0 15m
etcd-master-01 1/1 Running 0 15m
etcd-master-02 1/1 Running 0 12m
etcd-master-03 1/1 Running 0 11m
kube-apiserver-master-01 1/1 Running 0 15m
kube-apiserver-master-02 1/1 Running 0 12m
kube-apiserver-master-03 1/1 Running 0 11m
kube-controller-manager-master-01 1/1 Running 1 15m
kube-controller-manager-master-02 1/1 Running 0 12m
kube-controller-manager-master-03 1/1 Running 0 11m
kube-flannel-ds-7tcbq 1/1 Running 0 9m35s
kube-flannel-ds-cj4rm 1/1 Running 0 6m39s
kube-flannel-ds-h4pb7 1/1 Running 0 9m35s
kube-proxy-4f2rr 1/1 Running 0 15m
kube-proxy-6k7sc 1/1 Running 0 11m
kube-proxy-b8shv 1/1 Running 0 12m
kube-scheduler-master-01 1/1 Running 1 15m
kube-scheduler-master-02 1/1 Running 0 12m
kube-scheduler-master-03 1/1 Running 0 11m
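To confirm kube-proxy really came up in IPVS mode, the virtual servers backing the kubernetes Service should be visible with ipvsadm; the output will look roughly like this (addresses and counters will differ):

[root@master-01 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 10.166.33.120:6443           Masq    1      0          0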