Foreword
1. Prerequisites
2. Configuration steps
2.1 Hostname configuration
After creating the virtual machines, set each host's hostname according to its IP address:
# 172.31.0.10
hostnamectl set-hostname ha1
# 172.31.0.11
hostnamectl set-hostname ha2
# 172.31.0.12
hostnamectl set-hostname k8s-master1
# 172.31.0.13
hostnamectl set-hostname k8s-master2
# 172.31.0.14
hostnamectl set-hostname k8s-master3
# 172.31.0.15
hostnamectl set-hostname k8s-worker1
2.2 hosts file configuration
Run on all hosts:
cat >> /etc/hosts << EOF
172.31.0.10 ha1
172.31.0.11 ha2
172.31.0.12 k8s-master1
172.31.0.13 k8s-master2
172.31.0.14 k8s-master3
172.31.0.15 k8s-worker1
EOF
2.3 Disable the firewall
Run on all hosts:
systemctl stop firewalld
systemctl stop iptables
systemctl disable firewalld
systemctl disable iptables
firewall-cmd --state
2.4 Disable SELinux
Run on all hosts:
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
sestatus
2.5 Swap configuration
Run on all hosts:
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p
2.6 System time synchronization
yum -y install ntpdate
crontab -e
0 */1 * * * ntpdate time1.aliyun.com
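The cron entry above is normally added interactively with crontab -e. As a non-interactive alternative (a minimal sketch; swap in whatever NTP server your environment uses), the same job can be appended and a first sync triggered right away:
# Append the hourly ntpdate job without opening an editor
(crontab -l 2>/dev/null; echo "0 */1 * * * ntpdate time1.aliyun.com") | crontab -
# Sync once immediately
ntpdate time1.aliyun.com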
2.7 Host system tuning
ulimit -SHn 65535
cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
2.8 Kernel upgrade
2.9 Install the ipvs management tools and load the modules
The HA load-balancer hosts can skip this step; configure the ipvs modules on all Kubernetes nodes. On kernel 4.19+ the nf_conntrack_ipv4 module has been renamed to nf_conntrack; on 4.18 and earlier keep using nf_conntrack_ipv4.
yum -y install ipvsadm ipset sysstat conntrack libseccomp
# Create /etc/modules-load.d/ipvs.conf with the following content
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
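The file above is only picked up by systemd-modules-load at boot. To load the modules immediately without rebooting, something like the following can be used (a sketch; on some kernel versions a few module names may not exist and the corresponding modprobe message can be ignored):
# Load every module listed in the file right away
while read -r mod; do modprobe "$mod"; done < /etc/modules-load.d/ipvs.conf
# Or simply restart the loader service
systemctl restart systemd-modules-load.service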
2.10 Load the containerd-related kernel modules
# Load the modules persistently
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
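As above, this file only takes effect at the next boot; to activate the modules immediately:
modprobe overlay
modprobe br_netfilter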
2.11 Linux kernel tuning
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 131072
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
# After the kernel parameters are configured on all nodes, reboot the servers to make sure the modules are still loaded after a restart
reboot
# After the reboot, check that the ipvs modules are loaded:
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
# After the reboot, check that the containerd-related modules are loaded:
lsmod | egrep 'br_netfilter|overlay'
2.12 Install other tools (optional)
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git lrzsz -y
3. Load balancers
3.1 Install haproxy and keepalived
yum -y install haproxy keepalived
3.2 HAProxy configuration
cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:6443
bind 127.0.0.1:6443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-master1 172.31.0.12:6443 check
server k8s-master2 172.31.0.13:6443 check
server k8s-master3 172.31.0.14:6443 check
EOF
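Before starting the service it is worth letting haproxy validate the file itself:
haproxy -c -f /etc/haproxy/haproxy.cfg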
3.3 Keepalived configuration
If the virtual IP shows up on every HA node at the same time (split brain), switch VRRP from multicast to unicast with unicast_src_ip and unicast_peer, as in the configurations below, and restart the service.
3.3.1 ha1
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33
mcast_src_ip 172.31.0.10
virtual_router_id 51
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
172.31.0.100
}
track_script {
chk_apiserver
}
unicast_src_ip 172.31.0.10
unicast_peer {
172.31.0.11
}
}
EOF
3.3.2 ha2
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
mcast_src_ip 172.31.0.11
virtual_router_id 51
priority 99
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
172.31.0.100
}
track_script {
chk_apiserver
}
unicast_src_ip 172.31.0.11
unicast_peer {
172.31.0.10
}
}
EOF
3.4 Health-check script
Identical on ha1 and ha2:
cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
3.5 Start the services and verify
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
ip address show
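On ha1 (the MASTER) the VIP 172.31.0.100 should now be attached to ens33. A quick failover test (a sketch; the interface name ens33 matches the configs above): stop haproxy on ha1, the check script will take keepalived down, and the VIP should move to ha2.
# On ha1
systemctl stop haproxy
# On ha2: the VIP should now be present
ip address show ens33 | grep 172.31.0.100
# Restore ha1 afterwards
systemctl start haproxy && systemctl start keepalived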
4. Configure passwordless SSH login
Run on k8s-master1.
# Generate the SSH key pair
ssh-keygen
ssh-copy-id root@k8s-master1
ssh-copy-id root@k8s-master2
ssh-copy-id root@k8s-master3
ssh-copy-id root@k8s-worker1
5. Certificate configuration
Run on k8s-master1. If the links are dead and the downloads fail, you can use my pre-downloaded cfssl.zip.
mkdir -p /data/k8s-work && cd /data/k8s-work
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
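A quick sanity check that the tools are installed and on PATH:
cfssl version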
5.1 Create the CA certificate
5.1.1 Create the CA certificate signing request file
cat > ca-csr.json <<"EOF"
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
],
"ca": {
"expiry": "87600h"
}
}
EOF
[root@k8s-master1 k8s-work]# ll
total 4
-rw-r--r-- 1 root root 256 Jan 2 23:28 ca-csr.json
5.1.2 Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@k8s-master1 k8s-work]# ll
total 16
-rw-r--r-- 1 root root 1001 Jan 2 23:28 ca.csr
-rw-r--r-- 1 root root 256 Jan 2 23:28 ca-csr.json
-rw------- 1 root root 1679 Jan 2 23:28 ca-key.pem
-rw-r--r-- 1 root root 1359 Jan 2 23:28 ca.pem
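Optionally, inspect the freshly generated CA to confirm the subject and expiry:
cfssl-certinfo -cert ca.pem
openssl x509 -in ca.pem -noout -text | grep -E 'Issuer|Not After'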
5.1.3 Configure the CA signing policy
cat > ca-config.json <<"EOF"
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
[root@k8s-master1 k8s-work]# ll
total 20
-rw-r--r-- 1 root root 356 Jan 2 23:29 ca-config.json
-rw-r--r-- 1 root root 1001 Jan 2 23:28 ca.csr
-rw-r--r-- 1 root root 256 Jan 2 23:28 ca-csr.json
-rw------- 1 root root 1679 Jan 2 23:28 ca-key.pem
-rw-r--r-- 1 root root 1359 Jan 2 23:28 ca.pem
6. Deploy the etcd cluster
The etcd cluster runs on the master nodes.
6.1 Create the etcd certificate
6.1.1 Generate the etcd certificate signing request file
cat > etcd-csr.json <<"EOF"
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"172.31.0.12",
"172.31.0.13",
"172.31.0.14"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}]
}
EOF
[root@k8s-master1 k8s-work]# ll
total 24
-rw-r--r-- 1 root root 356 Jan 2 23:29 ca-config.json
-rw-r--r-- 1 root root 1001 Jan 2 23:28 ca.csr
-rw-r--r-- 1 root root 256 Jan 2 23:28 ca-csr.json
-rw------- 1 root root 1679 Jan 2 23:28 ca-key.pem
-rw-r--r-- 1 root root 1359 Jan 2 23:28 ca.pem
-rw-r--r-- 1 root root 273 Jan 2 23:30 etcd-csr.json
6.1.2 Generate the etcd certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@k8s-master1 k8s-work]# ll
total 36
-rw-r--r-- 1 root root 356 Jan 2 23:29 ca-config.json
-rw-r--r-- 1 root root 1001 Jan 2 23:28 ca.csr
-rw-r--r-- 1 root root 256 Jan 2 23:28 ca-csr.json
-rw------- 1 root root 1679 Jan 2 23:28 ca-key.pem
-rw-r--r-- 1 root root 1359 Jan 2 23:28 ca.pem
-rw-r--r-- 1 root root 1062 Jan 2 23:31 etcd.csr
-rw-r--r-- 1 root root 273 Jan 2 23:30 etcd-csr.json
-rw------- 1 root root 1679 Jan 2 23:31 etcd-key.pem
-rw-r--r-- 1 root root 1436 Jan 2 23:31 etcd.pem
6.1.3 Distribute the certificates
# Create the directories on all master nodes
mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
# On k8s-master1, first copy the certificates into its own directory
cd /data/k8s-work
cp ca*.pem /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
# Distribute the SSL certificates to k8s-master2 and k8s-master3
for i in k8s-master2 k8s-master3; do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl; done
6.2 Download and install
If the link is dead or the download fails, you can use my pre-downloaded etcd-v3.5.2-linux-amd64.tar.gz.
# Download the package
wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
# Unpack
tar -xvf etcd-v3.5.2-linux-amd64.tar.gz
# Install
cp -p etcd-v3.5.2-linux-amd64/etcd* /usr/local/bin/
# Verify the binaries
[root@k8s-master1 k8s-work]# ll /usr/local/bin/ | grep etcd
-rwxr-xr-x 1 528287 89939 23588864 Feb 1 2022 etcd
-rwxr-xr-x 1 528287 89939 17993728 Feb 1 2022 etcdctl
-rwxr-xr-x 1 528287 89939 16068608 Feb 1 2022 etcdutl
[root@k8s-master1 k8s-work]# etcd --version
etcd Version: 3.5.2
Git SHA: 99018a77b
Go Version: go1.16.3
Go OS/Arch: linux/amd64
# Distribute the binaries to the other master nodes
scp etcd-v3.5.2-linux-amd64/etcd* k8s-master2:/usr/local/bin/
scp etcd-v3.5.2-linux-amd64/etcd* k8s-master3:/usr/local/bin/
6.3 Create the etcd.conf files
6.3.1 k8s-master1
# Generate the configuration file
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://172.31.0.12:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.31.0.12:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.31.0.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.31.0.12:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://172.31.0.12:2380,etcd2=https://172.31.0.13:2380,etcd3=https://172.31.0.14:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
6.3.2 k8s-master2
Almost identical to etcd1; only the member name and the node IP change (a loop sketch for generating all three files follows section 6.3.3).
# Generate the configuration file
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://172.31.0.13:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.31.0.13:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.31.0.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.31.0.13:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://172.31.0.12:2380,etcd2=https://172.31.0.13:2380,etcd3=https://172.31.0.14:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
6.3.3 k8s-master3
Almost identical to etcd1; only the member name and the node IP change.
# Generate the configuration file
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://172.31.0.14:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.31.0.14:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.31.0.14:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.31.0.14:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://172.31.0.12:2380,etcd2=https://172.31.0.13:2380,etcd3=https://172.31.0.14:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
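Since the three files differ only in ETCD_NAME and the node IP, each node's file can also be generated from two variables. A minimal sketch (set NAME and IP to the values of the node you are on; everything else matches the configurations above):
NAME=etcd1; IP=172.31.0.12   # etcd2/172.31.0.13 and etcd3/172.31.0.14 on the other masters
cat > /etc/etcd/etcd.conf <<EOF
#[Member]
ETCD_NAME="${NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${IP}:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${IP}:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://172.31.0.12:2380,etcd2=https://172.31.0.13:2380,etcd3=https://172.31.0.14:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF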
6.4 Create the etcd.service file
# Generate the unit file
cat > /etc/systemd/system/etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-client-cert-auth \
--client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Distribute the etcd.service unit file to k8s-master2 and k8s-master3
for i in k8s-master2 k8s-master3; do scp /etc/systemd/system/etcd.service $i:/etc/systemd/system; done
6.5 Start the etcd cluster
If startup fails, check whether firewalld and iptables are really disabled.
systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd
6.6 Verify the cluster state
# Check cluster health
[root@k8s-master1 k8s-work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 endpoint health
+--------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+--------------------------+--------+-------------+-------+
| https://172.31.0.12:2379 | true | 11.182209ms | |
| https://172.31.0.13:2379 | true | 12.274699ms | |
| https://172.31.0.14:2379 | true | 12.7094ms | |
+--------------------------+--------+-------------+-------+
# Check etcd performance
[root@k8s-master1 k8s-work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 check perf
59 / 60 Boooom ! 98.33%
PASS: Throughput is 150 writes/s
PASS: Slowest request took 0.091654s
PASS: Stddev is 0.002193s
PASS
# List the members
[root@k8s-master1 k8s-work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 member list
+------------------+---------+-------+--------------------------+--------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+--------------------------+--------------------------+------------+
| 27de189f978d77f0 | started | etcd2 | https://172.31.0.13:2380 | https://172.31.0.13:2379 | false |
| 7d78b925ef8c87aa | started | etcd1 | https://172.31.0.12:2380 | https://172.31.0.12:2379 | false |
| dce414af7cc22cf7 | started | etcd3 | https://172.31.0.14:2380 | https://172.31.0.14:2379 | false |
+------------------+---------+-------+--------------------------+--------------------------+------------+
# Per-endpoint cluster status
[root@k8s-master1 k8s-work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 endpoint status
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://172.31.0.12:2379 | 7d78b925ef8c87aa | 3.5.2 | 22 MB | true | false | 3 | 8986 | 8986 | |
| https://172.31.0.13:2379 | 27de189f978d77f0 | 3.5.2 | 22 MB | false | false | 3 | 8986 | 8986 | |
| https://172.31.0.14:2379 | dce414af7cc22cf7 | 3.5.2 | 22 MB | false | false | 3 | 8986 | 8986 | |
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
7. Deploy the Kubernetes cluster
7.1 Deploy kube-apiserver
7.1.1 Create the certificate
Generate the apiserver certificate signing request file.
To make it easier to add nodes later, reserve a generous range of spare IPs in the hosts list; a new node then simply takes one of them (a small helper for generating the list follows the CSR below).
cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"172.31.0.12",
"172.31.0.13",
"172.31.0.14",
"172.31.0.15",
"172.31.0.16",
"172.31.0.17",
"172.31.0.18",
"172.31.0.19",
"172.31.0.20",
"172.31.0.21",
"172.31.0.22",
"172.31.0.23",
"172.31.0.24",
"172.31.0.25",
"172.31.0.26",
"172.31.0.27",
"172.31.0.28",
"172.31.0.29",
"172.31.0.30",
"172.31.0.31",
"172.31.0.32",
"172.31.0.33",
"172.31.0.34",
"172.31.0.35",
"172.31.0.36",
"172.31.0.37",
"172.31.0.38",
"172.31.0.39",
"172.31.0.40",
"172.31.0.41",
"172.31.0.42",
"172.31.0.43",
"172.31.0.44",
"172.31.0.45",
"172.31.0.46",
"172.31.0.47",
"172.31.0.48",
"172.31.0.49",
"172.31.0.50",
"172.31.0.51",
"172.31.0.52",
"172.31.0.53",
"172.31.0.54",
"172.31.0.55",
"172.31.0.56",
"172.31.0.57",
"172.31.0.58",
"172.31.0.59",
"172.31.0.60",
"172.31.0.61",
"172.31.0.62",
"172.31.0.63",
"172.31.0.64",
"172.31.0.65",
"172.31.0.66",
"172.31.0.67",
"172.31.0.68",
"172.31.0.69",
"172.31.0.100",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
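If you would rather not type the spare IP range by hand, the hosts entries above can be generated. A minimal sketch (the 172.31.0.12-69 range and the JSON formatting are simply those used in this document):
# Print the quoted, comma-separated entries for 172.31.0.12 .. 172.31.0.69
for i in $(seq 12 69); do printf '"172.31.0.%s",\n' "$i"; done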
Generate the apiserver certificate and the token file.
# Generate the apiserver certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
# Generate the token file
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
7.1.2 Download and install
If the link is dead or the download fails you can use my pre-downloaded kubernetes-server-linux-amd64.tar.gz. Also, if the master nodes will not run workloads, kube-proxy and kubelet can be skipped on them so that kube-apiserver can simply focus on its own job.
# Download the binaries
cd /data/k8s-work/ && wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz
# Unpack
tar -xf kubernetes-server-linux-amd64.tar.gz
# Install kube-apiserver, kube-controller-manager, kube-scheduler and kubectl
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
# Test invocation (the connection-refused error below is expected at this stage: no kubeconfig and no apiserver are in place yet)
[root@k8s-master1 bin]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
# Distribute to the other master nodes
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master2:/usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master3:/usr/local/bin/
# If the master nodes will also run workloads, install kube-proxy and kubelet as well
# Install kube-proxy and kubelet
cp kube-proxy kubelet /usr/local/bin/
# Distribute to the other master nodes and the worker node
scp kube-proxy kubelet k8s-master2:/usr/local/bin/
scp kube-proxy kubelet k8s-master3:/usr/local/bin/
scp kube-proxy kubelet k8s-worker1:/usr/local/bin/
7.1.3 Create the kube-apiserver.conf files
k8s-master1
cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=172.31.0.12 \
--secure-port=6443 \
--advertise-address=172.31.0.12 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF
k8s-master2
Only --bind-address and --advertise-address need to change to this node's IP (a sed sketch for generating these per-node files follows the k8s-master3 config below).
cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=172.31.0.13 \
--secure-port=6443 \
--advertise-address=172.31.0.13 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF
k8s-master3
Only --bind-address and --advertise-address need to change to this node's IP.
cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=172.31.0.14 \
--secure-port=6443 \
--advertise-address=172.31.0.14 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://172.31.0.12:2379,https://172.31.0.13:2379,https://172.31.0.14:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF
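Because the three files differ only in those two address flags, master2's and master3's configs can also be derived from master1's copy instead of pasting the full block again. A minimal sketch (assumes /etc/kubernetes/kube-apiserver.conf has already been written on k8s-master1 as above):
# Run on k8s-master1: generate and ship the per-node variants
for pair in "k8s-master2 172.31.0.13" "k8s-master3 172.31.0.14"; do
  set -- $pair
  sed -e "s/--bind-address=172.31.0.12/--bind-address=$2/" \
      -e "s/--advertise-address=172.31.0.12/--advertise-address=$2/" \
      /etc/kubernetes/kube-apiserver.conf > /tmp/kube-apiserver.conf.$1
  scp /tmp/kube-apiserver.conf.$1 $1:/etc/kubernetes/kube-apiserver.conf
done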
7.1.4 Create the kube-apiserver.service file
cat > /etc/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
7.1.5 Distribute certificates and configuration files
# Create these directories on all master and worker nodes
mkdir -p /etc/kubernetes/
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes
# First copy the files into place on the current host (k8s-master1)
cp ca*.pem /etc/kubernetes/ssl/
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/
scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master2:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master3:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/ca*.pem k8s-master2:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/ca*.pem k8s-master3:/etc/kubernetes/ssl
scp /etc/kubernetes/token.csv k8s-master2:/etc/kubernetes
scp /etc/kubernetes/token.csv k8s-master3:/etc/kubernetes
scp /etc/systemd/system/kube-apiserver.service k8s-master2:/etc/systemd/system/kube-apiserver.service
scp /etc/systemd/system/kube-apiserver.service k8s-master3:/etc/systemd/system/kube-apiserver.service
7.1.6 Start the service
systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
[root@k8s-master1 k8s-work]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/etc/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
Active: active (running) since Sat 2025-01-04 07:31:42 CST; 7min ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 1752 (kube-apiserver)
CGroup: /system.slice/kube-apiserver.service
└─1752 /usr/local/bin/kube-apiserver --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota --anonymous-auth=false --bind-address=172.31.0.12 --secure-port=6443 ...
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.208586 1752 httplog.go:94] "HTTP" verb="GET" URI="/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas?allowWatchBookmarks=true&resourceVersion=8971&timeout=7m39s&timeout...
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.208731 1752 reflector.go:530] k8s.io/client-go/informers/factory.go:134: Watch close - *v1beta1.FlowSchema total 37 items received
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.209098 1752 get.go:260] "Starting watch" path="/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas" resourceVersion="9054" labels="" fields="" timeout="5m30s"
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.211020 1752 httplog.go:94] "HTTP" verb="GET" URI="/api/v1/endpoints?allowWatchBookmarks=true&resourceVersion=8971&timeout=7m39s&timeoutSeconds=459&watch=true" latency="7m39...
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.211188 1752 reflector.go:530] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.Endpoints total 11 items received
Jan 04 07:39:21 k8s-master1 kube-apiserver[1752]: I0104 07:39:21.211601 1752 get.go:260] "Starting watch" path="/api/v1/endpoints" resourceVersion="9189" labels="" fields="" timeout="9m38s"
Jan 04 07:39:24 k8s-master1 kube-apiserver[1752]: I0104 07:39:24.386481 1752 httplog.go:94] "HTTP" verb="GET" URI="/api/v1/namespaces/default" latency="3.780009ms" userAgent="kube-apiserver/v1.21.10 (linux/amd64) kuberne...2:56586" resp=200
Jan 04 07:39:24 k8s-master1 kube-apiserver[1752]: I0104 07:39:24.394348 1752 httplog.go:94] "HTTP" verb="GET" URI="/api/v1/namespaces/default/services/kubernetes" latency="6.280161ms" userAgent="kube-apiserver/v1.21.10 (...2:56586" resp=200
Jan 04 07:39:24 k8s-master1 kube-apiserver[1752]: I0104 07:39:24.405402 1752 httplog.go:94] "HTTP" verb="GET" URI="/api/v1/namespaces/default/endpoints/kubernetes" latency="2.451606ms" userAgent="kube-apiserver/v1.21.10 ...2:56586" resp=200
Jan 04 07:39:24 k8s-master1 kube-apiserver[1752]: I0104 07:39:24.411604 1752 httplog.go:94] "HTTP" verb="GET" URI="/apis/discovery.k8s.io/v1/namespaces/default/endpointslices/kubernetes" latency="2.067096ms" userAgent="k...2:56586" resp=200
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master1 k8s-work]#
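A quick external check that the apiserver really is serving on the secure port (a sketch; because --anonymous-auth=false is set, an unauthenticated request should be rejected rather than answered):
# The secure port should be bound on every master
ss -antlp | grep 6443
# An anonymous request is expected to come back as 401 Unauthorized, which at least proves TLS is up
curl -k https://172.31.0.12:6443/healthz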
7.2 Deploy kubectl
7.2.1 Create the certificate
Generate the kubectl (admin) certificate signing request file.
cat > admin-csr.json << "EOF"
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
Generate the kubectl certificate.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
7.2.2 Generate the kube.config file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.31.0.100:6443 --kubeconfig=kube.config
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
kubectl config use-context kubernetes --kubeconfig=kube.config
7.2.3 Prepare the kubectl config file and create the role binding
mkdir ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
7.2.4 Distribute the config file
# k8s-master2 and k8s-master3 need the directory created first
mkdir /root/.kube
scp /root/.kube/config k8s-master2:/root/.kube/config
scp /root/.kube/config k8s-master3:/root/.kube/config
7.2.5 Check the cluster state
export KUBECONFIG=$HOME/.kube/config
View cluster information
kubectl cluster-info
View the status of the cluster components
kubectl get componentstatuses
View resource objects in all namespaces
kubectl get all --all-namespaces
7.2.6 Configure kubectl command completion (optional)
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source '/root/.kube/completion.bash.inc'
source $HOME/.bash_profile
7.3 Deploy kube-scheduler
7.3.1 Create the certificate
Generate the kube-scheduler certificate signing request file.
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"172.31.0.12",
"172.31.0.13",
"172.31.0.14"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
Generate the kube-scheduler certificate.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
7.3.2 Create the kube-scheduler.kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.31.0.100:6443 --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
7.3.3 Create the kube-scheduler.conf file
cat > kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
7.3.4 Create the kube-scheduler.service file
cat > kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
7.3.5 Distribute certificates and configuration files
# First copy the certificates into place on the current host
cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
scp kube-scheduler*.pem k8s-master2:/etc/kubernetes/ssl/
scp kube-scheduler*.pem k8s-master3:/etc/kubernetes/ssl/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master2:/etc/kubernetes/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master3:/etc/kubernetes/
scp kube-scheduler.service k8s-master2:/usr/lib/systemd/system/
scp kube-scheduler.service k8s-master3:/usr/lib/systemd/system/
7.3.6 Start the service
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
[root@k8s-master1 k8s-work]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since Sat 2025-01-04 08:49:59 CST; 27s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 2002 (kube-scheduler)
CGroup: /system.slice/kube-scheduler.service
└─2002 /usr/local/bin/kube-scheduler --address=127.0.0.1 --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig --leader-elect=true --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=2
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.376532 2002 server.go:138] Starting Kubernetes Scheduler version v1.21.10
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: W0104 08:50:01.377686 2002 authorization.go:47] Authorization is disabled
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: W0104 08:50:01.377701 2002 authentication.go:47] Authentication is disabled
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.377737 2002 deprecated_insecure_serving.go:54] Serving healthz insecurely on 127.0.0.1:10251
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.379040 2002 tlsconfig.go:200] loaded serving cert ["Generated self signed cert"]: "localhost@1735951800" [serving] validServingFor=[127.0.0.1,localhost,localhost] issuer="l...
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.379182 2002 named_certificates.go:53] loaded SNI cert [0/"self-signed loopback"]: "apiserver-loopback-client@1735951801" [serving] validServingFor=[apiserver-loopback-clien...
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.379233 2002 secure_serving.go:202] Serving securely on [::]:10259
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.379274 2002 tlsconfig.go:240] Starting DynamicServingCertificateController
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.480169 2002 leaderelection.go:243] attempting to acquire leader lease kube-system/kube-scheduler...
Jan 04 08:50:01 k8s-master1 kube-scheduler[2002]: I0104 08:50:01.503203 2002 leaderelection.go:253] successfully acquired lease kube-system/kube-scheduler
Hint: Some lines were ellipsized, use -l to show in full.
7.4 Deploy kube-controller-manager
7.4.1 Create the certificate
Generate the kube-controller-manager certificate signing request file.
cat > kube-controller-manager-csr.json << "EOF"
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"172.31.0.12",
"172.31.0.13",
"172.31.0.14"
],
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
Generate the kube-controller-manager certificate.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
7.4.2 Create the kube-controller-manager.kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.31.0.100:6443 --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
7.4.3 Create the kube-controller-manager.conf file
cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
7.4.4 Create the kube-controller-manager.service file
cat > kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
7.4.5 Distribute certificates and configuration files
# First copy the certificates into place on the current host
cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
scp kube-controller-manager*.pem k8s-master2:/etc/kubernetes/ssl/
scp kube-controller-manager*.pem k8s-master3:/etc/kubernetes/ssl/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master2:/etc/kubernetes/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master3:/etc/kubernetes/
scp kube-controller-manager.service k8s-master2:/usr/lib/systemd/system/
scp kube-controller-manager.service k8s-master3:/usr/lib/systemd/system/
# Inspect the certificate
openssl x509 -in /etc/kubernetes/ssl/kube-controller-manager.pem -noout -text
7.4.6 Start the service
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
# Check component status
kubectl get componentstatuses
7.5 Deploy the worker node components
7.5.1 Download and install
If the link is dead or the download fails you can use my pre-downloaded cri-containerd-cni-1.7.23-linux-amd64.tar.gz.
# Download
wget https://github.com/containerd/containerd/releases/download/v1.7.23/cri-containerd-cni-1.7.23-linux-amd64.tar.gz
# The archive contains etc, opt and usr directories; extracting it to / drops everything straight into the matching system directories, which saves the file-copying step.
tar -xf cri-containerd-cni-1.7.23-linux-amd64.tar.gz -C /
7.5.2 Generate the containerd configuration
# On all master and worker nodes
mkdir /etc/containerd
# Generate the default configuration and then modify it
containerd config default >/etc/containerd/config.toml
# The full config.toml below already contains this change; run the sed only if you are editing the default file instead.
sed -i 's@systemd_cgroup = false@systemd_cgroup = true@' /etc/containerd/config.toml
# The full config.toml below already contains this change; run the sed only if you are editing the default file instead.
sed -i 's@k8s.gcr.io/pause:3.6@registry.aliyuncs.com/google_containers/pause:3.6@' /etc/containerd/config.toml
cat >/etc/containerd/config.toml<<EOF
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = -999
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[plugins]
[plugins.cgroups]
no_prometheus = false
[plugins.cri]
stream_server_address = "127.0.0.1"
stream_server_port = "0"
enable_selinux = false
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
stats_collect_period = 10
systemd_cgroup = true
enable_tls_streaming = false
max_container_log_line_size = 16384
[plugins.cri.containerd]
snapshotter = "overlayfs"
no_pivot = false
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = ""
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = "/etc/cni/net.d/10-default.conf"
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."docker.io"]
endpoint = [
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com"
]
[plugins.cri.registry.mirrors."gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."k8s.gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn/google-containers/"
]
[plugins.cri.registry.mirrors."quay.io"]
endpoint = [
"https://quay.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."harbor.tanqidi.com"]
endpoint = [
"http://harbor.tanqidi.com"
]
[plugins.cri.x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins.diff-service]
default = ["walking"]
[plugins.linux]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins.opt]
path = "/opt/containerd"
[plugins.restart]
interval = "10s"
[plugins.scheduler]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
EOF
7.5.3 Download and install runc
The runc bundled in the package above has too many system dependencies, so downloading and installing it separately is recommended.
By default the bundled runc fails with: runc: symbol lookup error: runc: undefined symbol: seccomp_notify_respond
If the link is dead or the download fails you can use my pre-downloaded runc.amd64.
# Download
wget https://github.com/opencontainers/runc/releases/download/v1.2.3/runc.amd64
# Make it executable
chmod +x runc.amd64
# Replace the runc shipped with the package
mv runc.amd64 /usr/local/sbin/runc
# Verify
[root@k8s-master1 k8s-work]# ls -l /usr/local/sbin/runc
-rwxr-xr-x 1 root root 11168096 Jan 4 23:48 /usr/local/sbin/runc
[root@k8s-master1 k8s-work]# runc -v
runc version 1.2.3
commit: v1.2.3-0-g0d37cfd4
spec: 1.2.0
go: go1.22.10
libseccomp: 2.5.5
# Start containerd
systemctl enable --now containerd
systemctl start containerd
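The cri-containerd bundle also ships crictl. So that crictl talks to the containerd socket used throughout this document, a small client config can be dropped in (a sketch; the socket path matches the config.toml above):
cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF
# Should print runtime status information once containerd is up
crictl info | head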
7.5.4 Distribute the packages and configuration files
# Distribute the containerd package
scp /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz k8s-master2:/data/k8s-work/
scp /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz k8s-master3:/data/k8s-work/
scp /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz k8s-worker1:/data/k8s-work/
# Run the extraction remotely over SSH to complete the installation
ssh -t k8s-master2 'sudo tar -xf /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz -C /'
ssh -t k8s-master3 'sudo tar -xf /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz -C /'
ssh -t k8s-worker1 'sudo tar -xf /data/k8s-work/cri-containerd-cni-1.7.23-linux-amd64.tar.gz -C /'
# Distribute containerd's /etc/containerd/config.toml
scp /etc/containerd/config.toml k8s-master2:/etc/containerd/config.toml
scp /etc/containerd/config.toml k8s-master3:/etc/containerd/config.toml
scp /etc/containerd/config.toml k8s-worker1:/etc/containerd/config.toml
# Distribute runc
scp /usr/local/sbin/runc k8s-master2:/usr/local/sbin/runc
scp /usr/local/sbin/runc k8s-master3:/usr/local/sbin/runc
scp /usr/local/sbin/runc k8s-worker1:/usr/local/sbin/runc
7.5.5 Deploy kubelet
Create kubelet-bootstrap.kubeconfig
# These directories must be created on all master and worker nodes
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
# Generate kubelet-bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.31.0.100:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
# RBAC bindings
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
# Inspect
kubectl describe clusterrolebinding cluster-system-anonymous
kubectl describe clusterrolebinding kubelet-bootstrap
Create the kubelet.json configuration file
The address field in kubelet.json must be changed to the IP of the host it runs on.
cat > kubelet.json << "EOF"
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/ssl/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "172.31.0.12",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
Create the kubelet.service file
If you hit a "Failed to start kubelet.service: Unit not found" error, check whether After and Requires in kubelet.service still reference docker.service; since this setup uses containerd they must reference containerd.service.
cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet.json \
--cni-bin-dir=/opt/cni/bin \
--cni-conf-dir=/etc/cni/net.d \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--network-plugin=cni \
--rotate-certificates \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
--root-dir=/etc/cni/net.d \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
Distribute certificates and configuration files
# First copy the files into place on the current host (k8s-master1)
cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
cp kubelet.json /etc/kubernetes/
cp kubelet.service /usr/lib/systemd/system/
# Note: after kubelet.json has been copied over, edit address on each node to that host's own IP (or use the sed sketch after this block)
for i in k8s-master2 k8s-master3 k8s-worker1;do scp kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done
for i in k8s-master2 k8s-master3 k8s-worker1;do scp ca.pem $i:/etc/kubernetes/ssl/;done
for i in k8s-master2 k8s-master3 k8s-worker1;do scp kubelet.service $i:/usr/lib/systemd/system/;done
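Instead of editing kubelet.json by hand on every node, the address field can be patched remotely. A minimal sketch using the host/IP pairs from this document:
for pair in "k8s-master2 172.31.0.13" "k8s-master3 172.31.0.14" "k8s-worker1 172.31.0.15"; do
  set -- $pair
  ssh $1 "sed -i 's/\"address\": \"172.31.0.12\"/\"address\": \"$2\"/' /etc/kubernetes/kubelet.json"
done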
Start the service
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
[root@k8s-master1 k8s-work]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2025-01-05 01:18:45 CST; 6min ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 2615 (kubelet)
CGroup: /system.slice/kubelet.service
└─2615 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig --cert-dir=/etc/kubernetes/ssl --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --config=/etc/kubernetes/kubelet.json --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --c...
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.372478 2615 manager.go:602] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.372622 2615 manager.go:284] "Serving device plugin registration server on socket" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.373089 2615 plugin_watcher.go:52] "Plugin Watcher Start" path="/etc/cni/net.d/plugins_registry"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.373157 2615 plugin_manager.go:112] "The desired_state_of_world populator (plugin watcher) starts"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.373166 2615 plugin_manager.go:114] "Starting Kubelet Plugin Manager"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.398248 2615 kubelet_node_status.go:109] "Node was previously registered" node="k8s-master1"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.398357 2615 kubelet_node_status.go:74] "Successfully registered node" node="k8s-master1"
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.449921 2615 kubelet.go:1932] "SyncLoop ADD" source="api" pods=[]
Jan 05 01:18:57 k8s-master1 kubelet[2615]: I0105 01:18:57.568520 2615 reconciler.go:157] "Reconciler: start to sync state"
Jan 05 01:23:57 k8s-master1 kubelet[2615]: I0105 01:23:57.271297 2615 kubelet.go:1316] "Image garbage collection succeeded"
[root@k8s-master1 k8s-work]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready <none> 9m28s v1.21.10
k8s-master2 Ready <none> 5m11s v1.21.10
k8s-master3 Ready <none> 5m12s v1.21.10
k8s-worker1 Ready <none> 5m12s v1.21.10
[root@k8s-master1 k8s-work]#
7.5.6 Deploy kube-proxy
Create the kube-proxy certificate signing request file
cat > kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Create the kube-proxy.kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.31.0.100:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Create the kube-proxy.yaml file
bindAddress, healthzBindAddress and metricsBindAddress in kube-proxy.yaml must be changed to the IP of the host it runs on.
cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 172.31.0.12
clientConnection:
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 172.31.0.12:10256
kind: KubeProxyConfiguration
metricsBindAddress: 172.31.0.12:10249
mode: "ipvs"
EOF
The following is a demo for reference only and must not be used for this cluster.
# Reference example only, not for this cluster: https://www.cnblogs.com/yangjianyong-bky/p/16799071.html
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: '172.16.222.231'
healthzBindAddress: '172.16.222.231:10256'
metricsBindAddress: '127.0.0.1:10249'
bindAddressHardFail: true
clientConnection:
kubeconfig: /etc/kubernetes/kubeconfig/kube-proxy.kubeconfig
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
qps: 5
clusterCIDR: 10.100.0.0/16
enableProfiling: false
mode: "ipvs"
conntrack:
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
strictARP: false
syncPeriod: 30s
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
winkernel:
enableDSR: false
networkName: ""
sourceVip: ""
Create the kube-proxy.service file
cat > kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Distribute certificates and configuration files
# First copy the certificates into place on the current host
cp kube-proxy*.pem /etc/kubernetes/ssl/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/
# Distribute the files; bindAddress, healthzBindAddress and metricsBindAddress in kube-proxy.yaml must be changed to each host's own IP (see the sed sketch after this block)
for i in k8s-master2 k8s-master3 k8s-worker1;do scp kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done
for i in k8s-master2 k8s-master3 k8s-worker1;do scp kube-proxy.service $i:/usr/lib/systemd/system/;done
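As with kubelet.json, the three address fields can be patched remotely instead of being edited by hand; a minimal sketch using the host/IP pairs from this document (a global replace is safe here because 172.31.0.12 only occurs in those three fields):
for pair in "k8s-master2 172.31.0.13" "k8s-master3 172.31.0.14" "k8s-worker1 172.31.0.15"; do
  set -- $pair
  ssh $1 "sed -i 's/172.31.0.12/$2/g' /etc/kubernetes/kube-proxy.yaml"
done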
Start the service
# This directory must be created on every node, otherwise kube-proxy will fail to start
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
[root@k8s-master1 kubernetes]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2025-01-05 03:11:55 CST; 1min 16s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 3748 (kube-proxy)
Tasks: 8
Memory: 21.3M
CGroup: /system.slice/kube-proxy.service
└─3748 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.yaml --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=2
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.699606 3748 config.go:224] Starting endpoint slice config controller
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.699611 3748 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: W0105 03:11:55.715001 3748 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.715228 3748 service.go:306] Service default/kubernetes updated: 1 ports
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: W0105 03:11:55.718099 3748 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.799803 3748 shared_informer.go:247] Caches are synced for endpoint slice config
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.799896 3748 proxier.go:1040] Not syncing ipvs rules until Services and Endpoints have been received from master
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.799939 3748 proxier.go:1040] Not syncing ipvs rules until Services and Endpoints have been received from master
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.799951 3748 shared_informer.go:247] Caches are synced for service config
Jan 05 03:11:55 k8s-master1 kube-proxy[3748]: I0105 03:11:55.799986 3748 service.go:421] Adding new service port "default/kubernetes:https" at 10.96.0.1:443/TCP
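Since mode: "ipvs" is configured, the generated proxy rules can be inspected with ipvsadm once Services exist; the kubernetes Service VIP 10.96.0.1:443 should show up as a virtual server:
ipvsadm -Ln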
7.6 Deploy the CNI network plugin
Calico is used as the CNI network plugin.
7.6.1 Download and install
If you cannot download it, you can use my pre-edited calico.yaml.
# Download
wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
# Edit line 3683 of the file
3683 - name: CALICO_IPV4POOL_CIDR
3684 value: "10.244.0.0/16"
# Apply it to the cluster
kubectl apply -f calico.yaml
# Everything is running nicely
[root@k8s-master1 k8s-work]# kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-gx4rl 1/1 Running 0 34m
kube-system calico-node-86pxm 1/1 Running 0 20m
kube-system calico-node-8mcfm 1/1 Running 0 25m
kube-system calico-node-mr7tf 1/1 Running 0 103m
kube-system calico-node-t52bw 1/1 Running 0 103m
# If the pods keep erroring because the images cannot be pulled, use the bonus script described below (section 7.6.2) to fetch and distribute them
[root@k8s-master1 k8s-work]# kubectl get po -A
...
kube-system calico-node-7x5mt 0/1 Init:ErrImagePull 0 76m
kube-system calico-node-d7tpk 0/1 Init:ErrImagePull 0 76m
kube-system calico-node-mr7tf 0/1 Init:ErrImagePull 0 76m
kube-system calico-node-t52bw 0/1 Init:ErrImagePull 0 76m
7.6.2 Bonus script
If your access to docker.io is restricted, you can use the bonus script I provide to pull the images and distribute them to all nodes.
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/kube-controllers:v3.19.4 docker.io/calico/kube-controllers:v3.19.4
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/cni:v3.19.4 docker.io/calico/cni:v3.19.4
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/pod2daemon-flexvol:v3.19.4 docker.io/calico/pod2daemon-flexvol:v3.19.4
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/calico/node:v3.19.4 docker.io/calico/node:v3.19.4
7.7 Deploy CoreDNS
7.7.1 Download and install
# Generate the deployment manifest
cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.4
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
# Apply it to the cluster
kubectl apply -f coredns.yaml
# Everything is running nicely
[root@k8s-master1 k8s-work]# kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-gx4rl 1/1 Running 0 42m
kube-system calico-node-86pxm 1/1 Running 0 28m
kube-system calico-node-8mcfm 1/1 Running 0 32m
kube-system calico-node-mr7tf 1/1 Running 0 110m
kube-system calico-node-t52bw 1/1 Running 0 110m
kube-system coredns-675db8b7cc-nq4qd 1/1 Running 0 4m3s
# Use the bonus script to pull the image and distribute it to every node!
[root@k8s-master1 k8s-work]# kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-gx4rl 1/1 Running 0 39m
kube-system calico-node-86pxm 1/1 Running 0 25m
kube-system calico-node-8mcfm 1/1 Running 0 29m
kube-system calico-node-mr7tf 1/1 Running 0 107m
kube-system calico-node-t52bw 1/1 Running 0 107m
kube-system coredns-675db8b7cc-nq4qd 0/1 ErrImagePull 0 77s
7.7.2 Bonus script
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/coredns/coredns:1.8.4 docker.io/coredns/coredns:1.8.4
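Once the coredns pod is Running, cluster DNS can be verified from a throwaway pod. A sketch (the busybox:1.28 image is just an example; pick any image your nodes can actually pull):
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default
# Expected: the query goes to 10.96.0.2 and resolves to the kubernetes Service IP 10.96.0.1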
8. Deploy nginx as a test and access it
cat > nginx.yaml << "EOF"
---
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-web
spec:
replicas: 2
selector:
name: nginx
template:
metadata:
labels:
name: nginx
spec:
containers:
- name: nginx
image: swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/nginx:1.20.2
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx-service-nodeport
spec:
ports:
- port: 80
targetPort: 80
nodePort: 30001
protocol: TCP
type: NodePort
selector:
name: nginx
EOF
# Apply it to the cluster
kubectl apply -f nginx.yaml
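To confirm the deployment works end to end (any node IP can be used for the NodePort):
kubectl get pods -o wide
kubectl get svc nginx-service-nodeport
# The nginx welcome page should come back through the NodePort on any node
curl http://172.31.0.15:30001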
9. Bonus script
The pull.sh script pulls a container image on every node of the Kubernetes cluster, optionally re-tags it, and deletes the original image so that only the renamed one remains. The first argument is the image to download (for example docker.io/nginx:latest); the second, optional argument is the new image tag (for example nginx:new-tag); if it is omitted the script only pulls the image without renaming it. The third argument is the containerd namespace (default k8s.io). The namespace matters because ctr pulls into the default namespace by default while crictl only sees the k8s.io namespace; using k8s.io ensures that both ctr and crictl (and therefore Kubernetes) can find the image.
The script first pulls the image on the local host and, if a new tag was supplied, re-tags it and removes the original. It then connects to every node in the cluster over SSH and performs the same pull, re-tag (when a new tag was given) and cleanup, so that in the end all nodes share the renamed image and the original tag is gone everywhere.
./pull.sh swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/nginx:1.22.1-alpine docker.io/nginx:1.22.1-alpine
./pull.sh quay.io/metallb/controller:v0.13.9
You can visit the image mirror site kindly run by 渡渡鸟 (the ddn-k8s mirror), search for the image you need or submit a missing one to have it proxied, and then use the resulting accelerated address together with pull.sh.
# Generate the script
cat > pull.sh << "EOF"
#!/bin/bash
# Check that the arguments were passed correctly
if [ $# -lt 1 ]; then
echo "Usage: $0 <original-image> [new-image] [namespace]"
exit 1
fi
# First argument: original image name
original_image=$1
# Second argument: new image name (optional)
new_image=$2
# Third argument: namespace (optional, defaults to k8s.io)
namespace=${3:-k8s.io}
# Get the names of all Kubernetes nodes
echo "Fetching node names from Kubernetes..."
nodes=$(kubectl get nodes -o custom-columns=NAME:.metadata.name --no-headers)
# Pull the image on the local host (and re-tag it if a new name was given)
echo "Pulling image: $original_image on local host with namespace $namespace"
ctr --namespace $namespace images pull $original_image
if [ -n "$new_image" ]; then
echo "Tagging image $original_image as $new_image on local host"
ctr --namespace $namespace images tag $original_image $new_image
# Remove the original image
echo "Removing original image: $original_image on local host"
ctr --namespace $namespace images remove $original_image
fi
# Confirm the image was pulled and re-tagged on the local host
echo "Listing images on local host:"
ctr --namespace $namespace images list
# On each node, pull the image remotely, re-tag it and remove the original
for node in $nodes; do
echo "Pulling image on remote host: $node with namespace $namespace"
ssh $node "ctr --namespace $namespace images pull $original_image"
if [ -n "$new_image" ]; then
echo "Tagging image $original_image as $new_image on remote host $node"
ssh $node "ctr --namespace $namespace images tag $original_image $new_image"
# Remove the original image
echo "Removing original image: $original_image on remote host $node"
ssh $node "ctr --namespace $namespace images remove $original_image"
fi
# Confirm the image was pulled and re-tagged on the remote host
echo "Listing images on remote host $node:"
ssh $node "ctr --namespace $namespace images list"
done
EOF
# Make the script executable
chmod +x ./pull.sh
10. Final words
It really wasn't easy, but the whole build is finally done. There were many details and plenty of pitfalls along the way, and there are still quite a few settings I don't fully understand yet, which will take continued study to strengthen. I learned a lot from the process, and there are still things to optimize: for example, when hardware and budget are tight and the node count is small, the ha1/ha2 machines can be saved by installing haproxy and keepalived directly on the three master nodes; with modest traffic that still gives high availability, though it is ultimately not ideal, so judge for your own situation.
Another headache during deployment was being unable to reach docker.io to pull images, but the pull.sh script provided here handles downloading and distribution, so it should still be easy to manage and use.