1. Prerequisites

1.1 Set the hostnames

hostnamectl set-hostname k8s-master01   # on the master node
hostnamectl set-hostname k8s-worker01   # on worker node 1
hostnamectl set-hostname k8s-worker02   # on worker node 2

1.2 Configure /etc/hosts

172.31.0.111 k8s-master01
172.31.0.112 k8s-worker01
172.31.0.113 k8s-worker02
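
The same three entries go into /etc/hosts on every node; a quick way to append them (a minimal sketch, assuming the entries are not already present):

cat >> /etc/hosts <<EOF
172.31.0.111 k8s-master01
172.31.0.112 k8s-worker01
172.31.0.113 k8s-worker02
EOF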

1.3 Firewall configuration

systemctl disable firewalld && systemctl stop firewalld
systemctl disable iptables && systemctl stop iptables 
systemctl status firewalld
systemctl status iptables 

1.4 Upgrade the kernel

If the kernel version is old, you can follow the steps below to upgrade to a newer one:

https://tanqidi.com/archives/8878c993-e75e-4797-86b2-75589a1ff031

1.5 SELinux configuration

setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
sestatus

reboot

1.6 Enable IP forwarding and bridge filtering

# Add the bridge-filtering and IP-forwarding configuration file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF

# Load the br_netfilter module
modprobe br_netfilter

sysctl -p /etc/sysctl.d/k8s.conf

# Verify that the module is loaded
lsmod | grep br_netfilter
br_netfilter           22256  0

cat > /etc/sysconfig/modules/br_netfilter.modules<<EOF
#!/bin/bash
modprobe br_netfilter
EOF
chmod 755 /etc/sysconfig/modules/br_netfilter.modules
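
To confirm the settings are active, query the keys directly; the first three should read 1 and vm.swappiness should read 0 (a quick optional check):

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward vm.swappiness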

1.7 Install ipset and ipvsadm

yum -y install ipset ipvsadm

# Configure how the IPVS modules are loaded and list the modules we need
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

Make the script executable, run it, and check that the modules are loaded:
[root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0 
ip_vs_wrr              16384  0 
ip_vs_rr               16384  0 
ip_vs                 155648  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          147456  2 xt_conntrack,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs

1.8 Disable swap

swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p
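
A quick check that swap is really off (swapon should print nothing and free should show 0 for swap):

swapon --show
free -h | grep -i swap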

2. Deploy containerd

2.1 Download the release tarball

https://github.com/containerd/containerd/releases/
mkdir -p /tmp/k8s-install/ && cd /tmp/k8s-install
wget https://github.com/containerd/containerd/releases/download/v1.7.3/cri-containerd-1.7.3-linux-amd64.tar.gz

# Extract into the root (/) directory; the binaries are placed under /usr/local/bin/ automatically
tar xf cri-containerd-1.7.3-linux-amd64.tar.gz  -C /

# Generate config.toml
mkdir -p /etc/containerd/ && containerd config default > /etc/containerd/config.toml
# Edit /etc/containerd/config.toml and change the sandbox image from 3.8 to 3.9:
# sandbox_image = "registry.k8s.io/pause:3.8"  ->  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
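If you prefer not to edit the file by hand, a sed one-liner does the same thing (assuming the generated config contains pause:3.8 as above):
sed -i 's#registry.k8s.io/pause:3.8#registry.aliyuncs.com/google_containers/pause:3.9#' /etc/containerd/config.toml
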
# Verify
[root@k8s-master01 tmp]# cat /etc/containerd/config.toml |grep pause:
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"

# Start containerd
systemctl enable --now containerd

# Check the version
containerd --version

2.2 Update runc

mkdir -p /tmp/k8s-install/runc && cd /tmp/k8s-install/runc
wget https://github.com/opencontainers/runc/releases/download/v1.1.5/libseccomp-2.5.4.tar.gz
tar xf libseccomp-2.5.4.tar.gz
cd libseccomp-2.5.4/
# gperf must be installed first
yum install gperf -y

# If ./configure fails, install the build toolchain: yum -y groupinstall "Development Tools" (or just gcc and make: yum -y install gcc make)
./configure
make && make install
find / -name "libseccomp.so"

cd /tmp/k8s-install/runc
wget https://github.com/opencontainers/runc/releases/download/v1.1.9/runc.amd64
chmod +x runc.amd64
mv runc.amd64 /usr/local/sbin/runc
# Run runc; if it prints its help text, the installation is working
runc
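
To confirm the binary on the PATH is the one just installed, check the version (it should report 1.1.9):

runc --version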

2.3 Configure a proxy

Because public registries such as docker.io cannot be reached directly, we pull images through a proxy. NO_PROXY is set to exclude the key internal subnets so that the proxy does not interfere with normal Kubernetes cluster traffic.

mkdir -p /usr/lib/systemd/system/containerd.service.d
cat > /usr/lib/systemd/system/containerd.service.d/http-proxy.conf<<EOF
[Service]
Environment="HTTP_PROXY=http://172.31.0.1:7890"
Environment="HTTPS_PROXY=http://172.31.0.1:7890"
Environment="NO_PROXY=localhost,127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local"
EOF
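
Reload systemd and restart containerd so the drop-in takes effect, then confirm the variables are visible to the service:

systemctl daemon-reload && systemctl restart containerd
systemctl show containerd --property=Environment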

3. Deploy the Kubernetes cluster

3.1 Configure the yum repository

cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
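
Before installing, you can check that the 1.28.2-0 packages are actually available from this repository (a quick optional check):

yum list kubeadm kubelet kubectl --showduplicates | grep 1.28.2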

3.2 Install version 1.28.2

yum -y install kubeadm-1.28.2-0 kubelet-1.28.2-0 kubectl-1.28.2-0

Installing:
 kubeadm                  x86_64   1.28.2-0        kubernetes    11 M
 kubectl                  x86_64   1.28.2-0        kubernetes    11 M
 kubelet                  x86_64   1.28.2-0        kubernetes    21 M
Installing for dependencies:
 conntrack-tools          x86_64   1.4.4-7.el7     base         187 k
 cri-tools                x86_64   1.26.0-0        kubernetes   8.6 M
 kubernetes-cni           x86_64   1.2.0-0         kubernetes    17 M
 libnetfilter_cthelper    x86_64   1.0.0-11.el7    base          18 k
 libnetfilter_cttimeout   x86_64   1.0.0-7.el7     base          18 k
 libnetfilter_queue       x86_64   1.0.2-2.el7_2   base          23 k
 socat                    x86_64   1.7.3.2-2.el7   base         290 k

3.3 Configure kubelet

To keep the cgroup driver used by kubelet consistent with the one used by the container runtime, edit the following file:

# vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
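
The same setting can be written non-interactively; note that this overwrites the file, which by default only holds KUBELET_EXTRA_ARGS:

cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF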

# Just enable kubelet at boot; no configuration file has been generated yet, so it will start automatically after the cluster is initialized
systemctl enable kubelet

3.4 Initialize the cluster

[root@k8s-master01 kubelet]# kubeadm config images list
I0720 01:48:41.111331   10165 version.go:256] remote version is much newer: v1.33.3; falling back to: stable-1.28
registry.k8s.io/kube-apiserver:v1.28.15
registry.k8s.io/kube-controller-manager:v1.28.15
registry.k8s.io/kube-scheduler:v1.28.15
registry.k8s.io/kube-proxy:v1.28.15
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1

# Generate the configuration file
kubeadm config print init-defaults > kubeadm-config.yaml
# Edit a few fields: advertiseAddress, the node name, imageRepository, kubernetesVersion, and add podSubnet
[root@k8s-master01 k8s-install]# cat kubeadm-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.31.0.111
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master01
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
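
Optionally pre-pull the control-plane images before running init (the same step the preflight output below suggests):

kubeadm config images pull --config kubeadm-config.yaml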


# Initialize
[root@k8s-master01 k8s-install]# kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.28.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.0.111]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.0.111 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.0.111 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 4.001248 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.0.111:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:8950b129b45e39eb9d5f11c05d44ab11575cbb7e38c780a15f26d4c2ffc781be 



# Run on the master node
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Run on each worker node
kubeadm join 172.31.0.111:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:8950b129b45e39eb9d5f11c05d44ab11575cbb7e38c780a15f26d4c2ffc781be 
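
Back on the master, confirm that all three nodes have registered; they will typically stay NotReady until the network plugin in section 4 is deployed:

kubectl get nodes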

4. Deploy the network plugin

https://docs.tigera.io/calico/latest/about
# Deploy the Tigera operator
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml

# Download the custom resources manifest
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml

# vi custom-resources.yaml and change cidr: 192.168.0.0/16 to the pod subnet we configured above, 10.244.0.0/16
[root@k8s-master01 k8s-install]# cat custom-resources.yaml 
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

---

# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

# Apply the custom resources
kubectl create -f custom-resources.yaml
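
Watch the Calico pods until they are all Running before moving on:

watch kubectl get pods -n calico-system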

5. Verify the cluster

Deploy an application based on the nginx:1.22 image with 3 replicas and expose it through a NodePort Service that maps container port 80 to port 30001 on the hosts for external access. The image pull policy is IfNotPresent, so a locally cached image is used when available.


cat > nginx-nodeport.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.22
          ports:
            - containerPort: 80
          imagePullPolicy: IfNotPresent

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
EOF
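
Apply the manifest and check that the Deployment and the Service come up:

kubectl apply -f nginx-nodeport.yaml
kubectl get deployment nginx-deployment
kubectl get svc nginx-service
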
[root@k8s-master01 tmp]# kubectl get po -A -owide |grep -v kubesphere
NAMESPACE                      NAME                                               READY   STATUS    RESTARTS      AGE   IP              NODE           NOMINATED NODE   READINESS GATES
calico-apiserver               calico-apiserver-67c976f899-nlf84                  1/1     Running   0             46m   10.244.79.68    k8s-worker01   <none>           <none>
calico-apiserver               calico-apiserver-67c976f899-qshb5                  1/1     Running   0             46m   10.244.69.195   k8s-worker02   <none>           <none>
calico-system                  calico-kube-controllers-5cdb789774-t2pb7           1/1     Running   0             47m   10.244.79.65    k8s-worker01   <none>           <none>
calico-system                  calico-node-5nnmp                                  1/1     Running   0             47m   172.31.0.112    k8s-worker01   <none>           <none>
calico-system                  calico-node-mxcjb                                  1/1     Running   0             47m   172.31.0.113    k8s-worker02   <none>           <none>
calico-system                  calico-node-s7crq                                  1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
calico-system                  calico-typha-6875fc4854-nt9c2                      1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
calico-system                  calico-typha-6875fc4854-rhdl4                      1/1     Running   1 (47m ago)   47m   172.31.0.113    k8s-worker02   <none>           <none>
calico-system                  csi-node-driver-qhxdm                              2/2     Running   0             47m   10.244.32.129   k8s-master01   <none>           <none>
calico-system                  csi-node-driver-wkfwh                              2/2     Running   0             47m   10.244.79.67    k8s-worker01   <none>           <none>
calico-system                  csi-node-driver-z4t6z                              2/2     Running   0             44m   10.244.69.196   k8s-worker02   <none>           <none>
default                        nginx-deployment-67856bc4f5-249kk                  1/1     Running   0             40m   10.244.69.198   k8s-worker02   <none>           <none>
default                        nginx-deployment-67856bc4f5-rrrcl                  1/1     Running   0             40m   10.244.69.197   k8s-worker02   <none>           <none>
default                        nginx-deployment-67856bc4f5-vlf9d                  1/1     Running   0             40m   10.244.79.69    k8s-worker01   <none>           <none>
kube-system                    coredns-66f779496c-bq7dx                           1/1     Running   0             47m   10.244.79.66    k8s-worker01   <none>           <none>
kube-system                    coredns-66f779496c-wfwjj                           1/1     Running   0             47m   10.244.69.193   k8s-worker02   <none>           <none>
kube-system                    etcd-k8s-master01                                  1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
kube-system                    kube-apiserver-k8s-master01                        1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
kube-system                    kube-controller-manager-k8s-master01               1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
kube-system                    kube-proxy-6kz5k                                   1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
kube-system                    kube-proxy-rhn7q                                   1/1     Running   0             47m   172.31.0.112    k8s-worker01   <none>           <none>
kube-system                    kube-proxy-x9hqq                                   1/1     Running   0             47m   172.31.0.113    k8s-worker02   <none>           <none>
kube-system                    kube-scheduler-k8s-master01                        1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
kube-system                    snapshot-controller-0                              1/1     Running   0             31m   10.244.79.72    k8s-worker01   <none>           <none>
tigera-operator                tigera-operator-94d7f7696-5z5wf                    1/1     Running   0             47m   172.31.0.111    k8s-master01   <none>           <none>
[root@k8s-master01 tmp]# kubectl get svc -n kube-system
NAME                          TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                        AGE
kube-controller-manager-svc   ClusterIP   None         <none>        10257/TCP                      33m
kube-dns                      ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP         113m
kube-scheduler-svc            ClusterIP   None         <none>        10259/TCP                      33m
kubelet                       ClusterIP   None         <none>        10250/TCP,10255/TCP,4194/TCP   33m
[root@k8s-master01 tmp]# dig -t a tanqidi.com @10.96.0.10

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.16 <<>> -t a tanqidi.com @10.96.0.10
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 45406
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;tanqidi.com.			IN	A

;; ANSWER SECTION:
tanqidi.com.		30	IN	A	193.112.95.180

;; Query time: 39 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Sun Jul 20 03:56:21 CST 2025
;; MSG SIZE  rcvd: 67
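
Finally, hit the NodePort from any node (or from outside, assuming port 30001 is reachable) to confirm nginx answers:

curl -I http://172.31.0.111:30001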