Before we start
1. Procedure
Because the target environment has no direct internet access, we need to download the Kubernetes binaries and images for a specific version in advance, on a machine that can reach the internet, and bundle them into an offline package. This ensures that the Kubernetes cluster can later be deployed and upgraded smoothly inside the isolated network, without installations or upgrades failing because of network restrictions.
The script below automatically downloads the Kubernetes core components (kubeadm, kubelet, kubectl), the container runtime (containerd, runc), the CNI plugins, and every image Kubernetes needs, and it can retag the images for a private Harbor registry to simplify internal image management and pushing.
What the script does
- Pinned version and architecture: the Kubernetes version (e.g. v1.32.4) and the target platform architecture (arm64) are fixed up front, so the downloaded binaries and images match each other.
- Binary downloads: curl fetches kubeadm, kubelet and kubectl for the target architecture, plus an x86 (amd64) kubeadm that is only used to obtain the image list.
- Container runtime downloads: containerd, runc and the CNI plugins are included, so the container runtime environment is complete.
- Dynamic image list: the x86 kubeadm resolves all core images required by the chosen version, so the image list does not have to be maintained by hand.
- Image pull and retag: images are pulled with docker pull --platform=linux/arm64 for the target architecture; if a private Harbor registry address is configured, each image is additionally tagged with it for easier image management.
- Offline packaging: all downloaded binaries and saved images are bundled into one compressed archive that is easy to carry into the network-isolated environment.
1.1 Build the offline package
#!/bin/bash
set -e
ARCH="arm64"
PLATFORM="linux/arm64"
K8S_VERSION="v1.32.4"
CONTAINERD_VERSION="1.7.13"
RUNC_VERSION="1.1.11"
CNI_VERSION="v1.3.0"
# Harbor registry address (empty string: do not retag for Harbor)
HARBOR_REGISTRY="harbor.tanqidi.com"
OUT_DIR="k8s-offline-${ARCH}-${K8S_VERSION}"
mkdir -p "$OUT_DIR"/{bin,images,containerd}
download_if_not_exist() {
local url="$1"
local file="$2"
if [[ -f "$file" ]]; then
echo "✔ 已存在: $file,跳过下载"
else
echo "↓ 下载: $file"
curl -Lo "$file" "$url"
fi
}
pull_and_save_image() {
local image="$1"
local fname=$(echo "$image" | tr '/:' '_')_${ARCH}.tar
if [[ -f "$OUT_DIR/images/$fname" ]]; then
echo "✔ 镜像包已存在: $fname,跳过"
return
fi
echo "→ 拉取镜像: $image for $PLATFORM"
if docker pull --platform=${PLATFORM} "$image"; then
# Retag for the Harbor registry
if [[ -n "$HARBOR_REGISTRY" ]]; then
# Extract the image name and tag (strip the registry and path prefix)
local repo_and_tag="${image##*/}" # e.g. kube-proxy:v1.32.4
local harbor_image="${HARBOR_REGISTRY}/${repo_and_tag}"
echo "→ Tagging: $image => $harbor_image"
docker tag "$image" "$harbor_image"
fi
docker save -o "$OUT_DIR/images/$fname" "$image"
echo "✓ 已保存: $fname"
else
echo "❌ 拉取失败: $image —— 已跳过"
fi
}
echo "==== [1] 下载 kubeadm/kubelet/kubectl(ARM64)和 kubeadm(x86)用于获取镜像列表 ===="
cd "$OUT_DIR/bin"
for bin in kubeadm kubelet kubectl; do
download_if_not_exist "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/${ARCH}/${bin}" "${bin}"
chmod +x "${bin}"
done
download_if_not_exist "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubeadm" "kubeadm_x86"
chmod +x kubeadm_x86
cd - > /dev/null
echo "==== [2] 下载 containerd + runc + CNI plugins ===="
cd "$OUT_DIR/containerd"
download_if_not_exist "https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-${ARCH}.tar.gz" "containerd-${CONTAINERD_VERSION}-linux-${ARCH}.tar.gz"
download_if_not_exist "https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.${ARCH}" "runc.${ARCH}"
download_if_not_exist "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz" "cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz"
cd - > /dev/null
echo "==== [3] 使用 x86 kubeadm 获取所需镜像列表并拉取 ===="
K8S_IMAGES=()
RAW_IMAGES=$("$OUT_DIR/bin/kubeadm_x86" config images list --kubernetes-version="$K8S_VERSION")
while read -r image; do
K8S_IMAGES+=("$image")
done <<< "$RAW_IMAGES"
echo "总计 Kubernetes 核心镜像 ${#K8S_IMAGES[@]} 个"
for image in "${K8S_IMAGES[@]}"; do
pull_and_save_image "$image"
done
echo "==== [4] 打包最终离线包 ===="
if [[ -f "${OUT_DIR}.tar.gz" ]]; then
echo "✔ 压缩包已存在,跳过"
else
tar -czf "${OUT_DIR}.tar.gz" "$OUT_DIR"
echo "✅ 完成:${OUT_DIR}.tar.gz"
fi
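After the script finishes, the archive still has to be carried into the isolated network and the images made available there. A minimal sketch of those follow-up steps, assuming Docker on an internal host can reach the Harbor instance configured above (the script filename make-offline.sh is a placeholder); note that docker save stored the images under their original registry.k8s.io names, so they are retagged again before pushing:
# On the internet-connected build host
bash make-offline.sh
# Inside the isolated network: unpack and load the image archives
tar -xzf k8s-offline-arm64-v1.32.4.tar.gz
for t in k8s-offline-arm64-v1.32.4/images/*.tar; do
docker load -i "$t"
done
# Retag and push to the internal Harbor (run docker login harbor.tanqidi.com first)
docker images --format '{{.Repository}}:{{.Tag}}' | grep '^registry.k8s.io/' | while read -r img; do
docker tag "$img" "harbor.tanqidi.com/${img##*/}"
docker push "harbor.tanqidi.com/${img##*/}"
done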
1.2 Deploy containerd
1.2.1 Create the directory layout
mkdir -p /usr/local/bin
mkdir -p /etc/containerd
mkdir -p /opt/cni/bin
1.2.2 Install containerd
After extraction, /usr/local/bin/ contains the containerd, containerd-shim and related binaries.
tar -C /usr/local -xzf containerd-1.7.13-linux-arm64.tar.gz
[root@hybxvuka01 bin]# pwd
/usr/local/bin
[root@hybxvuka01 bin]# ll
total 105116
-rwxr-xr-x 1 root root 40724696 Feb 1 2024 containerd
-rwxr-xr-x 1 root root 6815744 Feb 1 2024 containerd-shim
-rwxr-xr-x 1 root root 8519680 Feb 1 2024 containerd-shim-runc-v1
-rwxr-xr-x 1 root root 11665408 Feb 1 2024 containerd-shim-runc-v2
-rwxr-xr-x 1 root root 19398656 Feb 1 2024 containerd-stress
-rwxr-xr-x 1 root root 20512768 Feb 1 2024 ctr
1.2.3 Install runc
cp runc.arm64 /usr/local/sbin/runc
chmod 755 /usr/local/sbin/runc
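A quick sanity check that both binaries run on this host and report the versions downloaded above:
containerd --version
runc --version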
1.2.4 Extract the CNI plugins
mkdir -p /opt/cni/bin
tar -C /opt/cni/bin -xzf cni-plugins-linux-arm64-v1.3.0.tgz
[root@hybxvuka01 containerd]# ll /opt/cni/bin/
total 75000
-rwxr-xr-x 1 root root 3920373 May 10 2023 bandwidth
-rwxr-xr-x 1 root root 4404044 May 10 2023 bridge
-rwxr-xr-x 1 root root 10252122 May 10 2023 dhcp
-rwxr-xr-x 1 root root 4107210 May 10 2023 dummy
-rwxr-xr-x 1 root root 4571482 May 10 2023 firewall
-rwxr-xr-x 1 root root 4010859 May 10 2023 host-device
-rwxr-xr-x 1 root root 3437099 May 10 2023 host-local
-rwxr-xr-x 1 root root 4112661 May 10 2023 ipvlan
-rwxr-xr-x 1 root root 3452632 May 10 2023 loopback
-rwxr-xr-x 1 root root 4123995 May 10 2023 macvlan
-rwxr-xr-x 1 root root 3841245 May 10 2023 portmap
-rwxr-xr-x 1 root root 4282980 May 10 2023 ptp
-rwxr-xr-x 1 root root 3638129 May 10 2023 sbr
-rwxr-xr-x 1 root root 2965066 May 10 2023 static
-rwxr-xr-x 1 root root 4199057 May 10 2023 tap
-rwxr-xr-x 1 root root 3609903 May 10 2023 tuning
-rwxr-xr-x 1 root root 4111356 May 10 2023 vlan
-rwxr-xr-x 1 root root 3719609 May 10 2023 vrf
1.2.5 Generate the default containerd configuration
[root@hybxvuka01 containerd]# mkdir -p /etc/containerd/
[root@hybxvuka01 containerd]# containerd config default > /etc/containerd/config.toml
[root@hybxvuka01 containerd]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
[root@hybxvuka01 containerd]# cat /etc/containerd/config.toml
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
drain_exec_sync_io_timeout = "0s"
enable_cdi = false
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
image_pull_progress_timeout = "5m0s"
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.8"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
setup_serially = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_blockio_not_enabled_errors = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
sandbox_mode = "podsandbox"
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.nri.v1.nri"]
disable = true
disable_connections = false
plugin_config_path = "/etc/nri/conf.d"
plugin_path = "/opt/nri/plugins"
plugin_registration_timeout = "5s"
plugin_request_timeout = "2s"
socket_path = "/var/run/nri/nri.sock"
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/arm64/v8"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
blockio_config_file = ""
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.blockfile"]
fs_type = ""
mount_options = []
root_path = ""
scratch_file = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
mount_options = []
root_path = ""
sync_remove = false
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[plugins."io.containerd.transfer.v1.local"]
config_path = ""
max_concurrent_downloads = 3
max_concurrent_uploaded_layers = 3
[[plugins."io.containerd.transfer.v1.local".unpack_config]]
differ = ""
platform = "linux/arm64/v8"
snapshotter = "overlayfs"
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.metrics.shimstats" = "2s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
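One caveat for an air-gapped node: the generated configuration still points sandbox_image at registry.k8s.io/pause:3.8, which this node cannot pull. A minimal sketch of redirecting it to the internal registry instead; harbor.tanqidi.com and the pause:3.10 tag are assumptions here, so use whatever registry and pause version your kubeadm config images list output actually shows:
sed -i 's#sandbox_image = "registry.k8s.io/pause:3.8"#sandbox_image = "harbor.tanqidi.com/pause:3.10"#' /etc/containerd/config.toml
# restart containerd afterwards if it is already running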
1.2.6 Configure the systemd unit
cat > /etc/systemd/system/containerd.service <<EOF
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target dbus.service
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
1.2.7 Start containerd
systemctl daemon-reexec
systemctl daemon-reload
systemctl enable --now containerd
# Check that it is running
[root@hybxvuka01 containerd]# systemctl status containerd
● containerd.service - containerd container runtime
Loaded: loaded (/etc/systemd/system/containerd.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2025-07-16 16:17:25 CST; 8s ago
Docs: https://containerd.io
Main PID: 2142 (containerd)
Tasks: 16
Memory: 32.0M
CGroup: /system.slice/containerd.service
└─2142 /usr/local/bin/containerd
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576228489+08:00" level=info msg="skipping tracing processor initialization (no tracing plugin)" error="no OpenTelemet>
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576238499+08:00" level=info msg="Start subscribing containerd event"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576322180+08:00" level=info msg="Start recovering state"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576415972+08:00" level=info msg="Start event monitor"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576445872+08:00" level=info msg="Start snapshots syncer"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.576504213+08:00" level=info msg="Start cni network conf syncer for default"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.577185264+08:00" level=info msg="Start streaming server"
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.577011431+08:00" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.577521909+08:00" level=info msg=serving... address=/run/containerd/containerd.sock
Jul 16 16:17:25 hybxvuka01 containerd[2142]: time="2025-07-16T16:17:25.577632541+08:00" level=info msg="containerd successfully booted in 0.037744s"
1.2.8 Verify that containerd works
# List images with ctr
ctr images ls
crictl info
# If crictl is not installed, download it from the official releases in advance (offline) and place it in /usr/local/bin
chmod +x /usr/local/bin/crictl
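crictl also needs to be pointed at the containerd socket, otherwise it probes legacy endpoints and prints deprecation warnings. A minimal sketch of its configuration:
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF
crictl info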
1.3 Install kubeadm/kubelet/kubectl
cp k8s-offline-arm64-v1.32.4/bin/kubeadm /usr/local/bin/
cp k8s-offline-arm64-v1.32.4/bin/kubelet /usr/local/bin/
cp k8s-offline-arm64-v1.32.4/bin/kubectl /usr/local/bin/
chmod +x /usr/local/bin/kubeadm /usr/local/bin/kubelet /usr/local/bin/kubectl
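A quick check that the copied arm64 binaries are the expected release:
kubeadm version -o short
kubelet --version
kubectl version --client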
# Generate kubelet.service and kubelet.service.d/10-kubeadm.conf
cat > /lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
Wants=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
mkdir -p /lib/systemd/system/kubelet.service.d
cat > /lib/systemd/system/kubelet.service.d/10-kubeadm.conf <<'EOF'
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
EOF
# Enable and start kubelet
systemctl daemon-reload
systemctl enable --now kubelet
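At this point kubelet restarts in a loop because /var/lib/kubelet/config.yaml does not exist yet; that is expected until kubeadm init (or kubeadm join) generates it. To watch it:
systemctl status kubelet
journalctl -u kubelet -f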
1.x Deploy keepalived for high availability
yum update -y
yum install -y keepalived
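The yum commands assume an internal package mirror is reachable; otherwise keepalived has to be added to the offline package as well. Since this section is only a stub, here is a minimal sketch of a keepalived.conf that provides the API-server VIP; the interface name eth0, the VIP 10.133.179.100, the router id and the password are placeholders to adapt:
cat > /etc/keepalived/keepalived.conf <<EOF
vrrp_instance VI_K8S {
    state MASTER              # BACKUP on the other control-plane nodes
    interface eth0            # placeholder: the NIC that should carry the VIP
    virtual_router_id 51
    priority 100              # lower on the BACKUP nodes
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-vip     # placeholder
    }
    virtual_ipaddress {
        10.133.179.100/24     # placeholder VIP for api.k8s.local
    }
}
EOF
systemctl enable --now keepalived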
1.x Initialize the cluster
Generate a baseline configuration with kubeadm config print init-defaults > kubeadm-config.yaml, then adjust it as shown below:
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.133.179.41
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: node
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 87600h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: api.k8s.local:6443
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: harbor.bx.crpharm.com/k8s
kind: ClusterConfiguration
kubernetesVersion: 1.32.4
networking:
  dnsDomain: cluster.local
  podSubnet: 172.22.64.0/18
  serviceSubnet: 172.22.0.0/18
proxy: {}
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
1.x.x Run the initialization
# Dry run first; a few warnings are expected.
kubeadm init --config=./kubeadm-config.yaml --dry-run
[root@hybxvuka01 tmp]# kubeadm init --config=./kubeadm-config.yaml --dry-run
W0718 14:27:41.658019 439521 validation.go:79] WARNING: certificateValidityPeriod: the value 87600h0m0s is more than the recommended default for certificate expiration: 8760h0m0s
W0718 14:27:41.658175 439521 constants.go:614] Using dry-run directory /etc/kubernetes/tmp/kubeadm-init-dryrun2132585017. To override it, set the environment variable KUBEADM_INIT_DRYRUN_DIR
[init] Using Kubernetes version: v1.32.4
[preflight] Running pre-flight checks
[WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2
[WARNING Hostname]: hostname "node" could not be reached
[WARNING Hostname]: hostname "node": lookup node on 10.7.60.52:53: server misbehaving
The hostname warnings appear because nodeRegistration.name was left at the kubeadm default "node", which does not resolve in this environment; set it to the real node hostname (or drop the field so the actual hostname is used) before the real run. The certificateValidityPeriod warning is expected, since 87600h was chosen deliberately.
# Real run
kubeadm init --config=./kubeadm-config.yaml
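If the init succeeds, kubeadm prints the join commands for the remaining nodes. The usual follow-up on the first control-plane node looks roughly like this (a pod network add-on, installed from the offline package, is still needed before nodes become Ready):
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
kubectl get nodes
kubectl get pods -n kube-system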