Preface

Where applicable, remember to change the image references to your own private registry.

1. metallb

1.1 metallb-native.yaml

https://raw.githubusercontent.com/metallb/metallb/v0.15.2/config/manifests/metallb-native.yaml

1.2 metallb-ip-pool.yaml

Here 10.133.179.3 stands for your high-availability VIP or an actual load balancer IP.

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: my-ip-pool
  namespace: metallb-system
spec:
  addresses:
  - 10.133.179.3/32
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: my-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - my-ip-pool
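
A minimal apply sequence, assuming the two manifests are saved under the file names used in the headings above:

# Install MetalLB itself first and wait for its pods, otherwise the
# IPAddressPool/L2Advertisement CRDs will not exist yet
kubectl apply -f metallb-native.yaml
kubectl -n metallb-system wait --for=condition=Ready pod --all --timeout=300s

# Then apply the address pool and the L2 advertisement
kubectl apply -f metallb-ip-pool.yaml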

1.3 test-nginx.yaml

Once this is deployed, visiting the high-availability IP directly (no port needed; it serves on the default port 80) should bring up the nginx welcome page, which confirms the configuration works. Delete the test service when you are done, because the next step deploys ingress-nginx, which needs ports 80 and 443.

kind: Deployment
apiVersion: apps/v1
metadata:
  name: test-nginx
  namespace: test
  labels:
    app: test-nginx
  annotations:
    kubesphere.io/creator: admin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-nginx
  template:
    metadata:
      labels:
        app: test-nginx
      annotations:
        kubesphere.io/creator: admin
        kubesphere.io/imagepullsecrets: '{"container-tj0qou":"harbor-dev"}'
    spec:
      containers:
        - name: container-tj0qou
          image: 'harbor.bx.crpharm.com/k8s/arm64/kubesphere/nginx:1.14-alpine'
          ports:
            - name: http-80
              containerPort: 80
              protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: default
      serviceAccount: default
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600

---
apiVersion: v1
kind: Service
metadata:
  name: test-nginx
  namespace: test
  labels:
    app: test-nginx
  annotations:
    kubesphere.io/creator: admin
spec:
  type: LoadBalancer
  selector:
    app: test-nginx
  ports:
    - name: http-80
      protocol: TCP
      port: 80
      targetPort: 80
  externalTrafficPolicy: Cluster
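
A quick apply-and-verify sketch, assuming the manifest above is saved as test-nginx.yaml and the test namespace does not exist yet:

kubectl create namespace test
kubectl apply -f test-nginx.yaml

# MetalLB assigns the VIP from the pool; check the EXTERNAL-IP column
kubectl -n test get svc test-nginx

# The default nginx page should come back on port 80
curl http://10.133.179.3/

# Clean up afterwards so ports 80/443 are free for ingress-nginx
kubectl delete -f test-nginx.yaml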

2. ingress-nginx

https://github.com/kubernetes/ingress-nginx

You need to delete the @sha256 digest suffix from the image references in the manifest (once the images are mirrored into a private registry, the pinned digests generally no longer resolve).

https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.13.0/deploy/static/provider/baremetal/deploy.yaml

Once all components are ready, go to the ingress-nginx namespace and edit the Service named ingress-nginx-controller, changing its type to LoadBalancer.
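
One way to do that from the command line (editing the Service in the console works just as well):

kubectl -n ingress-nginx patch svc ingress-nginx-controller -p '{"spec":{"type":"LoadBalancer"}}'

# MetalLB should now hand the VIP to the controller
kubectl -n ingress-nginx get svc ingress-nginx-controller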

3. polardbx cluster

https://doc.polardbx.com/zh/operator/deployment/1-installation.html

The servers may not have internet access, in which case the online helm install shown below will not work; download the offline package first and use the local .tgz in section 3.1.

$ helm install --namespace polardbx-operator-system --create-namespace polardbx-operator https://github.com/polardb/polardbx-operator/releases/download/v1.7.0/polardbx-operator-1.7.0.tgz
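
On a machine that can reach GitHub, download the chart and copy it to the server; the offline install commands in 3.1 then point at the local .tgz:

wget https://github.com/polardb/polardbx-operator/releases/download/v1.7.0/polardbx-operator-1.7.0.tgz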

3.1 Prerequisites

# First, run the following command to get the latest image versions of the PolarDB-X components (these go into the YAML files below):
[root@localhost ~]# curl -s "https://polardbx-opensource.oss-cn-hangzhou.aliyuncs.com/scripts/get-version.sh" | sh
CN polardbx-opensource-registry.cn-beijing.cr.aliyuncs.com/polardbx/polardbx-sql:v2.4.1_5.4.19
DN polardbx-opensource-registry.cn-beijing.cr.aliyuncs.com/polardbx/polardbx-engine:v2.4.1_8.4.19
CDC polardbx-opensource-registry.cn-beijing.cr.aliyuncs.com/polardbx/polardbx-cdc:v2.4.1_5.4.19
COLUMNAR polardbx-opensource-registry.cn-beijing.cr.aliyuncs.com/polardbx/polardbx-columnar:v2.4.1_5.4.19



# Create the namespace
kubectl create namespace polardbx-operator-system

# Storage directory configuration
cat > storageConfig.yaml <<EOF
node:
  volumes:
    data: /data/polardbx/data
    log: /data/polardbx/log
    filestream: /data/polardbx/filestream
EOF


# Offline deployment with a custom image registry
helm install polardbx-operator ./polardbx-operator-1.7.0.tgz \
  --namespace polardbx-operator-system \
  -f storageConfig.yaml \
  --create-namespace \
  --set imageRepo=harbor.bx.crpharm.com/k8s/arm64/polardbx

# Use the original image registry address
helm install polardbx-operator ./polardbx-operator-1.7.0.tgz \
  --namespace polardbx-operator-system \
  -f storageConfig.yaml \
  --create-namespace \
  --set imageRepo=polardbx-opensource-registry.cn-beijing.cr.aliyuncs.com/polardbx

# Uninstall
helm uninstall polardbx-operator -n polardbx-operator-system


[root@localhost db]# kubectl -n polardbx-operator-system get pods
NAME                                           READY   STATUS    RESTARTS   AGE
polardbx-clinic-5d789b5c65-txjzq               1/1     Running   0          5m12s
polardbx-controller-manager-78f54f778c-gn8gw   1/1     Running   0          5m12s
polardbx-hpfs-6xbfz                            1/1     Running   0          5m12s
polardbx-tools-updater-z2vx2                   1/1     Running   0          5m12s

3.2 Standard Edition

Deploy a PolarDB-X Standard Edition cluster

cat > polardbx-standard.yaml <<EOF
apiVersion: polardbx.aliyun.com/v1
kind: XStore
metadata:
  name: quick-start
  namespace: polardbx-operator-system
spec:
  config:
    controller:
      RPCProtocolVersion: 1
  topology:
    nodeSets:
    - name: cand
      replicas: 2
      role: Candidate
      template:
        spec:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-engine:v2.4.1_8.4.19
          resources:
            limits:
              cpu: 2
              memory: 4Gi
    - name: log
      replicas: 1
      role: Voter
      template:
        spec:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-engine:v2.4.1_8.4.19
          resources:
            limits:
              cpu: 1
              memory: 2Gi
EOF
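
Apply it and watch the cluster come up (a sketch; the xstore resource type assumes the XStore CRD registered by the operator):

kubectl apply -f polardbx-standard.yaml
kubectl -n polardbx-operator-system get xstore quick-start -w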

3.3 Enterprise Edition

Deploy a PolarDB-X Enterprise Edition cluster

cat > polardbx-enterprise.yaml <<EOF
apiVersion: polardbx.aliyun.com/v1
kind: PolarDBXCluster
metadata:
  name: quick-start
  namespace: polardbx-operator-system
spec:
  topology:
    nodes:
      gms:
        template:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-engine:v2.4.1_8.4.19
          resources:
            requests:
              cpu: 100m
              memory: 500Mi
            limits:
              cpu: 1
              memory: 1Gi
      cn:
        replicas: 1
        template:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-sql:v2.4.1_5.4.19
          resources:
            requests:
              cpu: 100m
              memory: 1Gi
            limits:
              cpu: 1
              memory: 2Gi
      dn:
        replicas: 1
        template:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-engine:v2.4.1_8.4.19
          resources:
            requests:
              cpu: 100m
              memory: 500Mi
            limits:
              cpu: 1
              memory: 1Gi
      cdc:
        replicas: 1
        template:
          image: harbor.bx.crpharm.com/k8s/arm64/polardbx/polardbx-cdc:v2.4.1_5.4.19
          resources:
            requests:
              cpu: 100m
              memory: 500Mi
            limits:
              cpu: 1
              memory: 1Gi
EOF
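
Same idea for the enterprise edition:

kubectl apply -f polardbx-enterprise.yaml
kubectl -n polardbx-operator-system get polardbxcluster quick-start -w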

3.4 Connecting to the database

Switch to the polardbx namespace and find the Secret named quick-start; it holds the account and password. For external access, change the Service named quick-start to type NodePort. A quick search suggests the polardbx_root password cannot be changed. Really?
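
A sketch of reading the credentials and exposing the Service (the key names inside the Secret vary, so list them first; polardbx_root below is just an example):

# List the account/password pairs stored in the Secret (values are base64-encoded)
kubectl -n polardbx-operator-system get secret quick-start -o jsonpath='{.data}'

# Decode one of them, e.g. polardbx_root (use whatever key the command above shows)
kubectl -n polardbx-operator-system get secret quick-start -o jsonpath='{.data.polardbx_root}' | base64 -d

# Switch the Service to NodePort for access from outside the cluster
kubectl -n polardbx-operator-system patch svc quick-start -p '{"spec":{"type":"NodePort"}}'
kubectl -n polardbx-operator-system get svc quick-start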

4. nacos cluster

https://github.com/nacos-group/nacos-k8s/blob/master/deploy/nacos/nacos-no-pvc-ingress.yaml

If you follow my deployment, switch the image to nacos/nacos-server:v2.1.2-slim, which supports arm. It is a bit old by now, though; the workflow has been verified with it, but a newer version still needs to be tested.

https://tanqidi.com/archives/bc70be98-2b20-44f3-bbe1-e9a3043dcd52
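
If you start from the upstream manifest, one way to swap in the arm image (the exact image line in the file is an assumption, check it first):

wget https://raw.githubusercontent.com/nacos-group/nacos-k8s/master/deploy/nacos/nacos-no-pvc-ingress.yaml
sed -i 's#image: nacos/nacos-server:.*#image: nacos/nacos-server:v2.1.2-slim#' nacos-no-pvc-ingress.yaml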

4.1 nacos-sync

https://tanqidi.com/archives/18da8fed-bacd-44ee-91df-68014573c28b-2c2f15b8

5. apollo

https://github.com/apolloconfig/apollo-quick-start/tree/v2.2.0/sql

https://tanqidi.com/archives/c68b8595-356a-4266-a4cd-ea77b54ce73b

Pay attention to eureka.service.url in the ServerConfig table of the ApolloConfigDB database; change it to the in-cluster DNS name http://apollo-configservice.bx.svc.cluster.local:8080/eureka/.

Since a eureka cluster is deployed later, you can also follow the linked article and point it at the in-cluster eureka address instead.
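
One way to make that change directly against the config database (a sketch, assuming the mysql8.bx instance from the manifests below and Apollo's stock ServerConfig schema with Key/Value columns):

mysql -h mysql8.bx -P 3306 -uroot -p -e "UPDATE ApolloConfigDB.ServerConfig SET \`Value\` = 'http://apollo-configservice.bx.svc.cluster.local:8080/eureka/' WHERE \`Key\` = 'eureka.service.url';"

# Restart configservice if the new value is not picked up after a minute or so
kubectl -n bx rollout restart deployment/apollo-configservice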

apiVersion: apps/v1
kind: Deployment
metadata:
  name: apollo-configservice
  namespace: bx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apollo-configservice
  template:
    metadata:
      labels:
        app: apollo-configservice
    spec:
      containers:
      - name: configservice
        image: harbor.bx.crpharm.com/k8s/arm64/apolloconfig/apollo-configservice:2.2.0
        ports:
        - containerPort: 8080
        env:
        - name: SPRING_DATASOURCE_URL
          value: jdbc:mysql://mysql8.bx:3306/ApolloConfigDB?characterEncoding=utf8
        - name: SPRING_DATASOURCE_USERNAME
          value: root
        - name: SPRING_DATASOURCE_PASSWORD
          value: "xxxxxxxxxxxx"
        volumeMounts:
        - name: apollo-logs
          mountPath: /opt/logs
      volumes:
      - name: apollo-logs
        hostPath:
          path: /log/apollo
          type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
  name: apollo-configservice
  namespace: bx
spec:
  selector:
    app: apollo-configservice
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: apollo-adminservice
  namespace: bx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apollo-adminservice
  template:
    metadata:
      labels:
        app: apollo-adminservice
    spec:
      containers:
      - name: adminservice
        image: harbor.bx.crpharm.com/k8s/arm64/apolloconfig/apollo-adminservice:2.2.0
        ports:
        - containerPort: 8090
        env:
        - name: SPRING_DATASOURCE_URL
          value: jdbc:mysql://mysql8.bx:3306/ApolloConfigDB?characterEncoding=utf8
        - name: SPRING_DATASOURCE_USERNAME
          value: root
        - name: SPRING_DATASOURCE_PASSWORD
          value: "xxxxxxxxxxxx"
        volumeMounts:
        - name: apollo-logs
          mountPath: /opt/logs
      volumes:
      - name: apollo-logs
        hostPath:
          path: /log/apollo
          type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
  name: apollo-adminservice
  namespace: bx
spec:
  selector:
    app: apollo-adminservice
  ports:
  - protocol: TCP
    port: 8090
    targetPort: 8090
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: apollo-portal
  namespace: bx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apollo-portal
  template:
    metadata:
      labels:
        app: apollo-portal
    spec:
      containers:
      - name: portal
        image: harbor.bx.crpharm.com/k8s/arm64/apolloconfig/apollo-portal:2.2.0
        ports:
        - containerPort: 8070
        env:
        - name: SPRING_DATASOURCE_URL
          value: jdbc:mysql://mysql8.bx:3306/ApolloPortalDB?characterEncoding=utf8
        - name: SPRING_DATASOURCE_USERNAME
          value: root
        - name: SPRING_DATASOURCE_PASSWORD
          value: "xxxxxxxxxxxx"
        - name: APOLLO_PORTAL_ENVS
          value: dev
        - name: DEV_META
          value: http://apollo-configservice.bx:8080
        volumeMounts:
        - name: apollo-logs
          mountPath: /opt/logs
      volumes:
      - name: apollo-logs
        hostPath:
          path: /log/apollo
          type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
  name: apollo-portal
  namespace: bx
spec:
  selector:
    app: apollo-portal
  ports:
  - protocol: TCP
    port: 8070
    targetPort: 8070
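
Assuming the three Deployments and Services above are saved to a single apollo.yaml (the file name is arbitrary), a minimal apply-and-check sketch:

kubectl create namespace bx   # if it does not exist yet
kubectl apply -f apollo.yaml
kubectl -n bx get pods -l app=apollo-portal

# Reach the portal locally before wiring up any external access
kubectl -n bx port-forward svc/apollo-portal 8070:8070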

6. zookeeper cluster

https://tanqidi.com/archives/66385472-489e-4e98-b27a-c81a71e43fe8

7. kafka cluster

https://tanqidi.com/archives/0a27256b-069d-4aeb-acc4-4437e7465461

8. eureka cluster

https://tanqidi.com/archives/18da8fed-bacd-44ee-91df-68014573c28b-a1112999

9. redis cluster

https://tanqidi.com/archives/18da8fed-bacd-44ee-91df-68014573c28b-7519e56b

10. rabbitmq cluster

https://tanqidi.com/archives/18da8fed-bacd-44ee-91df-68014573c28b-a046d4fc

11. nexus