k8s

予早 2025-08-31 14:59:19

Deploying Kubernetes 1.33 on a three-node Alibaba Cloud Ubuntu 24.04 cluster

Cluster information

Cluster overview

Instance ID             Name    IP        Hostname
i-2zeddz86r5qtupvq5bdz  node1   10.0.1.1  ip-10-0-1-1
i-2ze19ijurr8l445yywvd  node2   10.0.1.2  ip-10-0-1-2
i-2ze8qn58gz4qt24fdsh5  node3   10.0.1.3  ip-10-0-1-3

Node overview

All three nodes have identical specifications.

Category     Resource                          Configuration                       Notes
Basic info   Instance ID                       see the table above
             Name                              user-defined
             Region / Availability zone        North China 2 (Beijing) / Zone H    affects latency and disaster recovery
Compute      Instance type                     ecs.u1-c1m2.large                   2 vCPU, 4 GiB
             CPU utilization (7-day peak)      2 %                                 CloudMonitor data
Memory       Memory size                       4 GiB
             Memory utilization (7-day peak)   15 %
Storage      System disk                       40 GiB ESSD Entry
             Data disk
Network      VPC                               vpc-2zeo6i8vg8l355t5yv6fp           IPv4 CIDR: 10.0.0.0/16
             vSwitch                           vsw-2zebjepezqbs5zgyayb5n           IPv4 CIDR: 10.0.1.0/24
             Public IP / EIP                   8 Mbps
Image        Operating system                  Ubuntu 24.04 64-bit
Security     Security group                    sg-2zeddz86r5qtupvstjhc             allows ports 22, 3389, 6443

Disable swap

Perform on every node. The kubelet refuses to start by default while swap is enabled.

# Turn swap off immediately (lost after reboot)
sudo swapoff -a
# Disable swap permanently by commenting out the swap entries in /etc/fstab
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
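
A quick way to confirm swap is fully off (a minimal verification sketch, not part of the original steps):

# swapon prints nothing when no swap device is active; free should report 0B of swap
swapon --show
free -h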

Load kernel modules and sysctl parameters

# Load the modules now (lost after reboot)
sudo modprobe overlay
sudo modprobe br_netfilter
# Load the modules automatically at boot
sudo tee /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
# Kernel parameters required by Kubernetes: let bridged traffic pass through iptables and enable IP forwarding
sudo tee /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the settings from all sysctl configuration files without rebooting
sudo sysctl --system
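
To confirm that the modules are loaded and the parameters took effect, a short check (standard commands on Ubuntu, added here as a sketch):

# Both modules should be listed
lsmod | grep -E 'overlay|br_netfilter'
# All three values should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward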

Install the container runtime

Perform the same steps on every node. Docker is used as the container runtime.

# Step 1: refresh the local package index and install the basic tools required by the later steps
sudo apt-get update                                        # update the package lists
sudo apt-get install ca-certificates curl gnupg            # ca-certificates: root certificates for HTTPS verification
                                                           # curl: used below to download the GPG key
                                                           # gnupg: GNU Privacy Guard, used to process GPG signatures

# Step 2: import and trust Docker's official GPG key so downloaded packages can be verified
sudo install -m 0755 -d /etc/apt/keyrings                  # create /etc/apt/keyrings (if missing) with mode 0755
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg \
  | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg     # fetch Docker's GPG key from the Aliyun mirror, convert it to binary format and save it
sudo chmod a+r /etc/apt/keyrings/docker.gpg                # make the key readable by all users

# Step 3: add the Aliyun Docker CE apt repository
sudo mkdir -p /etc/apt/sources.list.d/                     # make sure /etc/apt/sources.list.d/ exists
echo \
  "deb [arch=$(dpkg --print-architecture) \
   signed-by=/etc/apt/keyrings/docker.gpg] \
   https://mirrors.aliyun.com/docker-ce/linux/ubuntu \
   $(. /etc/os-release && echo "$VERSION_CODENAME") stable" \
  | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Explanation:
#   deb [...]  : declares a binary package source
#   arch=...   : auto-detects the system architecture (amd64/arm64, ...)
#   signed-by  : GPG key file used to verify package signatures
#   https://...: Aliyun Docker CE mirror URL
#   $(...)     : reads VERSION_CODENAME from /etc/os-release (e.g. noble, jammy)
#   stable     : only use components marked as stable

# Step 4: update the index again and install the latest Docker CE and related components
# Re-sync the package index so the newly added Docker repository takes effect
sudo apt-get update
# Install the Docker packages
# docker-ce              Docker Engine (Community Edition)
# docker-ce-cli          Docker command-line client
# containerd.io          container runtime (Docker itself runs on top of containerd)
# docker-buildx-plugin   Docker Buildx plugin (next-generation build tool)
# docker-compose-plugin  Docker Compose v2 plugin (provides the `docker compose` subcommand)
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
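
Optionally, confirm that the engine and plugins were installed (version checks only; these commands are not part of the original steps):

docker --version            # Docker Engine version
docker compose version      # Compose v2 plugin
docker buildx version       # Buildx plugin
containerd --version        # containerd runtime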

# To install a specific Docker CE version instead, do it in two manual steps:
# Step 1: list the Docker CE versions available in the repository
# apt-cache madison docker-ce
# Example output:
#   docker-ce | 17.03.1~ce-0~ubuntu-xenial | https://mirrors.aliyun.com/... xenial/stable amd64 Packages
#   docker-ce | 17.03.0~ce-0~ubuntu-xenial | https://mirrors.aliyun.com/... xenial/stable amd64 Packages
# Step 2: install the chosen version (replace [VERSION] with the full version string from step 1)
# sudo apt-get -y install docker-ce=[VERSION]

# After the installation, configure Docker before enabling and restarting the service.

# 1. Make sure the Docker configuration directory exists (create it if missing)
mkdir -p /etc/docker

# 2. /etc/docker/daemon.json, explained field by field. This annotated form is for
#    reference only -- JSON does not allow comments, so do not write it to disk;
#    the actual file content follows below.
#
#    "exec-opts": ["native.cgroupdriver=systemd"]
#        Use systemd as the cgroup driver. Recommended for Kubernetes; avoids the
#        resource-management conflicts caused by mixing cgroupfs and systemd.
#    "registry-mirrors": [...]
#        Registry mirrors tried in order; all entries below are public accelerators
#        reachable from mainland China (1Panel, Mirrorify, DaoCloud, DockerMirror,
#        docker.aityp.com, AnyHub, dockerhub.icu, docker.awsl9527.cn).
#    "insecure-registries": [...]
#        Private registries reached over plain HTTP (typically internal test registries).
#    "max-concurrent-downloads": 10
#        Cap concurrent layer downloads so image pulls do not saturate the bandwidth.
#    "log-driver" / "log-level" / "log-opts"
#        json-file driver, warn level, rotate at 10 MB per file, keep at most 3 old files.
#    "data-root": "/var/lib/docker"
#        Directory that holds all Docker data (images, containers, volumes, ...).

# 3. Write the actual configuration in one go (valid JSON, no comments; EOF marks the end of the here-document):
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": [
    "native.cgroupdriver=systemd"
  ],
  "registry-mirrors": [
    "https://docker.1panel.live",
    "https://hub.mirrorify.net",
    "https://docker.m.daocloud.io",
    "https://registry.dockermirror.com",
    "https://docker.aityp.com",
    "https://docker.anyhub.us.kg",
    "https://dockerhub.icu",
    "https://docker.awsl9527.cn"
  ],
  "insecure-registries": [],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker"
}
EOF
# 4. Enable Docker at boot and restart it so the new daemon.json takes effect
systemctl enable docker
systemctl restart docker

# 5. Check the service status and confirm it started without errors
systemctl status docker

# 6. Docker's cgroup driver should now be systemd rather than the default cgroupfs
docker info | grep -i "Cgroup Driver"

# 7. Pull images to verify the setup: once explicitly through one of the mirrors,
#    once through the configured mirror list
docker pull hub.mirrorify.net/library/nginx:latest
docker pull nginx
docker images

cri-dockerd

cri-dockerd is a CRI (Container Runtime Interface) adapter that lets Kubernetes talk to Docker through the standard CRI interface.

Kubernetes deprecated dockershim in 1.20 and removed it entirely in 1.24, so Docker is no longer supported as a container runtime out of the box. On 1.24 and later, containerd is the preferred runtime; if Docker is still required (for example, for compatibility with legacy systems), cri-dockerd acts as the bridge: one side speaks CRI to the kubelet, the other side speaks the Docker API.
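
For reference, these are the CRI socket paths that kubelet/kubeadm get pointed at via --cri-socket; the cri-dockerd path is the one used throughout this guide, the containerd path is shown only for comparison:

# containerd (default choice on 1.24+):  unix:///run/containerd/containerd.sock
# cri-dockerd (used in this guide):      unix:///run/cri-dockerd.sock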

Download and extract

If the release cannot be downloaded directly from GitHub because of network restrictions, visit https://github.akams.cn and pick a reachable proxy link instead.

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.18/cri-dockerd-0.3.18.amd64.tgz
tar -zxvf cri-dockerd-*.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
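
A quick sanity check that the binary is in place and runnable (assuming the release supports the --version flag, which this series does):

cri-dockerd --version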

Configuration

Official systemd unit templates: https://github.com/Mirantis/cri-dockerd/tree/master/packaging/systemd

/etc/systemd/system/cri-docker.service

Starting from the official template, append the following to ExecStart:

--network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

Note: kubeadm v1.33 expects the sandbox image pause:3.10, so pause:3.9 triggers the warning shown later in the kubeadm init output; specifying pause:3.10 here avoids it.

# The quoted 'EOF' stops the shell from expanding $MAINPID while writing the unit file
cat > /etc/systemd/system/cri-docker.service <<'EOF'
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

/etc/systemd/system/cri-docker.socket

Use the official template as-is.

cat > /etc/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF
# Make systemd rescan the unit files on disk (.service, .socket, .mount, ...). This must be run whenever
# a systemd unit file (such as /etc/systemd/system/cri-docker.service) is added, changed, or removed,
# otherwise systemd keeps using its cached copy.
systemctl daemon-reload
# Enable cri-docker at boot and start it right away
systemctl enable cri-docker --now
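
To verify that the socket and service are up, a minimal check; the last command uses crictl, which ships with the cri-tools package pulled in together with kubeadm in the next section, so run it after that step:

# Both units should report "active"
systemctl is-active cri-docker.socket cri-docker.service
# The CRI socket should exist
ls -l /run/cri-dockerd.sock
# Once kubeadm/cri-tools are installed, crictl can query the endpoint directly
crictl --runtime-endpoint unix:///run/cri-dockerd.sock info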

Install Kubernetes

Perform the same steps on every node.

https://kubernetes.io/zh-cn/docs/tasks/tools/install-kubectl-linux/#install-using-native-package-management

# The Kubernetes packages are not available in Ubuntu 24.04's default repositories, so a package repository must be added first.
# Download the repository's public signing key with curl. Because of network restrictions, the Aliyun mirror is used: https://developer.aliyun.com/mirror/kubernetes, https://mirrors.aliyun.com/kubernetes-new.
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.33/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

# Add the Kubernetes apt repository
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.33/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list

# Install kubelet, kubeadm and kubectl
sudo apt update
sudo apt install kubelet kubeadm kubectl -y
# Keep kubelet's cgroup driver consistent with Docker (both use systemd):
# edit /etc/default/kubelet and set --cgroup-driver=systemd in KUBELET_EXTRA_ARGS, i.e. KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
# (since kubeadm 1.22 the generated KubeletConfiguration already defaults cgroupDriver to systemd, so this is mostly a safeguard)
vi /etc/default/kubelet
systemctl enable kubelet
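
The official install guide also recommends pinning the packages so an unattended apt upgrade cannot bump the cluster version; the version checks are just a quick confirmation:

# Prevent accidental upgrades of the Kubernetes packages
sudo apt-mark hold kubelet kubeadm kubectl
# Confirm the installed versions
kubeadm version
kubectl version --client
kubelet --version
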
# List the images kubeadm needs for this Kubernetes version
kubeadm config images list --kubernetes-version=stable
root@ip-10-0-1-1:~# kubeadm config images list --kubernetes-version=stable
registry.k8s.io/kube-apiserver:v1.33.4
registry.k8s.io/kube-controller-manager:v1.33.4
registry.k8s.io/kube-scheduler:v1.33.4
registry.k8s.io/kube-proxy:v1.33.4
registry.k8s.io/coredns/coredns:v1.12.0
registry.k8s.io/pause:3.10
registry.k8s.io/etcd:3.5.21-0
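
Since the init command below uses the Aliyun image repository and the cri-dockerd socket, the images can optionally be pre-pulled with the same settings; this is a sketch of the "kubeadm config images pull" step that the init output also mentions:

kubeadm config images pull \
  --kubernetes-version=v1.33.4 \
  --image-repository registry.aliyuncs.com/google_containers \
  --cri-socket unix:///run/cri-dockerd.sock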

Cluster initialization

Run kubeadm init on the control-plane node (node1):

sudo kubeadm init \
  --kubernetes-version=v1.33.4 \
  --pod-network-cidr=10.244.0.0/16 \
  --apiserver-advertise-address=10.0.1.1 \
  --image-repository registry.aliyuncs.com/google_containers \
  --cri-socket unix:///run/cri-dockerd.sock \
  --control-plane-endpoint=10.0.1.1
root@ip-10-0-1-1:~# sudo kubeadm init \
  --kubernetes-version=v1.33.4 \
  --pod-network-cidr=10.244.0.0/16 \
  --apiserver-advertise-address=10.0.1.1 \
  --image-repository registry.aliyuncs.com/google_containers \
  --cri-socket unix:///run/cri-dockerd.sock \
  --control-plane-endpoint=10.0.1.1
[init] Using Kubernetes version: v1.33.4
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
W0815 00:28:36.788919   12149 checks.go:846] detected that the sandbox image "registry.aliyuncs.com/google_containers/pause:3.9" of the container runtime is inconsistent with that used by kubeadm.It is recommended to use "registry.aliyuncs.com/google_containers/pause:3.10" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [ip-10-0-1-1.cn-beijing.ecs.internal kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.1.1]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [ip-10-0-1-1.cn-beijing.ecs.internal localhost] and IPs [10.0.1.1 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [ip-10-0-1-1.cn-beijing.ecs.internal localhost] and IPs [10.0.1.1 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 507.128098ms
[control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
[control-plane-check] Checking kube-apiserver at https://10.0.1.1:6443/livez
[control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
[control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
[control-plane-check] kube-controller-manager is healthy after 8.489155157s
[control-plane-check] kube-scheduler is healthy after 9.314309793s
[control-plane-check] kube-apiserver is healthy after 10.501890143s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node ip-10-0-1-1.cn-beijing.ecs.internal as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node ip-10-0-1-1.cn-beijing.ecs.internal as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: c5h4fy.vuksyytbu0bztxqb
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 10.0.1.1:6443 --token c5h4fy.vuksyytbu0bztxqb \
        --discovery-token-ca-cert-hash sha256:c5f38f474fc44b29977d4fbb09e009eb7ebfc530c934dfb72876516aa902be49 \
        --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.1.1:6443 --token c5h4fy.vuksyytbu0bztxqb \
        --discovery-token-ca-cert-hash sha256:c5f38f474fc44b29977d4fbb09e009eb7ebfc530c934dfb72876516aa902be49 

Configure KUBECONFIG on the control-plane node

vi ~/.bashrc                                     # add the line below to the end of the file
export KUBECONFIG=/etc/kubernetes/admin.conf
source ~/.bashrc                                 # reload so the variable takes effect in the current shell

Install a network plugin on the control-plane node (Calico as an example)

https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises

# Option 1: apply the manifest directly from the web
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
# Option 2: download the manifest first (useful on an unreliable network), then apply it
curl https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/calico.yaml -O
kubectl apply -f calico.yaml
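
Calico takes a short while to become ready; a minimal way to watch it, assuming the manifest's standard k8s-app=calico-node label:

# Block until all calico-node pods report Ready (up to 5 minutes)
kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=calico-node --timeout=300s
# Or simply watch the kube-system pods until everything is Running
kubectl -n kube-system get pods -w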

Join the worker nodes to the cluster

Run this once on every worker node.

# Note: the --cri-socket flag must be added here in addition to the printed join command
kubeadm join 10.0.1.1:6443 --token c5h4fy.vuksyytbu0bztxqb \
        --discovery-token-ca-cert-hash sha256:c5f38f474fc44b29977d4fbb09e009eb7ebfc530c934dfb72876516aa902be49  --cri-socket=unix:///run/cri-dockerd.sock
root@ip-10-0-1-2:~# kubeadm join 10.0.1.1:6443 --token c5h4fy.vuksyytbu0bztxqb \
        --discovery-token-ca-cert-hash sha256:c5f38f474fc44b29977d4fbb09e009eb7ebfc530c934dfb72876516aa902be49  --cri-socket=unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
[preflight] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
[preflight] Use 'kubeadm init phase upload-config --config your-config-file' to re-upload it.
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 1.000750357s
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
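
The bootstrap token printed by kubeadm init expires after 24 hours by default. If it has expired, a fresh join command can be generated on the control-plane node; remember to append the --cri-socket flag yourself, since the printed command does not include it:

kubeadm token create --print-join-command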

On the control-plane node, check the node status. A freshly joined worker node shows NotReady at first and becomes Ready after roughly one to two minutes.

kubectl get nodes
root@ip-10-0-1-1:~# kubectl get nodes
NAME                                  STATUS   ROLES           AGE   VERSION
ip-10-0-1-1.cn-beijing.ecs.internal   Ready    control-plane   66m   v1.33.4
ip-10-0-1-2.cn-beijing.ecs.internal   Ready    <none>          16m   v1.33.4
ip-10-0-1-3.cn-beijing.ecs.internal   Ready    <none>          16m   v1.33.4

On the control-plane node, check the status of the system pods.

kubectl get pods -n kube-system
root@ip-10-0-1-1:~# kubectl get pods -n kube-system
NAME                                                          READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7498b9bb4c-996fm                      1/1     Running   0          35m
calico-node-kvtzj                                             1/1     Running   0          35m
calico-node-m6c86                                             1/1     Running   0          15m
calico-node-xlxsx                                             1/1     Running   0          16m
coredns-757cc6c8f8-jpllc                                      1/1     Running   0          65m
coredns-757cc6c8f8-xhwks                                      1/1     Running   0          65m
etcd-ip-10-0-1-1.cn-beijing.ecs.internal                      1/1     Running   0          65m
kube-apiserver-ip-10-0-1-1.cn-beijing.ecs.internal            1/1     Running   0          65m
kube-controller-manager-ip-10-0-1-1.cn-beijing.ecs.internal   1/1     Running   0          65m
kube-proxy-lwwmb                                              1/1     Running   0          16m
kube-proxy-srrsr                                              1/1     Running   0          65m
kube-proxy-vdtf8                                              1/1     Running   0          15m
kube-scheduler-ip-10-0-1-1.cn-beijing.ecs.internal            1/1     Running   0          65m
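
As a final smoke test, a throwaway nginx Deployment can be exposed via a NodePort and reached from any node. This is a minimal sketch; the nginx-test name is arbitrary and not part of the original guide:

kubectl create deployment nginx-test --image=nginx --replicas=2
kubectl expose deployment nginx-test --port=80 --type=NodePort
kubectl get pods -o wide
kubectl get svc nginx-test          # note the mapped NodePort, then: curl http://10.0.1.2:<NodePort>
# Clean up afterwards
kubectl delete svc,deployment nginx-test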