1. Test Environment
| Component | Version |
| --- | --- |
| k8s | v1.23.6 |
| docker | 20.10.6 |

| Node name | Old IP | New IP |
| --- | --- | --- |
| k8s-master | 192.168.6.100 | 192.168.6.200 |
| k8s-node01 | 192.168.6.110 | 192.168.6.210 |
| k8s-node02 | 192.168.6.120 | 192.168.6.220 |
Cluster state before the IP change:
[root@k8s-master ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane,master 14h v1.23.6 192.168.6.100 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
k8s-node01 Ready worker 14h v1.23.6 192.168.6.110 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
k8s-node02 Ready worker 14h v1.23.6 192.168.6.120 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
[root@k8s-master ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-deployment-8d545c96d-9kmbp 1/1 Running 0 82s
default nginx-deployment-8d545c96d-dn98t 1/1 Running 0 82s
default nginx-deployment-8d545c96d-k9cf6 1/1 Running 0 82s
kube-system calico-kube-controllers-677cd97c8d-bv2zj 1/1 Running 2 (4m24s ago) 14h
kube-system calico-node-79jf6 1/1 Running 1 (4m52s ago) 14h
kube-system calico-node-f4xcw 1/1 Running 1 (13h ago) 14h
kube-system calico-node-qqm2h 1/1 Running 1 (13h ago) 14h
kube-system coredns-6d8c4cb4d-6mcs5 1/1 Running 1 (13h ago) 14h
kube-system coredns-6d8c4cb4d-wvq85 1/1 Running 1 (13h ago) 14h
kube-system etcd-k8s-master 1/1 Running 1 (4m52s ago) 14h
kube-system kube-apiserver-k8s-master 1/1 Running 1 (4m52s ago) 14h
kube-system kube-controller-manager-k8s-master 1/1 Running 2 (4m52s ago) 14h
kube-system kube-proxy-227rt 1/1 Running 1 (13h ago) 14h
kube-system kube-proxy-lz7xb 1/1 Running 1 (13h ago) 14h
kube-system kube-proxy-tv7s4 1/1 Running 1 (4m52s ago) 14h
kube-system kube-scheduler-k8s-master 1/1 Running 2 (4m52s ago) 14h
After changing the k8s-master node's IP and rebooting the machine, kubectl fails as follows:
[root@k8s-master ~]# kubectl get pod
Unable to connect to the server: dial tcp 192.168.6.100:6443: connect: no route to host
2. Cluster Recovery
Master node
1. Update the /etc/hosts resolution file on all machines
[root@k8s-master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.6.200 k8s-master
192.168.6.210 k8s-node01
192.168.6.220 k8s-node02
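To apply the same change non-interactively on every machine, a minimal sketch (the IP pairs are taken from the table above; adjust them to your environment):
# Replace the old node IPs with the new ones in /etc/hosts
sed -i -e 's/192.168.6.100/192.168.6.200/g' \
       -e 's/192.168.6.110/192.168.6.210/g' \
       -e 's/192.168.6.120/192.168.6.220/g' /etc/hosts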
2. Replace every occurrence of the old IP with the new IP in the configuration files under /etc/kubernetes (the find command below covers all of the *.conf files)
[root@k8s-master ~]# cd /etc/kubernetes
[root@k8s-master kubernetes]# find . -type f | xargs sed -i "s/192.168.6.100/192.168.6.200/g"
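To confirm nothing was missed, a quick check; it should print nothing if every file was updated:
# Search for leftover references to the old IP under /etc/kubernetes
grep -r "192.168.6.100" /etc/kubernetes/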
3. Replace the old IP with the new IP in $HOME/.kube/config (note: if you run kubectl via sudo, update the copy under root's home directory as well)
[root@k8s-master kubernetes]# cd $HOME/.kube/
[root@k8s-master .kube]# find . -type f | xargs sed -i "s/192.168.6.100/192.168.6.200/g"
4. Rename the directory under $HOME/.kube/cache/discovery/ to use the new IP
[root@k8s-master .kube]# cd $HOME/.kube/cache/discovery/
[root@k8s-master discovery]# pwd
/root/.kube/cache/discovery
[root@k8s-master discovery]# ls
192.168.6.100_6443
[root@k8s-master discovery]# mv 192.168.6.100_6443/ 192.168.6.200_6443/
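Since this directory is only kubectl's client-side discovery cache, an alternative (my assumption, not part of the original steps) is to delete it outright; kubectl regenerates it on the next request:
# Remove the stale discovery cache; it is rebuilt automatically
rm -rf $HOME/.kube/cache/discovery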
5. Regenerate the apiserver certificate and key
[root@k8s-master discovery]# cd /etc/kubernetes/pki
[root@k8s-master pki]# mv apiserver.key apiserver.key.bak
[root@k8s-master pki]# mv apiserver.crt apiserver.crt.bak
[root@k8s-master pki]# kubeadm init phase certs apiserver --apiserver-advertise-address 192.168.6.200
I0925 20:46:29.798106 8933 version.go:255] remote version is much newer: v1.28.2; falling back to: stable-1.23
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.6.200]
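To double-check that the regenerated certificate really carries the new address, the SAN list can be inspected with openssl (a sanity check added here, not part of the original procedure):
# The output should include "IP Address:192.168.6.200"
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text \
  | grep -A1 "Subject Alternative Name"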
6. Restart the kubelet, then edit the ConfigMaps and replace the old IP with the new one. If the coredns and cluster-info ConfigMaps contain no IP addresses, they need no changes.
[root@k8s-master pki]# systemctl restart kubelet
[root@k8s-master pki]# kubectl -n kube-system edit cm kubeadm-config
Edit cancelled, no changes made.
[root@k8s-master pki]# kubectl -n kube-system edit cm kube-proxy
configmap/kube-proxy edited
[root@k8s-master pki]# kubectl edit cm -n kube-system coredns
Edit cancelled, no changes made.
[root@k8s-master pki]# kubectl edit cm -n kube-public cluster-info
configmap/cluster-info edited
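If you prefer a scripted replacement over interactive kubectl edit, a hedged sketch (assuming the old IP appears as plain text in the ConfigMap data):
# Rewrite the old IP in the kube-proxy ConfigMap without opening an editor
kubectl -n kube-system get cm kube-proxy -o yaml \
  | sed 's/192.168.6.100/192.168.6.200/g' \
  | kubectl apply -f -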
7. Reboot the master node server, then create a token for joining the cluster
[root@k8s-master ~]# reboot
[root@k8s-master ~]# kubeadm token create --print-join-command
kubeadm join 192.168.6.200:6443 --token 5cxtc8.i8clppnjdzryqove --discovery-token-ca-cert-hash sha256:cba86bdb61980525c3e93734e60befed1a6d126da1ffe8473ab14b55b045495b
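Before rejoining the workers, it is worth confirming that the control plane is now advertised at the new address:
# The control plane URL should read https://192.168.6.200:6443
kubectl cluster-info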
Worker nodes
[root@k8s-node01 ~]# kubeadm reset
[root@k8s-node01 ~]# kubeadm join 192.168.6.200:6443 --token 5cxtc8.i8clppnjdzryqove --discovery-token-ca-cert-hash sha256:cba86bdb61980525c3e93734e60befed1a6d126da1ffe8473ab14b55b045495b
[root@k8s-node02 ~]# kubeadm reset
[root@k8s-node02 ~]# kubeadm join 192.168.6.200:6443 --token 5cxtc8.i8clppnjdzryqove --discovery-token-ca-cert-hash sha256:cba86bdb61980525c3e93734e60befed1a6d126da1ffe8473ab14b55b045495b
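On each worker, the kubelet config written by kubeadm join should now point at the new API server; a quick check (assuming the default kubeadm file layout):
# Should print: server: https://192.168.6.200:6443
grep "server:" /etc/kubernetes/kubelet.conf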
Check the cluster state from the master node
[root@k8s-master ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane,master 14h v1.23.6 192.168.6.200 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
k8s-node01 Ready worker 14h v1.23.6 192.168.6.210 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
k8s-node02 Ready worker 14h v1.23.6 192.168.6.220 <none> CentOS Linux 7 (Core) 3.10.0-1127.el7.x86_64 docker://20.10.6
[root@k8s-master ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-deployment-8d545c96d-9kmbp 1/1 Running 1 25m
default nginx-deployment-8d545c96d-dn98t 1/1 Running 1 25m
default nginx-deployment-8d545c96d-k9cf6 1/1 Running 1 25m
kube-system calico-kube-controllers-677cd97c8d-bv2zj 1/1 Running 4 (5m24s ago) 14h
kube-system calico-node-79jf6 1/1 Running 4 (7m31s ago) 14h
kube-system calico-node-f4xcw 1/1 Running 2 14h
kube-system calico-node-qqm2h 1/1 Running 2 14h
kube-system coredns-6d8c4cb4d-6mcs5 1/1 Running 2 14h
kube-system coredns-6d8c4cb4d-wvq85 1/1 Running 2 14h
kube-system etcd-k8s-master 1/1 Running 1 (7m31s ago) 9m46s
kube-system kube-apiserver-k8s-master 1/1 Running 1 (7m30s ago) 9m46s
kube-system kube-controller-manager-k8s-master 1/1 Running 4 (7m31s ago) 14h
kube-system kube-proxy-227rt 1/1 Running 2 14h
kube-system kube-proxy-lz7xb 1/1 Running 2 14h
kube-system kube-proxy-tv7s4 1/1 Running 3 (7m31s ago) 14h
kube-system kube-scheduler-k8s-master 1/1 Running 4 (7m31s ago) 14h
Check the certificate expiration status
[root@k8s-master ~]# kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED
admin.conf Sep 22, 2033 11:41 UTC 9y ca no
apiserver Sep 25, 2024 01:53 UTC 364d ca no
apiserver-etcd-client Sep 22, 2033 11:41 UTC 9y etcd-ca no
apiserver-kubelet-client Sep 22, 2033 11:41 UTC 9y ca no
controller-manager.conf Sep 22, 2033 11:41 UTC 9y ca no
etcd-healthcheck-client Sep 22, 2033 11:41 UTC 9y etcd-ca no
etcd-peer Sep 22, 2033 11:41 UTC 9y etcd-ca no
etcd-server Sep 22, 2033 11:41 UTC 9y etcd-ca no
front-proxy-client Sep 22, 2033 11:41 UTC 9y front-proxy-ca no
scheduler.conf Sep 22, 2033 11:41 UTC 9y ca no
CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED
ca Sep 22, 2033 11:29 UTC 9y no
etcd-ca Sep 22, 2033 11:29 UTC 9y no
front-proxy-ca Sep 22, 2033 11:29 UTC 9y no
Notice that the apiserver certificate is now valid for only one year, so rerun the certificate-renewal script (update-kubeadm-cert.sh) to extend it:
[root@k8s-master k8s-install]# ./update-kubeadm-cert.sh all
CERTIFICATE EXPIRES
/etc/kubernetes/controller-manager.config Sep 22 11:41:11 2033 GMT
/etc/kubernetes/scheduler.config Sep 22 11:41:11 2033 GMT
/etc/kubernetes/admin.config Sep 22 11:41:12 2033 GMT
/etc/kubernetes/pki/ca.crt Sep 22 11:29:38 2033 GMT
/etc/kubernetes/pki/apiserver.crt Sep 25 01:53:30 2024 GMT
/etc/kubernetes/pki/apiserver-kubelet-client.crt Sep 22 11:41:11 2033 GMT
/etc/kubernetes/pki/front-proxy-ca.crt Sep 22 11:29:38 2033 GMT
/etc/kubernetes/pki/front-proxy-client.crt Sep 22 11:41:12 2033 GMT
/etc/kubernetes/pki/etcd/ca.crt Sep 22 11:29:39 2033 GMT
/etc/kubernetes/pki/etcd/server.crt Sep 22 11:41:11 2033 GMT
/etc/kubernetes/pki/etcd/peer.crt Sep 22 11:41:11 2033 GMT
/etc/kubernetes/pki/etcd/healthcheck-client.crt Sep 22 11:41:11 2033 GMT
/etc/kubernetes/pki/apiserver-etcd-client.crt Sep 22 11:41:11 2033 GMT
[2023-09-26T10:05:44.04+0800][INFO] backup /etc/kubernetes to /etc/kubernetes.old-20230926
[2023-09-26T10:05:44.04+0800][INFO] updating...
[2023-09-26T10:05:44.08+0800][INFO] updated /etc/kubernetes/pki/etcd/server.conf
[2023-09-26T10:05:44.11+0800][INFO] updated /etc/kubernetes/pki/etcd/peer.conf
[2023-09-26T10:05:44.14+0800][INFO] updated /etc/kubernetes/pki/etcd/healthcheck-client.conf
[2023-09-26T10:05:44.18+0800][INFO] updated /etc/kubernetes/pki/apiserver-etcd-client.conf
[2023-09-26T10:05:44.46+0800][INFO] restarted etcd
[2023-09-26T10:05:44.51+0800][INFO] updated /etc/kubernetes/pki/apiserver.crt
[2023-09-26T10:05:44.55+0800][INFO] updated /etc/kubernetes/pki/apiserver-kubelet-client.crt
[2023-09-26T10:05:44.59+0800][INFO] updated /etc/kubernetes/controller-manager.conf
[2023-09-26T10:05:44.63+0800][INFO] updated /etc/kubernetes/scheduler.conf
[2023-09-26T10:05:44.67+0800][INFO] updated /etc/kubernetes/admin.conf
[2023-09-26T10:05:44.67+0800][INFO] backup /root/.kube/config to /root/.kube/config.old-20230926
[2023-09-26T10:05:44.68+0800][INFO] copy the admin.conf to /root/.kube/config
[2023-09-26T10:05:44.68+0800][INFO] does not need to update kubelet.conf
[2023-09-26T10:05:44.71+0800][INFO] updated /etc/kubernetes/pki/front-proxy-client.crt
[2023-09-26T10:05:54.97+0800][INFO] restarted apiserver
[2023-09-26T10:05:55.22+0800][INFO] restarted controller-manager
[2023-09-26T10:05:55.25+0800][INFO] restarted scheduler
[2023-09-26T10:05:55.29+0800][INFO] restarted kubelet
[2023-09-26T10:05:55.29+0800][INFO] done!!!
CERTIFICATE EXPIRES
/etc/kubernetes/controller-manager.config Sep 23 02:05:44 2033 GMT
/etc/kubernetes/scheduler.config Sep 23 02:05:44 2033 GMT
/etc/kubernetes/admin.config Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/ca.crt Sep 22 11:29:38 2033 GMT
/etc/kubernetes/pki/apiserver.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/apiserver-kubelet-client.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/front-proxy-ca.crt Sep 22 11:29:38 2033 GMT
/etc/kubernetes/pki/front-proxy-client.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/etcd/ca.crt Sep 22 11:29:39 2033 GMT
/etc/kubernetes/pki/etcd/server.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/etcd/peer.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/etcd/healthcheck-client.crt Sep 23 02:05:44 2033 GMT
/etc/kubernetes/pki/apiserver-etcd-client.crt Sep 23 02:05:44 2033 GMT
[root@k8s-master k8s-install]# kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED
admin.conf Sep 23, 2033 02:05 UTC 9y ca no
apiserver Sep 23, 2033 02:05 UTC 9y ca no
apiserver-etcd-client Sep 23, 2033 02:05 UTC 9y etcd-ca no
apiserver-kubelet-client Sep 23, 2033 02:05 UTC 9y ca no
controller-manager.conf Sep 23, 2033 02:05 UTC 9y ca no
etcd-healthcheck-client Sep 23, 2033 02:05 UTC 9y etcd-ca no
etcd-peer Sep 23, 2033 02:05 UTC 9y etcd-ca no
etcd-server Sep 23, 2033 02:05 UTC 9y etcd-ca no
front-proxy-client Sep 23, 2033 02:05 UTC 9y front-proxy-ca no
scheduler.conf Sep 23, 2033 02:05 UTC 9y ca no
CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED
ca Sep 22, 2033 11:29 UTC 9y no
etcd-ca Sep 22, 2033 11:29 UTC 9y no
front-proxy-ca Sep 22, 2033 11:29 UTC 9y no
At this point the IP change is complete: the certificates and the Kubernetes cluster are all in a normal state.