1. Environment Preparation
Before starting, the machines used to deploy the Kubernetes cluster must meet the following requirements (a quick way to verify them follows the list):
- One or more machines running CentOS 7.x x86_64;
- Hardware: 2 GB RAM or more, 2 CPUs or more, 30 GB of disk or more;
- Full network connectivity between all machines in the cluster;
- Internet access, needed to pull images;
- Swap disabled.
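A quick sanity check of these prerequisites on each machine (the thresholds mirror the list above):
nproc                     # CPU count, expect >= 2
free -h                   # RAM, expect >= 2G; Swap should read 0 after section 2.3
df -h /                   # root filesystem, expect >= 30G
cat /etc/redhat-release   # expect CentOS Linux release 7.x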
Host details
IP | Hostname | Notes |
---|---|---|
192.168.32.128 | k8s-master | master, control plane, docker |
192.168.32.129 | k8s-node1 | worker node, docker |
192.168.32.130 | k8s-node2 | worker node, docker |
2. Prepare the Installation Environment (run on all three machines)
2.1 Set the hostnames
# Run the matching command on each machine:
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
2.2 Update the hosts file (on all three machines)
# Map each IP to its hostname (append rather than overwrite, to keep the localhost entries)
cat >> /etc/hosts << EOF
192.168.32.128 k8s-master
192.168.32.129 k8s-node1
192.168.32.130 k8s-node2
EOF
2.3 Disable the firewall, SELinux, and swap (run identically on all three machines)
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary
# Disable swap:
swapoff -a  # temporary
vim /etc/fstab  # permanent
# /dev/mapper/centos-swap swap swap defaults 0 0  # comment out the swap line like this to disable it permanently
# Pass bridged IPv4 traffic to iptables chains:
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system  # apply the settings
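Note: these keys only exist while the br_netfilter kernel module is loaded. If sysctl --system complains that they are missing, load the module now and on every boot:
modprobe br_netfilter                                       # load immediately
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # load on boot
lsmod | grep br_netfilter                                   # verify it is loaded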
2.4 Time synchronization (run on all three machines)
yum install ntpdate -y
ntpdate ntp1.aliyun.com
Set up scheduled automatic synchronization (on all three machines):
crontab -e
*/10 * * * * ntpdate ntp1.aliyun.com
*/12 * * * * hwclock --systohc
# (clock -w is just a legacy alias for hwclock --systohc, so one hardware-clock job is enough)
2.5 Install Docker (on all three machines)
# Back up the existing yum repo files (on all three machines)
cd /etc/yum.repos.d/
[root@k8s-master yum.repos.d]# mkdir bak
[root@k8s-master yum.repos.d]# mv *.repo bak/
# Install the Docker CE yum repo (on all three machines)
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Install Docker (on all three machines)
yum -y install docker-ce
systemctl enable docker && systemctl start docker
# Configure a registry mirror to speed up image pulls
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
systemctl restart docker
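docker info should now list the mirror, which confirms the accelerator is active:
docker info | grep -A 1 "Registry Mirrors"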
2.6 Add the Aliyun yum repo used to install kubeadm (on all three nodes)
cat >/etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2.7 Install kubeadm, kubelet, and kubectl (on all three nodes)
# Install pinned versions; without version numbers the latest is installed
yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9
systemctl enable kubelet
# Component roles:
kubelet: the node agent, managed as a systemd daemon, that runs containers on each node
kubeadm: the cluster deployment/bootstrapping tool
kubectl: the Kubernetes command-line management tool
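A quick sanity check that all three components landed at the pinned version:
kubeadm version -o short           # should print v1.20.9
kubelet --version                  # should print Kubernetes v1.20.9
kubectl version --client --short   # should print Client Version: v1.20.9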
2.8 Deploy the Kubernetes master (run on the k8s-master machine)
# Run on the master (192.168.32.128); the advertise address must be the master's IP
[root@k8s-master ~]# kubeadm init \
--apiserver-advertise-address=192.168.32.128 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.20.9 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# Parameter explanations
--apiserver-advertise-address  the address the control plane advertises to the cluster
--image-repository             the default registry k8s.gcr.io is unreachable from mainland China, so use the Aliyun mirror instead
--kubernetes-version           the K8s version, matching the packages installed above
--service-cidr                 the cluster-internal virtual network (Service IPs), the unified entry point for reaching Pods
--pod-network-cidr             the Pod network; must match the CNI component's YAML deployed below
After the master initializes successfully, kubeadm prints the command for nodes to join the cluster.
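The tail of a successful init looks roughly like this (the token and hash are generated per cluster; the values here are placeholders):
Your Kubernetes control-plane has initialized successfully!
...
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.32.128:6443 --token <generated-token> \
    --discovery-token-ca-cert-hash sha256:<generated-hash>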
2.9 Copy the kubeconfig that kubectl uses to its default path
# Run on the master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Check the nodes; the flannel network plugin is not deployed yet, so the status is NotReady
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 2m v1.20.9
2.10 Join the Kubernetes nodes (run on 192.168.32.129 and 192.168.32.130)
# The join command below was generated by kubeadm init on the master; run it on each node that should join
[root@k8s-node1 ~]# kubeadm join 192.168.32.128:6443 --token esce21.q6hetwm8si29qxwn \
--discovery-token-ca-cert-hash sha256:00603a05805807501d7181c3d60b478788408cfe6cedefedb1f97569708be9c5
The default token is valid for 24 hours; after it expires it can no longer be used and a new one must be created:
[root@k8s-master ~]# kubeadm token create --print-join-command
2.11 Deploy the container network (CNI)
Flannel is used here as the Kubernetes container network solution, handling cross-host container communication.
Flannel is a network component maintained by CoreOS. It gives every Pod a globally unique IP and stores the mapping between Pod subnets and node IPs in etcd. A flanneld daemon runs on every host, maintaining that etcd information and routing packets.
Deploy the flannel network component with the YAML file below:
[root@k8s-master ~]# cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.2
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: quay.io/coreos/flannel:v0.15.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.15.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
# Create the network plugin
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
# Check whether the nodes are all in Ready state
kubectl get nodes -owide
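Once the flannel pods are Running, the nodes should all report Ready, roughly like this (ages and the extra -owide columns will differ):
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   20m   v1.20.9
k8s-node1    Ready    <none>                 10m   v1.20.9
k8s-node2    Ready    <none>                 10m   v1.20.9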
2.12 Deploy the official Dashboard (UI)
[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
By default the Dashboard is only reachable from inside the cluster. Change its Service to NodePort type to expose it externally:
[root@k8s-master ~]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30006   # added: expose on NodePort 30006
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort        # added
...
[root@k8s-master ~]# kubectl apply -f recommended.yaml
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-6b4884c9d5-gl8nr 1/1 Running 0 13m
kubernetes-dashboard-7f99b75bf4-89cds 1/1 Running 0 13m
Access URL: https://NodeIP:30006
Create a service account and bind it to the built-in cluster-admin role.
# Create the user
[root@k8s-master ~]# kubectl create serviceaccount dashboard-admin -n kube-system
# Grant the user cluster permissions
[root@k8s-master ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Get the user's token
[root@k8s-master ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Log in to the Dashboard with the token from the output: open https://192.168.32.128:30006 and sign in with the token.
2.13 Zero-downtime rolling releases
Kubernetes supports a feature called Rolling Update, which upgrades a deployed application almost seamlessly, i.e. the update completes without taking the service offline.
What is a rolling update?
To keep users unaware of an upgrade, Kubernetes updates Pods in sequence, a few at a time (according to the configured ratio), instead of stopping and replacing every Pod at once. This lets you release new versions and roll back without interrupting service, driven by the commands sketched below.
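Deployment rollouts are driven and inspected with kubectl rollout; a minimal sketch (the Deployment name and namespace match the hospital example used later in this section):
kubectl rollout status  deployment/deployment-hospital -n encop   # watch a rollout until it finishes
kubectl rollout history deployment/deployment-hospital -n encop   # list recorded revisions
kubectl rollout undo    deployment/deployment-hospital -n encop   # roll back to the previous revision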
# Rolling update example
Upload the test project.
Pull the base environment python:3.7:
docker pull python:3.7
Write the Dockerfile that packages the app:
vim Dockerfile
FROM python:3.7
VOLUME /tmp
ADD . /work
WORKDIR /work
RUN rm -rf simpleui
RUN /usr/local/bin/python -m pip install --upgrade pip
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ -r requirements.txt
RUN pip3 install django-simpleui -U
EXPOSE 8080
ENTRYPOINT ["python", "manage.py", "runserver", "0.0.0.0:8080"]
Build the local artifact image from the Dockerfile (note the trailing dot: it means the Dockerfile in the current directory):
[root@k8s-master hospital_manager]# docker build -t hospital:v1.23.6.20.1 .
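Before handing the image to Kubernetes, a quick local smoke test is worthwhile (a sketch; the container name hospital-test is arbitrary, and the port mapping assumes the EXPOSE 8080 above):
docker run --rm -d -p 8080:8080 --name hospital-test hospital:v1.23.6.20.1
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8080/   # expect a 200 from the Django app
docker rm -f hospital-test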
hospital.yaml explained:
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
app: hospital
  name: deployment-hospital        # Deployment name
  namespace: encop                 # namespace
spec:
  replicas: 4                      # number of pod replicas
  progressDeadlineSeconds: 600     # seconds to wait before the Deployment is considered stuck and marked as failed
  minReadySeconds: 10              # during a rolling upgrade, a pod only counts as ready/available 10s after it starts passing checks
selector:
matchLabels:
app: hospital
strategy:
    rollingUpdate:                 # rolling update settings
      maxSurge: 1                  # how many pods may be created above the desired count during the update; 1 means one new pod is created before one old pod is removed, and so on; an absolute number or a percentage, default 25%
      maxUnavailable: 25%          # how many pods may be unavailable during the update
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: hospital
spec:
containers:
      - image: test/hospital:v1.23.6.20.1   # image version
        imagePullPolicy: Never              # use the local image only; never pull from a remote registry
name: hospital
resources: {}
volumeMounts:
- name: time-config
mountPath: /etc/localtime
readOnly: true
volumes:
- name: time-config
hostPath:
path: /etc/localtime
status: {}
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: hospital
name: hospital
namespace: encop
spec:
ports:
- nodePort: 32121
port: 8080
protocol: TCP
targetPort: 8080
selector:
app: hospital
type: NodePort
  sessionAffinity: ClientIP   # session stickiness: keep each client pinned to the same pod
status:
loadBalancer: {}
Create the resources:
[root@k8s-master v1]# kubectl apply -f hospital.yaml
[root@k8s-master v1]# kubectl get pods -n encop -owide
Check that the pods are running version v1.23.6.20.1, then watch them:
[root@k8s-master v1]# kubectl get pod -w -n encop
On node1 (192.168.32.129), use a script to check that the service keeps responding:
[root@k8s-node1 ~]# cat check_serv.sh
#!/bin/bash
# Poll the NodePort service every 2 seconds and report whether it responds.
while true; do
    curl -m 10 -s -o /dev/null 192.168.32.128:32121
    if [ $? -eq 0 ]; then
        echo "service OK"
    else
        echo "service DOWN"
    fi
    sleep 2
done
At this point the service is healthy and the page loads normally in a browser.
Change the application's title to "v2"; after the update the page should show "v2".
Rebuild the image:
[root@k8s-master hospital_manager]# docker build -t test/hospital:v1.23.6.20.2 .
Before the rolling update starts, the service is responding normally.
Monitor the pods as the update proceeds:
[root@k8s-master v1]# kubectl get pod -w -n encop
Run the update, upgrading the version from v1.23.6.20.1 to v1.23.6.20.2:
kubectl set image deployment/deployment-hospital hospital=test/hospital:v1.23.6.20.2 -n encop
The service stays reachable throughout the deployment.
The page title has changed and the service never suffered an interruption: the zero-downtime rolling update succeeded!
The version is now v1.23.6.20.2.
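To confirm which image the Deployment is now running, a jsonpath query works (a sketch against the names used above):
kubectl get deployment deployment-hospital -n encop \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
# expected: test/hospital:v1.23.6.20.2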
Reference: http://idcsec.com/2019/03/05/kubernetes%E6%BB%9A%E5%8A%A8%E6%9B%B4%E6%96%B0%EF%BC%88%E6%97%A0%E4%B8%AD%E6%96%AD%E5%B9%B3%E6%BB%91%E5%8F%91%E5%B8%83%EF%BC%89/
Addendum: if the service takes a long time to start, add probes for a more precise, rigorous rollout:
# Readiness probe
readinessProbe:
  # check with an HTTP GET
  httpGet:
    # only when this URL returns 200 is the pod considered started, and only then is traffic routed to it
    path: /sys-value-server/one-vo/12
    # application port
    port: 42130
  # initial delay of 20 seconds
  initialDelaySeconds: 20
  # check every 10 seconds
  periodSeconds: 10
  # timeout per check
  timeoutSeconds: 5
  # consecutive successes required to be marked ready
  successThreshold: 1
  # consecutive failures before being marked unready
  failureThreshold: 3
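A readiness probe only gates traffic; it never restarts a container. To have Kubernetes restart a hung app, a livenessProbe can sit alongside it. A minimal sketch, reusing the same (hypothetical) path and port as the readiness probe above:
livenessProbe:
  httpGet:
    path: /sys-value-server/one-vo/12   # hypothetical health URL, same as the readiness probe
    port: 42130
  initialDelaySeconds: 60               # give a slow-starting app extra time before the first liveness check
  periodSeconds: 15
  failureThreshold: 3                   # restart the container after 3 consecutive failures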
This concludes the walkthrough of deploying Kubernetes with kubeadm and demonstrating a zero-downtime rolling update.