Kubernetes部署v1.26.0
一、准备环境
主机名 | ip地址 | 节点类型 | 系统版本 |
---|---|---|---|
kmaster | 192.168.100.180 | master、etcd | centos7 |
knode1 | 192.168.100.181 | worker | centos7 |
knode2 | 192.168.100.182 | worker | centos7 |
二、配置环境
1.修改主机名
[root@ecs-kmaster ~]# hostnamectl set-hostname kmaster
[root@ecs-kmaster ~]# bash
[root@kmaster ~]#
[root@ecs-knode1 ~]# hostnamectl set-hostname knode1
[root@ecs-knode1 ~]# bash
[root@knode1 ~]#
[root@ecs-knode2 ~]# hostnamectl set-hostname knode2
[root@ecs-knode2 ~]# bash
[root@knode2 ~]#
2.hosts设置
[root@kmaster ~]# vim /etc/hosts
[root@kmaster ~]# cat /etc/hosts
192.168.100.180 kmaster
192.168.100.181 knode1
192.168.100.182 knode2
2.1 复制hosts文件给其他节点
例如 节点1:
[root@kmaster ~]# scp -r /etc/hosts root@192.168.100.181:/etc/hosts
注意:3台服务器均要安装包和下载工具
3.安装包
[root@kmaster ~]# yum install -y yum-utils vim bash-completion net-tools wget
4.禁用swap分区
[root@kmaster ~]# swapoff -a
[root@kmaster ~]# swapon -s
[root@kmaster ~]# vim /etc/fstab
[root@kmaster ~]# cat /etc/fstab
#/dev/mapper/cs-swap none swap defaults 0 0
4.1关闭防火墙和SELinux
[root@kmaster ~]# systemctl stop firewalld
[root@kmaster ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@kmaster ~]# setenforce 0
[root@kmaster ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
5.安装docker
[root@kmaster ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@kmaster ~]# yum list docker-ce --showduplicates | sort -r
[root@kmaster ~]# yum install -y docker-ce
6.开启转发及 iptables 过滤
[root@kmaster ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@kmaster ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
7.修改 containerd 镜像源
# 注意:若 /etc/containerd 目录不存在(例如 containerd 尚未启动过),需先执行 mkdir -p /etc/containerd,否则重定向会失败
[root@kmaster ~]# containerd config default > /etc/containerd/config.toml
[root@kmaster ~]# sed -i "s#registry.k8s.io/pause#registry.aliyuncs.com/google_containers/pause#g" /etc/containerd/config.toml
[root@kmaster ~]# grep sandbox_image /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
8.配置 systemd cgroup 驱动
[root@kmaster ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
[root@kmaster ~]# systemctl restart containerd
[root@kmaster ~]# grep SystemdCgroup /etc/containerd/config.toml
SystemdCgroup = true
9.添加 k8s 源文件
[root@kmaster ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
三、集群搭建
1.安装Kube工具
[root@kmaster ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0 --disableexcludes=kubernetes
# 注意:安装后建议执行 systemctl enable --now kubelet,确保 kubelet 开机自启(kubeadm 官方文档要求,3台节点均需执行)
2.初始化集群(只需要master节点)
[root@kmaster ~]# kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.26.0 --pod-network-cidr=10.244.0.0/16
3.配置环境变量
[root@kmaster ~]# mkdir -p $HOME/.kube
[root@kmaster ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@kmaster ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@kmaster ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@kmaster ~]# source ~/.bash_profile
[root@kmaster ~]# kubectl get node
NAME      STATUS     ROLES           AGE   VERSION
kmaster   NotReady   control-plane   90s   v1.26.0
4.将节点加入集群
[root@knode1 ~]# kubeadm join 192.168.100.180:6443 --token weeweh.39qt15mksqv3kobq --discovery-token-ca-cert-hash sha256:6cce64581fa04e81c917f51cdc2623891463e0ac619dc62a88a1b3f8bab90ba0
[root@kmaster ~]# kubectl get node
注意:如果忘记加入集群链接可以用命令再次生成加入链接
kubeadm token create --print-join-command
5.修改默认端点连接
# 默认情况下,通过 crictl img 命令查询镜像会报错,因为 crictl 命令默认会去找 /var/run/dockershim.sock 文件,而自 1.24 版本起,Dockershim 已从 Kubernetes 项目中移除,找不到对应的文件所以报错。
[root@kmaster ~]# vim /etc/crictl.yaml
[root@kmaster ~]# cat /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 5
debug: false
[root@kmaster ~]# crictl img
5.1 复制文件给其他节点
[root@kmaster ~]# scp -r /etc/crictl.yaml root@192.168.100.181:/etc
6.安装 calico 网络
# 安装 Tigera Calico operator
[root@kmaster ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml
[root@kmaster ~]# kubectl create -f tigera-operator.yaml
# 配置 custom-resources.yaml
[root@kmaster ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/custom-resources.yaml
[root@kmaster ~]# vim custom-resources.yaml
#更改IP地址池中的 CIDR,和 kubeadm 初始化集群中的 --pod-network-cidr 参数保持一致
cidr: 10.244.0.0/16
[root@kmaster ~]# kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
[root@kmaster ~]# kubectl get node
四、集群测试
# 创建nginx服务
[root@kmaster ~]# kubectl create deployment nginx --image=nginx:1.14-alpine
deployment.apps/nginx created
# 暴露端口
[root@kmaster ~]# kubectl expose deploy nginx --port=80 --target-port=80 --type=NodePort
service/nginx exposed
# 查看服务
[root@kmaster ~]# kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-6db6dff665-8rd29 1/1 Running 0 85s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 24m
service/nginx NodePort 10.110.168.191 <none> 80:30647/TCP 21s
# 查看pod
[root@kmaster ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6db6dff665-8rd29 1/1 Running 0 108s