kubernetes部署
kubernetes部署
节点信息
| host | ip |
|---|---|
| k8s-master | 10.1.8.130 |
| k8s-node1 | 10.1.8.131 |
| k8s-node2 | 10.1.8.132 |
前置准备
- kernel-ml-6.5.5-1.el7.elrepo.x86_64.rpm
- kernel-ml-devel-6.5.5-1.el7.elrepo.x86_64.rpm
[!TIP]
在终端工具中打开多执行模式,以MobaXterm为例,菜单->终端->在所有终端上写入命令
安装
[root@k8s-master ~]# yum localinstall -y kernel-ml-*
[root@k8s-master ~]# uname -r
6.5.5-1.el7.elrepo.x86_64
/etc/hosts
[root@k8s-master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.1.8.130 k8s-master
10.1.8.131 k8s-node1
10.1.8.132 k8s-node2
环境准备
#安装环境包
[root@k8s-master ~]# yum -y install vim lrzsz unzip wget net-tools tree bash-completion conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp git psmisc telnet unzip gcc gcc-c++ make
#关闭防火墙
[root@k8s-master ~]# systemctl disable firewalld --now
[root@k8s-master ~]# sed -i 's/enforcing/disabled/g' /etc/selinux/config
[root@k8s-master ~]# setenforce 0
#关闭swap分区
[root@k8s-master ~]# swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab
#调整系统内核参数
[root@k8s-master ~]# cat >/etc/sysctl.d/kubernetes.conf<<EOF
> net.bridge.bridge-nf-call-iptables=1
> net.bridge.bridge-nf-call-ip6tables=1
> net.ipv4.ip_forward=1
> vm.swappiness=0
> vm.overcommit_memory=1
> EOF
[root@k8s-master ~]# sysctl --system
#调整Linux资源限制
#设置一个进程可以打开的最大文件句柄数
[root@k8s-master ~]# ulimit -SHn 65535
[root@k8s-master ~]# cat >> /etc/security/limits.conf <<EOF
#为所有用户设置文件描述符软限制
* soft nofile 655360
#为所有用户设置文件描述符硬限制(硬限制不能低于软限制,统一为655360)
* hard nofile 655360
#为所有用户设置进程数软限制
* soft nproc 655350
#为所有用户设置进程数硬限制
* hard nproc 655350
#为所有用户设置内存锁定软限制为无限制
* soft memlock unlimited
#为所有用户设置内存锁定硬限制为无限制
* hard memlock unlimited
EOF
#配置时间同步
[root@k8s-master ~]# yum -y install chrony
[root@k8s-master ~]# systemctl restart chronyd
[root@k8s-master ~]# chronyc sources -v
[root@k8s-master ~]# hwclock -s
#配置ipvs内核模块
[root@k8s-master ~]# cat >>/etc/modules-load.d/ipvs.conf<<EOF
> ip_vs
> ip_vs_rr
> ip_vs_wrr
> ip_vs_sh
> nf_conntrack
> ip_tables
> ip_set
> xt_set
> ipt_set
> ipt_rpfilter
> ipt_REJECT
> ipip
> overlay
> br_netfilter
> EOF
#查看模块
[root@k8s-master ~]# systemctl restart systemd-modules-load
[root@k8s-master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
安装containerd
#指定 containerd 在系统启动时加载的内核模块
[root@k8s-master ~]# cat >>/etc/modules-load.d/containerd.conf <<EOF
> overlay
> br_netfilter
> EOF
#加载模块
[root@k8s-master ~]# modprobe overlay
[root@k8s-master ~]# modprobe br_netfilter
#立即生效
[root@k8s-master ~]# sysctl --system
#安装依赖的软件包
[root@k8s-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
#添加 Docker 软件源
[root@k8s-master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master ~]# yum makecache fast
#查看containerd版本
[root@k8s-master ~]# yum list containerd.io --showduplicates | sort -r
#安装指定版本containerd
[root@k8s-master ~]# yum -y install containerd.io-1.6.16
#默认安装最新版本containerd(省略)
[root@k8s-master ~]# yum -y install containerd.io
#生成containerd的配置文件
[root@k8s-master ~]# mkdir -p /etc/containerd
[root@k8s-master ~]# containerd config default >/etc/containerd/config.toml
#修改containerd的驱动程序
[root@k8s-master ~]# sed -i '/SystemdCgroup/s/false/true/g' /etc/containerd/config.toml
#修改镜像仓库地址
[root@k8s-master ~]# vim /etc/containerd/config.toml
61 sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
#启动containerd
[root@k8s-master ~]# systemctl enable containerd
[root@k8s-master ~]# systemctl start containerd
[root@k8s-master ~]# systemctl status containerd
#查看containerd版本
[root@k8s-master ~]# ctr version
1.6.16
镜像加速
[root@k8s-master ~]# vim /etc/containerd/config.toml
145 [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/certs.d"
[root@k8s-master ~]# mkdir /etc/containerd/certs.d
[root@k8s-master ~]# mkdir /etc/containerd/certs.d/docker.io
[root@k8s-master ~]# vim /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"

[host."https://09def58152000fc00ff0c00057bad7e0.mirror.swr.myhuaweicloud.com"]
  capabilities = ["pull","resolve","push"]
[host."https://hub-mirror.c.163.com"]
  capabilities = ["pull","resolve","push"]
[host."https://do.nark.eu.org"]
  capabilities = ["pull","resolve","push"]
[host."https://dc.j8.work"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.m.daocloud.io"]
  capabilities = ["pull","resolve","push"]
[host."https://dockerproxy.com"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.mirrors.ustc.edu.cn"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.nju.edu.cn"]
  capabilities = ["pull","resolve","push"]
[host."https://registry.docker-cn.com"]
  capabilities = ["pull","resolve","push"]
[host."https://hub.uuuadc.top"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.anyhub.us.kg"]
  capabilities = ["pull","resolve","push"]
[host."https://dockerhub.jobcher.com"]
  capabilities = ["pull","resolve","push"]
[host."https://dockerhub.icu"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.ckyl.me"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.awsl9527.cn"]
  capabilities = ["pull","resolve","push"]
[host."https://mirror.baidubce.com"]
  capabilities = ["pull","resolve","push"]
[host."https://docker.1panel.live"]
  capabilities = ["pull","resolve","push"]
#启动containerd
[root@k8s-master ~]# systemctl enable containerd
[root@k8s-master ~]# systemctl start containerd
[root@k8s-master ~]# systemctl status containerd
安装kubeadm
#添加k8s软件源
[root@k8s-master ~]# cat <<EOF> /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
> http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
#快速建立yum缓存
[root@k8s-master ~]# yum makecache fast
#安装k8s
[root@k8s-master ~]# yum -y install kubectl-1.28.0 kubelet-1.28.0 kubeadm-1.28.0
[root@k8s-master ~]# cat >/etc/sysconfig/kubelet<<EOF
> KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
> KUBE_PROXY_MODE="ipvs"
> EOF
#kubelet设置为开机自启动
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable kubelet
#设置crictl连接containerd
[root@k8s-master ~]# cat <<EOF | tee /etc/crictl.yaml
> runtime-endpoint: unix:///run/containerd/containerd.sock
> image-endpoint: unix:///run/containerd/containerd.sock
> timeout: 10
> debug: false
> EOF
[!TIP]
下面操作在master节点部署
主节点部署
#查看k8s集群所需要的镜像
[root@k8s-master ~]# kubeadm config images list --kubernetes-version=v1.28.0 --image-repository=registry.aliyuncs.com/google_containers
#下载k8s集群所需要的镜像
[root@k8s-master ~]# kubeadm config images pull --kubernetes-version=v1.28.0 --image-repository=registry.aliyuncs.com/google_containers
#初始化集群自动开启IPVS
#创建初始化集群配置文件
[root@k8s-master ~]# kubeadm config print init-defaults > kubeadm-init.yaml
#修改初始化集群配置文件
[root@k8s-master ~]# vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.1.8.130 #12行 修改master节点ip
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock #15行 修改容器运行时
  imagePullPolicy: IfNotPresent
  name: k8s-master #17行 修改master节点主机名
  taints: #18行 注意!去掉Null
  - effect: NoSchedule #19行 添加污点
    key: node-role.kubernetes.io/control-plane #20行 添加
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers #32行 修改镜像仓库地址
kind: ClusterConfiguration
kubernetesVersion: 1.28.0 #34行 修改k8s版本
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 #38行 增加pod网段
scheduler: {}
#末尾添加
--- #更改kube-proxy的代理模式,默认为iptables
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
--- #更改kubelet cgroup驱动为systemd
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.1.8.130
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
初始化集群
[root@k8s-master ~]# kubeadm init --config=kubeadm-init.yaml --upload-certs | tee kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.1.8.130:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b938cd15b3a5dd7bff33fffb9a405292d4a432033401fd549e24a649e71167a4
集群初始化失败操作
#删除kubernetes目录中所有内容
rm -rf /etc/kubernetes/*
#删除启动端口进程
pkill -9 kubelet
pkill -9 kube-controll
pkill -9 kube-schedule
#重置sock文件
kubeadm reset -f --cri-socket=unix:///var/run/containerd/containerd.sock
配置kubectl工具
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
#导入环境变量
[root@k8s-master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@k8s-master ~]# source ~/.bash_profile
#查看组件状态
[root@k8s-master ~]# kubectl get cs
[!TIP]
node节点操作
node加入集群
[root@k8s-node ~]# kubeadm join 10.1.8.130:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:b938cd15b3a5dd7bff33fffb9a405292d4a432033401fd549e24a649e71167a4
部署网络CNI
#安装calico
[root@k8s-master ~]# wget --no-check-certificate https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
[root@k8s-master ~]# vim calico.yaml
4601 - name: CALICO_IPV4POOL_CIDR
       value: "10.244.0.0/16"
[root@k8s-master ~]# kubectl apply -f calico.yaml
[root@k8s-master ~]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-658d97c59c-lt5xv 1/1 Running 0 2m23s
kube-system calico-node-k99ls 1/1 Running 0 2m23s
kube-system calico-node-m8sxm 1/1 Running 0 2m23s
kube-system calico-node-ntxjd 1/1 Running 0 2m23s
kube-system coredns-66f779496c-9pz6n 1/1 Running 0 29m
kube-system coredns-66f779496c-nh9th 1/1 Running 0 29m
kube-system etcd-k8s-master 1/1 Running 0 29m
kube-system kube-apiserver-k8s-master 1/1 Running 0 29m
kube-system kube-controller-manager-k8s-master 1/1 Running 0 29m
kube-system kube-proxy-kxlfz 1/1 Running 0 12m
kube-system kube-proxy-l2kc4 1/1 Running 0 11m
kube-system kube-proxy-psc5q 1/1 Running 0 29m
kube-system kube-scheduler-k8s-master 1/1 Running 0 29m
#查看节点状态
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 32m v1.28.0
k8s-node01 Ready <none> 15m v1.28.0
k8s-node02 Ready <none> 14m v1.28.0
部署优化
#命令补全
[root@k8s-master ~]# yum install bash-completion -y
[root@k8s-master ~]# source /usr/share/bash-completion/bash_completion
[root@k8s-master ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@k8s-master ~]# source ~/.bashrc
测试
# 创建应用服务nginx
[root@k8s-master ~]# kubectl create deployment nginx --image=nginx --replicas=3
# 暴露服务端口
[root@k8s-master ~]# kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
# 查看pod和service信息
# 默认情况下,master节点存在污点,不接受任何pod资源调度
[root@k8s-master ~]# kubectl get pod,svc
显示
NAME READY STATUS RESTARTS AGE
pod/nginx-7854ff8877-6vtlw 1/1 Running 0 96s
pod/nginx-7854ff8877-b2v7l 1/1 Running 0 96s
pod/nginx-7854ff8877-xkzp9 1/1 Running 0 96s

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 38m
service/nginx NodePort 10.106.93.139 <none> 80:31086/TCP 15s
#browser访问测试 正常
