Installing Kubernetes with kubeadm
1. Environment Preparation
1.1 Upgrade the System Kernel
See this separate article: https://blog.csdn.net/u012533920/article/details/148457715?spm=1011.2415.3001.5331
1.2 Set the Hostname
cat <<EOF > /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.20 k8s-20
192.168.10.21 k8s-21
192.168.10.22 k8s-22
192.168.10.23 k8s-23
EOF
# Run the following command on each VM, substituting that VM's own hostname:
hostnamectl set-hostname <hostname of each VM>
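For example, on the 192.168.10.20 machine, matching the hosts file above:
hostnamectl set-hostname k8s-20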
1.3 Disable the Firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
1.4 Add the Aliyun Repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
1.5 Rebuild the yum Cache
yum clean all
yum makecache
1.6 Synchronize Time
timedatectl set-timezone Asia/Shanghai
yum install ntpdate -y
ntpdate ntp1.aliyun.com
# Optionally, set up a scheduled task to keep the time in sync (see the sketch below).
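A minimal sketch of such a job, assuming ntpdate remains installed; the 30-minute interval and the ntp1.aliyun.com server are illustrative choices:
( crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1" ) | crontab -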
1.7 Disable Swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
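A quick check that swap is actually off now:
free -m   # the Swap: row should show all zeros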
1.8 Disable SELinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
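To confirm both the runtime mode and the persisted config changed:
getenforce                             # should print Permissive
grep '^SELINUX=' /etc/selinux/config   # should print SELINUX=permissive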
1.9 Load Kernel Modules Persistently
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe br_netfilter
modprobe overlay
lsmod | grep br_netfilter
1.10 Adjust Kernel Parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Applying k8s.conf sets three kernel parameters required by Kubernetes networking:
# net.bridge.bridge-nf-call-iptables = 1 passes bridged IPv4 traffic through iptables.
# net.bridge.bridge-nf-call-ip6tables = 1 passes bridged IPv6 traffic through ip6tables.
# net.ipv4.ip_forward = 1 enables IP forwarding.
sysctl -p /etc/sysctl.d/k8s.conf
# Reload all sysctl configuration files to make sure the settings take effect.
sysctl --system
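To confirm the values are active:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward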
1.11 Install the IPVS Modules
yum -y install ipset ipvsadm
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
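Loading these modules only prepares the node; kube-proxy still runs in iptables mode by default. If you actually want IPVS, one way (a sketch for after the cluster is up, not part of the original steps) is:
kubectl -n kube-system edit configmap kube-proxy          # set mode: "ipvs"
kubectl -n kube-system delete pod -l k8s-app=kube-proxy   # recreate kube-proxy pods to pick it up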
1.12 Install Base Packages
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
2. cri-dockerd Configuration
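The unit files below assume Docker is installed and the cri-dockerd binary already exists at /usr/bin/cri-dockerd. One way to get it is an el7 RPM from the Mirantis/cri-dockerd GitHub releases; the version and filename here are purely illustrative, check https://github.com/Mirantis/cri-dockerd/releases for the current one:
# Hypothetical example - adjust the version/filename to the actual release
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm
rpm -ivh cri-dockerd-0.3.4-3.el7.x86_64.rpm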
cat << EOF >/usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
cat << EOF > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF
########## Copy the two unit files above to every other host in the cluster ##########
systemctl daemon-reload && systemctl enable cri-docker --now
systemctl is-active cri-docker
3. Install Kubernetes
yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2
systemctl enable --now kubelet
kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.28.2 --pod-network-cidr=10.244.0.0/16 --cri-socket unix:///var/run/cri-dockerd.sock
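When kubeadm init completes it prints a few follow-up steps; run them on the control-plane node so kubectl can talk to the new cluster (reproduced here from the standard kubeadm output):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Worker nodes join with the kubeadm join command that init prints; because this
# cluster uses cri-dockerd, append --cri-socket unix:///var/run/cri-dockerd.sock
# to that join command so kubeadm picks the right runtime.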
4. Install the Network Plugin - Flannel
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
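Then check that the flannel pods start and the nodes go Ready (kube-flannel is the namespace the upstream manifest creates; image pulls may take a minute):
kubectl get pods -n kube-flannel
kubectl get nodes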
An example for testing the network:
vi nginx.yaml
apiVersion: apps/v1
kind: StatefulSet  # resource type
metadata:
  name: web
spec:
  serviceName: "nginx"  # note this extra field - think about why a StatefulSet needs it
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.9.1
        ports:
        - containerPort: 80
          name: web
---
apiVersion: v1
kind: Service  # resource type
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None  # None makes this a headless Service - think about why
  selector:
    app: nginx

kubectl apply -f nginx.yaml
# Once the above has been applied, run the following command:
kubectl run busybox --image busybox:1.28.4 --image-pull-policy=IfNotPresent --restart=Never --rm -it -- sh
If you don't see a command prompt, try pressing enter.
/ # nslookup web-0.nginx.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      web-0.nginx.default.svc.cluster.local
Address 1: 10.244.3.2 web-0.nginx.default.svc.cluster.local
5. Create a Default StorageClass
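This section assumes an NFS server already exports /nfs/data on 192.168.10.20, the address used in the manifest below. A minimal sketch for setting that up on the 192.168.10.20 host, assuming CentOS package and service names:
yum install -y nfs-utils
mkdir -p /nfs/data
echo "/nfs/data *(rw,sync,no_root_squash)" >> /etc/exports
systemctl enable --now nfs-server
exportfs -arv
With the export in place, the manifest below deploys the provisioner and the default StorageClass: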
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system  # set the namespace to match your environment; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storageclass
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs-storage-provisioner  # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  # archiveOnDelete: "false"
  archiveOnDelete: "true"
reclaimPolicy: Retain
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system  # must match the namespace used in the RBAC objects above
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          # image: quay.io/external_storage/nfs-client-provisioner:latest
          # Note: on Kubernetes 1.20+ the image above no longer works. I struggled with
          # this for a long time; a suggestion in the project's GitHub issues to use the
          # image below finally fixed it (I downloaded it and pushed it to my own registry).
          image: k8s.m.daocloud.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          # image: easzlab/nfs-subdir-external-provisioner:v4.0.1
          # image: registry-op.test.cn/nfs-subdir-external-provisioner:v4.0.1
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-storage-provisioner  # provisioner name; must match the provisioner field in the StorageClass above
            - name: NFS_SERVER
              value: 192.168.10.20  # NFS server IP address
            - name: NFS_PATH
              value: "/nfs/data"  # NFS export path
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.10.20  # NFS server IP address
            path: "/nfs/data"  # NFS export path
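Save the manifest and apply it (the filename nfs-provisioner.yaml is illustrative), then optionally verify dynamic provisioning with a throwaway PVC:
kubectl apply -f nfs-provisioner.yaml
kubectl get storageclass   # nfs-storageclass should be flagged (default)

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-storageclass
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-claim   # STATUS should turn Bound
kubectl delete pvc test-claim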