【ansible/K8s】Sharing the Source for an Automated K8s Deployment
Environment
Hostnames and IPs of the three hosts:
ccka-master 192.168.30.135
ccka-worker1 192.168.30.136
ccka-worker2 192.168.30.137
All three hosts run Ubuntu 20.04 (focal), matching the mirror configuration below.
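
The script below assumes these hostnames resolve from every node. If you are not running DNS for them, a minimal sketch of the /etc/hosts entries (an assumption; adjust to your own name-resolution setup) would be:

192.168.30.135 ccka-master
192.168.30.136 ccka-worker1
192.168.30.137 ccka-worker2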
Source Code
#!/bin/bash
set -euo pipefail
echo
echo
echo -n "Have you done the above? yes or no: "
read -r input
case $input in
    yes)
        echo
        echo "now starting deploy"
        ;;
    no)
        echo "please correct it" && exit 1
        ;;
    *)
        echo "please input yes or no"
        exit 1
        ;;
esac
echo
cd /root

# Install Ansible
# Update the package list
apt update
# Install software-properties-common (needed to add a PPA)
apt install -y software-properties-common
# Add the official Ansible PPA
add-apt-repository --yes --update ppa:ansible/ansible
# Install Ansible
apt install -y ansible
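# Optional sanity check (a sketch): confirm the PPA install worked.
# ansible --version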

# Point apt at a domestic (China) mirror
cat > /etc/apt/sources.list <<EOF
deb https://mirror.nju.edu.cn/ubuntu focal main restricted
deb https://mirror.nju.edu.cn/ubuntu focal-updates main restricted
deb https://mirror.nju.edu.cn/ubuntu focal universe
deb https://mirror.nju.edu.cn/ubuntu focal-updates universe
deb https://mirror.nju.edu.cn/ubuntu focal multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-updates multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-backports main restricted universe multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-security main restricted
deb https://mirror.nju.edu.cn/ubuntu focal-security universe
deb https://mirror.nju.edu.cn/ubuntu focal-security multiverse
EOF
apt update &> /dev/null
apt install sshpass wget bash-completion -y &> /dev/null
sed -i 's/^#host_key_checking = False/host_key_checking = False/' /etc/ansible/ansible.cfg
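# One-off alternative (assumption: you would rather not edit the global config):
# export ANSIBLE_HOST_KEY_CHECKING=False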

# Set up SSH trust (with many hosts, use the commented-out script further below instead)
echo 'Create and copy ssh key to workers'
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N '' &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-master &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-worker1 &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-worker2 &> /dev/null
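# Verify passwordless login before handing the hosts to Ansible (a sketch):
# for h in ccka-master ccka-worker1 ccka-worker2; do
#     ssh -o BatchMode=yes "root@$h" true && echo "$h ok"
# done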
cd /root

# Configure the inventory
cat > /etc/ansible/hosts <<EOF
[master]
ccka-master ansible_user=root ansible_password=1
[worker]
ccka-worker1 ansible_user=root ansible_password=1
ccka-worker2 ansible_user=root ansible_password=1
EOF
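
# Optional sanity check (a sketch; assumes the inventory above is in place):
# confirm Ansible can reach every host before continuing.
# ansible all -m ping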

: <<'COMMENT'
Setting up SSH trust across many hosts in a production environment:

#!/bin/bash
# Configure SSH trust for every host in the Ansible inventory
set -euo pipefail  # strict error handling

# 1. Check that the Ansible hosts file exists
ANSIBLE_HOSTS="/etc/ansible/hosts"
if [ ! -f "$ANSIBLE_HOSTS" ]; thenecho "错误: Ansible 主机文件 $ANSIBLE_HOSTS 不存在"exit 1
fi# 2. 生成 SSH 密钥对(如果不存在)
SSH_KEY="$HOME/.ssh/id_rsa"
if [ ! -f "$SSH_KEY" ]; thenecho "生成新的 SSH 密钥对..."ssh-keygen -t rsa -f "$SSH_KEY" -N '' -q
fi# 3. 从 Ansible 主机文件提取所有唯一主机
echo "从 $ANSIBLE_HOSTS 提取主机列表..."
ALL_HOSTS=$(grep -Eo '^[a-zA-Z0-9_.-]+' "$ANSIBLE_HOSTS" | sort -u)# 4. 验证主机列表
if [ -z "$ALL_HOSTS" ]; thenecho "错误: 未找到有效主机"exit 1
fiecho "找到以下主机:"
echo "$ALL_HOSTS"
echo ""# 5. 配置 SSH 互信
for host in $ALL_HOSTS; doecho "正在配置主机: $host"# 跳过本地主机if [ "$host" == "localhost" ] || [ "$host" == "127.0.0.1" ]; thenecho "跳过本地主机"continuefi# 检查主机是否可达if ! ping -c 1 -W 1 "$host" &> /dev/null; thenecho "警告: 无法连接到主机 $host"continuefi# 复制公钥if ssh-copy-id -o "StrictHostKeyChecking=accept-new" "$host"; thenecho "成功配置 $host"elseecho "错误: 无法配置 $host,请手动执行:"echo " ssh-copy-id $host"fi
doneecho ""
echo "SSH 互信配置完成"
COMMENT

cat > create-k8s.yaml <<'EOF'
---
- name: Configure Kubernetes with Containerd
  hosts: all
  become: yes
  remote_user: root
  tasks:
    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a

    - name: Install required packages for Docker
      apt:
        name:
          - ca-certificates
          - curl
          - gnupg
          - lsb-release
        state: present
        update_cache: no  # skip updating for now to save time

    - name: Create keyrings directory
      file:
        path: /etc/apt/keyrings
        state: directory
        mode: '0755'

    - name: Download Docker GPG key
      get_url:
        url: https://mirrors.nju.edu.cn/docker-ce/linux/ubuntu/gpg
        dest: /tmp/docker-key.gpg
        mode: '0644'

    - name: Process GPG key
      command: gpg --dearmor -o /etc/apt/keyrings/docker.gpg /tmp/docker-key.gpg
      args:
        creates: /etc/apt/keyrings/docker.gpg

    - name: Add Docker repository
      apt_repository:
        # apt expects "amd64" here; the ansible_architecture fact reports "x86_64"
        repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.nju.edu.cn/docker-ce/linux/ubuntu {{ ansible_distribution_release }} stable"
        state: present
        filename: docker

    - name: Update apt cache (only for Docker)
      apt:
        update_cache: yes
        cache_valid_time: 3600  # no need to re-update within an hour

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update

    - name: Deploy chrony to keep time in sync on all nodes
      apt:
        pkg:
          - chrony

    - name: restart chrony service for time sync
      systemd:
        state: restarted
        daemon_reload: yes
        name: chrony  # Ubuntu's chrony package ships the "chrony" unit
        enabled: yes

    - name: set timezone to Asia/Shanghai
      shell: |
        timedatectl set-timezone Asia/Shanghai

    - name: Install containerd
      apt:
        pkg:
          - containerd
        state: present

    - name: Configure containerd for Kubernetes
      shell: |
        mkdir -p /etc/containerd
        containerd config default | tee /etc/containerd/config.toml
        sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
        sed -i 's|sandbox_image = ".*"|sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8"|' /etc/containerd/config.toml

    - name: restart containerd service
      systemd:
        state: restarted
        daemon_reload: yes
        name: containerd
        enabled: yes

    - name: disable swap in /etc/fstab
      lineinfile:
        path: /etc/fstab
        regexp: '.*swap.*'
        state: absent

    - name: disable swap at runtime
      shell: swapoff -a

    - name: configure iptables module
      lineinfile:
        path: /etc/modules-load.d/k8s.conf
        line: br_netfilter
        state: present
        create: true

    - name: configure iptables bridge
      copy:  # written via copy because the content spans multiple lines
        dest: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-ip6tables = 1
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1

    - name: apply sysctl
      shell: |
        modprobe br_netfilter
        sysctl --system

    - name: Add Kubernetes GPG key
      apt_key:
        url: https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/deb/Release.key
        state: present

    - name: Add Kubernetes repository
      apt_repository:
        repo: "deb https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/deb/ /"
        state: present
        filename: kubernetes

    - name: Update apt cache
      apt:
        update_cache: yes

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update

    - name: install kubeadm kubectl kubelet
      apt:  # the apt module (not package) supports allow_downgrades
        name:
          - kubeadm=1.32.0-1.1
          - kubelet=1.32.0-1.1
          - kubectl=1.32.0-1.1
          - sshpass
        state: present
        allow_downgrades: yes

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update

    - name: configure crictl to use containerd
      shell: crictl config runtime-endpoint unix:///run/containerd/containerd.sock

    - name: creating kubeadm.yaml
      shell: kubeadm config print init-defaults > kubeadm.yaml
      when: "'master' in group_names"

    - name: modify api server address
      shell: sed -i '/.*advertiseAddress.*/d' kubeadm.yaml
      when: "'master' in group_names"

    - name: modify cluster name
      lineinfile:
        path: kubeadm.yaml
        regexp: '.*name.*'
        line: '  name: ccka-master'
        state: present
      when: "'master' in group_names"

    - name: modify image repository
      lineinfile:
        path: kubeadm.yaml
        regexp: 'imageRepo.*'
        line: 'imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers'
        state: present
      when: "'master' in group_names"

    - name: modify criSocket to containerd
      lineinfile:
        path: kubeadm.yaml
        regexp: '  criSocket.*'
        line: '  criSocket: unix:///run/containerd/containerd.sock'
        state: present
      when: "'master' in group_names"

    - name: restart containerd and kubelet services
      systemd:
        state: restarted
        daemon_reload: yes
        name: "{{ item }}"
        enabled: yes
      loop:
        - containerd
        - kubelet

    - name: Deploy Kubernetes on the master node
      shell: kubeadm init --config kubeadm.yaml | tee /root/installdetails.log
      when: "'master' in group_names"

    - name: pause 30s after cluster init
      shell: sleep 30s
      when: "'master' in group_names"

    - name: Create local kubeconfig directory
      file:
        path: /root/.kube
        state: directory
        mode: '0700'
      when: "'master' in group_names"

    - name: Copy admin config to local
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /root/.kube/config
        remote_src: yes
        owner: root
        group: root
        mode: '0600'
      when: "'master' in group_names"

    - name: Create .kube directory on workers
      file:
        path: /root/.kube
        state: directory
        mode: '0700'
      delegate_to: "{{ item }}"
      loop:
        - ccka-worker1
        - ccka-worker2
      when: "'master' in group_names"

    - name: Copy admin config to workers
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /root/.kube/config
        owner: root
        group: root
        mode: '0600'
      delegate_to: "{{ item }}"
      loop:
        - ccka-worker1
        - ccka-worker2
      when: "'master' in group_names"

    - name: Download calico.yaml
      get_url:
        url: https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml
        dest: /root/calico.yaml
        mode: '0644'
      when: "'master' in group_names"

    - name: Modify calico.yaml to use a domestic image registry
      replace:
        path: /root/calico.yaml
        regexp: 'docker.io/calico/'
        replace: 'registry.cn-hangzhou.aliyuncs.com/calico/'
      when: "'master' in group_names"

    - name: Deploy Calico
      shell: |
        kubectl apply -f /root/calico.yaml
        sleep 30
      when: "'master' in group_names"

    - name: join workers with containerd
      shell: |
        sleep 30
        join_command=$(sshpass -p 1 ssh -o StrictHostKeyChecking=no root@ccka-master "kubeadm token create --print-join-command")
        echo "$join_command --cri-socket=unix:///run/containerd/containerd.sock" | bash
      when: "'worker' in group_names"

    - name: assign the worker role label to workers
      shell: |
        sleep 30
        kubectl label nodes ccka-worker2 ccka-worker1 node-role.kubernetes.io/worker=
      when: "'master' in group_names"
EOF

cp /etc/ansible/ansible.cfg /root/ansible.cfg
if [ $? -ne 0 ]; then
    echo "please review the output on screen and fix the error before re-running" && exit 1
fi
sed -i '/^# command_warnings.*/a\command_warnings = False' /root/ansible.cfg
if [ $? -ne 0 ]; then
    exit 1
fi
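
# Optional: lint the playbook before running it (a sketch; assumes
# ansible-playbook is on PATH from the PPA install above).
# ansible-playbook --syntax-check create-k8s.yaml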
echo
echo 'Deploy K8S Cluster now'
ansible-playbook create-k8s.yaml
if [ $? -ne 0 ]; then
    exit 1
fi
#rm -rf create-k8s.yaml /root/ansible.cfg /root/kubeadm.yaml /root/create-k8s-cluster.sh /root/installdetails.log
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
source /etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubeadm
echo
echo "Please wait one minute for nodes ready"
echo
sleep 1m
kubectl get pod -A
echo
kubectl get nodes
echo
echo
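
Once the script finishes, verify that the cluster converged. A minimal check (a sketch; it assumes the playbook above placed the kubeconfig at /root/.kube/config):

kubectl wait --for=condition=Ready node --all --timeout=300s
kubectl get nodes -o wide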