Deploying a k8s Cluster with an Ansible Playbook

2023-12-13 11:39:25

1. Deploy the Ansible cluster

See the earlier CSDN post “使用python脚本一个简单的搭建ansible集群” for setting up the Ansible cluster with a simple Python script.
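
Before running anything below, it is worth verifying that Ansible can actually reach every node. A minimal check, assuming the inventory from the next section is saved in the default /etc/ansible/hosts location:

ansible clients_all -m ping

All three hosts should answer with "pong"; if one does not, fix SSH connectivity before continuing.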

2. Building the k8s cluster with Ansible


1. Host planning

Node      IP address        OS          Spec
server    192.168.174.150   CentOS 7.9  2 GB RAM, 2 cores
client1   192.168.174.151   CentOS 7.9  2 GB RAM, 2 cores
client2   192.168.174.152   CentOS 7.9  2 GB RAM, 2 cores


The Ansible inventory file looks like this:

[clients_all]
server
client1
client2
[clients_master]
server
[clients_client]
client1
client2
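
Note that the inventory refers to the nodes by hostname, so the control node must be able to resolve server, client1, and client2. A sketch of the /etc/hosts entries this assumes, using the addresses from the host planning table:

192.168.174.150 server
192.168.174.151 client1
192.168.174.152 client2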

2. The complete YAML playbook:

# 1. Configure the yum repositories
---
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Configure the local yum repository
      block:
        - name: Mount /dev/cdrom
          mount:
            src: /dev/cdrom
            path: /mnt/cdrom
            fstype: iso9660
            opts: defaults
            state: mounted
        - name: Remove the old /dev/cdrom line from /etc/fstab
          lineinfile:
            path: /etc/fstab
            regexp: '^\/dev\/cdrom'
            state: absent
        - name: Add the cdrom mount entry to /etc/fstab
          lineinfile:
            path: /etc/fstab
            line: '/dev/cdrom /mnt/cdrom iso9660 defaults  0  0'
            insertafter: EOF
        - name: Create the repo file and empty it
          shell: 'echo "" > /etc/yum.repos.d/centos-local.repo'
        - name: Populate centos-local.repo
          blockinfile:
            path: /etc/yum.repos.d/centos-local.repo
            # "block: |" keeps the block content as literal text, with no extra formatting or processing
            block: |
              [centos7.9]
              name=centos7.9
              baseurl=file:///mnt/cdrom
              enabled=1
              gpgcheck=0
            create: yes
            marker: '#{mark} centos7.9'
        - name: Clean the yum cache and list the repos
          shell: yum clean all && yum repolist
    - name: Configure the remote Aliyun repository
      block:
        - name: Install wget via yum
          yum:
            name: wget
        - name: Download the CentOS repo file
          get_url:
            dest: /etc/yum.repos.d/CentOS-Base.repo
            url: http://mirrors.aliyun.com/repo/Centos-7.repo
    - name: Configure the EPEL extra repository
      block:
        - name: Install epel-release via yum
          yum:
            name: epel-release
        - name: Clean the yum cache and list the repos
          shell: yum clean all && yum repolist
# 2. Install essential tools
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Install essential tools
      yum:
        name: bash-completion,vim,net-tools,tree,psmisc,lrzsz,dos2unix
# 3. Disable the firewall and SELinux
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Disable SELinux
      selinux:
        state: disabled
    - name: Stop and disable the firewalld service
      service:
        name: firewalld
        state: stopped
        enabled: false
    - name: Stop and disable the iptables service
      service:
        name: iptables
        state: stopped
        enabled: false
      register: service_status
      ignore_errors: True
    - name: Handle the case where the iptables service does not exist
      # report instead of aborting, since a missing iptables unit is normal on minimal installs
      debug:
        msg: "iptables service not present, continuing"
      when: service_status is failed
# 4. Time synchronization with chrony
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Install chrony on all nodes via yum
      yum:
        name: chrony
    - name: Enable chronyd at boot and restart it
      service:
        name: chronyd
        state: restarted
        enabled: true
- hosts: clients_master
  gather_facts: no
  tasks:
    - name: Modify chrony.conf on the master node
      block:
        - name: Set the allowed client IP range
          lineinfile:
            path: /etc/chrony.conf
            regexp: '^#allow 192.168.0.0\/16'
            line: 'allow 192.168.174.0/24'
            backrefs: yes
        - name: Set the local stratum
          lineinfile:
            path: /etc/chrony.conf
            regexp: '^#local stratum 10'
            line: 'local stratum 10'
            backrefs: yes
        - name: Enable chronyd at boot and restart it
          service:
            name: chronyd
            state: restarted
            enabled: true
        - name: Enable NTP on the master node
          shell: timedatectl set-ntp true
- hosts: clients_client
  gather_facts: no
  tasks:
    - name: Modify chrony.conf on the client nodes
      block:
        - name: Remove the old NTP server entries
          lineinfile:
            path: /etc/chrony.conf
            regexp: '^server'
            state: absent
        - name: Add the master node as the new NTP server
          lineinfile:
            path: /etc/chrony.conf
            line: 'server 192.168.174.150 iburst'
        - name: Enable chronyd at boot and restart it
          service:
            name: chronyd
            state: restarted
            enabled: true
        - name: Enable NTP on the client nodes
          shell: timedatectl set-ntp true
        - name: Wait 5 seconds
          shell: sleep 5
        - name: Check the sync status
          shell: chronyc sources -v | sed -n '/^\^\*/p'
          register: command_output
        # The result is registered in command_output and printed to the console
        - name: Print the chronyc sources -v output
          debug:
            var: command_output.stdout_lines
# 5. Disable swap, tune the Linux kernel parameters, and configure IPVS
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Disable the swap partition
      lineinfile:
        path: /etc/fstab
        regexp: '^\/dev\/mapper\/centos-swap'
        line: '#/dev/mapper/centos-swap swap                    swap    defaults        0 0'
        backrefs: yes
    - name: Tune the Linux kernel parameters
      block:
        - name: Edit /etc/sysctl.d/kubernetes.conf to enable bridge filtering and IP forwarding
          blockinfile:
            path: /etc/sysctl.d/kubernetes.conf
            block: |
              net.bridge.bridge-nf-call-ip6tables = 1
              net.bridge.bridge-nf-call-iptables = 1
              net.ipv4.ip_forward = 1
            create: yes
            marker: '#{mark} kubernetes'
        - name: Reload the sysctl configuration
          shell: sysctl -p /etc/sysctl.d/kubernetes.conf
        - name: Load the bridge filtering module
          shell: modprobe br_netfilter
        - name: Verify that the bridge filtering module is loaded
          shell: lsmod | grep br_netfilter
          register: lsmod_output
        # The result is registered in lsmod_output and printed to the console
        - name: Print the lsmod | grep br_netfilter output
          debug:
            var: lsmod_output.stdout_lines
    - name: Configure IPVS
      block:
        - name: Install ipset and ipvsadm
          yum:
            name: ipset,ipvsadm
        - name: Write the kernel modules to load into a script file
          blockinfile:
            path: /etc/sysconfig/modules/ipvs.modules
            block: |
              #! /bin/bash
              modprobe -- ip_vs
              modprobe -- ip_vs_rr
              modprobe -- ip_vs_wrr
              modprobe -- ip_vs_sh
              modprobe -- nf_conntrack_ipv4
            create: yes
            marker: '#{mark} ipvs'
        - name: Make the script file executable
          file:
            path: /etc/sysconfig/modules/ipvs.modules
            mode: '0755'
        - name: Run the module-loading script
          shell: /bin/bash /etc/sysconfig/modules/ipvs.modules
        - name: Verify that the modules are loaded
          shell: lsmod | grep -e ip_vs -e nf_conntrack_ipv4
          register: lsmod_ipv4_output
        # The result is registered in lsmod_ipv4_output and printed to the console
        - name: Print the lsmod | grep -e ip_vs -e nf_conntrack_ipv4 output
          debug:
            var: lsmod_ipv4_output.stdout_lines
#        - name: Reboot the host
#          reboot:
# 6. Install Docker
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Install Docker
      block:
        - name: Add the Docker CE yum repo locally
          get_url:
            dest: /etc/yum.repos.d/docker-ce.repo
            url: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
        - name: Install docker-ce 18.06.3 via yum
          shell: yum install -y --setopt=obsoletes=0 docker-ce-18.06.3.ce-3.el7
        - name: Ensure the /etc/docker directory exists
          file:
            path: /etc/docker
            state: directory
        - name: Write /etc/docker/daemon.json
          # daemon.json must be pure JSON, so copy is used here; blockinfile markers would break parsing
          copy:
            dest: /etc/docker/daemon.json
            content: |
              {
                "storage-driver": "devicemapper",
                "exec-opts": ["native.cgroupdriver=systemd"],
                "registry-mirrors": ["https://ja9e22yz.mirror.aliyuncs.com"]
              }
        - name: Update /etc/sysconfig/docker
          blockinfile:
            path: /etc/sysconfig/docker
            block:
              OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false'
            create: yes
            marker: '#{mark} docker'
        - name: Restart Docker and enable it at boot
          service:
            name: docker
            state: restarted
            enabled: true
# 7. Install the Kubernetes components
- hosts: clients_all
  gather_facts: no
  tasks:
    - name: Install the Kubernetes components
      block:
        - name: Configure the Kubernetes yum repository
          blockinfile:
            path: /etc/yum.repos.d/kubernetes.repo
            block: |
              [kubernetes]
              name=Kubernetes
              baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
              enabled=1
              gpgcheck=0
              repo_gpgcheck=0
              gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
              http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
            create: yes
            marker: '#{mark} kubernetes'
        - name: Install kubeadm, kubelet, and kubectl
          shell: yum install --setopt=obsoletes=0 kubeadm-1.17.4-0 kubelet-1.17.4-0 kubectl-1.17.4-0 -y
        - name: Edit /etc/sysconfig/kubelet to configure the kubelet cgroup driver
          blockinfile:
            path: /etc/sysconfig/kubelet
            block: |
              KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
              KUBE_PROXY_MODE="ipvs"
            create: yes
            marker: '#{mark} kubelet'
        - name: Start kubelet and enable it at boot
          service:
            name: kubelet
            state: started
            enabled: true
# 8. Prepare the cluster images (if a pull hangs, reboot the host)
- hosts: clients_all
  gather_facts: no
  vars:
    images:                 # the list of images required by the cluster
      - kube-apiserver:v1.17.4
      - kube-controller-manager:v1.17.4
      - kube-scheduler:v1.17.4
      - kube-proxy:v1.17.4
      - pause:3.1
      - etcd:3.4.3-0
      - coredns:1.6.5
  tasks:
    - name: Pull the images
      shell: docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/{{ item }}
      with_items: "{{ images }}"          # loop over the list defined above
    - name: Retag the images as k8s.gcr.io
      shell: docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/{{ item }} k8s.gcr.io/{{ item }}
      with_items: "{{ images }}"
    - name: Remove the original Aliyun-tagged images
      shell: docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/{{ item }}
      with_items: "{{ images }}"
# 9. Initialize the cluster
- hosts: clients_master
  gather_facts: no
  vars:
    apiserver_address: 192.168.174.150
  tasks:
    - name: Initialize the cluster on the master node
      block:
        - name: kubeadm init
          shell: >
            kubeadm init
            --kubernetes-version=v1.17.4
            --pod-network-cidr=10.244.0.0/16
            --service-cidr=10.96.0.0/12
            --apiserver-advertise-address={{ apiserver_address }} |
            grep -Eo '(--token.*|--discovery-token-ca-cert-hash.*)' | awk '{print $2}'
          # The extracted token and hash values are registered in kubeadm_init_output
          register: kubeadm_init_output
        - name: Extract the token and discovery-token-ca-cert-hash values
          set_fact:
            token: "{{ kubeadm_init_output.stdout_lines[0] }}"
            discovery_token_ca_cert_hash: "{{ kubeadm_init_output.stdout_lines[1] }}"
        - name: Write the token and discovery_token_ca_cert_hash to a file
          blockinfile:
            path: $HOME/kubeadm_init.yaml
            block: |
              token: {{ token }}
              discovery_token_ca_cert_hash: {{ discovery_token_ca_cert_hash }}
            create: yes
            marker: '#{mark} file'
        - name: Fetch the token file back to the Ansible control node
          fetch:
            src: $HOME/kubeadm_init.yaml
            dest: $HOME
        - name: Print the kubeadm init values
          debug:
            var: token,discovery_token_ca_cert_hash
        - name: Create the required directory $HOME/.kube
          file:
            path: $HOME/.kube
            state: directory
        - name: Copy /etc/kubernetes/admin.conf to $HOME/.kube/config
          shell: cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        - name: Get the current user name
          shell: id -un
          register: current_user
        - name: Get the current group name
          shell: id -gn
          register: current_group
        - name: Set the owner and group of $HOME/.kube/config
          file:
            path: $HOME/.kube/config
            state: touch
            # Use the registered result's .stdout attribute, otherwise the value is rendered with [] around it
            owner: "{{ current_user.stdout }}"
            group: "{{ current_group.stdout }}"
# On the node machines: join the nodes to the cluster
- hosts: clients_client
  vars:
    apiserver_address: 192.168.174.150
  vars_files: $HOME/server/root/kubeadm_init.yaml
  tasks:
    - name: Join the node to the cluster
      # gather_facts stays enabled in this play; it is needed to pick up fact variables defined in an earlier play
      shell: >
        kubeadm join {{ apiserver_address }}:6443 --token {{ token }}
        --discovery-token-ca-cert-hash {{ discovery_token_ca_cert_hash }}
# On the master node: check the cluster status
- hosts: clients_master
  gather_facts: no
  tasks:
    - name: Check the cluster status on the master node
      shell: kubectl get nodes
      register: kubectl_nodes_output
    # The result is registered in kubectl_nodes_output and printed to the console
    - name: Print the kubectl get nodes output
      debug:
        var: kubectl_nodes_output.stdout_lines
# 10. Install the network plugin
- hosts: clients_master
  gather_facts: no
  tasks:
    - name: Install the network plugin
      block:
        - name: Download the flannel manifest
          get_url:
            dest: $HOME
            url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
        - name: Deploy the flannel network
          shell: kubectl apply -f $HOME/kube-flannel.yml && sleep 60
        - name: Check node status after about a minute; Ready means the network is up
          shell: kubectl get nodes
          register: kubectl_nodes_output
        # The result is registered in kubectl_nodes_output and printed to the console
        - name: Print the kubectl get nodes output
          debug:
            var: kubectl_nodes_output.stdout_lines
        - name: Check whether all pods are Running
          shell: kubectl get pod --all-namespaces
          register: kubectl_pod_output
        # The result is registered in kubectl_pod_output and printed to the console
        - name: Print the kubectl get pod --all-namespaces output
          debug:
            var: kubectl_pod_output.stdout_lines
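
To run the playbook, save everything above into a single file (the name k8s-deploy.yml below is just an example) and execute it with ansible-playbook. The plays carry no become directives, so this assumes the inventory connects as root:

ansible-playbook k8s-deploy.yml

After it finishes, kubectl get nodes on the master should show all three nodes in the Ready state, matching the output of the final debug tasks.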

Source: https://blog.csdn.net/qq_56776641/article/details/134966433