K8S Environment Setup
[root@node2 ~]# yum install ntpdate -y          # install ntpdate for time synchronization
[root@master ~]# ntpdate time.windows.com       # sync the clock
[root@node2 ~]# yum -y install ipset ipvsadm    # install ipset and ipvsadm
Disable the firewall
systemctl disable firewalld
systemctl stop firewalld
Disable SELinux
Temporarily disable SELinux:
setenforce 0
Permanently disable it by editing /etc/sysconfig/selinux:
sed -i 's/SELINUX=permissive/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
Disable the swap partition
swapoff -a
To disable it permanently, comment out the swap line in /etc/fstab:
sed -i 's/.*swap.*/#&/' /etc/fstab
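To confirm swap is actually off:
free -m        # the Swap line should show 0 total
swapon -s      # should print nothing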
Adjust kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
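Note: the two bridge sysctls above only exist once the br_netfilter kernel module is loaded; if sysctl --system reports them as unknown keys, load the module first:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # load it on every boot as well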
Install Docker and K8S (run on all 3 servers)
## Get the Docker CE yum repo
wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
## Refresh the yum cache
yum clean all
yum makecache
Install Docker (all three nodes)
yum -y install docker-ce-18.06.1.ce-3.el7
### Start Docker and enable it at boot
systemctl enable docker && systemctl start docker
Edit the Docker config file (needed on all 3 nodes): vim /etc/docker/daemon.json
{ "exec-opts": ["native.cgroupdriver=systemd"] }
Restart Docker: systemctl restart docker
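After the restart, confirm the cgroup driver was picked up:
docker info | grep -i "cgroup driver"    # expected: Cgroup Driver: systemd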
Install and configure kubeadm, kubelet, and kubectl on all nodes
kubelet: runs on every node in the cluster and is responsible for starting Pods and containers
kubeadm: used to initialize the cluster
kubectl: the Kubernetes command-line tool; with kubectl you can deploy and manage applications, inspect resources, and create, delete, and update components
Create the repo file
vim kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
Move the finished repo file into the yum repos directory
mv kubernetes.repo /etc/yum.repos.d/
Install kubelet, kubeadm, and kubectl
yum install -y kubelet-1.14.1
yum install -y kubeadm-1.14.1 kubectl-1.14.1
Enable kubelet at boot
systemctl enable kubelet
Set the kubelet cgroup driver to systemd
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
Append --cgroup-driver=systemd to KUBELET_KUBECONFIG_ARGS:
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cgroup-driver=systemd"
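Reload systemd and restart kubelet so the drop-in change takes effect:
systemctl daemon-reload
systemctl restart kubelet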
Initialize the master node
sudo docker pull mirrorgooglecontainers/kube-apiserver:v1.14.1
sudo docker pull mirrorgooglecontainers/kube-controller-manager:v1.14.1
sudo docker pull mirrorgooglecontainers/kube-scheduler:v1.14.1
sudo docker pull mirrorgooglecontainers/kube-proxy:v1.14.1
sudo docker pull mirrorgooglecontainers/pause:3.1
sudo docker pull mirrorgooglecontainers/etcd:3.3.10
sudo docker pull coredns/coredns:1.3.1
sudo docker tag mirrorgooglecontainers/kube-apiserver:v1.14.1 k8s.gcr.io/kube-apiserver:v1.14.1
sudo docker tag mirrorgooglecontainers/kube-controller-manager:v1.14.1 k8s.gcr.io/kube-controller-manager:v1.14.1
sudo docker tag mirrorgooglecontainers/kube-scheduler:v1.14.1 k8s.gcr.io/kube-scheduler:v1.14.1
sudo docker tag mirrorgooglecontainers/kube-proxy:v1.14.1 k8s.gcr.io/kube-proxy:v1.14.1
sudo docker tag mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
sudo docker tag mirrorgooglecontainers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
sudo docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
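The pull-and-retag steps can also be scripted; a minimal bash sketch covering the same images and versions as above:
images="kube-apiserver:v1.14.1 kube-controller-manager:v1.14.1 kube-scheduler:v1.14.1 kube-proxy:v1.14.1 pause:3.1 etcd:3.3.10"
for img in $images; do
  sudo docker pull mirrorgooglecontainers/$img
  sudo docker tag mirrorgooglecontainers/$img k8s.gcr.io/$img
done
# coredns is published under its own Docker Hub namespace
sudo docker pull coredns/coredns:1.3.1
sudo docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1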
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.6.153 --kubernetes-version=v1.14.1 --ignore-preflight-errors=Swap
--pod-network-cidr specifies the internal IP range available to Pods in the cluster
--apiserver-advertise-address is the master node's IP address
--kubernetes-version can be checked with kubectl version
Then run these three commands:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
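Alternatively, if running as root, KUBECONFIG can simply be pointed at admin.conf:
export KUBECONFIG=/etc/kubernetes/admin.conf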
Install the Pod network add-on so the master node no longer shows NotReady
wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml --no-check-certificate
kubectl apply -f calico.yaml
Copy the admin.conf file to the worker nodes
scp /etc/kubernetes/admin.conf root@192.168.6.154:/root/
scp /etc/kubernetes/admin.conf root@192.168.6.152:/root/
Run the join command printed during init on each worker node to join the cluster
kubeadm join 192.168.6.153:6443 --token 3zvmgf.suatjacffpos1jhq \
--discovery-token-ca-cert-hash sha256:dd3a2b886ea24685a1d61fe40c60d9416278e9f50974c608c9a3269c1acd82a8
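If the token has expired (tokens are valid for 24 hours by default), print a fresh join command on the master:
kubeadm token create --print-join-command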
# View node information
kubectl get nodes
# If a worker node cannot run the get nodes command,
copy the admin.conf file from the master onto the node:
cp -i admin.conf /etc/kubernetes/admin.conf
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
If you see: cp: cannot create regular file '/root/.kube/config': No such file or directory
# create the directory and then run the command again
mkdir /root/.kube/
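In full, the fix on a worker node (paths assume admin.conf was copied to /root via scp as above):
mkdir -p $HOME/.kube
sudo cp -i /root/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config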
K8S initialization fails
During installation Kubernetes needs to pull images from k8s.gcr.io, which is blocked in mainland China, so the installation cannot complete normally.
The workaround is to pull the images from Docker Hub and re-tag them, which bypasses the need to reach k8s.gcr.io.
Reference: https://blog.csdn.net/happyworld1/article/details/106259080/
Re-initialize
# Reset kubeadm
kubeadm reset
After the reset finishes, run kubeadm init again.
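kubeadm reset does not clear iptables/IPVS rules or the CNI configuration (it prints a reminder about this); a sketch of the extra cleanup often done before re-initializing, adjust to your setup:
rm -rf /etc/cni/net.d            # leftover CNI config
iptables -F && iptables -t nat -F && iptables -X
ipvsadm --clear                  # only if IPVS mode was in use
rm -rf $HOME/.kube               # stale kubeconfig from the old cluster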
Summary of problems during installation
Problem: error in the "preflight" execution phase. Fix:
kubeadm reset
Problem: connection refused. Fix:
systemctl restart kubelet
Problem: "The connection to the server localhost:8080 was refused". Fix:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Problem: "Unable to connect to the server: x509". Fix:
rm -rf $HOME/.kube
Problem: port is in use. Fix:
kubeadm reset
Node status NotReady
[root@k8s-node1 ~]# docker pull jmgao1983/flannel
[root@k8s-master~]# mkdir -p /etc/cni/net.d/
[root@k8s-master~]# cat <<EOF> /etc/cni/net.d/10-flannel.conf
> {"name":"cbr0","type":"flannel","delegate": {"isDefaultGateway": true}}
> EOF
[root@k8s-master~]# mkdir /usr/share/oci-umount/oci-umount.d -p
[root@k8s-master~]# mkdir /run/flannel/
mkdir: cannot create directory "/run/flannel/":
[root@k8s-master~]# cat <<EOF> /run/flannel/subnet.env
> FLANNEL_NETWORK=192.168.0.0/16
> FLANNEL_SUBNET=192.168.1.0/24
> FLANNEL_MTU=1450
> FLANNEL_IPMASQ=true
> EOF
systemctl restart kubelet    # restart kubelet
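After the restart the node should move to Ready once the flannel pod is running; check with:
kubectl get nodes
kubectl get pods -n kube-system -o wide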
Common K8S commands
Start the individual components
systemctl start etcd
systemctl start docker
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl start kubelet
systemctl start kube-proxy
systemctl enable docker
systemctl start docker
systemctl enable kubelet
Restart
systemctl daemon-reload
systemctl restart kube-apiserver
systemctl status kube-apiserver
systemctl restart kube-controller-manager
systemctl status kube-controller-manager
systemctl restart kube-scheduler
systemctl status kube-scheduler
systemctl restart kubelet
systemctl status kubelet
systemctl restart kube-proxy
systemctl status kube-proxy
View nodes
kubectl get nodes
kubectl get pod --all-namespaces
kubectl get pods --namespace=kube-system
kubectl get pods -n kube-system -owide | grep cambricon-precision-3630-tower
Delete a Pod
kubectl delete -f myjob.yml
kubectl delete pod coredns-bccdc95cf-mvxnb --grace-period=0 --force -n kube-system
Describe a Pod
kubectl describe pod name
Delete a node
kubectl delete node nodename
Exec into a Pod
kubectl exec -it mlutestpod bash
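Newer kubectl releases expect an explicit -- before the command; the equivalent form is:
kubectl exec -it mlutestpod -- bash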
View the currently available API versions
kubectl api-versions
Node logs
journalctl -f -u kubelet
kubectl describe nodes k8s111
Pod logs
kubectl describe pods cambricon-mlu-monitor-m6kdz -n kube-system
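The actual container logs (as opposed to the describe output) come from kubectl logs:
kubectl logs cambricon-mlu-monitor-m6kdz -n kube-system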
Dump the cluster logs to a file
kubectl cluster-info dump --output-directory=/root/cluster-state