Kubernetes: Quickly Set Up K8s with yum in One Article


The environment is as follows:

CentOS 7.x

master 192.168.179.104
node 192.168.179.103       192.168.179.101
etcd 192.168.179.102

Kubernetes cluster components:
– etcd: a highly available key/value store and service-discovery system
– flannel: provides cross-host container network communication
– kube-apiserver: exposes the Kubernetes cluster API
– kube-controller-manager: keeps the cluster's services in their desired state
– kube-scheduler: schedules containers, assigning them to nodes
– kubelet: starts containers on a node according to the specs defined in the configuration
– kube-proxy: provides the network proxying that connects Services to Pods

Stop the firewalld service, to avoid conflicts with the firewall rules Docker manages for its containers.

# systemctl stop firewalld
# systemctl disable firewalld

Disable SELinux:
set SELINUX=disabled in /etc/selinux/config.
The setting takes effect after a reboot. A merely temporary disable (setenforce 0) is not recommended, because it is lost when the machine restarts.
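A quick way to make the change (a minimal sketch, assuming the stock /etc/selinux/config layout):

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
grep ^SELINUX= /etc/selinux/config
# after the reboot, getenforce should print "Disabled"
getenforce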

 

ETCD Node

[root@localhost ~]# vim /etc/etcd/etcd.conf 
[root@localhost ~]# cd /etc/etcd/
[root@localhost etcd]# ls
etcd.conf
[root@localhost etcd]# cp etcd.conf etcd.conf.bak

[root@localhost etcd]# grep -vE "#|^$" etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.179.102:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.179.102:2379"

# ETCD_LISTEN_CLIENT_URLS: like a bind address, this selects the interfaces and port etcd listens on.
# This machine has two interfaces, ens32 (192.168.179.102) and lo (127.0.0.1),
# so both URLs are listed and etcd listens on port 2379 on each of them.
# ETCD_ADVERTISE_CLIENT_URLS: the client URLs etcd advertises to the outside world.
[root@localhost etcd]# systemctl restart etcd
[root@localhost etcd]# netstat -tpln | grep 2379
tcp        0      0 192.168.179.102:2379    0.0.0.0:*               LISTEN      10564/etcd          
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      10564/etcd 

# Check the etcd cluster member list; there is only one member here
[root@localhost ~]# etcdctl member list
8e9e05c52164694d: name=default peerURLs=http://localhost:2380 clientURLs=http://127.0.0.1:2379,http://192.168.179.102:2379 isLeader=true  

# Check the etcd cluster health
[root@localhost ~]# etcdctl cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
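As an extra sanity check you can write and read back a throwaway key over the LAN address using etcd's v2 HTTP API (the key name "test" here is arbitrary):

curl http://192.168.179.102:2379/v2/keys/test -XPUT -d value="hello"
curl http://192.168.179.102:2379/v2/keys/test
curl http://192.168.179.102:2379/v2/keys/test -XDELETE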


Configure the firewall (if firewalld is kept running on the etcd node, open the client port 2379 and the peer port 2380):
firewall-cmd --zone=public --add-port=2379/tcp --permanent
firewall-cmd --zone=public --add-port=2380/tcp --permanent
firewall-cmd --reload
firewall-cmd --list-all
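Either way, make sure etcd comes back after a reboot (using the systemd unit shipped by the yum package):

systemctl enable etcd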

 

Master Node Configuration: apiserver | config

[root@localhost ~]# yum install  kubernetes-master flannel -y

-----------------------------------------------------------------------------------------
# The apiserver listens on port 8080, so nothing else that uses 8080 (Tomcat, for example) can run on this machine
[root@localhost ~]# grep -vE "#|^$" /etc/kubernetes/apiserver 
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.179.102:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""


# The address the API service listens on
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# Connection to the etcd database; for an etcd cluster, list every member:
# KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.179.102:2379,http://192.168.179.103:2379"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.179.102:2379"

# The virtual IP (cluster IP) range, used later to load-balance Services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# Admission-control plug-ins. The packaged default includes ServiceAccount:
# KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# Remove ServiceAccount, since it expects username/password credentials and no authentication is used here.
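If you prefer to edit the file non-interactively, a one-line sketch that drops ServiceAccount from the stock line:

sed -i 's/ServiceAccount,//' /etc/kubernetes/apiserver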




-----------------------------------------------------------------------------------------
# config holds Kubernetes settings shared by all components

[root@localhost ~]# grep -vE "#|^$" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://192.168.179.104:8080"

# Whether errors are logged to stderr; they end up in the system messages log
KUBE_LOGTOSTDERR="--logtostderr=true"

# Change this to the externally reachable apiserver address and port
KUBE_MASTER="--master=http://192.168.179.104:8080"

# Allow privileged mode, so that containers can be started with --privileged=true
KUBE_ALLOW_PRIV="--allow-privileged=true"


-----------------------------------------------------------------------------------------
# Start the apiserver first; the remaining two can be started in any order
[root@localhost kubernetes]# systemctl start kube-apiserver
[root@localhost kubernetes]# systemctl start kube-controller-manager
[root@localhost kubernetes]# systemctl start kube-scheduler
[root@localhost kubernetes]# ps -ef | grep kube
kube      15584      1  4 21:55 ?        00:00:02 /usr/bin/kube-apiserver --logtostderr=true --v=0 --etcd-servers=http://192.168.179.102:2379 --insecure-bind-address=0.0.0.0 --allow-privileged=true --service-cluster-ip-range=10.254.0.0/16 --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota
kube      15601      1  4 21:55 ?        00:00:00 /usr/bin/kube-controller-manager --logtostderr=true --v=0 --master=http://192.168.179.104:8080
kube      15614      1  6 21:56 ?        00:00:00 /usr/bin/kube-scheduler --logtostderr=true --v=0 --master=http://192.168.179.104:8080

[root@localhost kubernetes]# netstat -tpln | grep kube
tcp6       0      0 :::10251                :::*                    LISTEN      15614/kube-schedule 
tcp6       0      0 :::6443                 :::*                    LISTEN      15584/kube-apiserve 
tcp6       0      0 :::10252                :::*                    LISTEN      15601/kube-controll 
tcp6       0      0 :::8080                 :::*                    LISTEN      15584/kube-apiserve 
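To confirm the three components can talk to each other through the apiserver, and to make them start on boot:

# componentstatuses (cs for short) queries the scheduler, controller-manager and etcd via the apiserver
kubectl get componentstatuses
systemctl enable kube-apiserver kube-controller-manager kube-scheduler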

 

Node Configuration: config | kubelet

# the *rhsm* packages provide the Red Hat certificates needed to pull the pod-infrastructure image used below
[root@localhost ~]# yum install kubernetes-node docker flannel *rhsm* -y

-------------------------------------------------------------------------------------------
[root@localhost ~]# grep -vE '^$|#' /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://192.168.179.104:8080"

# If the apiserver listens on a different port, change it here as well
KUBE_MASTER="--master=http://192.168.179.104:8080"
-------------------------------------------------------------------------------------------
[root@localhost ~]# grep -vE '^$|#' /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.179.103"
KUBELET_API_SERVER="--api-servers=http://192.168.179.104:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

# Use the node's externally reachable IP here; it must not be 127.0.0.1
KUBELET_HOSTNAME="--hostname-override=192.168.179.103"
[root@localhost ~]#  systemctl start kubelet
[root@localhost ~]#  systemctl start kube-proxy

[root@localhost ~]# ps -ef | grep kube
root       7545      1  4 10:40 ?        00:00:01 /usr/bin/kubelet --logtostderr=true --v=0 --api-servers=http://192.168.179.104:8080 --address=0.0.0.0 --hostname-override=192.168.179.103 --allow-privileged=true --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest
root       7624      1  2 10:41 ?        00:00:00 /usr/bin/kube-proxy --logtostderr=true --v=0 --master=http://192.168.179.104:8080

[root@localhost ~]# netstat -tpln | grep kube
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      7545/kubelet        
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      7624/kube-proxy     
tcp6       0      0 :::10255                :::*                    LISTEN      7545/kubelet        
tcp6       0      0 :::4194                 :::*                    LISTEN      7545/kubelet        
tcp6       0      0 :::10250                :::*                    LISTEN      7545/kubelet  
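On each node it is also worth enabling the services at boot and probing the kubelet health endpoint (10248 is the kubelet healthz port visible in the netstat output above):

systemctl enable kubelet kube-proxy docker
# should answer "ok"
curl http://127.0.0.1:10248/healthz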

-----------------------------------------------------------------------------------------
[root@localhost kubernetes]# kubectl get node
NAME              STATUS    AGE
192.168.179.103   Ready     36s

# Start kubelet and kube-proxy on the other node the same way; both nodes then show up
[root@localhost kubernetes]# kubectl get node
NAME              STATUS    AGE
192.168.179.101   Ready     8s
192.168.179.103   Ready     2m

 

Flannel Network Configuration on the Master and Nodes

Flannel ties the container networks of the cluster hosts together; it has to be deployed on the master and on every node.

# On both nodes and on the master, edit the flanneld configuration to point at the etcd node's IP
[root@localhost ~]# grep -vE "^$|#" /etc/sysconfig/flanneld 
FLANNEL_ETCD_ENDPOINTS="http://192.168.179.102:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Starting the flannel network hangs at this point
[root@localhost kubernetes]# systemctl start flanneld
^C

# It hangs because the /atomic.io/network key does not exist in etcd yet
[root@localhost etcd]# etcdctl  ls /
/registry


# Create the key/value in etcd; its value defines the network from which the Docker hosts' container subnets will be allocated
[root@localhost etcd]# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
{"Network":"172.17.0.0/16"}
[root@localhost etcd]# etcdctl get  /atomic.io/network/config
{"Network":"172.17.0.0/16"}


# Start the flannel network on the master and on the nodes
[root@localhost ~]#  systemctl start flanneld
[root@localhost ~]# systemctl restart docker
# The flannel0 interface's IP comes straight from the network stored in etcd. The master and both nodes are now all inside 172.17.0.0/16 and can reach one another: flannel has tied the whole cluster network together.

Master node:
[root@localhost ~]# ifconfig
ens32: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.179.104  netmask 255.255.255.0  broadcast 192.168.179.255
        inet6 fe80::831c:6df1:a633:742a  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:a7:ff:f7  txqueuelen 1000  (Ethernet)

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.48.0  netmask 255.255.0.0  destination 172.17.48.0
        inet6 fe80::3402:860c:c93e:afe3  prefixlen 64  scopeid 0x20<link>

Node1 (Docker containers on this host will get IPs from the 172.17.35.0/24 subnet):
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.35.1  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:ff:4a:3b:38  txqueuelen 0  (Ethernet)

ens32: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.179.103  netmask 255.255.255.0  broadcast 192.168.179.255
        inet6 fe80::f54d:5639:6237:2d0e  prefixlen 64  scopeid 0x20<link>

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.35.0  netmask 255.255.0.0  destination 172.17.35.0
        inet6 fe80::b557:3e9f:1253:3674  prefixlen 64  scopeid 0x20<link>


Node2 (Docker containers on this host will get IPs from the 172.17.14.0/24 subnet):
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet   netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:5e:6d:3b:d3  txqueuelen 0  (Ethernet)

ens32: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.179.101  netmask 255.255.255.0  broadcast 192.168.179.255
        inet6 fe80::eb42:2f23:95cb:44b6  prefixlen 64  scopeid 0x20<link>

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 172.17.14.0  netmask 255.255.0.0  destination 172.17.14.0
        inet6 fe80::40fb:e70:39e5:b80c  prefixlen 64  scopeid 0x20<link>


[root@localhost etcd]# etcdctl ls  /atomic.io/network/subnets
/atomic.io/network/subnets/172.17.14.0-24
/atomic.io/network/subnets/172.17.48.0-24
/atomic.io/network/subnets/172.17.35.0-24
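Each subnet key records which host leased it; reading one back returns a small JSON document containing that node's public IP:

[root@localhost etcd]# etcdctl get /atomic.io/network/subnets/172.17.14.0-24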
# Ping between the hosts to confirm they can reach each other
[root@localhost ~]# ping  172.17.14.0
PING 172.17.14.0 (172.17.14.0) 56(84) bytes of data.
64 bytes from 172.17.14.0: icmp_seq=1 ttl=62 time=1.49 ms
^C
--- 172.17.14.0 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.496/1.496/1.496/0.000 ms
[root@localhost ~]# ping 172.17.14.1
PING 172.17.14.1 (172.17.14.1) 56(84) bytes of data.
64 bytes from 172.17.14.1: icmp_seq=1 ttl=62 time=0.937 ms
^C
--- 172.17.14.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.937/0.937/0.937/0.000 ms
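To verify the overlay from inside a container rather than from the host, start a throwaway container on one node and ping another node's docker0 address (assuming the busybox image can be pulled from Docker Hub):

# run on node1; 172.17.14.1 is node2's docker0 address
docker run --rm busybox ping -c 2 172.17.14.1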

At this point the whole cluster is configured.

[root@localhost ~]# kubectl get pod --namespace=default
No resources found.
[root@localhost ~]# kubectl get pod --namespace=kube-system
No resources found.
[root@localhost ~]# kubectl get nodes
NAME              STATUS    AGE
192.168.179.101   Ready     54m
192.168.179.103   Ready     56m
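As a final smoke test, schedule something onto the nodes. A minimal sketch, assuming the nginx image can be pulled from Docker Hub:

kubectl run nginx --image=nginx --replicas=2
# -o wide shows which node each pod landed on
kubectl get pods -o wide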

 

 