CentOS 7.4搭建Kubernetes 1.8.5集群

前端之家收集整理的这篇文章主要介绍了CentOS 7.4搭建Kubernetes 1.8.5集群前端之家小编觉得挺不错的,现在分享给大家,也给大家做个参考。

环境介绍

角色 操作系统 IP 主机名 Docker版本
master,node CentOS 7.4 192.168.0.210 node210 17.11.0-ce
node CentOS 7.4 192.168.0.211 node211 17.11.0-ce
node CentOS 7.4 192.168.0.212 node212 17.11.0-ce

1.基础环境配置(所有服务器执行) a.SELinux关闭

  1. sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
  2. setenforce 0

b.Docker安装

  1. curl -sSL https://get.docker.com/ | sh

c.配置国内Docker镜像加速器

  1. curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://e2a6d434.m.daocloud.io

d.开启Docker开机自动启动

  1. systemctl enable docker.service
  2. systemctl restart docker

2.kubernetes证书准备(master执行) a.为将文件复制到Node节点,节省部署时间,我这里做ssh信任免密复制

  1. ssh-keygen -t rsa
  2. ssh-copy-id 192.168.0.211
  3. ssh-copy-id 192.168.0.212

b.下载证书生成工具

  1. yum -y install wget
  2. wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  3. chmod +x cfssl_linux-amd64
  4. mv cfssl_linux-amd64 /usr/local/bin/cfssl
  5.  
  6. wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  7. chmod +x cfssljson_linux-amd64
  8. mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
  9.  
  10. wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  11. chmod +x cfssl-certinfo_linux-amd64
  12. mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

c.CA证书制作 #目录准备

  1. mkdir /root/ssl
  2. cd /root/ssl

#创建CA证书配置 vim ca-config.json

  1. {
  2. "signing": {
  3. "default": {
  4. "expiry": "87600h"
  5. },"profiles": {
  6. "kubernetes": {
  7. "usages": [
  8. "signing","key encipherment","server auth","client auth"
  9. ],"expiry": "87600h"
  10. }
  11. }
  12. }
  13. }

#创建CA证书请求文件 vim ca-csr.json

  1. {
  2. "CN": "kubernetes","key": {
  3. "algo": "rsa","size": 2048
  4. },"names": [
  5. {
  6. "C": "CN","ST": "JIANGXI","L": "NANCHANG","O": "k8s","OU": "System"
  7. }
  8. ]
  9. }

#生成CA证书和私钥 cfssl gencert -initca ca-csr.json | cfssljson -bare ca

#创建kubernetes证书签名请求 vim kubernetes-csr.json（注意：JSON 不支持 # 注释，下面的“#修改成……”仅为阅读说明，实际保存文件前必须删除这些注释，否则 cfssl 解析会失败）

  1. {
  2. "CN": "kubernetes","hosts": [
  3. "127.0.0.1","192.168.0.210",#修改成自己主机的IP
  4. "192.168.0.211",#修改成自己主机的IP
  5. "192.168.0.212",#修改成自己主机的IP
  6. "10.254.0.1","kubernetes","node210",#修改成自己主机的主机名
  7. "node211",#修改成自己主机的主机名
  8. "node212",#修改成自己主机的主机名
  9. "kubernetes.default","kubernetes.default.svc","kubernetes.default.svc.cluster","kubernetes.default.svc.cluster.local"
  10. ],"key": {
  11. "algo": "rsa","size": 2048
  12. },"names": [
  13. {
  14. "C": "CN","ST": "JIANGXI","L": "NANCHANG","O": "k8s","OU": "System"
  15. }
  16. ]
  17. }

#生成kubernetes证书及私钥 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

#创建admin证书签名请求 vim admin-csr.json

  1. {
  2. "CN": "admin","hosts": [],"key": {
  3. "algo": "rsa","size": 2048
  4. },"names": [
  5. {
  6. "C": "CN","ST": "JIANGXI","L": "NANCHANG","O": "system:masters","OU": "System"
  7. }
  8. ]
  9. }

#生成admin证书及私钥 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#创建 kube-proxy 证书签名请求 vim kube-proxy-csr.json

  1. {
  2. "CN": "system:kube-proxy","key": {
  3. "algo": "rsa","size": 2048
  4. },"names": [
  5. {
  6. "C": "CN","ST": "JIANGXI","L": "NANCHANG","O": "k8s","OU": "System"
  7. }
  8. ]
  9. }

#生成证书及私钥 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

#分发证书

  1. mkdir -p /etc/kubernetes/ssl
  2. cp -r *.pem /etc/kubernetes/ssl
  3.  
  4. cd /etc
  5. scp -r kubernetes/ 192.168.0.211:/etc/
  6. scp -r kubernetes/ 192.168.0.212:/etc/

3.etcd集群安装及配置 a.下载etcd,并分发至节点 wget https://github.com/coreos/etcd/releases/download/v3.2.11/etcd-v3.2.11-linux-amd64.tar.gz tar zxf etcd-v3.2.11-linux-amd64.tar.gz mv etcd-v3.2.11-linux-amd64/etcd* /usr/local/bin scp -r /usr/local/bin/etc* 192.168.0.211:/usr/local/bin/ scp -r /usr/local/bin/etc* 192.168.0.212:/usr/local/bin/

b.创建etcd服务启动文件 vim /usr/lib/systemd/system/etcd.service

  1. [Unit]
  2. Description=Etcd Server
  3. After=network.target
  4. After=network-online.target
  5. Wants=network-online.target
  6. Documentation=https://github.com/coreos
  7.  
  8. [Service]
  9. Type=notify
  10. WorkingDirectory=/var/lib/etcd/
  11. EnvironmentFile=-/etc/etcd/etcd.conf
  12. ExecStart=/usr/local/bin/etcd \
  13. --name ${ETCD_NAME} \
  14. --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  15. --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  16. --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  17. --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  18. --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  19. --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  20. --initial-advertise-peer-urls ${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
  21. --listen-peer-urls ${ETCD_LISTEN_PEER_URLS} \
  22. --listen-client-urls ${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  23. --advertise-client-urls ${ETCD_ADVERTISE_CLIENT_URLS} \
  24. --initial-cluster-token ${ETCD_INITIAL_CLUSTER_TOKEN} \
  25. --initial-cluster infra1=https://192.168.0.210:2380,infra2=https://192.168.0.211:2380,infra3=https://192.168.0.212:2380 \
  26. --initial-cluster-state new \
  27. --data-dir=${ETCD_DATA_DIR}
  28. Restart=on-failure
  29. RestartSec=5
  30. LimitNOFILE=65536
  31.  
  32. [Install]
  33. WantedBy=multi-user.target

c.创建必要的目录

  1. mkdir -p /var/lib/etcd/
  2. mkdir /etc/etcd

d.编辑etcd的配置文件 vim /etc/etcd/etcd.conf node210的配置文件/etc/etcd/etcd.conf为

  1. # [member]
  2. ETCD_NAME=infra1
  3. ETCD_DATA_DIR="/var/lib/etcd"
  4. ETCD_LISTEN_PEER_URLS="https://192.168.0.210:2380"
  5. ETCD_LISTEN_CLIENT_URLS="https://192.168.0.210:2379"
  6.  
  7. #[cluster]
  8. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.210:2380"
  9. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  10. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.210:2379"

node211的配置文件/etc/etcd/etcd.conf为

  1. # [member]
  2. ETCD_NAME=infra2
  3. ETCD_DATA_DIR="/var/lib/etcd"
  4. ETCD_LISTEN_PEER_URLS="https://192.168.0.211:2380"
  5. ETCD_LISTEN_CLIENT_URLS="https://192.168.0.211:2379"
  6.  
  7. #[cluster]
  8. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.211:2380"
  9. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  10. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.211:2379"

node212的配置文件/etc/etcd/etcd.conf为

  1. # [member]
  2. ETCD_NAME=infra3
  3. ETCD_DATA_DIR="/var/lib/etcd"
  4. ETCD_LISTEN_PEER_URLS="https://192.168.0.212:2380"
  5. ETCD_LISTEN_CLIENT_URLS="https://192.168.0.212:2379"
  6.  
  7. #[cluster]
  8. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.212:2380"
  9. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  10. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.212:2379"

#在所有节点执行,启动etcd

  1. systemctl daemon-reload
  2. systemctl enable etcd
  3. systemctl start etcd
  4. systemctl status etcd

如果报错,就需要查看/var/log/messages文件进行排错

e.测试集群是否正常

  1. 验证ETCD是否成功启动
  2. etcdctl \
  3. --ca-file=/etc/kubernetes/ssl/ca.pem \
  4. --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  5. --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  6. cluster-health

4.配置kubernetes参数 a.下载kubernetes编译好的二进制文件并进行分发

  1. wget https://dl.k8s.io/v1.8.5/kubernetes-server-linux-amd64.tar.gz
  2. tar zxf kubernetes-server-linux-amd64.tar.gz
  3. cp -rf kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kubectl,kubefed,kubelet,kube-proxy,kube-scheduler} /usr/local/bin/
  4. scp -r kubernetes/server/bin/{kubelet,kube-proxy} 192.168.0.211:/usr/local/bin/
  5. scp -r kubernetes/server/bin/{kubelet,kube-proxy} 192.168.0.212:/usr/local/bin/

#查看kubernetes最新版,可到https://github.com/kubernetes/kubernetes/releases 然后进入 CHANGELOG-x.x.md 即可获取二进制包的下载地址

b.创建 TLS Bootstrapping Token

  1. cd /etc/kubernetes
  2. export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
  3. cat > token.csv <<EOF
  4. ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  5. EOF

（注：原文自此处起缺失了 kube-apiserver、kube-controller-manager、kube-scheduler 以及 kubelet 的安装与配置章节，以下为各节点就绪后 kubectl get nodes 的输出示例）

  1. kubectl get nodes
  2. NAME            STATUS    AGE    VERSION
  3. 192.168.0.210   Ready     14h    v1.8.5
  4. 192.168.0.211   Ready     14h    v1.8.5
  5. 192.168.0.212   Ready     14h    v1.8.5

c.安装及配置kube-proxy #配置kube-proxy服务启动文件 vim /usr/lib/systemd/system/kube-proxy.service

  1. [Unit]
  2. Description=Kubernetes Kube-Proxy Server
  3. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  4. After=network.target
  5.  
  6. [Service]
  7. EnvironmentFile=-/etc/kubernetes/config
  8. EnvironmentFile=-/etc/kubernetes/proxy
  9. ExecStart=/usr/local/bin/kube-proxy \
  10. $KUBE_LOGTOSTDERR \
  11. $KUBE_LOG_LEVEL \
  12. $KUBE_MASTER \
  13. $KUBE_PROXY_ARGS
  14. Restart=on-failure
  15. LimitNOFILE=65536
  16.  
  17. [Install]
  18. WantedBy=multi-user.target

#kube-proxy配置文件如下: node210: vim /etc/kubernetes/proxy

  1. ###
  2. # kubernetes proxy config
  3.  
  4. # default config should be adequate
  5.  
  6. # Add your own!
  7. KUBE_PROXY_ARGS="--bind-address=192.168.0.210 --hostname-override=192.168.0.210 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --cluster-cidr=10.254.0.0/16"

node211: vim /etc/kubernetes/proxy

  1. ###
  2. # kubernetes proxy config
  3.  
  4. # default config should be adequate
  5.  
  6. # Add your own!
  7. KUBE_PROXY_ARGS="--bind-address=192.168.0.211 --hostname-override=192.168.0.211 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --cluster-cidr=10.254.0.0/16"

node212: vim /etc/kubernetes/proxy

  1. ###
  2. # kubernetes proxy config
  3.  
  4. # default config should be adequate
  5.  
  6. # Add your own!
  7. KUBE_PROXY_ARGS="--bind-address=192.168.0.212 --hostname-override=192.168.0.212 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --cluster-cidr=10.254.0.0/16"

#启动kube-proxy服务

  1. systemctl daemon-reload
  2. systemctl enable kube-proxy
  3. systemctl start kube-proxy
  4. systemctl status kube-proxy

d.在所有节点默认开启forward为accept vim /usr/lib/systemd/system/forward.service

  1. [Unit]
  2. Description=iptables forward
  3. Documentation=http://iptables.org/
  4. After=network.target docker.service
  5.  
  6. [Service]
  7. Type=forking
  8. ExecStart=/usr/sbin/iptables -P FORWARD ACCEPT
  9. ExecReload=/usr/sbin/iptables -P FORWARD ACCEPT
  10. ExecStop=/usr/sbin/iptables -P FORWARD ACCEPT
  11. PrivateTmp=true
  12.  
  13. [Install]
  14. WantedBy=multi-user.target

#启动forward服务

  1. systemctl daemon-reload
  2. systemctl enable forward
  3. systemctl start forward
  4. systemctl status forward

7.测试集群是否工作正常 a.创建一个deploy kubectl run nginx --replicas=2 --labels="run=nginx-service" --image=nginx --port=80 b.映射服务到外网可访问 kubectl expose deployment nginx --type=NodePort --name=nginx-service c.查看服务状态（注意：Kubernetes 资源名及镜像名必须全小写，"Nginx" 会被 API 拒绝）

  1. kubectl describe svc nginx-service
  2. Name: nginx-service
  3. Namespace: default
  4. Labels: run=nginx-service
  5. Annotations:
  6. Selector: run=nginx-service
  7. Type: NodePort
  8. IP: 10.254.84.99
  9. Port: 80/TCP
  10. NodePort: 30881/TCP
  11. Endpoints: 172.30.1.2:80,172.30.54.2:80
  12. Session Affinity: None
  13. Events:

d.查看pods启动情况

  1. kubectl get pods
  2. NAME READY STATUS RESTARTS AGE
  3. nginx-2317272628-nsfrr 1/1 Running 0 1m
  4. nginx-2317272628-qbbgg 1/1 Running 0 1m

e.在外网通过 http://192.168.0.210:30881 http://192.168.0.211:30881 http://192.168.0.212:30881 都可以访问Nginx页面

若无法访问,可通过iptables -nL查看forward链是否开启

猜你在找的CentOS相关文章