
jiuchongxiao 2018-04-25 11:39

 

Creating a Kubernetes v1.10 Cluster with kubeadm

1. Server Information and Node Roles

Hostname      IP              Role
k8s-master    192.168.0.104   master, etcd, keepalived
k8s-client1   192.168.0.99    master, etcd, keepalived
k8s-client2   192.168.0.114   node

Virtual IP (VIP): 192.168.0.105

2. Version Information

docker 17.03.2-ce

kubelet-1.10.0-0.x86_64

kubernetes-cni-0.6.0-0.x86_64

kubectl-1.10.0-0.x86_64

kubeadm-1.10.0-0.x86_64

3. Environment Preparation

3.1 Set the hostnames (run each command on its corresponding node)

hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-client1
hostnamectl set-hostname k8s-client2

3.2 Configure host mappings (on all nodes)

192.168.0.104  k8s-master
192.168.0.99   k8s-client1
192.168.0.114  k8s-client2
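
To apply these mappings, they can be appended to /etc/hosts on each node; the heredoc below is one convenient way to do it (my phrasing, not from the original):

cat <<EOF >> /etc/hosts
192.168.0.104  k8s-master
192.168.0.99   k8s-client1
192.168.0.114  k8s-client2
EOF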

3.3 Configure passwordless SSH login (run on k8s-master)

ssh-keygen   # just press Enter at every prompt
ssh-copy-id k8s-client1
ssh-copy-id k8s-client2
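
To confirm passwordless login works, a quick loop like this can be run from k8s-master (an added check, not part of the original steps):

for h in k8s-client1 k8s-client2; do
  ssh -o BatchMode=yes "$h" hostname   # should print each hostname with no password prompt
done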

3.4 Host configuration on all nodes: stop the firewall, disable swap, disable SELinux, set kernel parameters, add the Kubernetes yum repo, install dependencies, and configure ntp (a reboot is recommended once this is done)

systemctl stop firewalld
systemctl disable firewalld

swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
ls /proc/sys/net/bridge

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y epel-release
yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim ntpdate libseccomp libtool-ltdl

systemctl enable ntpdate.service
echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
systemctl start ntpdate.service

echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf

4. Install and Configure keepalived (master nodes)

4.1 Install keepalived

yum install -y keepalived 
systemctl enable keepalived

keepalived.conf on k8s-master:

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.0.105:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface enp0s3
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip 192.168.0.104
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.0.99
    }
    virtual_ipaddress {
        192.168.0.105/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

keepalived.conf on k8s-client1:


cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.0.105:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface enp0s3
    virtual_router_id 61
    priority 90
    advert_int 1
    mcast_src_ip 192.168.0.99
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        192.168.0.104
    }
    virtual_ipaddress {
        192.168.0.105/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF

4.2 Start keepalived

systemctl restart keepalived

You can see that the VIP is now bound on k8s-master (output of ip a on k8s-master):

enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:b2:09:6a brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.104/24 brd 192.168.0.255 scope global enp0s3
       valid_lft forever preferred_lft forever
    inet 192.168.0.105/24 scope global secondary enp0s3
       valid_lft forever preferred_lft forever
    inet6 fe80::e3d1:55df:2f64:8571/64 scope link
       valid_lft forever preferred_lft forever
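
A simple way to test failover (an added suggestion, not in the original): stop keepalived on k8s-master, confirm the VIP moves to k8s-client1, then restart it.

systemctl stop keepalived                                    # on k8s-master
ssh k8s-client1 'ip addr show enp0s3 | grep 192.168.0.105'   # the VIP should now appear here
systemctl start keepalived                                   # on k8s-master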

5. Create etcd Certificates (run on k8s-master only)

Set up the cfssl environment

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH
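
A quick sanity check that the tools are installed (added for convenience):

cfssl version
command -v cfssljson cfssl-certinfo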


Create the CA configuration files (the IPs configured below are the etcd node IPs)

 

mkdir /root/ssl
cd /root/ssl

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

 

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

 

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

 

cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.104",
    "192.168.0.99"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

 

cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd
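
Optionally, the generated certificate's SANs and expiry can be inspected (an added verification step):

cfssl-certinfo -cert etcd.pem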

 

Distribute the etcd certificates from k8s-master to k8s-client1

mkdir -p /etc/etcd/ssl 
cp etcd.pem etcd-key.pem ca.pem /etc/etcd/ssl/ 
ssh -n k8s-client1 "mkdir -p /etc/etcd/ssl && exit" 
scp -r /etc/etcd/ssl/*.pem k8s-client1:/etc/etcd/ssl/

6. Install and Configure etcd (both master nodes)

Install etcd

yum install etcd -y 
mkdir -p /var/lib/etcd

Configure etcd.service on k8s-master


cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name k8s-master \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.0.104:2380 \
  --listen-peer-urls https://192.168.0.104:2380 \
  --listen-client-urls https://192.168.0.104:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.0.104:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster k8s-master=https://192.168.0.104:2380,k8s-client1=https://192.168.0.99:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Configure etcd.service on k8s-client1


cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name k8s-client1 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.0.99:2380 \
  --listen-peer-urls https://192.168.0.99:2380 \
  --listen-client-urls https://192.168.0.99:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.0.99:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster k8s-master=https://192.168.0.104:2380,k8s-client1=https://192.168.0.99:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

 

Enable etcd at boot (an etcd cluster needs at least 2 members up before it can start; if startup fails, check /var/log/messages)

mv /etc/systemd/system/etcd.service /usr/lib/systemd/system/
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd

 

Run the following command on both etcd nodes to check cluster health

etcdctl --endpoints=https://192.168.0.104:2379,https://192.168.0.99:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem  cluster-health
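
The member list can be queried with the same flags (added for completeness):

etcdctl --endpoints=https://192.168.0.104:2379,https://192.168.0.99:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem member list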

7. Install and Configure Docker (all nodes)

Install Docker (the highest Docker version kubeadm currently supports is 17.03.x)
yum install https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm  -y
yum install https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm  -y

Edit the configuration file vim /usr/lib/systemd/system/docker.service and set the ExecStart line:

ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com
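
The same edit can be scripted; the sed below is a sketch that assumes the stock unit file's ExecStart begins with /usr/bin/dockerd. Note that exposing the daemon on tcp://0.0.0.0:2375 without TLS is unsafe on untrusted networks; drop that flag if remote access is not needed.

sed -i 's|^ExecStart=/usr/bin/dockerd.*|ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com|' /usr/lib/systemd/system/docker.service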

Start Docker

systemctl daemon-reload

systemctl restart docker

systemctl enable docker

systemctl status docker

 

8. Install and Configure kubeadm

Install kubelet, kubeadm, and kubectl on all nodes

yum install -y kubelet kubeadm kubectl 
systemctl enable kubelet

On all nodes, edit the kubelet config file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

# change this line
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
# add this line
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"

After editing the config file, be sure to reload systemd on all nodes

systemctl daemon-reload

systemctl enable kubelet

Command completion

yum install -y bash-completion

source /usr/share/bash-completion/bash_completion

source <(kubectl completion bash)

echo "source <(kubectl completion bash)" >> ~/.bashrc

 

9. Initialize the Cluster

Create the cluster init configuration file on both k8s-master and k8s-client1 (the file is identical on both)


cat <<EOF > config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
etcd:
  endpoints:
  - https://192.168.0.104:2379
  - https://192.168.0.99:2379
  caFile: /etc/etcd/ssl/ca.pem
  certFile: /etc/etcd/ssl/etcd.pem
  keyFile: /etc/etcd/ssl/etcd-key.pem
  dataDir: /var/lib/etcd
networking:
  podSubnet: 172.30.0.0/16
kubernetesVersion: 1.10.0
api:
  advertiseAddress: "192.168.0.105"
token: "b99a00.a144ef80536d4344"
tokenTTL: "0s"
apiServerCertSANs:
- k8s-master
- k8s-client1
- 192.168.0.104
- 192.168.0.99
- 192.168.0.114
- 192.168.0.105
featureGates:
  CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF
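
Since the file is identical on both masters, it can simply be copied from k8s-master (a convenience step, not in the original):

scp config.yaml k8s-client1:~/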

First, initialize the cluster on k8s-master

kubeadm init --config config.yaml 

If initialization fails, reset and try again:

kubeadm reset

A successful initialization ends with output like the following. Save the kubeadm join command; it is needed later when nodes join the cluster.

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

kubeadm join 192.168.0.105:6443 --token b99a00.a144ef80536d4344 --discovery-token-ca-cert-hash sha256:f79b68fb698c92b9336474eb3bf184e847f967dc58a6296911892662b98b1315

Run the following commands on k8s-master

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Distribute the kubeadm-generated certificates from k8s-master to k8s-client1

scp -r /etc/kubernetes/pki k8s-client1:/etc/kubernetes/

Deploy the flannel network (this only needs to be done on k8s-master)

1) Pull the flannel image and retag it

docker pull cnych/flannel:v0.10.0-amd64

docker tag cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64

2) Download and apply the manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# image version: quay.io/coreos/flannel:v0.10.0-amd64
kubectl create -f kube-flannel.yml
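
To confirm flannel is running, check the kube-system pods (an added verification):

kubectl get pods -n kube-system -o wide | grep flannel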

Check the cluster nodes

[root@k8s-master ~]# kubectl get node
NAME          STATUS    ROLES     AGE       VERSION
k8s-master    Ready     master    31m       v1.10.0

Run the same initialization on k8s-client1

kubeadm init --config config.yaml
# the output is exactly the same as on k8s-master

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Check the node information

[root@k8s-master ~]# kubectl get nodes
NAME          STATUS    ROLES     AGE       VERSION
k8s-master    Ready     master    1h        v1.10.0
k8s-client1   Ready     master    1h        v1.10.0

Allow the masters to run pods as well (by default, masters are not scheduled):

kubectl taint nodes --all node-role.kubernetes.io/master-
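
To verify the taint was removed (an added check):

kubectl describe node k8s-master | grep -i taint   # should report <none>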

10. Add the k8s-client2 Node to the Cluster

Run the following command on k8s-client2 to join it to the cluster

kubeadm join 192.168.0.105:6443 --token b99a00.a144ef80536d4344 --discovery-token-ca-cert-hash sha256:f79b68fb698c92b9336474eb3bf184e847f967dc58a6296911892662b98b1315

 

[root@k8s-master ~]# kubectl get node
NAME          STATUS    ROLES     AGE       VERSION
k8s-master    Ready     master    45m       v1.10.0
k8s-client1   Ready     master    15m       v1.10.0
k8s-client2   Ready     <none>    13m       v1.10.0
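
As a final smoke test (my addition, not part of the original), a small deployment confirms that pods schedule across the nodes:

kubectl run nginx --image=nginx --replicas=3   # in v1.10, kubectl run creates a Deployment
kubectl get pods -o wide                       # the pods should be spread across the nodes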
