Installing Kubernetes with kubeadm

xuliang666 2019-07-04 15:48

System environment preparation

Environment

Two CentOS 7.5 hosts
cat /etc/hosts
192.168.100.101 k8s-master
192.168.100.102 k8s-node1
Service network: 10.96.0.0/16
Pod network: 10.81.0.0/16
Set the hostnames
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
Disable the firewall and SELinux
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

Tune kernel parameters
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
Disable swap
swapoff -a
Edit /etc/fstab and remove or comment out the swap entry so swap stays disabled after reboot.
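To do this non-interactively, one option is a sed one-liner (a sketch; it comments out every uncommented fstab line that has a swap field, so review the file afterwards):
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab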


Update and install yum repositories

yum install -y epel-release


Configure the Aliyun mirror as the yum source

yum install -y wget
1. Back up the existing repo file
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
2. Download the new CentOS-Base.repo into /etc/yum.repos.d/
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
3. Rebuild the yum cache
yum clean all
yum makecache


Remove old Docker versions

yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-selinux \
                  docker-engine-selinux \
                  docker-engine


Install Docker

Install from the repository

https://yq.aliyun.com/articles/110806
Install prerequisite packages
yum install -y yum-utils device-mapper-persistent-data lvm2
Add the Docker repo
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Enable the stable channel
yum-config-manager --enable docker-ce-stable
Disable it again if needed
yum-config-manager --disable docker-ce-stable
List the available Docker versions
yum list docker-ce --showduplicates | sort -r
Install the latest version, or pin a specific one
yum install docker-ce -y
yum install -y docker-ce-18.03.0.ce
Verify
docker info


Prepare the Kubernetes packages

Use a third-party image registry, or download the images in advance.

If you are installing the latest Kubernetes release, you can also use the Aliyun mirror.

Configure the Kubernetes yum repo

CentOS / RHEL / Fedora
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Then install directly with yum:

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable kubelet && systemctl start kubelet # Note: kubelet cannot start successfully at this point; it starts automatically once initialization completes
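Until kubeadm init supplies its configuration, the kubelet keeps restarting; this is expected. A quick way to confirm (optional):
systemctl status kubelet
journalctl -u kubelet --no-pager | tail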


Initialize Kubernetes

kubeadm init  --kubernetes-version=v1.13.1 --pod-network-cidr=10.81.0.0/16 --apiserver-advertise-address=192.168.100.101

Prepare the kubeconfig file for kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


Command parameters (a fuller example follows this list):

  • --pod-network-cidr: the Pod network CIDR. Default: 192.168.0.0/16.

  • --service-cidr: the Service network CIDR. Default: 10.96.0.0/12.

  • --kubernetes-version: the Kubernetes version to deploy. Each kubeadm release supports a limited range of versions and reports an error for unsupported ones.

  • --apiserver-advertise-address: the address the API server advertises on. Default: 0.0.0.0 (all interfaces).

  • --apiserver-bind-port: the API server port. Default: 6443.

  • --ignore-preflight-errors: ignore the listed preflight errors. By default, enabled swap triggers an error; with swap disabled this flag can be omitted.
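A fuller invocation combining the flags above (a sketch; the extra flags only spell out the values this article already uses, such as the 10.96.0.0/16 service network declared in the environment section, or the documented defaults):
kubeadm init \
  --kubernetes-version=v1.13.1 \
  --pod-network-cidr=10.81.0.0/16 \
  --service-cidr=10.96.0.0/16 \
  --apiserver-advertise-address=192.168.100.101 \
  --apiserver-bind-port=6443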


Configure the network

kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
Note: Calico defaults to the 192.168.0.0/16 network. If the host IPs are also in that range it will conflict, so edit the manifest by hand.
Here the pool is changed to 10.81.0.0/16:

            - name: CALICO_IPV4POOL_CIDR
              value: "10.81.0.0/16"
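If you prefer to patch the manifest before applying it, a minimal sketch (assumes the default CIDR appears only in the CALICO_IPV4POOL_CIDR entry of this v3.3 manifest):
curl -LO https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
# replace the default pool with the Pod network passed to kubeadm init
sed -i 's#192.168.0.0/16#10.81.0.0/16#' calico.yaml
kubectl apply -f calico.yaml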


Load balancing with IPVS

In a Kubernetes cluster, the kube-proxy component provides service load balancing. It uses iptables by default; for production, IPVS is recommended.


1. Create /etc/sysconfig/modules/ipvs.modules with the following content:
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
Make it executable and load the modules:
[root@linux-node1 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@linux-node1 ~]# source /etc/sysconfig/modules/ipvs.modules
Check that the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

2. Edit the kube-proxy configuration
   Change mode to ipvs, as shown below:
[root@linux-node1 ~]# kubectl edit cm kube-proxy -n kube-system
...
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999

After saving the change, restart kube-proxy.
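kube-proxy runs as a DaemonSet, so the simplest restart is to delete its pods and let them be recreated with the new ConfigMap (assumes kubeadm's default label k8s-app=kube-proxy):
kubectl -n kube-system delete pods -l k8s-app=kube-proxy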

3. Install ipvsadm and verify
[root@k8s-master ~]#  yum install -y ipvsadm   
[root@k8s-master ~]#  ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.100.101:6443         Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 10.81.0.2:53                 Masq    1      0          0         
  -> 10.81.0.3:53                 Masq    1      0          0         
TCP  10.105.54.12:5473 rr
UDP  10.96.0.10:53 rr
  -> 10.81.0.2:53                 Masq    1      0          0         
  -> 10.81.0.3:53                 Masq    1      0          0         
[root@k8s-master ~]#


Verify the cluster

[root@k8s-master ~]#  kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
[root@k8s-master ~]# kubectl get nodes --show-labels
NAME         STATUS   ROLES    AGE   VERSION   LABELS
k8s-master   Ready    master   40m   v1.13.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-master,node-role.kubernetes.io/master=
k8s-node1    Ready    <none>   37m   v1.13.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-node1
[root@k8s-master ~]# kubectl label nodes k8s-node1 node-role.kubernetes.io/node=
node/k8s-node1 labeled
[root@k8s-master ~]# kubectl get nodes --show-labels
NAME         STATUS   ROLES    AGE   VERSION   LABELS
k8s-master   Ready    master   42m   v1.13.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-master,node-role.kubernetes.io/master=
k8s-node1    Ready    node     39m   v1.13.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-node1,node-role.kubernetes.io/node=
[root@k8s-master ~]#


Adding and removing cluster nodes

Format: kubeadm join <master-ip>:<master-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>

How to find the token
[root@k8s-node1 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
39p4bn.spf1yvgt3n7qc933   <invalid>   2019-01-22T16:34:05+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
[root@k8s-node1 ~]#
[root@k8s-master ~]# kubeadm token create
1xlw8v.0iv91yae7c4yw3t0
[root@k8s-master ~]# kubeadm token list
TOKEN                     TTL   EXPIRES                     USAGES                   DESCRIPTION   EXTRA GROUPS
1xlw8v.0iv91yae7c4yw3t0   23h   2019-07-05T18:13:18+08:00   authentication,signing   <none>        system:bootstrappers:kubeadm:default-node-token
[root@k8s-master ~]#

## Compute the discovery-token-ca-cert-hash
[root@k8s-node1 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null |    openssl dgst -sha256 -hex | sed 's/^.* //'
896ead0cc384e0e41139544e01049948c7b878732216476c2d5608c94c919ed6
[root@k8s-node1 ~]# kubeadm join 192.168.100.101:6443 --token 39p4bn.spf1yvgt3n7qc933 --discovery-token-ca-cert-hash sha256:896ead0cc384e0e41139544e01049948c7b878732216476c2d5608c94c919ed6
Alternatively, skip CA verification:
kubeadm join 192.168.100.101:6443 --token add3mn.tnorrntsgfo64tku --discovery-token-unsafe-skip-ca-verification
If the token has expired, create a new one first:
kubeadm token create
kubeadm token list
kubeadm join 192.168.100.101:6443 --token ijluj4.zkmo9aneom8yw3tz  --discovery-token-unsafe-skip-ca-verification
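Recent kubeadm releases (including 1.13) can also print a ready-to-use join command, which avoids computing the CA hash by hand:
kubeadm token create --print-join-command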

Removing a node

kubectl delete node <node name>
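To remove a node cleanly, drain it first so its pods are evicted, then delete it (the same drain command the teardown section below uses):
kubectl drain k8s-node1 --delete-local-data --force --ignore-daemonsets
kubectl delete node k8s-node1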


Managing the cluster from a host outside the cluster

scp root@<master ip>:/etc/kubernetes/admin.conf .
kubectl --kubeconfig ./admin.conf get nodes
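Equivalently, export KUBECONFIG so that every kubectl call picks up the copied file (just a convenience; same effect as --kubeconfig):
export KUBECONFIG=$PWD/admin.conf
kubectl get nodes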

Resetting (tearing down) the cluster

Command format
1. kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
Example:
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   16h   v1.13.1
k8s-slave1   Ready    master   16h   v1.13.1
[root@k8s-master ~]# kubectl drain k8s-master --delete-local-data --force --ignore-daemonsets
node/k8s-master cordoned
WARNING: Ignoring DaemonSet-managed pods: kube-proxy-x7qgx, weave-net-hgx5z
pod/coredns-86c58d9df4-z6gms evicted
pod/coredns-86c58d9df4-ft67k evicted
node/k8s-master evicted
[root@k8s-master ~]# 
[root@k8s-master ~]# kubectl drain k8s-slave1 --delete-local-data --force --ignore-daemonsets
node/k8s-slave1 cordoned
WARNING: Ignoring DaemonSet-managed pods: kube-proxy-gb5bg, weave-net-dgjll
pod/coredns-86c58d9df4-xf884 evicted
pod/coredns-86c58d9df4-mrgbl evicted
node/k8s-slave1 evicted
[root@k8s-master ~]# 
Format:
2. kubectl delete node <node name>
Example:
[root@k8s-master ~]# kubectl delete node k8s-master
node "k8s-master" deleted
[root@k8s-master ~]# kubectl delete node k8s-node1
node "k8s-node1" deleted
[root@k8s-master ~]#
Verify:
[root@k8s-master ~]# kubectl get nodes
No resources found.
[root@k8s-master ~]# 


3. kubeadm reset
Example:
[root@k8s-master ~]# kubeadm reset
[reset] WARNING: changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] are you sure you want to proceed? [y/N]: y
[preflight] running pre-flight checks
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
W0125 10:01:24.805582    8782 reset.go:213] [reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: failed to get node registration: failed to get corresponding node: nodes "k8s-master" not found
[reset] stopping the kubelet service
[reset] unmounting mounted directories in "/var/lib/kubelet"
[reset] deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually.
For example: 
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

[root@k8s-master ~]# 

[root@k8s-slave1 ~]#  kubeadm reset
[reset] WARNING: changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] are you sure you want to proceed? [y/N]: y
[preflight] running pre-flight checks
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
W0125 10:01:47.326336  115631 reset.go:213] [reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: failed to get config map: Get https://192.168.103.200:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config: dial tcp 192.168.103.200:6443: connect: connection refused
[reset] stopping the kubelet service
[reset] unmounting mounted directories in "/var/lib/kubelet"
[reset] deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually.
For example: 
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

[root@k8s-slave1 ~]# 

4. iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
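Because this cluster switched kube-proxy to IPVS, also clear the IPVS tables, as the reset output above suggests (assumes ipvsadm is installed, as in step 3 of the IPVS section):
ipvsadm --clear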


Basic usage

Add Harbor registry credentials

When using Kubernetes, images are usually pulled from your own Harbor registry. To avoid password logins, configure a secret that carries the registry credentials.

#kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
# docker-registry creates a secret for use with a Docker registry

[root@k8s-master values.yaml]# kubectl create secret docker-registry registry-secret --docker-server=dev-hub.xx.net --docker-username=admin --docker-password=Harbor12345 --docker-email=admin@dev-hub.xx.net
secret/registry-secret created
[root@k8s-master values.yaml]# kubectl get secret
NAME                  TYPE                                  DATA   AGE
default-token-znlmw   kubernetes.io/service-account-token   3      20m
registry-secret       kubernetes.io/dockerconfigjson        1      8s
[root@k8s-master values.yaml]# kubectl get secret registry-secret
NAME              TYPE                             DATA   AGE
registry-secret   kubernetes.io/dockerconfigjson   1      20s
[root@k8s-master values.yaml]# kubectl get secret registry-secret -o yaml
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJkZXYtaHViLmppYXR1aXl1bi5uZXQiOnsiVXNlcm5hbWUiOiJhZG1pbiIsIlBhc3N3b3JkIjoiSGFyYm9yMTIzNDUiLCJFbWFpbCI6ImFkbWluQGRldi1odWIuamlhdHVpeXVuLm5ldCJ9fX0=
kind: Secret
metadata:
  creationTimestamp: "2019-06-14T08:57:50Z"
  name: registry-secret
  namespace: default
  resourceVersion: "2113"
  selfLink: /api/v1/namespaces/default/secrets/registry-secret
  uid: 7d2c98ef-8e82-11e9-8cc4-000c29a74c85
type: kubernetes.io/dockerconfigjson

[root@k8s-master ~]# kubectl get secret registry-secret -o yaml > registry-secret-dev.yaml

Strip the extra runtime fields
[root@k8s-master ~]# cat registry-secret-dev.yaml 
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJkZXYtaHViLmppYXR1aXl1bi5uZXQiOnsiVXNlcm5hbWUiOiJhZG1pbiIsIlBhc3N3b3JkIjoiSGFyYm9yMTIzNDUiLCJFbWFpbCI6ImFkbWluQGRldi1odWIuamlhdHVpeXVuLm5ldCJ9fX0=
kind: Secret
metadata:
  name: registry-secret
  namespace: default
type: kubernetes.io/dockerconfigjson
[root@k8s-master ~]# 

echo "eyJhdXRocyI6eyJkZXYtaHViLmppYXR1aXl1bi5uZXQiOnsiVXNlcm5hbWUiOiJhZG1pbiIsIlBhc3N3b3JkIjoiSGFyYm9yMTIzNDUiLCJFbWFpbCI6ImFkbWluQGRldi1odWIuamlhdHVpeXVuLm5ldCJ9fX0=" |base64 -d
{"auths":{"dev-hub.xx.net":{"Username":"admin","Password":"Harbor12345","Email":"admin@dev-hub.xx.net"}}}[root@dev-k8s-master ~]#
