首页 > 技术文章 > openEuler安装iSulad和k8s(单master)

yftyxa 2021-11-24 19:36 原文

1 修改主机名和hosts文件

  • 修改主机名

命令:hostnamectl set-hostname iSula01

  • 修改hosts文件

echo "192.168.38.136 iSula01" >> /etc/hosts

2 清空iptables,关闭firewalld和selinux

  • 清空iptables

iptables -F

  • 关闭firewalld

systemctl stop firewalld && systemctl disable firewalld

  • 关闭selinux

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

3 关闭swap

swapoff -a

vim /etc/fstab   #注释掉swap分区所在的那一行,防止重启后swap再次被挂载

4 时间同步

#openEuler 21.09的yum源有个小bug,把EPOL的源修改为:

[EPOL]
name=EPOL
baseurl=http://repo.openeuler.org/openEuler-21.09/EPOL/main/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://repo.openeuler.org/openEuler-21.09/OS/$basearch/RPM-GPG-KEY-openEuler

  • 命令:

yum install -y chrony

systemctl enable --now chronyd

chronyc sources

5 配置内核参数

  • 命令

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf

net.ipv4.ip_forward = 1

vm.swappiness = 0

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

fs.may_detach_mounts = 1

EOF

modprobe br_netfilter

sysctl --system

#说明:net.bridge.bridge-nf-call-*两项依赖br_netfilter内核模块,若未先执行modprobe br_netfilter,sysctl会报"No such file or directory";先加载模块再执行sysctl --system使配置生效即可

6 加载ipvs内核模块

  • 加载内核模块

cat > /etc/sysconfig/modules/ipvs.modules <<EOF

#!/bin/bash

modprobe -- br_netfilter

modprobe -- ip_vs

modprobe -- ip_vs_rr

modprobe -- ip_vs_wrr

modprobe -- ip_vs_sh

modprobe -- nf_conntrack

EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules &&  lsmod | grep -E "ip_vs|nf_conntrack"

#说明:若报错"modprobe: FATAL: Module nf_conntrack_ipv4 not found",是因为内核4.19及以上版本已将nf_conntrack_ipv4合并为nf_conntrack(openEuler 21.09内核为5.10),把上面的模块名改成nf_conntrack即可,不影响后续安装

7 安装ipvs管理工具

yum install -y ipset ipvsadm

 

#完成这一步骤后,重启所有的节点

8 安装和配置iSulad

  • 安装命令 

yum install -y iSulad

  • 配置iSulad:修改/etc/isulad/daemon.json文件,指定加速器(阿里加速器:https://8f3jf09b.mirror.aliyuncs.com)

{
"group": "isula",
"default-runtime": "lcr",
"graph": "/var/lib/isulad",
"state": "/var/run/isulad",
"engine": "lcr",
"log-level": "ERROR",
"pidfile": "/var/run/isulad.pid",
"log-opts": {
"log-file-mode": "0600",
"log-path": "/var/lib/isulad",
"max-file": "1",
"max-size": "30KB"
},
"log-driver": "stdout",
"container-log": {
"driver": "json-file"
},
"hook-spec": "/etc/default/isulad/hooks/default.json",
"start-timeout": "2m",
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"registry-mirrors": [
"https://461da12941f64b7f97eda2cae45cf736.mirror.swr.myhuaweicloud.com"
],
"insecure-registries": [
"rnd-dockerhub.huawei.com"
],
"pod-sandbox-image": "registry.aliyuncs.com/google_containers/pause:3.5",
"image-opt-timeout": "5m",
"image-server-sock-addr": "unix:///var/run/isulad/isula_image.sock",
"native.umask": "secure",
"network-plugin": "cni",
"cni-bin-dir": "/opt/cni/bin",
"cni-conf-dir": "/etc/cni/net.d",
"image-layer-check": false,
"use-decrypted-key": true,
"insecure-skip-verify-enforce": false
}

9 安装kubelet、kubectl和kubeadm

  •  配置kubernetes的yum源

cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

  • 安装kubeadm、kubelet、kubectl

yum install -y kubelet-1.22.0 kubeadm-1.22.0 kubectl-1.22.0

systemctl enable kubelet --now

在部署完成后,使用命令kubectl get cs发现

 

 需要注释掉/etc/kubernetes/manifests下kube-scheduler.yaml和kube-controller-manager.yaml中的“- --port=0”这一行,然后等一会儿,状态就成了ok:

 

 

10 部署master节点

  • 生成kubeadm.yaml文件
kubeadm config print init-defaults --component-configs KubeletConfiguration --component-configs KubeProxyConfiguration > kubeadm.yaml
 

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.38.136
bindPort: 6443
nodeRegistration:
criSocket: /var/run/isulad.sock
imagePullPolicy: IfNotPresent
name: iSula01
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 0s
cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s

fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
acceptContentTypes: ""
burst: 0
contentType: ""
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
maxPerCore: null
min: null
tcpCloseWaitTimeout: null
tcpEstablishedTimeout: null
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: null
minSyncPeriod: 0s
syncPeriod: 0s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
strictARP: false
syncPeriod: 0s
tcpFinTimeout: 0s
tcpTimeout: 0s
udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: "ipvs"

nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
udpIdleTimeout: 0s
winkernel:
enableDSR: false
networkName: ""
sourceVip: ""

  • 拉取k8s所需镜像

kubeadm config images pull --config kubeadm.yaml

  •    安装master节点

kubeadm init --config kubeadm.yaml

  •  配置访问集群
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
  • 配置网络
curl https://docs.projectcalico.org/archive/v3.15/manifests/calico.yaml -O

kubectl apply -f calico.yaml

网络配置完成后,组件会自动变成running状态

 

 如果某个pod的状态一直处于“ContainerCreating”或者其他状态,可强行删除该pod,k8s会自动创建一个新的pod出来,命令如下:

kubectl delete pod coredns-7f6cbbb7b8-n4x44  -n kube-system --force --grace-period=0

  • 添加work节点

kubeadm token create --print-join-command

使用以上命令返回的命令到其余节点进行添加,如:

kubeadm join 192.168.38.136:6443 --token rhmas8.87iot1cxgtpnvlm9 --discovery-token-ca-cert-hash sha256:3763d8e15b2ba8c5bf04531a648b1ab72f44570ccbbc6204225b45c9ce3ddd06 --cri-socket /var/run/isulad.sock

在添加过程如果有以下报错:

使用命令:modprobe br_netfilter && echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 解决

  •  部署失败,使用以下命令可推到重建

# 重置集群

kubeadm reset

# 停止kubelet

systemctl stop kubelet

# 删除已经部署的容器

crictl --runtime-endpoint unix:///var/run/isulad.sock ps -aq |xargs crictl --runtime-endpoint unix:///var/run/isulad.sock rm

# 清理所有目录

rm -rf /etc/kubernetes /var/lib/kubelet /var/lib/etcd /var/lib/cni/

推荐阅读