
Kubernetes Cluster Setup

CentOS 7.5 or later is required; check the version with the command cat /etc/redhat-release.

One Master, Two Nodes

Planning

Node IP          Hostname     Node Type
192.168.174.180  k8s-master   master
192.168.174.181  k8s-node1    node
192.168.174.182  k8s-node2    node

Steps required on all nodes

1. Disable the firewall

# 1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Check the firewall status
systemctl status firewalld
# Install vim
yum -y install vim
# Install wget
yum -y install wget

2. Set the hostname

# 2. Set the hostname (the current hostname can be viewed with the hostname command)
hostnamectl set-hostname <hostname>

3. Hostname resolution

# 3. Hostname resolution
cat >> /etc/hosts << EOF
127.0.0.1       localhost
192.168.174.180 k8s-master
192.168.174.181 k8s-node1
192.168.174.182 k8s-node2
EOF

4. Upgrade the system kernel

# Check the current OS version
cat /etc/redhat-release
# Check the current kernel version
uname -sr
# Enable the ELRepo repository on CentOS 7.x
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# List the available kernel packages
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
# Install the latest mainline kernel
yum -y --enablerepo=elrepo-kernel install kernel-ml
# Set the default kernel
vim /etc/default/grub

GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
# Change this line; the original value was saved
GRUB_DEFAULT=0
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet"
GRUB_DISABLE_RECOVERY="true"

# Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
# Reboot the system
reboot
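
After the reboot, it is worth confirming that the new kernel is actually in use (the exact version string depends on which kernel-ml build was installed):

# Optional check: confirm the new mainline kernel is running
uname -sr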

5. Time synchronization (optional)

# Option 1: enable the chronyd service
systemctl start chronyd
systemctl enable chronyd

# Option 2: install ntpdate (recommended)
yum install ntpdate -y
ntpdate time.windows.com

6. Disable SELinux

# Edit /etc/selinux/config and set SELINUX to disabled
# Note: a reboot is required for the change to take effect
SELINUX=disabled

# The change can also be made quickly with the following command
sed -i 's/enforcing/disabled/' /etc/selinux/config

7. Disable the swap partition

# Edit /etc/fstab and comment out the swap line
# Note: a reboot is required for the change to take effect
# Comment out: /dev/mapper/centos-swap swap
# /dev/mapper/centos-swap swap
# Or use the following command
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Or disable swap for the current session only (this does not survive a reboot)
swapoff -a
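
An optional check that swap is really off:

# Optional check: the Swap line should report 0 used / 0 total
free -m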

8. Network configuration

# 1. Modify /etc/sysctl.conf
# If the settings are not present yet, append them with the following commands
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf
# If the settings already exist, modify them with the following commands
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g"  /etc/sysctl.conf

# 2. Load the br_netfilter module
modprobe br_netfilter
# Reload the configuration to apply the changes
sysctl -p
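
An optional check that the bridge and forwarding settings took effect:

# Optional check: both values should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward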

# 3. Enable IPVS
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
# Write the modules that need to be loaded into a script file
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Make the script executable
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules
# Check that the modules were loaded successfully
lsmod | grep -e ip_vs -e nf_conntrack

# Reboot
reboot

9. Install Docker

# Install gcc and gcc-c++ via yum
yum -y install gcc
yum -y install gcc-c++
# Install the required packages
yum -y install yum-utils
# Set up the stable repository
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Refresh the yum package index
yum makecache fast
# List the Docker versions available in the repository
yum list docker-ce --showduplicates | sort -r
# Install a specific Docker version (v20.10)
yum -y install docker-ce-3:20.10.8-3.el7.x86_64 docker-ce-cli-1:20.10.8-3.el7.x86_64 containerd.io
# Start Docker
systemctl start docker
# Enable Docker to start on boot
systemctl enable docker
# Verify that Docker was installed successfully
docker version

# Aliyun registry mirror acceleration
mkdir -p /etc/docker
# Configure the registry mirrors (the first entry is the author's own Aliyun mirror address)
tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],	
  "registry-mirrors": [
    "https://ppdui2ld.mirror.aliyuncs.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://registry.docker-cn.com",
    "https://docker.mirrors.sjtug.sjtu.edu.cn"
  ],
  "live-restore": true,
  "log-driver":"json-file",
  "log-opts": {"max-size":"500m", "max-file":"3"},
  "max-concurrent-downloads": 10,
  "max-concurrent-uploads": 5,
  "storage-driver": "overlay2"
}
EOF

# Reload and restart Docker
systemctl daemon-reload
systemctl restart docker
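
An optional check that the daemon.json settings were picked up after the restart:

# Optional check: the cgroup driver should be systemd and the mirrors should be listed
docker info | grep -i "cgroup driver"
docker info | grep -i -A 6 "registry mirrors"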

10. Install kubeadm, kubelet, and kubectl

# Add the Aliyun Kubernetes YUM repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubeadm, kubelet, and kubectl, pinned here to version 1.21.10
yum install -y kubelet-1.21.10 kubeadm-1.21.10 kubectl-1.21.10

# To keep the cgroup driver used by Docker consistent with the one used by kubelet,
# it is recommended to modify /etc/sysconfig/kubelet as follows
vim /etc/sysconfig/kubelet

KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"

# Enable kubelet to start on boot
systemctl enable kubelet

11. Prepare the cluster images

# List the images required for the Kubernetes installation
kubeadm config images list
# Pull the images according to the versions listed by the command above
# (make sure they match the --kubernetes-version passed to kubeadm init later)
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.21.14
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.21.14
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.21.14
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.21.14
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.0
# Re-tag the coredns image to the repository path that kubeadm expects
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.0 registry.cn-hangzhou.aliyuncs.com/google_containers/coredns/coredns:v1.8.0
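
The pulls above can also be scripted; this is a minimal sketch that assumes the Aliyun mirror repository hosts every image listed by kubeadm under the same name (the coredns re-tag above is still required afterwards):

# Pull every required image from the mirror in one loop
for img in $(kubeadm config images list 2>/dev/null | awk -F'/' '{print $NF}'); do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
done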

12. Configure passwordless SSH

# Generate a key pair
ssh-keygen -t rsa

# Copy the public key to the other servers
for i in k8s-node1 k8s-node2;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

Operations on the master node

# Create the cluster on the master node
kubeadm init \
	--apiserver-advertise-address=192.168.174.180 \
	--image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
	--kubernetes-version=v1.21.10 \
	--service-cidr=10.96.0.0/16 \
	--pod-network-cidr=10.244.0.0/16
Note:
1. apiserver-advertise-address must be the host's own IP address.
2. apiserver-advertise-address, service-cidr, and pod-network-cidr must not be in the same network range.
3. Do not use the 172.17.0.1/16 range, because it is what Docker uses by default.

# After the initialization-success log is printed, follow the instructions it contains
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.174.180:6443 --token es4u2j.k5nb3zi4769y1gct \
        --discovery-token-ca-cert-hash sha256:77aadd598df82c30644b42ee503e8914af073d9703f855fceb12540f3863335c 

# Create the required files
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# If you are the root user, you can also run the following command
export KUBECONFIG=/etc/kubernetes/admin.conf

# The default token is valid for 24 hours; once it expires it can no longer be used,
# and a new one can be created with the following command
kubeadm token create --print-join-command
# Generate a token that never expires
kubeadm token create --ttl 0 --print-join-command
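
An optional check to confirm the token exists and see its TTL:

# Optional check: list the bootstrap tokens and their expiration times
kubeadm token list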

Operations on the node machines

# Run the join command printed above on node1 and node2:
kubeadm join 192.168.174.180:6443 --token es4u2j.k5nb3zi4769y1gct \
        --discovery-token-ca-cert-hash sha256:77aadd598df82c30644b42ee503e8914af073d9703f855fceb12540f3863335c
        
# Allow the node machines to use kubectl as well
# On each node
mkdir -pv ~/.kube
touch ~/.kube/config
# Copy the admin kubeconfig over from the master node (run on the master, once per node)
scp /etc/kubernetes/admin.conf root@192.168.174.181:~/.kube/config
scp /etc/kubernetes/admin.conf root@192.168.174.182:~/.kube/config

Deploy the network plugin

# Use Calico as the network plugin (on the master node)
kubectl apply -f https://projectcalico.docs.tigera.io/v3.19/manifests/calico.yaml
# Watch the progress of the CNI network plugin deployment
kubectl get pods -n kube-system
watch kubectl get pods -n kube-system
# Check the node status
kubectl get nodes

Set kube-proxy to ipvs mode

# Edit the ConfigMap on the master node
kubectl edit cm kube-proxy -n kube-system
# Find the mode field and change it to ipvs
apiVersion: v1
data:
  config.conf:
  .......
    ipvs:
      excludeCIDRs: null
      minSyncPeriod: 0s
      scheduler: ""
      strictARP: false
      syncPeriod: 0s
      tcpFinTimeout: 0s
      tcpTimeout: 0s
      udpTimeout: 0s
    kind: KubeProxyConfiguration
    metricsBindAddress: ""
    # Change this line
    mode: "ipvs"

# Delete the kube-proxy Pods so the cluster automatically recreates them with the new configuration
kubectl delete pod -l k8s-app=kube-proxy -n kube-system
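
Once the new kube-proxy Pods are running, an optional check that ipvs mode is actually in effect (ipvsadm was installed in step 8):

# Optional check: the logs should mention the ipvs proxier
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i proxier
# Optional check: ipvs rules should now be visible on the nodes
ipvsadm -Ln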

kubectl command auto-completion

yum install -y bash-completion

source /usr/share/bash-completion/bash_completion

source <(kubectl completion bash)

echo "source <(kubectl completion bash)" >> ~/.bashrc

Binary High-Availability Cluster

Planning

Node IP          Hostname                       Node Type
192.168.174.100  k8s-master-1                   master
192.168.174.101  k8s-master-2                   master
192.168.174.102  k8s-master-3                   master
192.168.174.103  k8s-node-1                     node
192.168.174.104  k8s-node-2                     node
192.168.174.105  k8s-node-3                     node
192.168.174.200  virtual node (keepalived VIP)  master

Install CFSSL

# Download the binaries on the k8s-master-1 node (the download can be slow)
wget https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl-certinfo_1.5.0_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64
# Make them executable
chmod +x cfssl*
# Rename them in batch; note this is run in the current user's home directory
for name in `ls cfssl*`; do mv $name ${name%_1.5.0_linux_amd64};  done
# Move the binaries to the system bin directory
mv cfssl* /usr/bin
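
An optional check that the tools are on the PATH:

# Optional check: print the cfssl version
cfssl version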

Steps required on all nodes

Note: only carry out steps 1 through 9 of the section above.

Set up the nodes following the one-master-two-node section above; the parts that need to change are:

1. Hostname resolution

# Hostname resolution; note the additional 192.168.174.200 keepalived virtual IP
cat >> /etc/hosts << EOF
127.0.0.1   $(hostname)
192.168.174.100 k8s-master-1
192.168.174.101 k8s-master-2
192.168.174.102 k8s-master-3
192.168.174.103 k8s-node-1
192.168.174.104 k8s-node-2
192.168.174.105 k8s-node-3
192.168.174.200 k8s-master-lb
EOF

2. Adjust resource limits

# Takes effect for the current session only
ulimit -SHn 65536
# Make it permanent
vim /etc/security/limits.conf
# Append the following at the end
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
# The entries can also be appended quickly with the following command
cat >> /etc/security/limits.conf << EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
EOF

3. Configure passwordless SSH

# Generate a key pair
ssh-keygen -t rsa
# Copy the public key to all nodes
for i in k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

4. Install tools

# Tools for downloading sources and other utilities
yum -y install wget git jq psmisc net-tools yum-utils device-mapper-persistent-data lvm2

5. Install IPVS

# Install the IPVS tools
yum -y install ipvsadm ipset sysstat conntrack libseccomp

# Configure the IPVS modules on all nodes with the following commands
# On kernel 4.19+ use nf_conntrack; on kernels below 4.19 use nf_conntrack_ipv4 instead
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

# Persist the IPVS module configuration by adding the following content
cat >> /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

# Enable the systemd module-load service
systemctl enable --now systemd-modules-load.service
# Check that the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack

# Pass bridged IPv4 traffic to the iptables chains on all nodes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
net.ipv4.conf.all.route_localnet = 1

vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16768
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16768
EOF

# Apply the settings
sysctl --system
# Reboot
reboot

Do not perform step 10 or anything after it.

Operations on the master nodes

Install the high-availability components

# Install keepalived and haproxy on all master nodes
yum -y install keepalived haproxy

=======================================================================================
# Configure haproxy
mkdir -pv /etc/haproxy
# Edit the haproxy configuration
vim /etc/haproxy/haproxy.cfg
# The configuration is as follows
global
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend  monitor-in
    bind                 *:33305
    mode                 http
    option               httplog
    monitor-uri          /monitor

listen    stats
    bind                 *:8888 # haproxy statistics page address
    mode                 http
    stats                enable
    stats                hide-version
    stats     uri        /stats
    stats     refresh    30s
    stats     realm      Haproxy\ Statistics
    stats     auth       admin:admin123 # username and password for the statistics page

frontend  k8s-master
    bind                 192.168.174.200:6443 # the virtual node's IP (keepalived VIP)
    mode                 tcp
    option               tcplog
    tcp-request          inspect-delay 5s
    default_backend      k8s-master

backend   k8s-master
    mode                 tcp
    option               tcplog
    option               tcp-check
    balance              roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    # the three master hosts
    server k8s-master-1 192.168.174.100:6443  check
    server k8s-master-2 192.168.174.101:6443  check
    server k8s-master-3 192.168.174.102:6443  check
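
Before starting the service, an optional syntax check of the configuration:

# Optional check: validate the haproxy configuration file
haproxy -c -f /etc/haproxy/haproxy.cfg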

=======================================================================================
# Configure keepalived
vim /etc/keepalived/keepalived.conf
# The keepalived configuration on the k8s-master-1 node is as follows
global_defs {
    router_id k8s-master-1 # hostname
    script_user root
    enable_script_security    
}

vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh" # health-check script
    interval 10
    weight -5
    fall 3  
    rise 2
}

vrrp_instance VI_1 { # virtual instance name; must be the same on every master
    state MASTER
    interface ens33
    mcast_src_ip 192.168.174.100 # this host's IP
    virtual_router_id 100 # VRRP router ID; must be the same on every master in this instance
    priority 100 # priority; use a lower value on the other masters
    nopreempt # nopreempt on the higher-priority node prevents it from grabbing the VIP back after recovering from a failure
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.174.200 # virtual IP
    }
    track_script {
       chk_apiserver # health-check script
    }
}

# keepalived health-check script
vim /etc/keepalived/check_apiserver.sh
# Script content
#!/bin/bash
 
err=0
for k in $(seq 1 5)
do
    check_code=$(pgrep kube-apiserver)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 5
        continue
    else
        err=0
        break
    fi
done
 
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh

========================================================================================
# Reload and start the services
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
# Test the virtual IP
ping 192.168.174.200 -c 4
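
An optional check to see which master currently holds the virtual IP (ens33 is the interface configured in keepalived above):

# Optional check: the VIP should appear on exactly one master
ip addr show ens33 | grep 192.168.174.200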

Install etcd

# 1. Download etcd (on the k8s-master-1 node), in root's home directory
# If the download is slow, fetch it from GitHub separately and upload it to the server
wget https://github.com/etcd-io/etcd/releases/download/v3.4.16/etcd-v3.4.16-linux-amd64.tar.gz
# Copy the archive to the other master nodes
for i in k8s-master-2 k8s-master-3;do scp etcd-* root@$i:/root/;done
# Extract etcd to /usr/local/bin
tar -zxvf etcd-v3.4.16-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.4.16-linux-amd64/etcd{,ctl}
# Verify the installation
etcdctl

=========================================================================================
# 2. Generate the etcd certificates (on the k8s-master-1 node)
# Create the directory
mkdir -pv /etc/kubernetes/pki/etcd
# Create the etcd root CA CSR configuration
sudo tee /etc/kubernetes/pki/etcd-ca-csr.json <<-'EOF'
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "etcd",
      "OU": "etcd"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF
# Generate the etcd root CA certificate
cfssl gencert -initca \
/etc/kubernetes/pki/etcd-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/ca -
# Create the etcd-cluster CSR configuration
sudo tee /etc/kubernetes/pki/etcd-cluster-csr.json <<-'EOF'
{
    "CN": "etcd-cluster",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [  
        "127.0.0.1",
        "k8s-master-1",
        "k8s-master-2",
        "k8s-master-3",
        "192.168.174.100",
        "192.168.174.101",
        "192.168.174.102"
    ],
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "O": "etcd",
            "ST": "ShangHai",
            "OU": "System"
        }
    ]
}
EOF
# Create the ca-config.json file
sudo tee /etc/kubernetes/pki/ca-config.json <<-'EOF'
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "server": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "kubernetes": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "etcd": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF

# Issue the etcd cluster certificate
cfssl gencert \
   -ca=/etc/kubernetes/pki/etcd/ca.pem \
   -ca-key=/etc/kubernetes/pki/etcd/ca-key.pem \
   -config=/etc/kubernetes/pki/ca-config.json \
   -profile=etcd \
   /etc/kubernetes/pki/etcd-cluster-csr.json | cfssljson -bare /etc/kubernetes/pki/etcd/etcd

# Create the directory on k8s-master-2 and k8s-master-3
mkdir -pv /etc/kubernetes/pki

# Copy the etcd certificates to the other master nodes
for i in k8s-master-2 k8s-master-3;do scp -r /etc/kubernetes/pki/etcd root@$i:/etc/kubernetes/pki;done

=========================================================================================
# 3. Install etcd
# Create the directory on all three master nodes
mkdir -pv /etc/etcd
# etcd configuration file
sudo tee /etc/etcd/etcd.yaml <<-'EOF'
name: 'etcd-master-1' # different on each master node
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.174.100:2380'  # k8s-master-1's IP
listen-client-urls: 'https://192.168.174.100:2379,http://127.0.0.1:2379' # k8s-master-1's IP
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.174.100:2380' # k8s-master-1's IP
advertise-client-urls: 'https://192.168.174.100:2379' # k8s-master-1's IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-master-1=https://192.168.174.100:2380,etcd-master-2=https://192.168.174.101:2380,etcd-master-3=https://192.168.174.102:2380' # cluster member addresses
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

# systemd unit so etcd starts on boot (on every master node)
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF'
[Unit]
Description=Etcd Service
Documentation=https://etcd.io/docs/v3.4/op-guide/clustering/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.yaml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

# Start etcd
systemctl daemon-reload
systemctl enable --now etcd
# Check the status
systemctl status etcd

# Test whether etcd is reachable
etcdctl --endpoints="192.168.174.100:2379,192.168.174.101:2379,192.168.174.102:2379" --cacert=/etc/kubernetes/pki/etcd/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table

Install the Kubernetes components

# Download the Kubernetes server binaries on the k8s-master-1 node (the download is slow)
wget https://dl.k8s.io/v1.21.1/kubernetes-server-linux-amd64.tar.gz
# Send the archive to all the other nodes
for i in k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3;do scp kubernetes-server-* root@$i:/root/;done

# On the master nodes, extract the binaries to /usr/local/bin
tar -xvf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

# On the node machines, extract the binaries to /usr/local/bin
tar -xvf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,-proxy}
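
An optional check that the binaries were extracted and are executable:

# Optional check: print the versions of the extracted binaries
kubelet --version
kubectl version --client    # master nodes only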

=========================================================================================
# 1. Generate the api-server certificate
# Create the apiserver-csr.json file (on the k8s-master-1 node)
# Note: the virtual node IP must also be added to hosts, as well as the first IP of the service network (10.96.0.1)
sudo tee /etc/kubernetes/pki/apiserver-csr.json <<-'EOF'
{
    "CN": "kube-apiserver",
    "hosts": [
      "10.96.0.1",
      "127.0.0.1",
      "192.168.174.200",
      "192.168.174.100",
      "192.168.174.101",
      "192.168.174.102",
      "192.168.174.103",
      "192.168.174.104",
      "192.168.174.105",
      "192.168.174.106",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "Kubernetes",
            "OU": "Kubernetes"
        }
    ]
}
EOF
# Create the Kubernetes CA (k8s-master-1 node)
sudo tee /etc/kubernetes/pki/ca-csr.json <<-'EOF'
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF

# Initialize the CA certificate
cfssl gencert -initca /etc/kubernetes/pki/ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca -
# Generate the api-server certificate
cfssl gencert   \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver

=========================================================================================
# 2. Generate the front-proxy certificates (front-proxy is the api-server aggregation layer)
# Create the front-proxy-ca-csr.json file (k8s-master-1)
sudo tee /etc/kubernetes/pki/front-proxy-ca-csr.json <<-'EOF'
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
# Initialize the CA certificate
cfssl gencert  -initca /etc/kubernetes/pki/front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
# Create the front-proxy-client-csr.json file
sudo tee /etc/kubernetes/pki/front-proxy-client-csr.json <<-'EOF'
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
# Generate the front-proxy client certificate
cfssl gencert \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=/etc/kubernetes/pki/ca-config.json \
-profile=kubernetes \
/etc/kubernetes/pki/front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

=========================================================================================
# 3. Generate and configure the controller-manager certificate
# Create the controller-manager-csr.json file (k8s-master-1)
sudo tee /etc/kubernetes/pki/controller-manager-csr.json <<-'EOF'
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=/etc/kubernetes/pki/ca-config.json \
   -profile=kubernetes \
  /etc/kubernetes/pki/controller-manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager

# Generate the kubeconfig; note that --server must use the virtual IP
kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://192.168.174.200:6443 \
     --kubeconfig=/etc/kubernetes/controller-manager.conf

# Set a context
kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/etc/kubernetes/controller-manager.conf

# set-credentials sets a user entry
kubectl config set-credentials system:kube-controller-manager \
     --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
     --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/controller-manager.conf

# Use this context as the default
kubectl config use-context system:kube-controller-manager@kubernetes \
     --kubeconfig=/etc/kubernetes/controller-manager.conf

=========================================================================================
# 4. Generate and configure the scheduler certificate
# Create the scheduler-csr.json file (k8s-master-1)
sudo tee /etc/kubernetes/pki/scheduler-csr.json <<-'EOF'
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=/etc/kubernetes/pki/ca-config.json \
   -profile=kubernetes \
   /etc/kubernetes/pki/scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

# Generate the kubeconfig; note that --server must use the virtual IP
kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://192.168.174.200:6443 \
     --kubeconfig=/etc/kubernetes/scheduler.conf

# set-credentials sets a user entry
kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.conf

# Set a context
kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.conf

# Use this context as the default
kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.conf

=========================================================================================
# 5. Generate and configure the admin certificate
# Create the admin-csr.json file (k8s-master-1)
sudo tee /etc/kubernetes/pki/admin-csr.json <<-'EOF'
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=/etc/kubernetes/pki/ca-config.json \
   -profile=kubernetes \
   /etc/kubernetes/pki/admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

# Generate the kubeconfig; note that --server must use the virtual IP
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.174.200:6443 \
--kubeconfig=/etc/kubernetes/admin.conf

# set-credentials sets a user entry
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.conf

# Set a context
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/admin.conf

# Use this context as the default
kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/etc/kubernetes/admin.conf

=========================================================================================
# 6. Generate the ServiceAccount key
# On the k8s-master-1 node
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

=========================================================================================
# 7. Send the kubeconfig files to the other master nodes
for NODE in k8s-master-2 k8s-master-3
do
  for FILE in admin.conf controller-manager.conf scheduler.conf
  do
  scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}
  done
done

# Copy the certificates to the other master nodes
for NODE in k8s-master-2 k8s-master-3
do
  scp -r /etc/kubernetes/* root@$NODE:/etc/kubernetes/
done

=========================================================================================
# 8. Start the components
# Create the directories on all master nodes
mkdir -pv /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
# Configure the api-server service (on each master node)
# --advertise-address: change to this master node's own IP
# --service-cluster-ip-range=10.96.0.0/16: change to your planned service network
# --etcd-servers: change to all of your etcd server addresses
sudo tee /usr/lib/systemd/system/kube-apiserver.service <<-'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --insecure-port=0  \
      --advertise-address=192.168.174.100 \
      --service-cluster-ip-range=10.96.0.0/16  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://192.168.174.100:2379,https://192.168.174.101:2379,https://192.168.174.102:2379 \
      --etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem  \
      --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem  \
      --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator,front-proxy-client  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF

# Start the api-server service (all master nodes)
systemctl daemon-reload && systemctl enable --now kube-apiserver
# Check the api-server status
systemctl status kube-apiserver

=========================================================================================
# 9. Configure the kube-controller-manager service
# Create the file on each master node
## --cluster-cidr=196.16.0.0/16 is the Pod network; change it to your planned range
sudo tee /usr/lib/systemd/system/kube-controller-manager.service <<-'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.conf \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=196.16.0.0/16 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
      
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

# Start the controller-manager service
systemctl daemon-reload && systemctl enable --now kube-controller-manager
# Check the status
systemctl status kube-controller-manager

=========================================================================================
# 10. Configure the kube-scheduler
# Create the kube-scheduler.service file (on all master nodes)
sudo tee /usr/lib/systemd/system/kube-scheduler.service  <<-'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.conf

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

# Start the scheduler service (on all master nodes)
systemctl daemon-reload && systemctl enable --now kube-scheduler
# Check the status
systemctl status kube-scheduler

=========================================================================================
# 11. Configure kubelet
# Generate the bootstrap-kubelet.conf file (k8s-master-1)
# Set the cluster; note that --server must use the virtual IP
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.174.200:6443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

# Set the token (it must match the token-id.token-secret pair defined in bootstrap.secret.yaml in step 13 below)
kubectl config set-credentials tls-bootstrap-token-user \
--token=l6fy8c.d683399b7a553977 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

# Set the context
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

# Use the context
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf

=========================================================================================
# 12. Configure kubectl (currently used only on the master nodes)
mkdir -pv /root/.kube 
cp /etc/kubernetes/admin.conf /root/.kube/config
# Verify that kubectl works
kubectl get pod
# If the command returns "No resources found", kubectl can already reach the apiserver and query resources
kubectl get nodes
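
An optional check on the control-plane components themselves (kubectl get cs is deprecated but still works on v1.21):

# Optional check: scheduler, controller-manager and etcd should report Healthy
kubectl get cs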

=========================================================================================
# 13. Create the bootstrap RBAC file (on the k8s-master-1 node)
sudo tee /etc/kubernetes/bootstrap.secret.yaml  <<-'EOF'
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-l6fy8c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: l6fy8c
  token-secret: d683399b7a553977
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

# Apply the bootstrap file
kubectl create -f /etc/kubernetes/bootstrap.secret.yaml
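
An optional check that the token Secret and RBAC bindings were created:

# Optional check: the bootstrap token secret and the cluster role bindings should exist
kubectl get secret -n kube-system bootstrap-token-l6fy8c
kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation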

Operations on the node machines

# 1. Create the required directories on all nodes
mkdir -pv /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
# 2. Send the certificates to the other nodes (from k8s-master-1)
for NODE in k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3; do
     scp -r /etc/kubernetes/* root@$NODE:/etc/kubernetes/
 done
# 3. Configure the kubelet service on all nodes
sudo tee /usr/lib/systemd/system/kubelet.service  <<-'EOF'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# 4. Configure the kubelet systemd drop-in file on all nodes
sudo tee /etc/systemd/system/kubelet.service.d/10-kubelet.conf  <<-'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
EOF

# 5. Configure the kubelet-conf.yml file on all nodes
sudo tee /etc/kubernetes/kubelet-conf.yml <<-'EOF'
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

# 6. Start kubelet on all nodes
systemctl daemon-reload && systemctl enable --now kubelet
# Check the status
systemctl status kubelet

# 7. Configure kube-proxy
# Generate the kube-proxy.conf file
# Create the kube-proxy ServiceAccount
kubectl -n kube-system create serviceaccount kube-proxy

# Create the cluster role binding
kubectl create clusterrolebinding system:kube-proxy \
--clusterrole system:node-proxier \
--serviceaccount kube-system:kube-proxy

# Export variables for later use
SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
PKI_DIR=/etc/kubernetes/pki
K8S_DIR=/etc/kubernetes

# Generate the kube-proxy kubeconfig
# --server: use the virtual IP
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.174.200:6443 \
--kubeconfig=${K8S_DIR}/kube-proxy.conf

# Set the kube-proxy credentials
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=/etc/kubernetes/kube-proxy.conf

# Set the context
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf

# Use the context
kubectl config use-context kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.conf

# Send the kubeconfig from k8s-master-1 to the other nodes
for NODE in k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2 k8s-node-3; do
      scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/
 done
 
# 8. Configure the kube-proxy.service file on all nodes
sudo tee /usr/lib/systemd/system/kube-proxy.service <<-'EOF'
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

# Create the kube-proxy.yaml file on all nodes
sudo tee /etc/kubernetes/kube-proxy.yaml <<-'EOF'
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.conf
  qps: 5
clusterCIDR: 196.16.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF

# Start kube-proxy on all nodes
systemctl daemon-reload && systemctl enable --now kube-proxy
# Check the status
systemctl status kube-proxy
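
An optional check that kube-proxy is programming ipvs rules:

# Optional check: the ipvs rule table should not be empty
ipvsadm -Ln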

Deploy the network plugin

# Download the Calico manifest (on the k8s-master-1 node)
curl https://docs.projectcalico.org/v3.19/manifests/calico-etcd.yaml -o calico.yaml

# Set the etcd cluster addresses (on the k8s-master-1 node)
sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.174.100:2379,https://192.168.174.101:2379,https://192.168.174.102:2379"#g' calico.yaml

# The etcd certificates need to be base64-encoded into the YAML (k8s-master-1)
ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.pem | base64 -w 0 `
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 -w 0 `
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 -w 0 `

# Substitute the base64-encoded certificate contents into the manifest (k8s-master-1)
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico.yaml

# Enable the etcd_ca and related settings (k8s-master-1)
sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico.yaml

# Set your own Pod network (k8s-master-1); this is the Pod CIDR, not the service CIDR
POD_SUBNET="196.16.0.0/16"
sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico.yaml

# Confirm the Calico manifest was modified correctly (k8s-master-1)
grep "CALICO_IPV4POOL_CIDR" calico.yaml -A 1

# Apply the Calico manifest (k8s-master-1)
kubectl apply -f calico.yaml

# The images are slow to download and can be pulled in advance on all nodes (check calico.yaml for the exact image tags it references)
docker pull calico/cni:v3.22.1
docker pull calico/pod2daemon-flexvol:v3.22.1
docker pull calico/node:v3.22.1
docker pull calico/kube-controllers:v3.22.1

# Check whether Calico has finished installing (k8s-master-1)
kubectl get pod -A

# Deploy CoreDNS (k8s-master-1)
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
# 10.96.0.10 is the 10th IP of the service network; change it to match your own service CIDR (k8s-master-1)
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
# Check whether the deployment succeeded (k8s-master-1)
kubectl get pod -A

# If everything deployed successfully, all nodes show Ready status
kubectl get nodes
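
As a final smoke test (a hypothetical nginx Deployment, not part of the original steps), scheduling and service networking can be verified like this:

# Create a test deployment and expose it via a NodePort service
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
# The Pod should reach Running and the service should get a NodePort
kubectl get pods,svc -o wide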