Deploying a Highly Available IPv4/IPv6 Kubernetes Cluster with kubeadm

By 小陈运维 · Published 2024-05-05


https://github.com/cby-chen/Kubernetes — maintaining open source takes effort; please give the repo a star. Thank you!

Introduction

A highly available Kubernetes (k8s) installation deployed with kubeadm, with IPv4+IPv6 dual-stack support.

I use IPv6 so the cluster can be reached over the public Internet, which is why static IPv6 addresses are configured.

If you have no IPv6 environment, or don't want to use IPv6, simply don't configure IPv6 addresses on the hosts.

Skipping IPv6 does not affect the rest of this guide; the cluster itself remains IPv6-capable, which leaves room to enable it later.

If you don't want IPv6, only omit the IPv6 address on the NIC; do not delete or otherwise touch the IPv6-related settings, or things will break.

It is strongly recommended to read this document on GitHub!!!

The GitHub copy is updated whenever problems are found, and documents for new versions are published there first whenever possible!!!

Basic OS environment configuration for k8s

Configure IP addresses

# Note!
# If the VMs were cloned, the NIC UUIDs will be duplicated.
# A duplicated UUID must be regenerated;
# with a duplicate UUID the host cannot obtain an IPv6 address.
#

# Regenerate the NIC UUIDs
ssh root@192.168.1.31 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.1.32 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.1.33 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.1.34 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"
ssh root@192.168.1.35 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0"

# Configure the static IPv4 addresses
ssh root@192.168.1.104 "nmcli con mod eth0 ipv4.addresses 192.168.1.31/24; nmcli con mod eth0 ipv4.gateway  192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.1.106 "nmcli con mod eth0 ipv4.addresses 192.168.1.32/24; nmcli con mod eth0 ipv4.gateway  192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.1.107 "nmcli con mod eth0 ipv4.addresses 192.168.1.33/24; nmcli con mod eth0 ipv4.gateway  192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.1.109 "nmcli con mod eth0 ipv4.addresses 192.168.1.34/24; nmcli con mod eth0 ipv4.gateway  192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"
ssh root@192.168.1.110 "nmcli con mod eth0 ipv4.addresses 192.168.1.35/24; nmcli con mod eth0 ipv4.gateway  192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0"

# If you have no IPv6, simply skip this step
ssh root@192.168.1.31 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.1.32 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.1.33 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.1.34 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"
ssh root@192.168.1.35 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0"

# Inspect the NIC configuration
# nmcli device show eth0
# nmcli con show eth0
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=2aaddf95-3f36-4a48-8626-b55ebf7f53e7
DEVICE=eth0
ONBOOT=yes
IPADDR=192.168.1.31
PREFIX=24
GATEWAY=192.168.1.1
DNS1=8.8.8.8
[root@localhost ~]# 
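
Before moving on, it is worth confirming that every host answers on both address families. A quick sketch (the addresses match the assignments above; skip the IPv6 loop if you did not configure IPv6):

# Verify IPv4 and IPv6 reachability of all five hosts
for ip4 in 192.168.1.31 192.168.1.32 192.168.1.33 192.168.1.34 192.168.1.35; do
    ping -c 1 -W 1 $ip4 > /dev/null && echo "$ip4 ok" || echo "$ip4 FAILED"
done
for ip6 in fc00:43f4:1eea:1::10 fc00:43f4:1eea:1::20 fc00:43f4:1eea:1::30 fc00:43f4:1eea:1::40 fc00:43f4:1eea:1::50; do
    ping6 -c 1 $ip6 > /dev/null && echo "$ip6 ok" || echo "$ip6 FAILED"
done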

Set the hostnames

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
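
If you would rather set all five hostnames from one machine, a small loop over the node list does the same thing (a sketch; it assumes the root SSH access used elsewhere in this guide):

for pair in 192.168.1.31:k8s-master01 192.168.1.32:k8s-master02 192.168.1.33:k8s-master03 192.168.1.34:k8s-node01 192.168.1.35:k8s-node02; do
    ssh root@${pair%%:*} "hostnamectl set-hostname ${pair##*:}"
done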

Configure the yum repositories

# Mirror instructions for other distributions
# https://mirrors.tuna.tsinghua.edu.cn/help/

# For Ubuntu
sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list

# For CentOS 7
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo

# For CentOS 8
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo

# For a private mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak  /etc/yum.repos.d/CentOS-*.repo
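
After rewriting the repo files, rebuild the cache to confirm the mirrors actually resolve:

yum clean all && yum makecache
yum repolist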

Install some essential tools

# For Ubuntu
apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl

# For CentOS 7
yum update -y && yum -y install  wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl

# For CentOS 8
yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl

Disable the firewall

# Skip on Ubuntu; run on CentOS
systemctl disable --now firewalld

Disable SELinux

# Skip on Ubuntu; run on CentOS
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

Disable swap

sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0

cat /etc/fstab
# /dev/mapper/centos-swap swap                    swap    defaults        0 0
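
free should now report zero swap:

free -h | grep -i swap
# Swap:            0B          0B          0B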

Network configuration (choose one of the two methods)

# Skip on Ubuntu; run on CentOS

# Method 1
# systemctl disable --now NetworkManager
# systemctl start network && systemctl enable network

# Method 2
cat > /etc/NetworkManager/conf.d/calico.conf << EOF 
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
systemctl restart NetworkManager

Set up time synchronization

# Server side
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF 
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.1.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd

# Client side
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF 
pool 192.168.1.31 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF

systemctl restart chronyd ; systemctl enable chronyd

# Verify from a client
chronyc sources -v

Configure ulimit

ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
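
The limits.conf entries apply to new login sessions; after logging in again, confirm them:

ulimit -Sn   # expect 655360
ulimit -Hn   # expect 655360
ulimit -l    # expect unlimited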

Configure passwordless SSH login

# apt install -y sshpass
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="192.168.1.31 192.168.1.32 192.168.1.33 192.168.1.34 192.168.1.35"
export SSHPASS=123123
for HOST in $IP;do
     sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
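
A quick loop confirms that key-based login now works on every host (reusing the $IP list above):

for HOST in $IP; do
    ssh -o BatchMode=yes root@$HOST hostname
done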

Add and enable the ELRepo repository

# Skip on Ubuntu; run on CentOS

# Configure the repository for RHEL-8 or CentOS-8
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y 
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo 
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo 

# Install ELRepo for RHEL-7, SL-7, or CentOS-7
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y 
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo 
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo 

# List the packages available from it
yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available

Upgrade the kernel to 4.18 or newer

# Skip on Ubuntu; run on CentOS

# Install the latest kernel
# I chose the mainline stable kernel-ml; install kernel-lt instead if you want the long-term maintenance branch
yum -y --enablerepo=elrepo-kernel  install  kernel-ml

# List the installed kernels
rpm -qa | grep kernel

# Show the default kernel
grubby --default-kernel

# If it is not the new one, set it with
grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo)

# Reboot to take effect
reboot

# Combined one-liner for EL8:
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available -y ; yum  --enablerepo=elrepo-kernel  install kernel-lt -y ; grubby --default-kernel ; reboot 

# Combined one-liner for EL7:
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available -y ; yum  --enablerepo=elrepo-kernel  install  kernel-lt -y ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot 

# Offline variant
yum install -y /root/cby/kernel-lt-*-1.el7.elrepo.x86_64.rpm ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot 
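
After the reboot, confirm the running kernel is the new one:

uname -r
# Expect something >= 4.18, e.g. a 5.4.x kernel-lt or a recent kernel-ml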

Install ipvsadm

# Offline install for CentOS 7
# yum install /root/centos7/ipset-*.el7.x86_64.rpm /root/centos7/lm_sensors-libs-*.el7.x86_64.rpm  /root/centos7/ipset-libs-*.el7.x86_64.rpm /root/centos7/sysstat-*.el7_9.x86_64.rpm  /root/centos7/ipvsadm-*.el7.x86_64.rpm  -y

# For Ubuntu
# apt install ipvsadm ipset sysstat conntrack -y

# For CentOS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

systemctl restart systemd-modules-load.service

lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 180224  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  1 ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs

Tune kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF

sysctl --system

Configure local hosts resolution on all nodes

cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.1.31 k8s-master01
192.168.1.32 k8s-master02
192.168.1.33 k8s-master03
192.168.1.34 k8s-node01
192.168.1.35 k8s-node02
192.168.1.36 lb-vip
EOF
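
The same /etc/hosts must be present on every node; with the passwordless SSH set up earlier it can be pushed out in one loop (a sketch, run from the node where the file was written):

for HOST in 192.168.1.32 192.168.1.33 192.168.1.34 192.168.1.35; do
    scp /etc/hosts root@$HOST:/etc/hosts
done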

Configure the Kubernetes package repository

Overview

Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications. It groups the containers that make up an application into logical units for easy management and discovery.

Because upstream Kubernetes changed the repository layout and its usage, use the new-style configuration below for version 1.28 and later.

Download URL: https://mirrors.aliyun.com/kubernetes/

New-style download URL: https://mirrors.aliyun.com/kubernetes-new/

Configuration

New-style configuration

The new-style kubernetes repository is used somewhat differently from the old one; configure it as follows.

The new repository is split into per-version sub-repositories. This document uses 1.30 as the example; for another version simply substitute the version string in the matching places.

Debian / Ubuntu
  1. Add the mirror to the apt configuration (change the version number to the one you need):
apt-get update && apt-get install -y apt-transport-https
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key |
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" |
    tee /etc/apt/sources.list.d/kubernetes.list
  2. Install the required packages:
apt-get update
apt-get install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet

# To install a specific version
# apt install kubelet=1.28.2-00 kubeadm=1.28.2-00 kubectl=1.28.2-00
CentOS / RHEL / Fedora
  1. Run the following (change the version number to the one you need):
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
EOF
  2. Install the required packages:
yum update
yum install -y kubelet kubeadm kubectl

# To install a specific version
# yum install kubelet-1.28.2-0 kubeadm-1.28.2-0 kubectl-1.28.2-0

systemctl enable kubelet && systemctl start kubelet

# Set SELinux to disabled
sudo setenforce 0
sudo sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

Note: upstream does not provide a repo sync mechanism, so the GPG index check may occasionally fail; in that case install with yum install -y --nogpgcheck kubelet kubeadm kubectl

Old-style configuration

Because upstream Kubernetes changed the repository layout and its usage, the old-style repository only carries releases up to part of the 1.28 series; for newer versions use the new-style configuration above.

Debian / Ubuntu
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
CentOS / RHEL / Fedora
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet

Note: upstream does not provide a repo sync mechanism, so the GPG index check may occasionally fail; in that case install with yum install -y --nogpgcheck kubelet kubeadm kubectl

Configure containerd

# Download the required packages
wget https://mirrors.chenby.cn/https://github.com/containerd/containerd/releases/download/v1.7.16/cri-containerd-cni-1.7.16-linux-amd64.tar.gz
wget https://mirrors.chenby.cn/https://github.com/containernetworking/plugins/releases/download/v1.4.1/cni-plugins-linux-amd64-v1.4.1.tgz

# CentOS 7 needs a newer libseccomp
yum -y install https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm


# Create the directories needed by the CNI plugins
mkdir -p /etc/cni/net.d /opt/cni/bin 
# Unpack the CNI binaries
tar xf cni-plugins-linux-amd64-v*.tgz -C /opt/cni/bin/

# Unpack containerd
tar -xzf cri-containerd-cni-*-linux-amd64.tar.gz -C /

# Create the systemd service file
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

# Kernel modules required by containerd
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

# Load the modules
systemctl restart systemd-modules-load.service

# Kernel parameters required by containerd
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply the kernel parameters
sysctl --system

# Generate containerd's default configuration
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml

# Adjust the containerd configuration
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup
sed -i "s#registry.k8s.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image
sed -i "s#config_path\ \=\ \"\"#config_path\ \=\ \"/etc/containerd/certs.d\"#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep certs.d

# Configure a registry mirror
mkdir /etc/containerd/certs.d/docker.io -pv
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://dockerproxy.com"]
  capabilities = ["pull", "resolve"]
EOF


# Enable at boot and start
systemctl daemon-reload
systemctl enable --now containerd.service
systemctl stop containerd.service
systemctl start containerd.service
systemctl restart containerd.service
systemctl status containerd.service
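
The cri-containerd bundle also ships crictl, which is useful for checking that the CRI endpoint answers. A minimal sketch, assuming the tarball placed crictl on the PATH:

cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF

crictl version
crictl info > /dev/null && echo "CRI endpoint OK"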

High availability: keepalived and haproxy

Install the keepalived and haproxy services

yum -y install keepalived haproxy

Edit the haproxy configuration (identical on all masters)

# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s


frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:9443
 bind 127.0.0.1:9443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master


backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  k8s-master01  192.168.1.31:6443 check
 server  k8s-master02  192.168.1.32:6443 check
 server  k8s-master03  192.168.1.33:6443 check
EOF
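
haproxy can validate the file before any restart:

haproxy -c -f /etc/haproxy/haproxy.cfg
# Configuration file is valid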

Configure keepalived on Master01 (MASTER node)

#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    # Mind the NIC name
    interface eth0 
    mcast_src_ip 192.168.1.31
    virtual_router_id 51
    priority 100
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.36
    }
    track_script {
      chk_apiserver 
} }

EOF

Configure keepalived on Master02 (BACKUP node)

# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    # Mind the NIC name
    interface eth0
    mcast_src_ip 192.168.1.32
    virtual_router_id 51
    priority 80
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.36
    }
    track_script {
      chk_apiserver 
} }

EOF

Configure keepalived on Master03 (BACKUP node)

# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5 
    weight -5
    fall 2
    rise 1

}
vrrp_instance VI_1 {
    state BACKUP
    # Mind the NIC name
    interface eth0
    mcast_src_ip 192.168.1.33
    virtual_router_id 51
    priority 50
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.36
    }
    track_script {
      chk_apiserver 
} }

EOF

Health-check script (on the LB hosts)

cat >  /etc/keepalived/check_apiserver.sh << EOF
#!/bin/bash

err=0
for k in \$(seq 1 3)
do
    check_code=\$(pgrep haproxy)
    if [[ \$check_code == "" ]]; then
        err=\$(expr \$err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ \$err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

# Make the script executable

chmod +x /etc/keepalived/check_apiserver.sh
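
The script can be exercised by hand: with haproxy running it should exit 0; with haproxy stopped it should stop keepalived and exit 1.

/etc/keepalived/check_apiserver.sh; echo $?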

Start the services

systemctl daemon-reload
# Reload the unit files systemd manages; run this after adding or changing a unit file (.service, .socket, etc.) so systemd picks up the new configuration.
systemctl enable --now haproxy.service
# Enable and immediately start haproxy.service, the systemd unit for the haproxy daemon.
systemctl enable --now keepalived.service
# Enable and immediately start keepalived.service, the systemd unit for the keepalived daemon.
systemctl status haproxy.service
# Show the current state of haproxy.service: running or not, enabled or not, and so on.
systemctl status keepalived.service
# Show the current state of keepalived.service: running or not, enabled or not, and so on.

Test the failover

# The VIP should answer ping
[root@k8s-node02 ~]# ping 192.168.1.36

# And accept telnet connections
[root@k8s-node02 ~]# telnet 192.168.1.36 9443

# Shut down the active node and watch the VIP move to a backup
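
To see where the VIP currently lives, check each master's NIC; after stopping haproxy (or keepalived) on the active node, the address should reappear on a backup within a few seconds:

ip addr show eth0 | grep -w 192.168.1.36
# Simulate a failure on the active node:
# systemctl stop haproxy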

Initialize the cluster

# List the images the latest version needs
[root@k8s-master01 ~]# kubeadm config images list --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers
[root@k8s-master01 ~]# 

# Print the default configuration
kubeadm config print init-defaults > kubeadm-init.yaml
# This is the configuration file I actually use
cat > kubeadm.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 72h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.31
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  kubeletExtraArgs:
    # Use master01's IP here
    node-ip: 192.168.1.31,2408:822a:730:af01::7d8
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
    - x.oiox.cn
    - k8s-master01
    - k8s-master02
    - k8s-master03
    - 192.168.1.31
    - 192.168.1.32
    - 192.168.1.33
    - 192.168.1.34
    - 192.168.1.35
    - 192.168.1.36
    - 192.168.1.60
    - 127.0.0.1
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
kind: ClusterConfiguration
kubernetesVersion: 1.30.0
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16,2408:822a:730:af01::/64
  serviceSubnet: 10.96.0.0/16,2408:822a:730:af01::/112
scheduler: {}
# This is the load-balancer address
controlPlaneEndpoint: "192.168.1.36:9443"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
cgroupDriver: systemd
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF



# Run the initialization
[root@k8s-master01 ~]# kubeadm init --config=kubeadm.yaml
[root@k8s-master01 ~]# 



# If you need to start over, reset first
[root@k8s-master01 ~]# kubeadm reset



[root@k8s-master01 ~]# 
[root@k8s-master01 ~]#   mkdir -p $HOME/.kube
[root@k8s-master01 ~]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 ~]# 
[root@k8s-master01 ~]# 

# Use this script to copy the certificate files to the other master nodes
USER=root
CONTROL_PLANE_IPS="192.168.1.32 192.168.1.33"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    # Skip the next line if you are using external etcd
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
done

# On the other masters, move the certificate files into the required directories
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /${USER}/ca.crt /etc/kubernetes/pki/
mv /${USER}/ca.key /etc/kubernetes/pki/
mv /${USER}/sa.pub /etc/kubernetes/pki/
mv /${USER}/sa.key /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
# Skip the next line if you are using external etcd
mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key


# Run on master02 to join it as a control-plane node
cat > kubeadm-join-master-02.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: "192.168.1.32"
    bindPort: 6443
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.1.36:9443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:583ddadd1318dae447c3890aa3a2469c5b00c6775e87102458db07e691c724be"
    # Replace the credentials above with the token and CA hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.1.32,2408:822a:730:af01::fab
EOF

kubeadm join --config=kubeadm-join-master-02.yaml

# Run on master03 to join it as a control-plane node
cat > kubeadm-join-master-03.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: "192.168.1.33"
    bindPort: 6443
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.1.36:9443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:583ddadd1318dae447c3890aa3a2469c5b00c6775e87102458db07e691c724be"
    # Replace the credentials above with the token and CA hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.1.33,2408:822a:730:af01::bea
EOF

kubeadm join --config=kubeadm-join-master-03.yaml


# Run on node01 to join it as a worker node
cat > kubeadm-join-node-01.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.1.36:9443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:583ddadd1318dae447c3890aa3a2469c5b00c6775e87102458db07e691c724be"
    # Replace the credentials above with the token and CA hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.1.34,2408:822a:730:af01::bcf
EOF

kubeadm join --config=kubeadm-join-node-01.yaml

# Run on node02 to join it as a worker node
cat > kubeadm-join-node-02.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.1.36:9443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:583ddadd1318dae447c3890aa3a2469c5b00c6775e87102458db07e691c724be"
    # Replace the credentials above with the token and CA hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.1.35,2408:822a:730:af01::443
EOF

kubeadm join --config=kubeadm-join-node-02.yaml
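
If the kubeadm init output is no longer at hand, the caCertHashes value used in the join files above can be recomputed on master01 from the cluster CA (this is the public-key SHA-256 form kubeadm expects), or kubeadm can print a fresh join command:

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
  openssl rsa -pubin -outform der 2>/dev/null | \
  sha256sum | awk '{print "sha256:"$1}'

# Alternatively, generate a new token together with the hash:
# kubeadm token create --print-join-command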

Check the cluster state

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES           AGE     VERSION
k8s-master01   NotReady   control-plane   2m14s   v1.30.0
k8s-master02   NotReady   control-plane   48s     v1.30.0
k8s-master03   NotReady   control-plane   30s     v1.30.0
k8s-node01     NotReady   <none>          19s     v1.30.0
k8s-node02     NotReady   <none>          9s      v1.30.0
[root@k8s-master01 ~]# 

Install Calico

Adjust the Calico CIDRs

# Download the required yaml file
wget https://mirrors.chenby.cn/https://github.com/projectcalico/calico/blob/master/manifests/calico-typha.yaml

# Make two working copies of the manifest
cp calico-typha.yaml calico.yaml
cp calico-typha.yaml calico-ipv6.yaml

# Edit the settings in each manifest

# vim calico.yaml
# In the calico-config ConfigMap
    "ipam": {
        "type": "calico-ipam",
    },
    - name: IP
      value: "autodetect"

    - name: CALICO_IPV4POOL_CIDR
      value: "172.16.0.0/12"

vim calico-ipv6.yaml
# In the calico-config ConfigMap
    "ipam": {
        "type": "calico-ipam",
        "assign_ipv4": "true",
        "assign_ipv6": "true"
    },
    - name: IP
      value: "autodetect"

    - name: IP6
      value: "autodetect"

    - name: CALICO_IPV4POOL_CIDR
      value: "10.244.0.0/16"

    - name: CALICO_IPV6POOL_CIDR
      value: "2408:822a:730:af01::/64"

    - name: FELIX_IPV6SUPPORT
      value: "true"
      
     # Set the IPv6 VXLAN mode to CrossSubnet
     # When nodes span subnets, pod traffic is VXLAN-encapsulated; note this is supported only from Calico 3.23 on
    - name: CALICO_IPV6POOL_VXLAN
      value: "CrossSubnet"
     # Add this variable to enable NAT outgoing on the IPv6 pool
    - name: CALICO_IPV6POOL_NAT_OUTGOING
      value: "true"


# Without local public IPv6, use calico.yaml
# kubectl apply -f calico.yaml

# With local public IPv6, use calico-ipv6.yaml
kubectl apply -f calico-ipv6.yaml 
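
Once the manifest is applied, the IP pools Calico created can be listed through its CRDs to confirm both families exist (a sketch; calicoctl shows the same information if you have it installed):

kubectl get ippools.crd.projectcalico.org \
  -o custom-columns=NAME:.metadata.name,CIDR:.spec.cidr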

Check the pod status

# Calico initialization is slow; be patient, it takes roughly ten minutes
[root@k8s-master01 ~]# kubectl get pod -A| grep calico
kube-system   calico-kube-controllers-57cf4498-rqhhz   1/1     Running   0          4m1s
kube-system   calico-node-4mbth                        1/1     Running   0          4m1s
kube-system   calico-node-624z2                        1/1     Running   0          4m1s
kube-system   calico-node-646qq                        1/1     Running   0          4m1s
kube-system   calico-node-7m4z8                        1/1     Running   0          4m1s
kube-system   calico-node-889qb                        1/1     Running   0          4m1s
kube-system   calico-typha-7746b44b78-kcgkx            1/1     Running   0          4m1s
[root@k8s-master01 ~]# 

Inspect the cluster

[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES           AGE     VERSION
k8s-master01   Ready    control-plane   10m     v1.30.0
k8s-master02   Ready    control-plane   9m3s    v1.30.0
k8s-master03   Ready    control-plane   8m45s   v1.30.0
k8s-node01     Ready    <none>          8m34s   v1.30.0
k8s-node02     Ready    <none>          8m24s   v1.30.0
[root@k8s-master01 ~]# 
[root@k8s-master01 ~]# kubectl get pod -A
NAMESPACE     NAME                                     READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-57cf4498-rqhhz   1/1     Running   0          93s
kube-system   calico-node-4mbth                        1/1     Running   0          93s
kube-system   calico-node-624z2                        1/1     Running   0          93s
kube-system   calico-node-646qq                        1/1     Running   0          93s
kube-system   calico-node-7m4z8                        1/1     Running   0          93s
kube-system   calico-node-889qb                        1/1     Running   0          93s
kube-system   calico-typha-7746b44b78-kcgkx            1/1     Running   0          93s
kube-system   coredns-7c445c467-kmjd7                  1/1     Running   0          10m
kube-system   coredns-7c445c467-xzhn6                  1/1     Running   0          10m
kube-system   etcd-k8s-master01                        1/1     Running   5          10m
kube-system   etcd-k8s-master02                        1/1     Running   70         9m8s
kube-system   etcd-k8s-master03                        1/1     Running   0          8m50s
kube-system   kube-apiserver-k8s-master01              1/1     Running   5          10m
kube-system   kube-apiserver-k8s-master02              1/1     Running   70         9m8s
kube-system   kube-apiserver-k8s-master03              1/1     Running   0          8m50s
kube-system   kube-controller-manager-k8s-master01     1/1     Running   5          10m
kube-system   kube-controller-manager-k8s-master02     1/1     Running   2          9m8s
kube-system   kube-controller-manager-k8s-master03     1/1     Running   2          8m50s
kube-system   kube-proxy-74c8q                         1/1     Running   0          8m52s
kube-system   kube-proxy-g6mcf                         1/1     Running   0          8m31s
kube-system   kube-proxy-lcrv7                         1/1     Running   0          10m
kube-system   kube-proxy-qbvc8                         1/1     Running   0          8m41s
kube-system   kube-proxy-vxhh9                         1/1     Running   0          9m10s
kube-system   kube-scheduler-k8s-master01              1/1     Running   5          10m
kube-system   kube-scheduler-k8s-master02              1/1     Running   2          9m8s
kube-system   kube-scheduler-k8s-master03              1/1     Running   2          8m50s
[root@k8s-master01 ~]# 

Validate the cluster

Deploy a test pod

cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: docker.io/library/busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

# Check
kubectl  get pod
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          17s

Use the pod to resolve kubernetes in the default namespace

# Find the service name
kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h

# Resolve it
kubectl exec  busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

Test cross-namespace resolution

# List services in all namespaces
kubectl  get svc -A
NAMESPACE     NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes        ClusterIP   10.96.0.1       <none>        443/TCP         76m
kube-system   calico-typha      ClusterIP   10.105.100.82   <none>        5473/TCP        35m
kube-system   coredns-coredns   ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP   8m14s
kube-system   metrics-server    ClusterIP   10.105.60.31    <none>        443/TCP         109s

# Resolve one
kubectl exec  busybox -n default -- nslookup coredns-coredns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local

Name:      coredns-coredns.kube-system
Address 1: 10.96.0.10 coredns-coredns.kube-system.svc.cluster.local
[root@k8s-master01 metrics-server]# 

Every node must be able to reach the kubernetes service on port 443 and the kube-dns service on port 53

telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

 telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.

curl 10.96.0.10:53
curl: (52) Empty reply from server

Pods must be able to reach each other

kubectl get po -owide
NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>

kubectl get po -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-76754ff848-pw4xg   1/1     Running   0          38m     172.25.244.193   k8s-master01   <none>           <none>
calico-node-97m55                          1/1     Running   0          38m     192.168.1.34     k8s-node01     <none>           <none>
calico-node-hlz7j                          1/1     Running   0          38m     192.168.1.32     k8s-master02   <none>           <none>
calico-node-jtlck                          1/1     Running   0          38m     192.168.1.33     k8s-master03   <none>           <none>
calico-node-lxfkf                          1/1     Running   0          38m     192.168.1.35     k8s-node02     <none>           <none>
calico-node-t667x                          1/1     Running   0          38m     192.168.1.31     k8s-master01   <none>           <none>
calico-typha-59d75c5dd4-gbhfp              1/1     Running   0          38m     192.168.1.35     k8s-node02     <none>           <none>
coredns-coredns-c5c6d4d9b-bd829            1/1     Running   0          10m     172.25.92.65     k8s-master02   <none>           <none>
metrics-server-7c8b55c754-w7q8v            1/1     Running   0          3m56s   172.17.125.3     k8s-node01     <none>           <none>

# Exec into busybox and ping a pod on another node

kubectl exec -ti busybox -- sh
/ # ping 192.168.1.34
PING 192.168.1.34 (192.168.1.34): 56 data bytes
64 bytes from 192.168.1.34: seq=0 ttl=63 time=0.358 ms
64 bytes from 192.168.1.34: seq=1 ttl=63 time=0.668 ms
64 bytes from 192.168.1.34: seq=2 ttl=63 time=0.637 ms
64 bytes from 192.168.1.34: seq=3 ttl=63 time=0.624 ms
64 bytes from 192.168.1.34: seq=4 ttl=63 time=0.907 ms

# Successful replies show the pod can communicate across namespaces and across hosts

Create three replicas and confirm they land on different nodes (delete them when done)

cat<<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF

kubectl  get pod 
NAME                               READY   STATUS    RESTARTS   AGE
busybox                            1/1     Running   0          6m25s
nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s

# Delete the nginx deployment
[root@k8s-master01 ~]# kubectl delete deployments nginx-deployment 

Test IPv6

# Create a test service
[root@k8s-master01 ~]# cat > cby.yaml << EOF 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chenby
spec:
  replicas: 3
  selector:
    matchLabels:
      app: chenby
  template:
    metadata:
      labels:
        app: chenby
    spec:
      containers:
      - name: chenby
        image: nginx
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: chenby
spec:
  ipFamilyPolicy: RequireDualStack
  ipFamilies:
  - IPv6
  - IPv4
  type: NodePort
  selector:
    app: chenby
  ports:
  - port: 80
    targetPort: 80
EOF

[root@k8s-master01 ~]# kubectl  apply -f cby.yaml 

# Check the pods
[root@k8s-master01 ~]# kubectl  get pod
NAME                      READY   STATUS    RESTARTS   AGE
chenby-868fd8f687-727hd   1/1     Running   0          23s
chenby-868fd8f687-lrxsr   1/1     Running   0          23s
chenby-868fd8f687-n7f2k   1/1     Running   0          23s
[root@k8s-master01 ~]#

# Check the service
[root@k8s-master01 ~]# kubectl get svc 
NAME         TYPE        CLUSTER-IP                 EXTERNAL-IP   PORT(S)        AGE
chenby       NodePort    2408:822a:730:af01::4466   <none>        80:30921/TCP   2m40s
kubernetes   ClusterIP   10.96.0.1                  <none>        443/TCP        58m
[root@k8s-master01 ~]# 

# Access from inside the cluster; run the test on the node where a pod is scheduled
[root@k8s-node01 ~]# curl -g -6 [2408:822a:730:af01::4466]
[root@k8s-node01 ~]# 

# Access via a node address: inside the cluster run it on the pod's node; from outside the cluster any node works
[root@k8s-node01 ~]# curl -g -6 [2408:822a:730:af01::bcf]:30921
[root@k8s-node01 ~]#

# Test the IPv4 address
[root@k8s-master01 ~]# curl  http://192.168.1.31:30921/
[root@k8s-master01 ~]# 
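
To confirm the pods themselves are dual-stack, print status.podIPs, which should show one IPv4 and one IPv6 address per pod (a sketch):

kubectl get pods -l app=chenby \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.podIPs[*].ip}{"\n"}{end}'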

Install Metrics-Server

# Download
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

# Edit the configuration
vim components.yaml

# Add - --kubelet-insecure-tls to the args here
      - args:
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls


# Swap in a domestic image registry
sed -i "s#registry.k8s.io/metrics-server#registry.aliyuncs.com/google_containers#g" components.yaml
cat components.yaml | grep image


[root@k8s-master01 ~]# kubectl apply -f components.yaml

# Metrics take a moment to appear
[root@k8s-master01 ~]# kubectl  top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01   196m         4%     2270Mi          58%       
k8s-master02   165m         4%     1823Mi          47%       
k8s-master03   162m         4%     1784Mi          46%       
k8s-node01     72m          1%     1492Mi          38%       
k8s-node02     62m          1%     1355Mi          35%       
[root@k8s-master01 ~]# 

Install Helm

wget https://mirrors.huaweicloud.com/helm/v3.14.4/helm-v3.14.4-linux-amd64.tar.gz
tar xvf helm-*-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/
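
Confirm the binary works:

helm version
# version.BuildInfo{Version:"v3.14.4", ...}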

Install the dashboard

# Add the chart repository
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/

# Install with default values
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kube-system

# On my cluster the default install of kubernetes-dashboard-kong failed because port 8444 was already in use
# Install with the command below instead, which disables kong's TLS listener
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system --set kong.admin.tls.enabled=false

Change the dashboard service to NodePort (skip if it already is)

kubectl edit svc  -n kube-system kubernetes-dashboard-kong-proxy
  type: NodePort

Find the port number

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard-kong-proxy -n kube-system
NAME                              TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard-kong-proxy   NodePort   10.96.247.74   <none>        443:32457/TCP   2m29s
[root@k8s-master01 ~]# 

Create a token

cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

kubectl  apply -f dashboard-user.yaml

# Create the token
kubectl -n kube-system create token admin-user

Create a long-lived token

cat > dashboard-user-token.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "admin-user"   
type: kubernetes.io/service-account-token  
EOF

kubectl  apply -f dashboard-user-token.yaml

# Read the token
kubectl get secret admin-user -n kube-system -o jsonpath={".data.token"} | base64 -d

Log in to the dashboard

https://192.168.1.31:32457/

Install ingress

Deploy

wget https://mirrors.chenby.cn/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml

# Switch to a domestic mirror; the docker source is optional
sed -i "s#registry.k8s.io#k8s.dockerproxy.com#g" *.yaml
cat deploy.yaml | grep image

cat > backend.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    app.kubernetes.io/name: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: default-http-backend
  template:
    metadata:
      labels:
        app.kubernetes.io/name: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5 
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    app.kubernetes.io/name: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app.kubernetes.io/name: default-http-backend
EOF

kubectl  apply -f deploy.yaml 
kubectl  apply -f backend.yaml 


cat > ingress-demo-app.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
---
apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.chenby.cn"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx"  
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
EOF

# Wait until everything above is fully created, then run:
kubectl  apply -f ingress-demo-app.yaml 

kubectl  get ingress
NAME               CLASS   HOSTS                            ADDRESS     PORTS   AGE
ingress-host-bar   nginx   hello.chenby.cn,demo.chenby.cn   192.168.1.32   80      7s

Find the ingress ports

# Change the service to NodePort
kubectl edit svc -n ingress-nginx   ingress-nginx-controller
type: NodePort

[root@hello ~/yaml]# kubectl  get svc -A | grep ingress
ingress-nginx          ingress-nginx-controller             NodePort    10.104.231.36    <none>        80:32636/TCP,443:30579/TCP   104s
ingress-nginx          ingress-nginx-controller-admission   ClusterIP   10.101.85.88     <none>        443/TCP                      105s
[root@hello ~/yaml]#

Test ingress

cat >> /etc/hosts <<EOF
192.168.1.31 hello.chenby.cn
192.168.1.31 demo.chenby.cn
EOF

[root@k8s-master01 ~]# curl hello.chenby.cn:32636
[root@k8s-master01 ~]# curl demo.chenby.cn:32636

Install the Grafana / Prometheus / Alertmanager stack

Download the offline package

# Add the official prometheus-community Helm chart repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts

# Pull the chart for offline use
helm pull  prometheus-community/kube-prometheus-stack

# Unpack the downloaded chart
tar xvf kube-prometheus-stack-*.tgz 

Point the images at mirror registries

# Enter the chart directory and rewrite the image addresses
cd kube-prometheus-stack/
sed -i "s#registry.k8s.io#k8s.dockerproxy.com#g" charts/kube-state-metrics/values.yaml
sed -i "s#quay.io#quay.dockerproxy.com#g" charts/kube-state-metrics/values.yaml

sed -i "s#registry.k8s.io#k8s.dockerproxy.com#g" values.yaml
sed -i "s#quay.io#quay.dockerproxy.com#g" values.yaml

Install

# Install the stack
helm install  op  .  --create-namespace --namespace op
NAME: op
LAST DEPLOYED: Sun May  5 12:43:26 2024
NAMESPACE: op
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
  kubectl --namespace op get pods -l "release=op"

Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.

Change the services

# Set these services to NodePort
kubectl  edit svc -n op op-grafana
kubectl  edit svc -n op op-kube-prometheus-stack-prometheus 
        type: NodePort

Verify

[root@hello ~/yaml]# kubectl --namespace op get pods -l "release=op"
[root@hello ~/yaml]# 

# Check the services
[root@hello ~/yaml]# kubectl --namespace op get svc
[root@hello ~/yaml]# 

# Check the pods
[root@hello ~/yaml]# kubectl --namespace op get pod
[root@hello ~/yaml]# 

Access

# URLs
http://192.168.1.31:30833
http://192.168.1.31:31474

user: admin
password: prom-operator

Install kubectl shell completion

yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

About

https://www.oiox.cn/

https://www.oiox.cn/index.php/start-page.html

CSDN, GitHub, Zhihu, OSChina, SegmentFault, Juejin, Jianshu, Huawei Cloud, Alibaba Cloud, Tencent Cloud, Bilibili, Toutiao, Sina Weibo, personal blog

Search for 《小陈运维》 on any of these platforms.

Articles are published primarily on the WeChat public account 《Linux运维交流社区》.

Original statement: this article is published on the Tencent Cloud Developer Community with the author's authorization and may not be reproduced without permission.

For infringement concerns, contact cloudcommunity@tencent.com for removal.

