Objectives

  1. Install and deploy a highly available Kubernetes (k8s) cluster

Platform

  1. Physical servers —
  2. Operating system: openEuler 22.03 LTS SP2

Deployment Guide

Cluster topology diagram

Part 1: Deploy the Ceph cluster

Task 1: Preparation

  1. Rename the hostnames
# Change the hostname of 10.10.1.80 to future-k8s-master0
hostnamectl set-hostname future-k8s-master0 && bash
# Change the hostname of 10.10.1.81 to future-k8s-master1
hostnamectl set-hostname future-k8s-master1 && bash
# Change the hostname of 10.10.1.82 to future-k8s-master2
hostnamectl set-hostname future-k8s-master2 && bash
# Change the hostname of 10.10.1.16 to k8s-ceph-node0
hostnamectl set-hostname k8s-ceph-node0 && bash
# Change the hostname of 10.10.1.17 to k8s-ceph-node1
hostnamectl set-hostname k8s-ceph-node1 && bash
# Change the hostname of 10.10.1.18 to k8s-ceph-node2
hostnamectl set-hostname k8s-ceph-node2 && bash
# Change the hostname of 10.10.1.15 to k8s-ceph-node3
hostnamectl set-hostname k8s-ceph-node3 && bash
  1. Pre-installation configuration changes
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state

# Permanently disable SELinux (set to permissive)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config

# Permanently disable swap
swapoff --all
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
cat /etc/fstab

# Add hosts entries
cat >> /etc/hosts << EOF
10.10.1.80 future-k8s-master0
10.10.1.81 future-k8s-master1
10.10.1.82 future-k8s-master2
10.10.1.16 k8s-ceph-node0
10.10.1.17 k8s-ceph-node1
10.10.1.18 k8s-ceph-node2
10.10.1.15 k8s-ceph-node3
10.10.1.83 future-k8s-vip
EOF
# Verify
cat /etc/hosts


# Add the bridge-filtering and kernel-forwarding configuration file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Verify
cat /etc/sysctl.d/k8s.conf
# Load the br_netfilter module
modprobe br_netfilter
# Check that it is loaded
lsmod | grep br_netfilter
# Apply the bridge-filtering and kernel-forwarding settings
sysctl -p /etc/sysctl.d/k8s.conf

# Synchronize time
yum install ntp -y
systemctl start ntpd
systemctl enable ntpd
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Modify the chrony configuration, appending the following
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
allow 10.10.1.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
  1. Install ipset and ipvsadm
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
# Configure how the IPVS kernel modules are loaded
# Add the modules that need to be loaded
echo '#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
' > /etc/sysconfig/modules/ipvs.modules
# Verify
cat /etc/sysconfig/modules/ipvs.modules
# Make the script executable, run it, and check that the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

# Reboot
reboot

After the preparation is complete, all nodes must be rebooted.
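After the nodes come back up, it is worth confirming that the changes above actually survived the reboot. A minimal sanity-check sketch (run on each node; the expected values follow from the settings made above):

```Shell
# Post-reboot sanity check: firewall, SELinux, swap, sysctl, hostname, time sync
systemctl is-active firewalld      # expect: inactive
getenforce                         # expect: Permissive
free -m | grep -i swap             # expect: all swap totals are 0
# If the bridge keys are missing here, br_netfilter was not reloaded after the
# reboot; run modprobe br_netfilter again (or persist it via /etc/modules-load.d/)
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
hostname && grep "$(hostname)" /etc/hosts   # the hostname should resolve via /etc/hosts
chronyc sources -v                 # the time server 10.10.3.70 should be reachable
```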

Task 2: Set up the Python environment

Download Python 2

  1. Install the zlib libraries first; otherwise installing pip will fail (and Python would have to be recompiled)
yum -y install zlib*
  1. Install the GCC package. If GCC is not installed, install it with the following command
yum -y install gcc openssl-devel bzip2-devel
  1. Download Python-2.7.18
cd /usr/src
yum -y install wget tar
wget https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tgz
tar xzf Python-2.7.18.tgz
  1. Before compiling, edit Modules/Setup.dist in the extracted source to uncomment the zlib line
sed -i 's/#zlib zlibmodule.c -I$(prefix)/zlib zlibmodule.c -I$(prefix)/'  Python-2.7.18/Modules/Setup.dist
  1. Build Python-2.7.18 (make altinstall avoids replacing the default python binary at /usr/bin/python)
cd /usr/src/Python-2.7.18
./configure --enable-optimizations
yum install -y make
make altinstall

Do not overwrite or symlink over the original Python binary; doing so can break the system.

  1. Set environment variables
echo "
export PYTHON_HOME=/usr/local/
PATH=\$PATH:\$PYTHON_HOME/bin
" >> /etc/profile
cat /etc/profile
source /etc/profile
  1. Method 1: install pip
curl "https://bootstrap.pypa.io/pip/2.7/get-pip.py" -o "get-pip.py"
python2.7 get-pip.py
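To confirm that the interpreter and pip landed where expected, a quick check (the version numbers reflect what this guide installs):

```Shell
# Verify the altinstall'ed interpreter and pip
python2.7 --version      # expect: Python 2.7.18
which python2.7 pip2     # both should be under /usr/local/bin
pip2 --version           # pip should report that it runs under Python 2.7
```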

Download Ceph

# Download on k8s-ceph-node0
# Method 1: download with pip
pip2 install ceph-deploy
yum install -y ceph ceph-radosgw
# Download on the other nodes
yum install -y ceph ceph-radosgw
# Check that the packages are complete
rpm -qa |egrep -i "ceph|rados|rbd"

Task 3: Deploy the Ceph cluster

  1. admin node

  2. Deploy the Monitor
  3. Create the configuration directory and the configuration file

mkdir /etc/ceph/
touch /etc/ceph/ceph.conf
  1. Generate an FSID for the cluster:
uuidgen
30912204-0c26-413f-8e00-6d55c9c0af03
  1. Create a keyring for the cluster and generate a key for the Monitor service:
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
  1. Create an administrator keyring, generate a client.admin user, and add it to the keyring:
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
  1. Create a bootstrap-osd keyring and add the client.bootstrap-osd user to it:
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
  1. Import the generated keys into ceph.mon.keyring:
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
  1. Generate the monitor map using the hostname, IP address, and FSID:
monmaptool --create --add k8s-ceph-node0 10.10.1.16 --fsid 30912204-0c26-413f-8e00-6d55c9c0af03 /tmp/monmap
  1. Create the mon data directory, named in the form cluster-name-hostname:
mkdir  /var/lib/ceph/mon/ceph-k8s-ceph-node0
  1. Populate the first mon daemon with the monmap and keyring:
ceph-mon --mkfs -i k8s-ceph-node0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
  1. Configure the /etc/ceph/ceph.conf file:
cat /etc/ceph/ceph.conf
################################################
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # the generated FSID
mon initial members = k8s-ceph-node0
mon host = 10.10.1.16
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1

################################################
  1. Since we are operating as root, change ownership to ceph (alternatively, edit the systemd unit to run as root), then start the Monitor
chown  -R ceph:ceph /var/lib/ceph
systemctl start ceph-mon@k8s-ceph-node0.service
systemctl enable ceph-mon@k8s-ceph-node0.service
  1. Confirm that the service started correctly:
ceph -s
yum install -y net-tools
netstat -lntp|grep ceph-mon
  1. Deploy the Manager

Once the ceph-mon service is configured, the ceph-mgr service needs to be configured as well.

  1. Generate an authentication key (ceph-mgr is a custom name):
#10.10.1.16
ceph auth get-or-create mgr.ceph-mgr mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr]
key = AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==

#10.10.1.17
ceph auth get-or-create mgr.ceph-mgr1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr1]
key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
  1. Create the directory that will hold this key
#10.10.1.16
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr
# Store the generated key in this directory, in a file named keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr/keyring
[mgr.ceph-mgr]
key = AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==

#10.10.1.17
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr1
# Store the generated key in this directory, in a file named keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr1/keyring
[mgr.ceph-mgr1]
key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
  1. Start the ceph-mgr service
ceph-mgr -i ceph-mgr
ceph-mgr -i ceph-mgr1
systemctl enable ceph-mgr@k8s-ceph-node0
systemctl enable ceph-mgr@k8s-ceph-node1
# Check that the service started and view the cluster status
ceph -s
# List the modules available in mgr
ceph mgr module ls
  1. Create an OSD
ceph-volume lvm create --data /dev/sda8
# List the current LVM logical volumes
ceph-volume lvm list
# Check the cluster status
ceph -s
  1. Install and configure the Ceph dashboard
  2. Enable the dashboard module

ceph mgr module enable dashboard
  1. Create a self-signed certificate
ceph dashboard create-self-signed-cert
  1. Configure the web login username and password
# Create /etc/ceph/dashboard.key and write the password into it
echo "qishi#09319" >/etc/ceph/dashboard.key
ceph dashboard ac-user-create k8s administrator -i /etc/ceph/dashboard.key
  1. Change the default dashboard port (optional)

The default port is 8443; here it is changed to 18443. After the change, restart mgr for the new port to take effect.

ceph config set mgr mgr/dashboard/server_port 18443
systemctl restart ceph-mgr.target
  1. View the published service address and log in
ceph mgr services

{
    "dashboard": "https://k8s-ceph-node0:8443/"
}

  1. node hosts

  2. Expand the Monitors
  3. Modify the configuration on the master node

vi /etc/ceph/ceph.conf
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # the generated FSID
mon initial members = k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2,k8s-ceph-node3 # hostnames
mon host = 10.10.1.16,10.10.1.17,10.10.1.18,10.10.1.15 # corresponding IPs
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true

[mds.k8s-ceph-node0]
host = k8s-ceph-node0
  1. Distribute the configuration and key files to the other nodes (from the master node)
# Generate an SSH key pair and copy it to the node hosts
ssh-keygen -t rsa
ssh-copy-id 10.10.1.17
ssh-copy-id 10.10.1.18
ssh-copy-id 10.10.1.15
# Copy the configuration and keyrings
scp /etc/ceph/* 10.10.1.17:/etc/ceph/
scp /etc/ceph/* 10.10.1.18:/etc/ceph/
scp /etc/ceph/* 10.10.1.15:/etc/ceph/
  1. Create the ceph directories on the node hosts and set ownership:
mkdir -p  /var/lib/ceph/{bootstrap-mds,bootstrap-mgr,bootstrap-osd,bootstrap-rbd,bootstrap-rgw,mds,mgr,mon,osd}
chown -R ceph:ceph /var/lib/ceph

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-k8s-ceph-node1
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-k8s-ceph-node2
  1. Modify the configuration file on the node hosts, using node1 as an example (the other nodes are similar)
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # the generated FSID
mon initial members = k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2,k8s-ceph-node3 # hostnames
mon host = 10.10.1.16,10.10.1.17,10.10.1.18,10.10.1.15 # corresponding IPs
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true

[mon.k8s-ceph-node1]
mon_addr = 10.10.1.17:6789
host = k8s-ceph-node1
  1. Fetch the cluster keyring and monmap, using node1 as an example (the other nodes are similar)
ceph auth get mon. -o /tmp/monkeyring
ceph mon getmap -o /tmp/monmap
  1. Add a new Monitor using the existing keyring and map, specifying the hostname, using node1 as an example (the other nodes are similar)
sudo -u ceph ceph-mon --mkfs -i k8s-ceph-node1 --monmap /tmp/monmap --keyring /tmp/monkeyring
  1. Start the service, using node1 as an example (the other nodes are similar)
systemctl start ceph-mon@k8s-ceph-node1
systemctl enable ceph-mon@k8s-ceph-node1
# Check the mon status
ceph -s
ceph mon stat
  1. Add OSDs

Copy the bootstrap-osd keyring from the master node where OSDs already exist
scp -p  /var/lib/ceph/bootstrap-osd/ceph.keyring  10.10.1.17:/var/lib/ceph/bootstrap-osd/
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.10.1.18:/var/lib/ceph/bootstrap-osd/
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.10.1.15:/var/lib/ceph/bootstrap-osd/

Add an OSD on the node hosts
ceph-volume lvm create --data /dev/sdb

systemctl enable ceph-osd@k8s-ceph-node1
# Check the status
ceph -s
  1. Add an MDS (using node0 as an example)
# Create the directory
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-k8s-ceph-node0
# Create the keyring
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-k8s-ceph-node0/keyring --gen-key -n mds.k8s-ceph-node0
# Import the key and set its caps
ceph auth add mds.k8s-ceph-node0 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-k8s-ceph-node0/keyring
# Start the daemon manually
ceph-mds --cluster ceph -i k8s-ceph-node0 -m k8s-ceph-node0:6789
chown -R ceph:ceph /var/lib/ceph/mds/
systemctl start ceph-mds@k8s-ceph-node0
systemctl enable ceph-mds@k8s-ceph-node0
# Check that the service is running
ps -ef|grep ceph-mds
# Check the Ceph cluster status
ceph -s
  1. Create CephFS

Create the pools
# Data pool
ceph osd pool create cephfs_data 64
# Metadata pool
ceph osd pool create cephfs_metadata 64
# Create the cephfs file system
ceph fs new cephfs cephfs_metadata cephfs_data
# Check the file system status
ceph fs ls
ceph mds stat
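Optionally, mount the new file system once with the kernel client to confirm the MDS is actually serving it. This is only a quick test sketch that reuses the admin key; a dedicated CephFS client user would be preferable for anything permanent:

```Shell
# Quick CephFS mount test with the kernel client and the admin key
mkdir -p /mnt/cephfs-test
mount -t ceph 10.10.1.16:6789:/ /mnt/cephfs-test \
  -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/cephfs-test
umount /mnt/cephfs-test
```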
  1. Create an RBD pool
# Create the rbd pool
ceph osd pool create rbd-k8s 64 64
# Enable the rbd application on the pool
ceph osd pool application enable rbd-k8s rbd
# Initialize the pool
rbd pool init rbd-k8s
# List the pools
ceph osd lspools
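As a quick sanity check of the new pool, a throwaway image can be created, mapped, and removed again (the image name test-img below is arbitrary):

```Shell
# Create, map, and clean up a throwaway RBD image in the rbd-k8s pool
rbd create rbd-k8s/test-img --size 1G --image-feature layering
rbd info rbd-k8s/test-img
rbd map rbd-k8s/test-img      # prints a /dev/rbdX device on success
rbd showmapped
rbd unmap rbd-k8s/test-img
rbd rm rbd-k8s/test-img
```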

Part 2: Deploy the highly available k8s cluster

Task 1: Preparation (same as for the Ceph cluster)

Task 2: Install Docker

  1. Configure the Docker CE yum repository. Create the docker-ce.repo file and copy the following content into it:
echo '
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg ' > /etc/yum.repos.d/docker-ce.repo

Save and exit the file.

  1. Install Docker CE with the following commands:
yum -y install docker-ce docker-ce-cli containerd.io
# Start docker and enable it at boot
systemctl start docker
systemctl enable docker
# Check the versions
docker -v
docker compose version
  1. Adjust the Docker configuration: set the cgroup driver to systemd, as shown below.
# Write the configuration into daemon.json
echo '{
"exec-opts": ["native.cgroupdriver=systemd"],
"data-root": "/data/docker"
} ' > /etc/docker/daemon.json
# Verify
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info
  1. Create the required directories
cd /data
mkdir cri-dockerd calico dashboard metrics-server script ingress-nginx
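Before moving on, it is worth double-checking that Docker really picked up the daemon.json changes, since kubelet will later be configured for the systemd cgroup driver; a brief check:

```Shell
# Confirm Docker is using the systemd cgroup driver and the new data root
docker info --format '{{.CgroupDriver}}'    # expect: systemd
docker info --format '{{.DockerRootDir}}'   # expect: /data/docker
```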

Task 3: Install cri-dockerd (k8s 1.24 and later)

cd /data/cri-dockerd
# Download the cri-dockerd package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el8.x86_64.rpm
# Install cri-dockerd
rpm -ivh cri-dockerd-0.3.4-3.el8.x86_64.rpm
docker pull registry.aliyuncs.com/google_containers/pause:3.9
# Point the pause image at a domestic mirror; otherwise kubelet cannot pull it and fails to start
sed -i.bak 's|ExecStart=.*$|ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9|g' /usr/lib/systemd/system/cri-docker.service
cat /usr/lib/systemd/system/cri-docker.service
# Start cri-dockerd
systemctl daemon-reload
systemctl start cri-docker.service
systemctl enable cri-docker.service
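kubeadm will talk to cri-dockerd through its CRI socket during initialization, so confirm the socket is actually being served; a minimal check (the crictl line is optional and assumes crictl happens to be installed):

```Shell
# Verify cri-dockerd is running and exposing its CRI socket
systemctl is-active cri-docker.service     # expect: active
ls -l /var/run/cri-dockerd.sock            # the socket kubeadm/kubelet will use
# optional, only if crictl is installed:
# crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version
```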

Task 4: Install the high-availability components

Deploying the highly available cluster requires keepalived and haproxy to provide **master-node high availability; perform these steps on every master node**

  1. Install keepalived and haproxy
yum install keepalived haproxy -y
  1. Back up the keepalived and haproxy configuration files
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
  1. Modify the /etc/keepalived/keepalived.conf file on each master node

    1. future-k8s-master0

      ```Shell
      echo '
      global_defs {
          router_id k8s
      }

      vrrp_script check_haproxy {
          script "killall -0 haproxy"
          interval 3
          weight -2
          fall 10
          rise 2
      }

      vrrp_instance VI_1 {
          state MASTER    # MASTER on the primary node, BACKUP on the others
          interface ens192    # NIC name
          virtual_router_id 51
          priority 250    # priority
          nopreempt       # non-preemptive mode
          advert_int 1
          authentication {
              auth_type PASS
              auth_pass ceb1b3ec013d66163d6ab
          }
          virtual_ipaddress {
              10.10.1.83/24   # virtual IP
          }
          track_script {
              check_haproxy
          }
      }
      ' > /etc/keepalived/keepalived.conf
      cat /etc/keepalived/keepalived.conf
      ```

    2. future-k8s-master1

      ```Shell
      echo '
      global_defs {
          router_id k8s
      }

      vrrp_script check_haproxy {
          script "killall -0 haproxy"
          interval 3
          weight -2
          fall 10
          rise 2
      }

      vrrp_instance VI_1 {
          state BACKUP    # MASTER on the primary node, BACKUP on the others
          interface ens192    # NIC name
          virtual_router_id 51
          priority 200    # priority
          nopreempt       # non-preemptive mode
          advert_int 1
          authentication {
              auth_type PASS
              auth_pass ceb1b3ec013d66163d6ab
          }
          virtual_ipaddress {
              10.10.1.83/24   # virtual IP
          }
          track_script {
              check_haproxy
          }
      }
      ' > /etc/keepalived/keepalived.conf
      cat /etc/keepalived/keepalived.conf
      ```

    3. future-k8s-master2

      ```Shell
      echo '
      global_defs {
          router_id k8s
      }

      vrrp_script check_haproxy {
          script "killall -0 haproxy"
          interval 3
          weight -2
          fall 10
          rise 2
      }

      vrrp_instance VI_1 {
          state BACKUP    # MASTER on the primary node, BACKUP on the others
          interface ens192    # NIC name
          virtual_router_id 51
          priority 150    # priority
          nopreempt       # non-preemptive mode
          advert_int 1
          authentication {
              auth_type PASS
              auth_pass ceb1b3ec013d66163d6ab
          }
          virtual_ipaddress {
              10.10.1.83/24   # virtual IP
          }
          track_script {
              check_haproxy
          }
      }
      ' > /etc/keepalived/keepalived.conf
      cat /etc/keepalived/keepalived.conf
      ```

  2. Modify the `/etc/haproxy/haproxy.cfg` file on each master node (the configuration is identical on all three masters)

      ```Shell
      echo "
      #---------------------------------------------------------------------
      # Global settings
      #---------------------------------------------------------------------
      global
      # to have these messages end up in /var/log/haproxy.log you will
      # need to:
      # 1) configure syslog to accept network log events. This is done
      # by adding the '-r' option to the SYSLOGD_OPTIONS in
      # /etc/sysconfig/syslog
      # 2) configure local2 events to go to the /var/log/haproxy.log
      # file. A line like the following can be added to
      # /etc/sysconfig/syslog
      #
      # local2.* /var/log/haproxy.log
      #
      log 127.0.0.1 local2

      chroot /var/lib/haproxy
      pidfile /var/run/haproxy.pid
      maxconn 4000
      user haproxy
      group haproxy
      daemon

      # turn on stats unix socket
      stats socket /var/lib/haproxy/stats
      #---------------------------------------------------------------------
      # common defaults that all the 'listen' and 'backend' sections will
      # use if not designated in their block
      #---------------------------------------------------------------------
      defaults
      mode http
      log global
      option httplog
      option dontlognull
      option http-server-close
      option forwardfor except 127.0.0.0/8
      option redispatch
      retries 3
      timeout http-request 10s
      timeout queue 1m
      timeout connect 10s
      timeout client 1m
      timeout server 1m
      timeout http-keep-alive 10s
      timeout check 10s
      maxconn 3000
      #---------------------------------------------------------------------
      # kubernetes apiserver frontend which proxys to the backends
      #---------------------------------------------------------------------
      frontend kubernetes-apiserver
      mode tcp
      bind *:16443 # HA listening port, used when initializing the k8s cluster
      option tcplog
      default_backend kubernetes-apiserver
      #---------------------------------------------------------------------
      # round robin balancing between the various backends
      #---------------------------------------------------------------------
      backend kubernetes-apiserver
      mode tcp
      balance roundrobin
      server future-k8s-master0 10.10.1.80:6443 check
      server future-k8s-master1 10.10.1.81:6443 check
      server future-k8s-master2 10.10.1.82:6443 check

      #---------------------------------------------------------------------
      # collection haproxy statistics message
      #---------------------------------------------------------------------
      listen stats
      bind *:1080
      stats auth admin:awesomePassword
      stats refresh 5s
      stats realm HAProxy\ Statistics
      stats uri /admin?stats

      " > /etc/haproxy/haproxy.cfg

      cat /etc/haproxy/haproxy.cfg
      ```

  2. Start the services (start them on each master node in order)

# Start keepalived
systemctl enable keepalived && systemctl start keepalived
# Start haproxy
systemctl enable haproxy && systemctl start haproxy
systemctl status keepalived
systemctl status haproxy
  1. Check the VIP bound on future-k8s-master0

ip add
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens192: mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:9a:eb:48 brd ff:ff:ff:ff:ff:ff
    inet 10.10.1.80/24 brd 10.10.3.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet 10.10.1.83/24 scope global ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe9a:eb48/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
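A rough way to confirm failover works is to stop keepalived on the node that currently holds the VIP and watch the address move; a test sketch (run the stop/start on future-k8s-master0, the checks on the other masters):

```Shell
# Rough VIP failover test
ip addr show ens192 | grep 10.10.1.83   # confirm which node currently holds the VIP
systemctl stop keepalived               # on future-k8s-master0
# on future-k8s-master1 / future-k8s-master2 the VIP should appear within a few seconds
ip addr show ens192 | grep 10.10.1.83
systemctl start keepalived              # restore master0 afterwards
# note: with nopreempt set, the VIP stays on the new holder until that node fails
```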

Task 5: Deploy the k8s cluster

  1. Add the yum repository

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  1. Install kubeadm, kubelet, and kubectl

# Install kubelet, kubeadm, and kubectl
yum install -y kubelet-1.28.0 kubeadm-1.28.0 kubectl-1.28.0 --disableexcludes=kubernetes

# Set the cgroup driver to systemd
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
# Verify
cat /etc/sysconfig/kubelet
# Start kubelet and enable it at boot
systemctl start kubelet.service
systemctl enable kubelet.service

# Check the versions
kubeadm version
kubelet --version
kubectl version
  1. Initialize the k8s cluster (on the future-k8s-master0 node)

    Option 1: initialize with a configuration file
    1. Export the default configuration file (optional)
    kubeadm config print init-defaults > kubeadm-config.yaml
    1. The configuration file
    echo '
    apiVersion: kubeadm.k8s.io/v1beta3
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.10.1.83 # virtual IP
      bindPort: 6443
    nodeRegistration:
      criSocket: unix:///var/run/cri-dockerd.sock
    ---
    apiServer:
      certSANs: # master node names and addresses
      - future-k8s-master0
      - future-k8s-master1
      - future-k8s-master2
      - future-k8s-vip
      - 10.10.1.80
      - 10.10.1.81
      - 10.10.1.82
      - 10.10.1.83
      - 127.0.0.1
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: "future-k8s-vip:16443" # virtual IP and the HA port configured earlier
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: 1.28.0
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    ' > /data/script/kubeadm-config.yaml
    cat /data/script/kubeadm-config.yaml
    1. Initialize the cluster
    kubeadm init --config kubeadm-config.yaml --upload-certs

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    Option 2: initialize with command-line flags
    1. Deploy the master node: run the following on 10.10.1.80 to initialize it
    # --control-plane-endpoint points at the virtual IP and the HA port (16443)
    kubeadm init \
    --apiserver-advertise-address=10.10.1.80 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.28.0 \
    --control-plane-endpoint=future-k8s-vip:16443 \
    --service-cidr=10.96.0.0/12 \
    --pod-network-cidr=10.244.0.0/16 \
    --cri-socket=unix:///var/run/cri-dockerd.sock \
    --ignore-preflight-errors=all


    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    1. Configure passwordless SSH
    # Generate a key pair on 10.10.1.80 and copy it to the other master nodes
    ssh-keygen -t rsa
    ssh-copy-id 10.10.1.81
    ssh-copy-id 10.10.1.82
    1. Copy the certificates from 10.10.1.80 to the other master nodes
    # Create the certificate directories on the other master nodes
    cd /root && mkdir -p /etc/kubernetes/pki/etcd &&mkdir -p ~/.kube/

    # Copy the certificates from future-k8s-master0 to future-k8s-master1
    scp /etc/kubernetes/pki/ca.crt 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.1.81:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.1.81:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.1.81:/etc/kubernetes/pki/etcd/

    # Copy the certificates from future-k8s-master0 to future-k8s-master2
    scp /etc/kubernetes/pki/ca.crt 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.1.82:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.1.82:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.1.82:/etc/kubernetes/pki/etcd/
  2. Initialize the other master nodes
 kubeadm join future-k8s-vip:16443 --token yjphdh.guefcomqw3am4ask \
--discovery-token-ca-cert-hash sha256:ed44c7deada0ea0fe5a54212ab4e5aa6fc34672ffe2a2c87a31ba73306e75c21 \
--control-plane --certificate-key 4929b83577eafcd5933fc0b6506cb6d82e7bc481751e442888c4c2b32b5d0c9c --cri-socket=unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
  1. Initialize the node (worker) nodes
kubeadm join future-k8s-vip:16443 --token yjphdh.guefcomqw3am4ask \
--discovery-token-ca-cert-hash sha256:ed44c7deada0ea0fe5a54212ab4e5aa6fc34672ffe2a2c87a31ba73306e75c21 --cri-socket=unix:///var/run/cri-dockerd.sock
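The bootstrap token and the uploaded certificate key in the join commands above expire (the token after 24 hours, the certificate key after 2 hours). If a node is added later, the join command can be regenerated on an existing master; a sketch:

```Shell
# Regenerate a worker join command with a fresh token
kubeadm token create --print-join-command
# For an additional control-plane node, re-upload the certs and note the new key
kubeadm init phase upload-certs --upload-certs
# then append to the printed join command:
#   --control-plane --certificate-key <key printed above>
# and keep the --cri-socket=unix:///var/run/cri-dockerd.sock flag used in this guide
```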
  1. Allow the master nodes to schedule Pods (optional)

By default Kubernetes does not schedule Pods onto master nodes. To use the k8s masters as worker nodes as well, remove the taint to enable scheduling.

# Check the default taint
kubectl describe node future-k8s-master2 |grep Taints

Taints: node-role.kubernetes.io/control-plane:NoSchedule

# Remove the taint
kubectl taint nodes future-k8s-master2 node-role.kubernetes.io/control-plane-

Add the worker label

# Add the worker label
kubectl label nodes future-k8s-master2 node-role.kubernetes.io/worker=
# Remove the worker label
kubectl label nodes future-k8s-master2 node-role.kubernetes.io/worker-

Task 6: Install the network plugin (master)

Install Calico

mkdir /data/calico
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# Edit calico.yaml and find CALICO_IPV4POOL_CIDR
vi calico.yaml
############## change ###################
value: "10.244.0.0/16"
############## change ###################
# Install calico on the master node
kubectl apply -f calico.yaml

Check the node status

# List all nodes
kubectl get nodes
kubectl get nodes -o wide
# Check cluster component health
kubectl get cs
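Once all nodes report Ready, a quick in-cluster DNS lookup confirms that Calico and CoreDNS are working together; a throwaway test pod sketch:

```Shell
# In-cluster DNS smoke test: should resolve kubernetes.default via CoreDNS
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default
# the output should include the cluster service IP 10.96.0.1
```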

Task 7: Install nginx as a test

# Create an nginx deployment
kubectl create deployment nginx --image=nginx
# Expose port 80
kubectl expose deployment nginx --port=80 --type=NodePort
# Check the pod status
kubectl get pod
# Check the service status
kubectl get service
##########################################################################
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d1h
nginx NodePort 10.98.221.224 <none> 80:32743/TCP 23s
##########################################################################
# Access the page to test (use the NodePort reported by the service status above)
http://10.10.1.80:32743/

Task 8: Install the Dashboard UI

  1. Download the yaml file
# Create a directory for it
mkdir dashboard
cd dashboard/
# version 2.7
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
  1. Modify the yaml file
vi recommended.yaml
# Set the replica count to 2
################# changed content #######################
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 32009 # add this line; mind the indentation
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort # add this line; mind the indentation
################# changed content #######################
  1. Apply it, then check the pod and svc
# Install
kubectl apply -f recommended.yaml
# Check the pod and svc
kubectl get pod,svc -o wide -n kubernetes-dashboard
#########################################################
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/dashboard-metrics-scraper-5cb4f4bb9c-mg569 0/1 ContainerCreating 0 9s <none> node1 <none> <none>
pod/kubernetes-dashboard-6967859bff-2968p 0/1 ContainerCreating 0 9s <none> node1 <none> <none>

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/dashboard-metrics-scraper ClusterIP 10.100.129.191 <none> 8000/TCP 9s k8s-app=dashboard-metrics-scraper
service/kubernetes-dashboard NodePort 10.106.130.53 <none> 443:31283/TCP 9s k8s-app=kubernetes-dashboard
########################################################

Access the Dashboard using the NodePort reported for the svc above.

  1. Create a Dashboard service account
# Create an admin-user service account and bind it to the cluster
vi dashboard-adminuser.yaml
################## content ####################
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

---
# Create a secret to obtain a long-lived bearer token for the service account
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
################## content ####################

# Apply it
kubectl apply -f dashboard-adminuser.yaml
  1. Logging in

Option 1: obtain a long-lived token

# Save it into the admin-user.token file under /data/dashboard/
cd /data/dashboard/
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token

Script for obtaining a long-lived token

#!/bin/bash
# Author: 云
############# Description #############
:<<!
Obtains a long-lived token
and stores it in the admin-user.token file
!
############# Description #############
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token

echo -e "\033[1;32mToken created; see the admin-user.token file\033[m"

Option 2: log in with a Kubeconfig file

# Define the token variable
DASH_TOKEN=$(kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d)
# Set the kubeconfig cluster entry
kubectl config set-cluster kubernetes --server=https://10.10.1.80:6443 --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig user entry
kubectl config set-credentials admin-user --token=$DASH_TOKEN --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig context entry
kubectl config set-context admin-user@kubernetes --cluster=kubernetes --user=admin-user --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig current context
kubectl config use-context admin-user@kubernetes --kubeconfig=/root/dashbord-admin.conf

Copy the generated dashbord-admin.conf file to your local machine; when logging in, choose the Kubeconfig option and select this kubeconfig file.

Task 9: Install metrics-server

Download the deployment manifest

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml

Modify the Deployment section of the yaml file

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls # added
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.4 # changed
        imagePullPolicy: IfNotPresent

# Install
kubectl apply -f metrics-server-components.yaml

Check the metrics-server pod status

kubectl get pods --all-namespaces | grep metrics

After a short wait, the various monitoring graphs should display successfully.
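The metrics API can also be checked from the command line once the metrics-server pod is Running; a quick check:

```Shell
# Verify the metrics API is serving data (may take a minute after the pod starts)
kubectl top nodes
kubectl top pods -A
kubectl get apiservices | grep metrics   # v1beta1.metrics.k8s.io should report Available=True
```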

Task 10: kubectl command auto-completion

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
bash

Task 11: Install the ingress-nginx controller

# Download the yaml file
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# Change the image addresses in the yaml file
##################### changed content ######################
willdockerhub/ingress-nginx-controller:v1.0.0
hzde0128/kube-webhook-certgen:v1.0
##################### changed content ######################
# Change the Deployment to a DaemonSet
# Switch the network mode to host network
##################### changed content ######################
template:
  spec:
    hostNetwork: true
    dnsPolicy: ClusterFirstWithHostNet
    tolerations: # with this toleration it can also be deployed on the control-plane nodes
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    nodeSelector:
      kubernetes.io/os: linux
      custem/ingress-controller-ready: 'true'
    containers:
    - name: controller
##################### changed content ######################
# Label the worker nodes (required)
kubectl label nodes future-k8s-master0 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-master1 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-master2 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node3 custem/ingress-controller-ready=true

# Install
kubectl apply -f deploy.yaml

# Check the status
kubectl get pods -n ingress-nginx
################ status ##################
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-2lz4v 0/1 Completed 0 5m46s
ingress-nginx-admission-patch-c6896 0/1 Completed 0 5m46s
ingress-nginx-controller-7575fb546-q29qn 1/1 Running 0 5m46s
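To confirm the controller actually routes traffic, the nginx Service from Task 7 can be exposed through a throwaway Ingress. The hostname nginx.test.local below is a placeholder assumption; point it at any node running the controller (for example via a local hosts entry):

```Shell
# Smoke-test Ingress for the nginx service created in Task 7
echo '
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-test
spec:
  ingressClassName: nginx
  rules:
  - host: nginx.test.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx
            port:
              number: 80
' > /data/ingress-nginx/nginx-test-ingress.yaml
kubectl apply -f /data/ingress-nginx/nginx-test-ingress.yaml
# with hostNetwork enabled, the controller listens on port 80 of the labeled nodes
curl -H "Host: nginx.test.local" http://10.10.1.80/
```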

Task 12: Configure a Dashboard proxy

echo '
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: k8s-dashboard
  namespace: kubernetes-dashboard
  labels:
    ingress: k8s-dashboard
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: / # rewrite the path
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true" # redirect http to https
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  rules:
  - host: k8s.yjs.51xueweb.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
' > /data/dashboard/dashboard-ingress.yaml
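The manifest above only writes the file; it still has to be applied, and k8s.yjs.51xueweb.cn has to resolve to a node running the ingress controller (via DNS or a client-side hosts entry). A sketch of the remaining steps:

```Shell
# Apply the Dashboard ingress and confirm it was admitted
kubectl apply -f /data/dashboard/dashboard-ingress.yaml
kubectl get ingress -n kubernetes-dashboard
# on the client, map the name to a master IP if DNS is not set up, e.g.:
#   10.10.1.80  k8s.yjs.51xueweb.cn
curl -k -H "Host: k8s.yjs.51xueweb.cn" https://10.10.1.80/
```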

Part 3: Connect the k8s cluster to the Ceph cluster

Task 1: Install the Ceph client (ceph-common)

Install ceph-common on every node of the k8s cluster

yum install ceph-common -y

Task 2: Synchronize the Ceph cluster configuration files

Synchronize the Ceph cluster's /etc/ceph/{ceph.conf,ceph.client.admin.keyring} files to all k8s nodes

# Configure passwordless SSH
ssh-keygen -t rsa
ssh-copy-id 10.10.1.80
ssh-copy-id 10.10.1.81
ssh-copy-id 10.10.1.82

# Copy the files
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.80:/etc/ceph
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.81:/etc/ceph
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.82:/etc/ceph
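With the configuration and admin keyring in place, every k8s node should be able to reach the Ceph cluster directly; a quick check run on any k8s node:

```Shell
# ceph-common on the k8s node should now reach the cluster with the admin keyring
ceph -s
ceph osd lspools    # the rbd-k8s pool created earlier should be listed
```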

Task 3: Deploy ceph-csi (using RBD)

  1. Download the ceph-csi component (on one k8s master node)
# Download the release
wget https://github.com/ceph/ceph-csi/archive/refs/tags/v3.9.0.tar.gz
# Rename and extract it
mv v3.9.0.tar.gz ceph-csi-v3.9.0.tar.gz
tar -xzf ceph-csi-v3.9.0.tar.gz
# Enter the directory
cd ceph-csi-3.9.0/deploy/rbd/kubernetes
mkdir -p /data/cephfs/csi
# Copy the manifests into the csi directory, six files in total
cp * /data/cephfs/csi
  1. Pull the images required by the csi components
# List the required images
grep image csi-rbdplugin-provisioner.yaml
grep image csi-rbdplugin.yaml

Pull the required images on all k8s nodes

cd /data/script
./pull-images.sh registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
./pull-images.sh registry.k8s.io/sig-storage/csi-resizer:v1.8.0
./pull-images.sh registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
docker pull quay.io/cephcsi/cephcsi:v3.9.0
./pull-images.sh registry.k8s.io/sig-storage/csi-attacher:v4.3.0
./pull-images.sh registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
  1. Create the cephfs namespace
echo '
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: cephfs
  name: cephfs
' > ceph-namespace.yaml

# Apply it
kubectl apply -f ceph-namespace.yaml
  1. Create csi-rbd-secret.yaml, the secret used to connect to the Ceph cluster
echo '
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: cephfs
stringData:
  adminID: admin
  adminKey: AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
  userID: admin
  userKey: AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
' > csi-rbd-secret.yaml

# Apply it
kubectl apply -f csi-rbd-secret.yaml
  1. Create ceph-config-map.yaml
echo '
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # the generated FSID
    mon initial members = k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2 # hostnames
    mon host = 10.10.1.16,10.10.1.17,10.10.1.18 # corresponding IPs
    public network = 10.10.1.0/24
    auth cluster required = cephx
    auth service required = cephx
    auth client required = cephx
    osd journal size = 1024
    osd pool default size = 3
    osd pool default min size = 2
    osd pool default pg num = 333
    osd pool default pgp num = 333
    osd crush chooseleaf type = 1
    [mon]
    mon allow pool delete = true

    [mds.k8s-ceph-node0]
    host = k8s-ceph-node0
  keyring: |
metadata:
  name: ceph-config
  namespace: cephfs
' > ceph-config-map.yaml

# Apply it
kubectl apply -f ceph-config-map.yaml
  1. Modify csi-config-map.yaml with the connection information for the Ceph cluster
echo '
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
  namespace: cephfs
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  config.json: |-
    [{"clusterID":"30912204-0c26-413f-8e00-6d55c9c0af03","monitors":["10.10.1.16:6789","10.10.1.17:6789","10.10.1.18:6789"]}]
' > csi-config-map.yaml
  1. Modify the csi component configuration files

    1. In all of the yaml files copied into /data/cephfs/csi, change the namespace from default to cephfs

    ```Shell
    cd /data/cephfs/csi
    sed -i "s/namespace: default/namespace: cephfs/g" $(grep -rl "namespace: default" ./)
    sed -i -e "/^kind: ServiceAccount/{N;N;a\ namespace: cephfs}" $(egrep -rl "^kind: ServiceAccount" ./)
    ```


    2. Comment out the kms sections in `csi-rbdplugin-provisioner.yaml` and `csi-rbdplugin.yaml`

      > \# - name: KMS_CONFIGMAP_NAME
      >
      > \#   value: encryptionConfig
      >
      > \# - name: ceph-csi-encryption-kms-config
      >
      > \#   configMap:
      >
      > \#     name: ceph-csi-encryption-kms-config

      ```Shell
      # Apply the manifests to install the csi components
      kubectl apply -f csi-config-map.yaml
      kubectl apply -f csi-nodeplugin-rbac.yaml
      kubectl apply -f csidriver.yaml
      kubectl apply -f csi-provisioner-rbac.yaml
      kubectl apply -f csi-rbdplugin-provisioner.yaml
      kubectl apply -f csi-rbdplugin.yaml
      ```
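Before creating the StorageClass, confirm that the provisioner and nodeplugin pods come up in the cephfs namespace; a quick check (the app labels follow the upstream ceph-csi manifests):

```Shell
# All csi-rbdplugin pods should reach Running before continuing
kubectl get pods -n cephfs -o wide
# if a pod is stuck, its events usually point at a missing image or secret
kubectl describe pod -n cephfs -l app=csi-rbdplugin
```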

Task 4: Create the StorageClass

echo '
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    k8s.kuboard.cn/storageType: cephfs_provisioner
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  # fsName: cephfs (used for the cephfs mode)
  clusterID: 30912204-0c26-413f-8e00-6d55c9c0af03
  pool: rbd-k8s
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: cephfs
  csi.storage.k8s.io/fstype: xfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
mountOptions:
- discard
' > storageclass.yaml

# Apply it
kubectl apply -f storageclass.yaml

Task 5: Create a PVC

echo '
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
  namespace: cephfs
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
' > pvc.yaml

# Apply it
kubectl apply -f pvc.yaml
# Check whether the PVC was created successfully
kubectl get pvc -n cephfs
# Check whether the PV was created successfully
kubectl get pv -n cephfs

# Check whether an image was created in the rbd-k8s pool on the Ceph cluster
rbd ls -p rbd-k8s

Task 6: Create a pod to test and verify

echo '
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-pod
  namespace: cephfs
spec:
  containers:
  - name: web-server
    image: nginx:latest
    volumeMounts:
    - name: mypvc
      mountPath: /var/lib/www/html
  volumes:
  - name: mypvc
    persistentVolumeClaim:
      claimName: rbd-pvc
      readOnly: false
' > pod.yaml

# Apply it
kubectl apply -f pod.yaml
# Enter the container and check the mount
kubectl exec -it csi-rbd-demo-pod -n cephfs -- bash
lsblk -l|grep rbd
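Finally, a small write/read check inside the pod proves the RBD-backed volume is writable, and the backing image can be inspected from the Ceph side; a sketch:

```Shell
# Inside the csi-rbd-demo-pod shell opened above: the PVC is mounted at /var/lib/www/html
df -h /var/lib/www/html
echo "hello from k8s" > /var/lib/www/html/test.txt && cat /var/lib/www/html/test.txt
exit
# Back on a Ceph node: the image backing the PV appears in the pool
rbd ls -p rbd-k8s
rbd du -p rbd-k8s
```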