Dynamic Persistent Storage in Kubernetes with GlusterFS

Introduction

This article describes how to use GlusterFS to provide dynamically provisioned PVs for Kubernetes. GlusterFS supplies the underlying storage, while Heketi exposes a RESTful API on top of GlusterFS that makes it much easier to manage. The setup supports all three Kubernetes PV access modes: ReadWriteOnce, ReadOnlyMany, and ReadWriteMany.

Note that access modes are only a capability description and are not enforced. If a PV is used in a way other than what the PVC declares, the storage provider is responsible for any runtime errors. For example, if a PVC's access mode is set to ReadOnlyMany, a pod can still write to the volume after mounting it. To make the volume truly read-only, specify readOnly: true when mounting the claim, as in the sketch below.
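
A minimal sketch of mounting a claim read-only (the pod name, the file name, and the gluster1 claim, which is only created later in the testing section, are illustrative):

cat >readonly-pod-example.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: readonly-test
spec:
  containers:
  - name: web
    image: nginx:alpine
    volumeMounts:
    - name: data
      mountPath: /usr/share/nginx/html
      readOnly: true          # mount read-only inside the container
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: gluster1
      readOnly: true          # bind the claim itself as read-only
EOF
kubectl apply -f readonly-pod-example.yaml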

Installation

Vagrantfile used for this lab

# -*- mode: ruby -*-
# vi: set ft=ruby :

ENV["LC_ALL"] = "en_US.UTF-8"

Vagrant.configure("2") do |config|
  (1..3).each do |i|
    config.vm.define "lab#{i}" do |node|
      node.vm.box = "centos-7.4-docker-17"
      node.ssh.insert_key = false
      node.vm.hostname = "lab#{i}"
      node.vm.network "private_network", ip: "11.11.11.11#{i}"
      node.vm.provision "shell",
        inline: "echo hello from node #{i}"
      node.vm.provider "virtualbox" do |v|
        v.cpus = 2
        v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "3096"]
        file_to_disk = "lab#{i}_vdb.vdi"
        unless File.exist?(file_to_disk)
          # 50GB
          v.customize ['createhd', '--filename', file_to_disk, '--size', 50 * 1024]
        end
        v.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
      end
    end
  end
end
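
To bring the lab up (assuming VirtualBox and the centos-7.4-docker-17 box are available locally), a quick sanity check that the extra 50GB disk is attached might look like this:

vagrant up
# /dev/sdb should show up as an unpartitioned 50G disk on each node
vagrant ssh lab1 -c 'lsblk'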

Environment configuration notes

# Before installing GlusterFS, every node needs the dm_thin_pool kernel module loaded
modprobe dm_thin_pool

# Make the module load automatically at boot
cat >/etc/modules-load.d/glusterfs.conf<<EOF
dm_thin_pool
EOF

# Install glusterfs-fuse
yum install -y glusterfs-fuse
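
To confirm the module is actually loaded on each node, a quick check:

lsmod | grep dm_thin_pool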

Install GlusterFS and Heketi

# Install the heketi client
# https://github.com/heketi/heketi/releases
# Download the matching release from GitHub
wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-client-v7.0.0.linux.amd64.tar.gz
tar xf heketi-client-v7.0.0.linux.amd64.tar.gz
cp heketi-client/bin/heketi-cli /usr/local/bin

# Check the version
heketi-cli -v

# All of the following deployment steps are run from this directory
cd heketi-client/share/heketi/kubernetes

# Deploy GlusterFS in Kubernetes
kubectl create -f glusterfs-daemonset.json

# List the nodes
kubectl get nodes

# Label the nodes that will provide storage
kubectl label node lab1 lab2 lab3 storagenode=glusterfs

# Check the GlusterFS pod status
kubectl get pods -o wide

# Deploy the heketi server
# Configure permissions for the heketi server
kubectl create -f heketi-service-account.json
kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account

# Create the config secret
kubectl create secret generic heketi-config-secret --from-file=./heketi.json

# Bootstrap deployment
kubectl create -f heketi-bootstrap.json

# Check the heketi bootstrap status
kubectl get pods -o wide
kubectl get svc

# Forward a local port to the bootstrap heketi server
HEKETI_BOOTSTRAP_POD=$(kubectl get pods | grep deploy-heketi | awk '{print $1}')
kubectl port-forward $HEKETI_BOOTSTRAP_POD 58080:8080

# Test access
# (run in another terminal)
curl http://localhost:58080/hello

# Configure the GlusterFS topology
# The hostnames/manage fields must match the node names shown by kubectl get nodes
# hostnames/storage specifies the storage network IPs; this lab uses the same IPs as the k8s cluster network
cat >topology.json<<EOF
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "lab1"
              ],
              "storage": [
                "11.11.11.111"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "lab2"
              ],
              "storage": [
                "11.11.11.112"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "lab3"
              ],
              "storage": [
                "11.11.11.113"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": false
            }
          ]
        }
      ]
    }
  ]
}
EOF
export HEKETI_CLI_SERVER=http://localhost:58080
heketi-cli topology load --json=topology.json
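
# (optional check, not part of the original steps) confirm the topology was registered
heketi-cli topology info
heketi-cli node list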

# Use Heketi to create a volume for storing the Heketi database
heketi-cli setup-openshift-heketi-storage
kubectl create -f heketi-storage.json

# Check the status
# Wait until all jobs have completed (status Completed)
# before moving on to the next steps
kubectl get pods
kubectl get job

# Delete the resources created during bootstrapping
kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"

# Deploy the heketi server
kubectl create -f heketi-deployment.json

# Check the heketi server status
kubectl get pods -o wide
kubectl get svc

# View heketi status information
# Forward a local port to the heketi server
HEKETI_POD=$(kubectl get pods | grep heketi | awk '{print $1}')
kubectl port-forward $HEKETI_POD 58080:8080
export HEKETI_CLI_SERVER=http://localhost:58080
heketi-cli cluster list
heketi-cli volume list
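
As an optional sanity check (assuming the GlusterFS pods were deployed by the DaemonSet above), you can also confirm peering and volumes from inside one of the gluster pods:

GLUSTERFS_POD=$(kubectl get pod | grep glusterfs | head -1 | awk '{print $1}')
kubectl exec -ti $GLUSTERFS_POD -- gluster peer status
kubectl exec -ti $GLUSTERFS_POD -- gluster volume list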

Testing

# Create a StorageClass
# Authentication is not enabled, so restuser and restuserkey can be arbitrary values
# (an authenticated variant is sketched at the end of this section)
HEKETI_SERVER=$(kubectl get svc | grep heketi | head -1 | awk '{print $3}')
echo $HEKETI_SERVER
cat >storageclass-glusterfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: gluster-heketi
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://$HEKETI_SERVER:8080"
  restauthenabled: "false"
  restuser: "will"
  restuserkey: "will"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"
EOF
kubectl create -f storageclass-glusterfs.yaml

# Check
kubectl get sc

# Create a PVC to test dynamic provisioning
cat >gluster-pvc-test.yaml<<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster1
  annotations:
    volume.beta.kubernetes.io/storage-class: gluster-heketi
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF
kubectl apply -f gluster-pvc-test.yaml

# Check
kubectl get pvc
kubectl get pv

# Create an nginx pod to test mounting the volume
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: gluster-vol1
      mountPath: /usr/share/nginx/html
  volumes:
  - name: gluster-vol1
    persistentVolumeClaim:
      claimName: gluster1
EOF
kubectl apply -f nginx-pod.yaml

# Check
kubectl get pods -o wide

# Write a test page into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from GlusterFS!!! > /usr/share/nginx/html/index.html'

# Access test: curl the pod IP
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP

# Inspect the file from inside a glusterfs pod on the node
GLUSTERFS_POD=$(kubectl get pod | grep glusterfs | head -1 | awk '{print $1}')
kubectl exec -ti $GLUSTERFS_POD -- /bin/sh
mount | grep heketi
# the vg_*/brick_* path below is specific to this run and will differ in your environment
cat /var/lib/heketi/mounts/vg_56033aa8a9131e84faa61a6f4774d8c3/brick_1ac5f3a0730457cf3fcec6d881e132a2/brick/index.html
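
If Heketi authentication were enabled, the StorageClass would instead reference a secret holding the admin key. A hedged sketch, assuming the admin key configured in heketi.json (the secret and class names here are illustrative, and HEKETI_SERVER is the service IP resolved above):

kubectl create secret generic heketi-admin-secret \
  --type="kubernetes.io/glusterfs" \
  --from-literal=key='<admin key from heketi.json>' \
  --namespace=default
cat >storageclass-glusterfs-auth.yaml<<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-auth
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://$HEKETI_SERVER:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-admin-secret"
  volumetype: "replicate:3"
EOF
kubectl create -f storageclass-glusterfs-auth.yaml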

References