CentOS 7 binary deployment of k8s v1.20.2, ipvs mode (NFS persistent storage)

I. Configuring NFS storage for the k8s cluster
1. There are two ways to create a PV:
1) The cluster administrator statically creates the PVs the application needs by hand
2) The user creates a PVC and the provisioner component dynamically creates the matching PV
2. Static provisioning
1) The cluster administrator creates the NFS PV
2) The user creates a PVC
3) The user creates the application, using the PVC from step 2
3. Dynamic provisioning
1) The cluster administrator only needs to make sure an NFS-backed StorageClass exists in the cluster
2) The user creates a PVC whose storageClassName points to that NFS StorageClass
3) The user creates the application, using the PVC from step 2
4. Comparison
Compared with static provisioning, dynamic provisioning removes the administrator's per-volume involvement.
Dynamic provisioning requires nfs-client-provisioner and a matching StorageClass to be deployed in the cluster (see the PVC sketch below).
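
A minimal sketch of a dynamically provisioned claim, assuming a StorageClass named managed-nfs-storage (the one created in part IV) already exists; the claim name and size are illustrative:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-dynamic-pvc                # illustrative name
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: managed-nfs-storage    # the NFS StorageClass; the provisioner creates the PV
  resources:
    requests:
      storage: 1Gi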
II. Environment preparation
1. NFS server
# Install nfs-utils on the server node

NFS server in this example: 192.168.112.138
yum install -y nfs-utils
systemctl enable nfs-server rpcbind --now
# Create the NFS shared directory and grant permissions
mkdir -p /data/nfs-volume && chmod -R 777 /data/nfs-volume
# Write the export entry
cat > /etc/exports << EOF
/data/nfs-volume 192.168.112.0/24(rw,sync,no_root_squash)
EOF
# Reload the export configuration
systemctl reload nfs-server
# Verify with the following command
showmount -e 192.168.112.138

2. NFS client

yum install -y nfs-utils
systemctl enable nfs-server rpcbind --now
# Verify with the following commands
showmount -e 192.168.112.138
df -h|tail -1

3. Configure automatic mounting on the client at boot

mkdir /opt/nfs-volume
cat >> /etc/fstab << EOF
192.168.112.138:/data/nfs-volume          /opt/nfs-volume         nfs     soft,timeo=1    0 0
EOF
mount -a   # re-read /etc/fstab and mount everything listed in it
mount -t nfs 192.168.112.138:/data/nfs-volume /opt/nfs-volume   # equivalent one-off manual mount
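
A quick way to confirm the mount is active (a hedged sketch; the test file name is illustrative):

df -hT /opt/nfs-volume              # filesystem type should be nfs/nfs4
mount | grep /opt/nfs-volume        # shows the active mount options
touch /opt/nfs-volume/mount-test    # then run ls /data/nfs-volume/ on the NFS server to confirm the write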

III. Configuring NFS persistent storage in k8s (static provisioning)
1. Create the PV resource; edit the PV manifest

vim nfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv001
spec:
  capacity:
    storage: 5Gi
  # Access modes; the three options are ReadWriteOnce, ReadOnlyMany and ReadWriteMany
  accessModes:
    - ReadWriteMany
  # Reclaim policy, i.e. what happens after the PVC is released; the three options are Retain, Recycle and Delete
  persistentVolumeReclaimPolicy: Retain
  # Class of this PV; a PVC requests a PV of the matching class
  storageClassName: nfs           # adjust to your own class name
  # Back the PV with a directory on the NFS server
  nfs:
    path: /data/nfs-volume               # shared directory created on the NFS server
    server: 192.168.112.138              # NFS server IP address

# Create the PV
[root@k8s-master nfs]# kubectl apply -f nfs-pv.yaml

2. Create the PVC resource; edit the PVC manifest

vim nfs-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc001
  namespace: nfs-pv-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs   # must match the PV's storageClassName
  resources:
    requests:
      storage: 5Gi

# Create the PVC
[root@k8s-master nfs]# kubectl create namespace nfs-pv-pvc   # create the namespace used by the PVC
[root@k8s-master nfs]# kubectl apply -f nfs-pvc.yaml

3. Check the created resources

[root@k8s-master nfs]# kubectl get pv -A
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE
nfs-pv001   5Gi        RWX            Retain           Bound    nfs-pv-pvc/nfs-pvc001   nfs                     10s
[root@k8s-master nfs]# kubectl get pvc -A
NAMESPACE    NAME         STATUS   VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-pv-pvc   nfs-pvc001   Bound    nfs-pv001   5Gi        RWX            nfs            10s

4. Create test resources

vim nginx-apline.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: nfs-pv-pvc
  labels: 
    app: nginx
spec:
  replicas: 2   # adjust the replica count as needed
  selector:
    matchLabels: 
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent    # image pull policy: Always - always pull; IfNotPresent - default, use the local image if present; Never - only use the local image, never pull
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nfs-pvc 
          mountPath: "/usr/share/nginx/html"         
      restartPolicy: Always    # container restart policy; defaults to Always if omitted
      volumes:
      - name: nfs-pvc
        persistentVolumeClaim:
          claimName: nfs-pvc001     # must match the PVC name
---
apiVersion: v1
kind: Service
metadata:
   name: my-svc-nginx-alpine
   namespace: nfs-pv-pvc
spec:
   type: ClusterIP
   selector:
     app: nginx
   ports:
   - protocol: TCP
     port: 80
     targetPort: 80

# Create the Deployment and Service
[root@k8s-master nfs]# kubectl apply -f nginx-apline.yaml
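
To look up the pod and Service IPs used in the verification below (the addresses shown there are from this example environment and will differ in yours), the standard queries are:

kubectl get pod -n nfs-pv-pvc -o wide          # pod IPs
kubectl get svc my-svc-nginx-alpine -n nfs-pv-pvc   # ClusterIP of the Service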

5. Verify

# Create a test file on the NFS server
echo "2021-09-03" > /data/nfs-volume/index.html
# Check from within the k8s cluster
[root@k8s-master nfs]# kubectl get pod -n nfs-pv-pvc -o custom-columns=':metadata.name'
nginx-deployment-799b74d8dc-8nrx9
nginx-deployment-799b74d8dc-9mf9c
[root@k8s-master nfs]# kubectl exec -it nginx-deployment-799b74d8dc-8nrx9 -n nfs-pv-pvc -- cat /usr/share/nginx/html/index.html
2021-09-03
# Verify via the pod IPs
[root@k8s-master nfs]# kubectl get pod -n nfs-pv-pvc -o custom-columns=':status.podIP' |xargs curl
2021-09-03
2021-09-03
# Verify via a single pod IP
[root@k8s-master nfs]# curl 172.16.169.166       
2021-09-03
# Verify via the Service IP
[root@k8s-master nfs]# curl 10.255.73.238        
2021-09-03

IV. Configuring NFS persistent storage in k8s (dynamic provisioning)
1. Fetch the project

git clone https://github.com/kubernetes-retired/external-storage.git
cd ~/external-storage/nfs-client/deploy

2. Modify the configuration files

mkdir my-nfs-client-provisioner && cd my-nfs-client-provisioner
[root@k8s-master my-nfs-client-provisioner]# cat rbac.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

[root@k8s-master my-nfs-client-provisioner]# cat class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME'
parameters:
  archiveOnDelete: "false"

[root@k8s-master my-nfs-client-provisioner]# cat deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.112.138    # change to your NFS server IP
            - name: NFS_PATH
              value: /data/nfs-volume/  # change to your NFS shared directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.112.138     # change to your NFS server IP
            path: /data/nfs-volume      # change to your NFS shared directory

[root@k8s-master my-nfs-client-provisioner]# cat test-claim.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1024Mi
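
The upstream test-claim.yaml uses the legacy volume.beta.kubernetes.io/storage-class annotation. On k8s 1.20 the same request can also be expressed with spec.storageClassName; an equivalent sketch:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage   # must match the name in class.yaml
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1024Mi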

[root@k8s-master my-nfs-client-provisioner]# cat test-pod.yaml 
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: gcr.io/google_containers/busybox:1.24
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

Configuration notes:
kind: ServiceAccount       defines the service account the provisioner uses to request resources from the cluster
kind: ClusterRole          defines a cluster-wide role
kind: ClusterRoleBinding   binds the ClusterRole to the service account
kind: Role                 a namespaced role (here used for the leader-election lock)
kind: RoleBinding          binds the Role to the service account

kubectl apply -f .         apply all of the manifests above

3. Verify

[Screenshot in the original post: the dynamically created PV and PVC]
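
The screenshot is not reproduced here; roughly the same result can be checked with standard kubectl commands (names follow the manifests above):

kubectl get storageclass managed-nfs-storage
kubectl get pod -l app=nfs-client-provisioner   # provisioner pod should be Running
kubectl get pvc test-claim                      # should become Bound to an auto-created PV
kubectl get pv                                  # a PV named pvc-<uid> should appear
# on the NFS server, the dynamically created directory should contain the SUCCESS file written by test-pod
ls /data/nfs-volume/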

4. If no PV is created automatically and the nfs-client-provisioner log shows errors like the following

E0304 06:18:05.352939       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
I0304 06:18:06.365388       1 controller.go:987] provision "default/diss-db-pvc" class "managed-nfs-storage": started
E0304 06:18:06.371904       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
I0304 06:23:09.410514       1 controller.go:987] provision "default/diss-db-pvc" class "managed-nfs-storage": started
E0304 06:23:09.416387       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
I0304 06:23:16.933814       1 controller.go:987] provision "default/diss-db-pvc" class "managed-nfs-storage": started
E0304 06:23:16.937994       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
I0304 06:33:06.365740       1 controller.go:987] provision "default/diss-db-pvc" class "managed-nfs-storage": started
E0304 06:33:06.369275       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference
I0304 06:48:06.365940       1 controller.go:987] provision "default/diss-db-pvc" class "managed-nfs-storage": started
E0304 06:48:06.369685       1 controller.go:1004] provision "default/diss-db-pvc" class "managed-nfs-storage": unexpected error getting claim reference: selfLink was empty, can't make reference

This happens because Kubernetes 1.20 and later disable selfLink.
Workaround:

Add the following flag to the kube-apiserver configuration (kube-apiserver.yaml)
--feature-gates=RemoveSelfLink=false
then restart the kube-apiserver service.
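
Where exactly the flag goes depends on how kube-apiserver is run: in a binary/systemd deployment it is appended to the apiserver start options in the service configuration, while in a kubeadm cluster it is added to the static pod manifest /etc/kubernetes/manifests/kube-apiserver.yaml. A sketch of the static-pod case (only the relevant lines shown; the rest of the manifest is unchanged):

spec:
  containers:
  - command:
    - kube-apiserver
    - --feature-gates=RemoveSelfLink=false   # added line; the kubelet restarts the static pod automatically
    # ... existing flags unchanged ...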