Browse Source

Currently can create and remove volumes, but resizing and snapshots are not supported yet. (branch: allow-etcd-address-option)

22 changed files with 1808 additions and 0 deletions
@@ -0,0 +1,3 @@
vitastor-csi
go.sum
Dockerfile
@@ -0,0 +1,32 @@
# Compile stage
FROM golang:buster AS build

ADD go.mod /app/
RUN cd /app; CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go mod download -x
ADD . /app
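# Note: the Go sources use Allman-style braces, which the Go compiler rejects;
# the perl pass below joins each standalone opening brace onto the previous
# line (and rejoins "} else") so that the code compiles.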
RUN perl -i -e '$/ = undef; while(<>) { s/\n\s*(\{\s*\n)/$1\n/g; s/\}(\s*\n\s*)else\b/$1} else/g; print; }' `find /app -name '*.go'`
RUN cd /app; CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o vitastor-csi

# Final stage
FROM debian:buster

LABEL maintainers="Vitaliy Filippov <vitalif@yourcmc.ru>"
LABEL description="Vitastor CSI Driver"

ENV NODE_ID=""
ENV CSI_ENDPOINT=""

RUN apt-get update && \
    apt-get install -y wget && \
    wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg && \
    (echo deb http://vitastor.io/debian buster main > /etc/apt/sources.list.d/vitastor.list) && \
    (echo deb http://deb.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list) && \
    (echo "APT::Install-Recommends false;" > /etc/apt/apt.conf) && \
    apt-get update && \
    apt-get install -y e2fsprogs xfsprogs vitastor kmod && \
    apt-get clean && \
    (echo options nbd nbds_max=128 > /etc/modprobe.d/nbd.conf)

COPY --from=build /app/vitastor-csi /bin/

ENTRYPOINT ["/bin/vitastor-csi"]
@@ -0,0 +1,9 @@
VERSION ?= v0.6.3

all: build push

build:
	@docker build --rm -t vitalif/vitastor-csi:$(VERSION) .

push:
	@docker push vitalif/vitastor-csi:$(VERSION)
@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: vitastor-system
@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: ConfigMap
data:
  vitastor.conf: |-
    {"etcd_address":"http://192.168.7.2:2379","etcd_prefix":"/vitastor"}
metadata:
  namespace: vitastor-system
  name: vitastor-config
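(This vitastor.conf is mounted at /etc/vitastor in both the node-plugin DaemonSet and the provisioner Deployment below; the driver falls back to its etcd_address/etcd_prefix whenever a StorageClass doesn't set etcdUrl/etcdPrefix explicitly — see GetConnectionParams() further down.)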
@@ -0,0 +1,37 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  # allow reading the Vault token and connection options from the tenant namespaces
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: vitastor-csi-nodeplugin
    namespace: vitastor-system
roleRef:
  kind: ClusterRole
  name: vitastor-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,72 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin-psp
spec:
  allowPrivilegeEscalation: true
  allowedCapabilities:
    - 'SYS_ADMIN'
  fsGroup:
    rule: RunAsAny
  privileged: true
  hostNetwork: true
  hostPID: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
      readOnly: false
    - pathPrefix: '/run/mount'
      readOnly: false
    - pathPrefix: '/sys'
      readOnly: false
    - pathPrefix: '/lib/modules'
      readOnly: true
    - pathPrefix: '/var/lib/kubelet/pods'
      readOnly: false
    - pathPrefix: '/var/lib/kubelet/plugins/csi.vitastor.io'
      readOnly: false
    - pathPrefix: '/var/lib/kubelet/plugins_registry'
      readOnly: false
    - pathPrefix: '/var/lib/kubelet/plugins'
      readOnly: false

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin-psp
rules:
  - apiGroups: ['policy']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['vitastor-csi-nodeplugin-psp']

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-nodeplugin-psp
subjects:
  - kind: ServiceAccount
    name: vitastor-csi-nodeplugin
    namespace: vitastor-system
roleRef:
  kind: Role
  name: vitastor-csi-nodeplugin-psp
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,140 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  namespace: vitastor-system
  name: csi-vitastor
spec:
  selector:
    matchLabels:
      app: csi-vitastor
  template:
    metadata:
      namespace: vitastor-system
      labels:
        app: csi-vitastor
    spec:
      serviceAccountName: vitastor-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      priorityClassName: system-node-critical
      # when using e.g. a Rook-orchestrated cluster, where the mons' FQDNs are
      # resolved through a k8s service, set the DNS policy to cluster-first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access the unix domain
          # socket created by the privileged CSI driver container.
          securityContext:
            privileged: true
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.vitastor.io/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-vitastor
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: vitalif/vitastor-csi:v0.6.3
          args:
            - "--node=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          ports:
            - containerPort: 9898
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /run/mount
              name: host-mount
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: vitastor-config
              mountPath: /etc/vitastor
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
        - name: liveness-probe
          securityContext:
            privileged: true
          image: quay.io/k8scsi/livenessprobe:v1.1.0
          args:
            - "--csi-address=$(CSI_ENDPOINT)"
            - "--health-port=9898"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi.vitastor.io
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: vitastor-config
          configMap:
            name: vitastor-config
@@ -0,0 +1,102 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: vitastor-system
  name: vitastor-csi-provisioner

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: vitastor-csi-provisioner
    namespace: vitastor-system
roleRef:
  kind: ClusterRole
  name: vitastor-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "update", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: vitastor-csi-provisioner-role-cfg
  namespace: vitastor-system
subjects:
  - kind: ServiceAccount
    name: vitastor-csi-provisioner
    namespace: vitastor-system
roleRef:
  kind: Role
  name: vitastor-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,60 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  namespace: vitastor-system
  name: vitastor-csi-provisioner-psp
spec:
  allowPrivilegeEscalation: true
  allowedCapabilities:
    - 'SYS_ADMIN'
  fsGroup:
    rule: RunAsAny
  privileged: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'hostPath'
  allowedHostPaths:
    - pathPrefix: '/dev'
      readOnly: false
    - pathPrefix: '/sys'
      readOnly: false
    - pathPrefix: '/lib/modules'
      readOnly: true

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: vitastor-system
  name: vitastor-csi-provisioner-psp
rules:
  - apiGroups: ['policy']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['vitastor-csi-provisioner-psp']

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: vitastor-csi-provisioner-psp
  namespace: vitastor-system
subjects:
  - kind: ServiceAccount
    name: vitastor-csi-provisioner
    namespace: vitastor-system
roleRef:
  kind: Role
  name: vitastor-csi-provisioner-psp
  apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,159 @@
---
kind: Service
apiVersion: v1
metadata:
  namespace: vitastor-system
  name: csi-vitastor-provisioner
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-vitastor-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680

---
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: vitastor-system
  name: csi-vitastor-provisioner
spec:
  replicas: 3
  selector:
    matchLabels:
      app: csi-vitastor-provisioner
  template:
    metadata:
      namespace: vitastor-system
      labels:
        app: csi-vitastor-provisioner
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-vitastor-provisioner
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: vitastor-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--retry-interval-start=500ms"
            - "--leader-election=true"
            # set it to true to use topology based provisioning
            - "--feature-gates=Topology=false"
            # if fstype is not specified in storageclass, ext4 is default
            - "--default-fstype=ext4"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.0.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.1.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-vitastor
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          image: vitalif/vitastor-csi:v0.6.3
          args:
            - "--node=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: vitastor-config
              mountPath: /etc/vitastor
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: vitastor-config
          configMap:
            name: vitastor-config
@@ -0,0 +1,11 @@
---
# if Kubernetes version is less than 1.18 change
# apiVersion to storage.k8s.io/v1beta1
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  namespace: vitastor-system
  name: csi.vitastor.io
spec:
  attachRequired: true
  podInfoOnMount: false
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-vitastor-pvc
spec:
  storageClassName: vitastor-csi-storage-class
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
@@ -0,0 +1,19 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  namespace: vitastor-system
  name: vitastor-csi-storage-class
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.vitastor.io
volumeBindingMode: Immediate
parameters:
  etcdVolumePrefix: ""
  poolId: "1"
  # you can choose another configuration file if it is present in the config map
  #configPath: "/etc/vitastor/vitastor.conf"
  # you can also specify etcdUrl here, e.g. to connect to another Vitastor cluster;
  # multiple etcdUrls may be specified, delimited by commas
  #etcdUrl: "http://192.168.7.2:2379"
  #etcdPrefix: "/vitastor"
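(poolId must parse to a non-zero integer — CreateVolume rejects the request otherwise — and etcdVolumePrefix is prepended to the CSI-generated volume name to form the image name stored in etcd; see CreateVolume() below.)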
@@ -0,0 +1,35 @@
module vitastor.io/csi

go 1.15

require (
	github.com/container-storage-interface/spec v1.4.0
	github.com/coreos/bbolt v0.0.0-00010101000000-000000000000 // indirect
	github.com/coreos/etcd v3.3.25+incompatible // indirect
	github.com/coreos/go-semver v0.3.0 // indirect
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/gorilla/websocket v1.4.2 // indirect
	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
	github.com/jonboulle/clockwork v0.2.2 // indirect
	github.com/kubernetes-csi/csi-lib-utils v0.9.1
	github.com/soheilhy/cmux v0.1.5 // indirect
	github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
	go.etcd.io/bbolt v0.0.0-00010101000000-000000000000 // indirect
	go.etcd.io/etcd v3.3.25+incompatible
	golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
	google.golang.org/grpc v1.33.1
	k8s.io/klog v1.0.0
	k8s.io/utils v0.0.0-20210305010621-2afb4311ab10
)

replace github.com/coreos/bbolt => go.etcd.io/bbolt v1.3.5

replace go.etcd.io/bbolt => github.com/coreos/bbolt v1.3.5

replace google.golang.org/grpc => google.golang.org/grpc v1.25.1
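(The mutual bbolt replaces and the grpc pin to v1.25.1 are presumably needed to keep the pre-module etcd v3.3 client building: its clientv3 package depends on grpc APIs that were removed in later grpc releases.)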
@@ -0,0 +1,22 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)

package vitastor

const (
    vitastorCSIDriverName = "csi.vitastor.io"
    vitastorCSIDriverVersion = "0.6.3"
)

// Config stores the driver startup parameters taken from user input
type Config struct
{
    Endpoint string
    NodeID string
}

// NewConfig returns a new Config to initialize the driver
func NewConfig() *Config
{
    return &Config{}
}
@@ -0,0 +1,530 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)

package vitastor

import (
    "context"
    "encoding/json"
    "strings"
    "bytes"
    "strconv"
    "time"
    "fmt"
    "os"
    "os/exec"
    "io/ioutil"

    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
    "k8s.io/klog"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

    "go.etcd.io/etcd/clientv3"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

const (
    KB int64 = 1024
    MB int64 = 1024 * KB
    GB int64 = 1024 * MB
    TB int64 = 1024 * GB
    ETCD_TIMEOUT time.Duration = 15*time.Second
)

type InodeIndex struct
{
    Id uint64 `json:"id"`
    PoolId uint64 `json:"pool_id"`
}

type InodeConfig struct
{
    Name string `json:"name"`
    Size uint64 `json:"size,omitempty"`
    ParentPool uint64 `json:"parent_pool,omitempty"`
    ParentId uint64 `json:"parent_id,omitempty"`
    Readonly bool `json:"readonly,omitempty"`
}

type ControllerServer struct
{
    *Driver
}

// NewControllerServer creates a new ControllerServer instance
func NewControllerServer(driver *Driver) *ControllerServer
{
    return &ControllerServer{
        Driver: driver,
    }
}

func GetConnectionParams(params map[string]string) (map[string]string, []string, string)
{
    ctxVars := make(map[string]string)
    configPath := params["configPath"]
    if (configPath == "")
    {
        configPath = "/etc/vitastor/vitastor.conf"
    }
    else
    {
        ctxVars["configPath"] = configPath
    }
    config := make(map[string]interface{})
    if configFD, err := os.Open(configPath); err == nil
    {
        defer configFD.Close()
        data, _ := ioutil.ReadAll(configFD)
        json.Unmarshal(data, &config)
    }
    // Try to load the prefix & etcd URL from the config
    var etcdUrl []string
    if (params["etcdUrl"] != "")
    {
        ctxVars["etcdUrl"] = params["etcdUrl"]
        etcdUrl = strings.Split(params["etcdUrl"], ",")
    }
    if (len(etcdUrl) == 0)
    {
        switch config["etcd_address"].(type)
        {
        case string:
            etcdUrl = strings.Split(config["etcd_address"].(string), ",")
        case []string:
            etcdUrl = config["etcd_address"].([]string)
        }
    }
    etcdPrefix := params["etcdPrefix"]
    if (etcdPrefix == "")
    {
        etcdPrefix, _ = config["etcd_prefix"].(string)
        if (etcdPrefix == "")
        {
            etcdPrefix = "/vitastor"
        }
    }
    else
    {
        ctxVars["etcdPrefix"] = etcdPrefix
    }
    return ctxVars, etcdUrl, etcdPrefix
}
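A minimal sketch of the precedence implemented above, with made-up values (not part of the commit): explicit StorageClass parameters win over /etc/vitastor/vitastor.conf, which wins over the built-in defaults, and only the explicit overrides end up in ctxVars so they can be replayed from the volume ID later:

// Illustrative only — assumes the package above; values are hypothetical.
func exampleGetConnectionParams() {
    params := map[string]string{
        "etcdUrl":    "http://10.0.0.1:2379,http://10.0.0.2:2379", // overrides etcd_address from the file
        "etcdPrefix": "",                                          // falls back to etcd_prefix, then "/vitastor"
    }
    ctxVars, etcdUrl, etcdPrefix := GetConnectionParams(params)
    fmt.Println(ctxVars)    // map[etcdUrl:http://10.0.0.1:2379,http://10.0.0.2:2379]
    fmt.Println(etcdUrl)    // [http://10.0.0.1:2379 http://10.0.0.2:2379]
    fmt.Println(etcdPrefix) // /vitastor
}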
// Create the volume
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error)
{
    klog.Infof("received controller create volume request %+v", protosanitizer.StripSecrets(req))
    if (req == nil)
    {
        return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty")
    }
    if (req.GetName() == "")
    {
        return nil, status.Error(codes.InvalidArgument, "name is a required field")
    }
    volumeCapabilities := req.GetVolumeCapabilities()
    if (volumeCapabilities == nil)
    {
        return nil, status.Error(codes.InvalidArgument, "volume capabilities is a required field")
    }

    etcdVolumePrefix := req.Parameters["etcdVolumePrefix"]
    poolId, _ := strconv.ParseUint(req.Parameters["poolId"], 10, 64)
    if (poolId == 0)
    {
        return nil, status.Error(codes.InvalidArgument, "poolId is missing in storage class configuration")
    }

    volName := etcdVolumePrefix + req.GetName()
    volSize := 1 * GB
    if capRange := req.GetCapacityRange(); capRange != nil
    {
        volSize = ((capRange.GetRequiredBytes() + MB - 1) / MB) * MB
    }

    // FIXME: The following should PROBABLY be implemented externally in a management tool

    ctxVars, etcdUrl, etcdPrefix := GetConnectionParams(req.Parameters)
    if (len(etcdUrl) == 0)
    {
        return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf")
    }

    // Connect to etcd
    cli, err := clientv3.New(clientv3.Config{
        DialTimeout: ETCD_TIMEOUT,
        Endpoints: etcdUrl,
    })
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "failed to connect to etcd at "+strings.Join(etcdUrl, ",")+": "+err.Error())
    }
    defer cli.Close()

    var imageId uint64 = 0
    for
    {
        // Check if the image exists
        ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT)
        resp, err := cli.Get(ctx, etcdPrefix+"/index/image/"+volName)
        cancel()
        if (err != nil)
        {
            return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error())
        }
        if (len(resp.Kvs) > 0)
        {
            kv := resp.Kvs[0]
            var v InodeIndex
            err := json.Unmarshal(kv.Value, &v)
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, "invalid /index/image/"+volName+" key in etcd: "+err.Error())
            }
            poolId = v.PoolId
            imageId = v.Id
            inodeCfgKey := fmt.Sprintf("/config/inode/%d/%d", poolId, imageId)
            ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT)
            resp, err := cli.Get(ctx, etcdPrefix+inodeCfgKey)
            cancel()
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error())
            }
            if (len(resp.Kvs) == 0)
            {
                return nil, status.Error(codes.Internal, "missing "+inodeCfgKey+" key in etcd")
            }
            var inodeCfg InodeConfig
            err = json.Unmarshal(resp.Kvs[0].Value, &inodeCfg)
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, "invalid "+inodeCfgKey+" key in etcd: "+err.Error())
            }
            if (inodeCfg.Size < uint64(volSize))
            {
                return nil, status.Error(codes.Internal, "image "+volName+" is already created, but size is less than expected")
            }
            // The image already exists and is large enough: reuse it
            // (without this break the loop would never terminate)
            break
        }
        else
        {
            // Find a free ID
            // Create image metadata in a transaction, verifying that the image doesn't exist yet AND the ID is still free
            maxIdKey := fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId)
            ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT)
            resp, err := cli.Get(ctx, maxIdKey)
            cancel()
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error())
            }
            var modRev int64
            var nextId uint64
            if (len(resp.Kvs) > 0)
            {
                var err error
                nextId, err = strconv.ParseUint(string(resp.Kvs[0].Value), 10, 64)
                if (err != nil)
                {
                    return nil, status.Error(codes.Internal, maxIdKey+" contains invalid ID")
                }
                modRev = resp.Kvs[0].ModRevision
                nextId++
            }
            else
            {
                nextId = 1
            }
            inodeIdxJson, _ := json.Marshal(InodeIndex{
                Id: nextId,
                PoolId: poolId,
            })
            inodeCfgJson, _ := json.Marshal(InodeConfig{
                Name: volName,
                Size: uint64(volSize),
            })
            ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT)
            txnResp, err := cli.Txn(ctx).If(
                clientv3.Compare(clientv3.ModRevision(fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId)), "=", modRev),
                clientv3.Compare(clientv3.CreateRevision(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName)), "=", 0),
                clientv3.Compare(clientv3.CreateRevision(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, poolId, nextId)), "=", 0),
            ).Then(
                clientv3.OpPut(fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId), fmt.Sprintf("%d", nextId)),
                clientv3.OpPut(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName), string(inodeIdxJson)),
                clientv3.OpPut(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, poolId, nextId), string(inodeCfgJson)),
            ).Commit()
            cancel()
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, "failed to commit transaction in etcd: "+err.Error())
            }
            if (txnResp.Succeeded)
            {
                imageId = nextId
                break
            }
            // Start over if the transaction fails
        }
    }

    ctxVars["name"] = volName
    volumeIdJson, _ := json.Marshal(ctxVars)
    return &csi.CreateVolumeResponse{
        Volume: &csi.Volume{
            // Ugly, but VolumeContext isn't passed to DeleteVolume :-(
            VolumeId: string(volumeIdJson),
            CapacityBytes: volSize,
        },
    }, nil
}
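For reference, assuming the default "/vitastor" prefix and a hypothetical image "pvc-xyz" that got ID 4 in pool 1, CreateVolume leaves the following keys in etcd (values made up; the size is in bytes, rounded up to a whole megabyte and defaulting to 1 GiB):

/vitastor/index/maxid/1       = "4"
/vitastor/index/image/pvc-xyz = {"id":4,"pool_id":1}
/vitastor/config/inode/1/4    = {"name":"pvc-xyz","size":10737418240}

The returned VolumeId is itself a small JSON object, e.g. {"name":"pvc-xyz"} plus any explicit connection overrides, because VolumeContext is not passed to DeleteVolume.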
// DeleteVolume deletes the given volume
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error)
{
    klog.Infof("received controller delete volume request %+v", protosanitizer.StripSecrets(req))
    if (req == nil)
    {
        return nil, status.Error(codes.InvalidArgument, "request cannot be empty")
    }

    ctxVars := make(map[string]string)
    err := json.Unmarshal([]byte(req.VolumeId), &ctxVars)
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "volume ID not in JSON format")
    }
    volName := ctxVars["name"]

    _, etcdUrl, etcdPrefix := GetConnectionParams(ctxVars)
    if (len(etcdUrl) == 0)
    {
        return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf")
    }

    cli, err := clientv3.New(clientv3.Config{
        DialTimeout: ETCD_TIMEOUT,
        Endpoints: etcdUrl,
    })
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "failed to connect to etcd at "+strings.Join(etcdUrl, ",")+": "+err.Error())
    }
    defer cli.Close()

    // Find the inode by name
    ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT)
    resp, err := cli.Get(ctx, etcdPrefix+"/index/image/"+volName)
    cancel()
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error())
    }
    if (len(resp.Kvs) == 0)
    {
        return nil, status.Error(codes.NotFound, "volume "+volName+" does not exist")
    }
    var idx InodeIndex
    err = json.Unmarshal(resp.Kvs[0].Value, &idx)
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "invalid /index/image/"+volName+" key in etcd: "+err.Error())
    }

    // Get the inode config
    inodeCfgKey := fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, idx.PoolId, idx.Id)
    ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT)
    resp, err = cli.Get(ctx, inodeCfgKey)
    cancel()
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error())
    }
    if (len(resp.Kvs) == 0)
    {
        return nil, status.Error(codes.NotFound, "volume "+volName+" does not exist")
    }
    var inodeCfg InodeConfig
    err = json.Unmarshal(resp.Kvs[0].Value, &inodeCfg)
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "invalid "+inodeCfgKey+" key in etcd: "+err.Error())
    }

    // Delete inode data by invoking vitastor-rm
    args := []string{
        "--etcd_address", strings.Join(etcdUrl, ","),
        "--pool", fmt.Sprintf("%d", idx.PoolId),
        "--inode", fmt.Sprintf("%d", idx.Id),
    }
    if (ctxVars["configPath"] != "")
    {
        args = append(args, "--config_path", ctxVars["configPath"])
    }
    c := exec.Command("/usr/bin/vitastor-rm", args...)
    var stderr bytes.Buffer
    c.Stdout = nil
    c.Stderr = &stderr
    err = c.Run()
    stderrStr := string(stderr.Bytes())
    if (err != nil)
    {
        klog.Errorf("vitastor-rm failed: %s, status %s\n", stderrStr, err)
        return nil, status.Error(codes.Internal, stderrStr+" (status "+err.Error()+")")
    }

    // Delete the inode config from etcd
    ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT)
    txnResp, err := cli.Txn(ctx).Then(
        clientv3.OpDelete(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName)),
        clientv3.OpDelete(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, idx.PoolId, idx.Id)),
    ).Commit()
    cancel()
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "failed to delete keys in etcd: "+err.Error())
    }
    if (!txnResp.Succeeded)
    {
        return nil, status.Error(codes.Internal, "failed to delete keys in etcd: transaction failed")
    }

    return &csi.DeleteVolumeResponse{}, nil
}
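With the hypothetical index entry shown earlier, the cleanup command built here would be /usr/bin/vitastor-rm --etcd_address http://192.168.7.2:2379 --pool 1 --inode 4, plus --config_path if a non-default configPath was recorded in the volume ID.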
// ControllerPublishVolume returns an Unimplemented error
func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ControllerUnpublishVolume returns an Unimplemented error
func (cs *ControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ValidateVolumeCapabilities checks whether the requested volume capabilities are supported.
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error)
{
    klog.Infof("received controller validate volume capability request %+v", protosanitizer.StripSecrets(req))
    if (req == nil)
    {
        return nil, status.Errorf(codes.InvalidArgument, "request is nil")
    }
    volumeID := req.GetVolumeId()
    if (volumeID == "")
    {
        return nil, status.Error(codes.InvalidArgument, "volumeId is nil")
    }
    volumeCapabilities := req.GetVolumeCapabilities()
    if (volumeCapabilities == nil)
    {
        return nil, status.Error(codes.InvalidArgument, "volumeCapabilities is nil")
    }

    var volumeCapabilityAccessModes []*csi.VolumeCapability_AccessMode
    for _, mode := range []csi.VolumeCapability_AccessMode_Mode{
        csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
        csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
    } {
        volumeCapabilityAccessModes = append(volumeCapabilityAccessModes, &csi.VolumeCapability_AccessMode{Mode: mode})
    }

    capabilitySupport := false
    for _, capability := range volumeCapabilities
    {
        for _, volumeCapabilityAccessMode := range volumeCapabilityAccessModes
        {
            if (volumeCapabilityAccessMode.Mode == capability.AccessMode.Mode)
            {
                capabilitySupport = true
            }
        }
    }

    if (!capabilitySupport)
    {
        return nil, status.Errorf(codes.NotFound, "%v not supported", req.GetVolumeCapabilities())
    }

    return &csi.ValidateVolumeCapabilitiesResponse{
        Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
            VolumeCapabilities: req.VolumeCapabilities,
        },
    }, nil
}

// ListVolumes returns a list of volumes
func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// GetCapacity returns the capacity of the storage pool
func (cs *ControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ControllerGetCapabilities returns the capabilities of the controller service.
func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error)
{
    functionControllerServerCapabilities := func(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability
    {
        return &csi.ControllerServiceCapability{
            Type: &csi.ControllerServiceCapability_Rpc{
                Rpc: &csi.ControllerServiceCapability_RPC{
                    Type: cap,
                },
            },
        }
    }

    var controllerServerCapabilities []*csi.ControllerServiceCapability
    for _, capability := range []csi.ControllerServiceCapability_RPC_Type{
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
        csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
        csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
        csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
    } {
        controllerServerCapabilities = append(controllerServerCapabilities, functionControllerServerCapabilities(capability))
    }

    return &csi.ControllerGetCapabilitiesResponse{
        Capabilities: controllerServerCapabilities,
    }, nil
}

// CreateSnapshot creates a snapshot of an existing PV
func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// DeleteSnapshot deletes the provided snapshot of a PV
func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ListSnapshots lists the snapshots of a PV
func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ControllerExpandVolume resizes a volume
func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}

// ControllerGetVolume returns volume info
func (cs *ControllerServer) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error)
{
    return nil, status.Error(codes.Unimplemented, "")
}
@@ -0,0 +1,137 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vitastor

import (
    "fmt"
    "net"
    "os"
    "strings"
    "sync"

    "github.com/golang/glog"
    "golang.org/x/net/context"
    "google.golang.org/grpc"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
)

// NonBlockingGRPCServer defines the non-blocking GRPC server interface
type NonBlockingGRPCServer interface {
    // Start starts the services at the endpoint
    Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer)
    // Wait waits for the service to stop
    Wait()
    // Stop stops the service gracefully
    Stop()
    // ForceStop stops the service forcefully
    ForceStop()
}

func NewNonBlockingGRPCServer() NonBlockingGRPCServer {
    return &nonBlockingGRPCServer{}
}

// Non-blocking server
type nonBlockingGRPCServer struct {
    wg sync.WaitGroup
    server *grpc.Server
}

func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
    s.wg.Add(1)
    go s.serve(endpoint, ids, cs, ns)
}

func (s *nonBlockingGRPCServer) Wait() {
    s.wg.Wait()
}

func (s *nonBlockingGRPCServer) Stop() {
    s.server.GracefulStop()
}

func (s *nonBlockingGRPCServer) ForceStop() {
    s.server.Stop()
}

func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
    proto, addr, err := ParseEndpoint(endpoint)
    if err != nil {
        glog.Fatal(err.Error())
    }

    if proto == "unix" {
        addr = "/" + addr
        if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
            glog.Fatalf("Failed to remove %s, error: %s", addr, err.Error())
        }
    }

    listener, err := net.Listen(proto, addr)
    if err != nil {
        glog.Fatalf("Failed to listen: %v", err)
    }

    opts := []grpc.ServerOption{
        grpc.UnaryInterceptor(logGRPC),
    }
    server := grpc.NewServer(opts...)
    s.server = server

    if ids != nil {
        csi.RegisterIdentityServer(server, ids)
    }
    if cs != nil {
        csi.RegisterControllerServer(server, cs)
    }
    if ns != nil {
        csi.RegisterNodeServer(server, ns)
    }

    glog.Infof("Listening for connections on address: %#v", listener.Addr())

    server.Serve(listener)
}

func ParseEndpoint(ep string) (string, string, error) {
    if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") {
        s := strings.SplitN(ep, "://", 2)
        if s[1] != "" {
            return s[0], s[1], nil
        }
    }
    return "", "", fmt.Errorf("Invalid endpoint: %v", ep)
}

func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    glog.V(3).Infof("GRPC call: %s", info.FullMethod)
    glog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
    resp, err := handler(ctx, req)
    if err != nil {
        glog.Errorf("GRPC error: %v", err)
    } else {
        glog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
    }
    return resp, err
}
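A quick sketch of the ParseEndpoint() behavior (illustrative only, values made up, not part of the commit):

// Illustrative only — assumes the package above.
func exampleParseEndpoint() {
    proto, addr, err := ParseEndpoint("unix:///csi/csi.sock")
    // proto == "unix", addr == "/csi/csi.sock", err == nil.
    // serve() then prepends another "/" for unix sockets; the resulting
    // "//csi/csi.sock" still resolves to the same filesystem path.
    // Endpoints without a unix:// or tcp:// prefix make ParseEndpoint return an error.
    fmt.Println(proto, addr, err)
}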
@@ -0,0 +1,60 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)

package vitastor

import (
    "context"

    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
    "k8s.io/klog"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

// IdentityServer struct of the Vitastor CSI driver with the supported methods of the CSI identity server spec.
type IdentityServer struct
{
    *Driver
}

// NewIdentityServer creates a new IdentityServer instance
func NewIdentityServer(driver *Driver) *IdentityServer
{
    return &IdentityServer{
        Driver: driver,
    }
}

// GetPluginInfo returns metadata of the plugin
func (is *IdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error)
{
    klog.Infof("received identity plugin info request %+v", protosanitizer.StripSecrets(req))
    return &csi.GetPluginInfoResponse{
        Name: vitastorCSIDriverName,
        VendorVersion: vitastorCSIDriverVersion,
    }, nil
}

// GetPluginCapabilities returns the available capabilities of the plugin
func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error)
{
    klog.Infof("received identity plugin capabilities request %+v", protosanitizer.StripSecrets(req))
    return &csi.GetPluginCapabilitiesResponse{
        Capabilities: []*csi.PluginCapability{
            {
                Type: &csi.PluginCapability_Service_{
                    Service: &csi.PluginCapability_Service{
                        Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
                    },
                },
            },
        },
    }, nil
}

// Probe returns the health and readiness of the plugin
func (is *IdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error)
{
    return &csi.ProbeResponse{}, nil
}
@@ -0,0 +1,279 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)

package vitastor

import (
    "context"
    "os"
    "os/exec"
    "encoding/json"
    "strings"
    "bytes"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "k8s.io/utils/mount"
    utilexec "k8s.io/utils/exec"

    "github.com/container-storage-interface/spec/lib/go/csi"
    "github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
    "k8s.io/klog"
)

// NodeServer struct of the Vitastor CSI driver with the supported methods of the CSI node server spec.
type NodeServer struct
{
    *Driver
    mounter mount.Interface
}

// NewNodeServer creates a new NodeServer instance
func NewNodeServer(driver *Driver) *NodeServer
{
    return &NodeServer{
        Driver: driver,
        mounter: mount.New(""),
    }
}

// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error)
{
    return &csi.NodeStageVolumeResponse{}, nil
}

// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error)
{
    return &csi.NodeUnstageVolumeResponse{}, nil
}

func Contains(list []string, s string) bool
{
    for i := 0; i < len(list); i++
    {
        if (list[i] == s)
        {
            return true
        }
    }
    return false
}

// NodePublishVolume mounts the volume from the staging path to the target path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error)
{
    klog.Infof("received node publish volume request %+v", protosanitizer.StripSecrets(req))

    targetPath := req.GetTargetPath()

    // Check that it's not already mounted
    free, err := mount.IsNotMountPoint(ns.mounter, targetPath)
    if (err != nil)
    {
        if (os.IsNotExist(err))
        {
            err := os.MkdirAll(targetPath, 0777)
            if (err != nil)
            {
                return nil, status.Error(codes.Internal, err.Error())
            }
            free = true
        }
        else
        {
            return nil, status.Error(codes.Internal, err.Error())
        }
    }
    if (!free)
    {
        return &csi.NodePublishVolumeResponse{}, nil
    }

    ctxVars := make(map[string]string)
    err = json.Unmarshal([]byte(req.VolumeId), &ctxVars)
    if (err != nil)
    {
        return nil, status.Error(codes.Internal, "volume ID not in JSON format")
    }
    volName := ctxVars["name"]

    _, etcdUrl, etcdPrefix := GetConnectionParams(ctxVars)
    if (len(etcdUrl) == 0)
    {
        return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf")
    }

    // Map NBD device
    // FIXME: Check if already mapped
    args := []string{
        "map", "--etcd_address", strings.Join(etcdUrl, ","),
        "--etcd_prefix", etcdPrefix