Skip to content
Snippets Groups Projects
Commit f86c4c42 authored by Matthew Wong's avatar Matthew Wong Committed by GitHub
Browse files

Merge pull request #206 from cofyc/ceph/rbd

Add Ceph RBD provisioner
parents 999a5f33 13c73981
No related branches found
No related tags found
No related merge requests found
......@@ -19,7 +19,7 @@ ifeq ($(VERSION),)
VERSION = latest
endif
clean: clean-aws/efs clean-ceph/cephfs clean-flex clean-gluster/block clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs
clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs
.PHONY: clean
test: test-aws/efs test-local-volume/provisioner test-nfs
......@@ -57,6 +57,18 @@ clean-ceph/cephfs:
rm -f cephfs-provisioner
.PHONY: clean-ceph/cephfs
ceph/rbd:
cd ceph/rbd; \
go build -o rbd-provisioner cmd/rbd-provisioner/main.go; \
docker build -t $(REGISTRY)rbd-provisioner:latest .
docker tag $(REGISTRY)rbd-provisioner:latest $(REGISTRY)rbd-provisioner:$(VERSION)
.PHONY: ceph/rbd
clean-ceph/rbd:
cd ceph/rbd; \
rm -f rbd-provisioner
.PHONY: clean-ceph/rbd
flex:
cd flex; \
make container
......@@ -129,6 +141,11 @@ push-cephfs-provisioner: ceph/cephfs
docker push $(REGISTRY)cephfs-provisioner:latest
.PHONY: push-cephfs-provisioner
# Push the rbd-provisioner image (both the versioned and latest tags).
push-rbd-provisioner: ceph/rbd
	docker push $(REGISTRY)rbd-provisioner:$(VERSION)
	docker push $(REGISTRY)rbd-provisioner:latest
.PHONY: push-rbd-provisioner
push-efs-provisioner:
cd aws/efs; \
make push
......
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM centos:7
# Enable the Ceph "jewel" release repository for el7, then install the
# ceph-common client tools (provides the `rbd` CLI the provisioner shells
# out to at runtime).
RUN rpm -Uvh https://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm
RUN yum install -y epel-release
RUN yum install -y ceph-common
# rbd-provisioner is the binary built by `make ceph/rbd` in the repo root.
ADD rbd-provisioner /usr/local/bin/rbd-provisioner
ENTRYPOINT ["/usr/local/bin/rbd-provisioner"]
assignees:
- rootfs
- cofyc
# RBD Volume Provisioner for Kubernetes 1.5+
`rbd-provisioner` is an out-of-tree dynamic provisioner for Kubernetes 1.5+.
You can use it to quickly & easily deploy Ceph RBD storage that works almost
anywhere.
It works just like the in-tree dynamic provisioner. For more information on how
dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/)
or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html).
## Test instruction
* Build rbd-provisioner and container image
```bash
go build -o rbd-provisioner cmd/rbd-provisioner/main.go
docker build -t rbd-provisioner .
```
* Start Kubernetes local cluster
* Create a Ceph admin secret
```bash
ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3}' |xargs echo -n > /tmp/secret
kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret --namespace=kube-system
```
* Create a Ceph pool and a user secret
```bash
ceph osd pool create kube 8 8
ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kube'
ceph auth get client.kube 2>&1 |grep "key = " |awk '{print $3}' |xargs echo -n > /tmp/secret
kubectl create secret generic ceph-secret --from-file=/tmp/secret --namespace=default
```
* Start RBD provisioner
The following example uses `rbd-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity.
```bash
docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host rbd-provisioner /usr/local/bin/rbd-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=rbd-provisioner-1
```
Alternatively, start a deployment:
```bash
kubectl create -f deployment.yaml
```
* Create a RBD Storage Class
Replace Ceph monitor's IP in [class.yaml](class.yaml) with your own and create storage class:
```bash
kubectl create -f class.yaml
```
* Create a claim
```bash
kubectl create -f claim.yaml
```
* Create a Pod using the claim
```bash
kubectl create -f test-pod.yaml
```
# Acknowledgements
- This provisioner is extracted from [Kubernetes core](https://github.com/kubernetes/kubernetes) with some modifications for this project.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: claim1
spec:
accessModes:
- ReadWriteOnce
storageClassName: rbd
resources:
requests:
storage: 1Gi
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: rbd
provisioner: ceph.com/rbd
parameters:
monitors: 172.16.118.132:6789
pool: kube
adminId: admin
adminSecretNamespace: kube-system
adminSecretName: ceph-admin-secret
userId: kube
userSecretName: ceph-secret
imageFormat: "2"
imageFeatures: layering
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"github.com/golang/glog"
"github.com/kubernetes-incubator/external-storage/ceph/rbd/pkg/provision"
"github.com/kubernetes-incubator/external-storage/lib/controller"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
	// Connection flags: when both master and kubeconfig are empty, main
	// falls back to in-cluster configuration.
	master     = flag.String("master", "", "Master URL")
	kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig")
	// id overrides the generated provisioner identity; per the README it
	// should remain stable across restarts of the same instance.
	id = flag.String("id", "", "Unique provisioner identity")
)
func main() {
flag.Parse()
flag.Set("logtostderr", "true")
var config *rest.Config
var err error
if *master != "" || *kubeconfig != "" {
config, err = clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
} else {
config, err = rest.InClusterConfig()
}
prID := string(uuid.NewUUID())
if *id != "" {
prID = *id
}
if err != nil {
glog.Fatalf("Failed to create config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
// The controller needs to know what the server version is because out-of-tree
// provisioners aren't officially supported until 1.5
serverVersion, err := clientset.Discovery().ServerVersion()
if err != nil {
glog.Fatalf("Error getting server version: %v", err)
}
// Create the provisioner: it implements the Provisioner interface expected by
// the controller
glog.Infof("Creating RBD provisioner with identity: %s", prID)
rbdProvisioner := provision.NewRBDProvisioner(clientset, prID)
// Start the provision controller which will dynamically provision rbd
// PVs
pc := controller.NewProvisionController(
clientset,
provision.ProvisionerName,
rbdProvisioner,
serverVersion.GitVersion,
)
pc.Run(wait.NeverStop)
}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: rbd-provisioner
spec:
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: rbd-provisioner
spec:
containers:
- name: rbd-provisioner
image: "quay.io/external_storage/rbd-provisioner:latest"
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make the locally-built rbd-provisioner binary visible on PATH.
# Use $(pwd) rather than legacy backticks.
PATH=${PATH}:$(pwd)

# Default to the standard kubeconfig location unless the caller set one.
if [[ "$KUBECONFIG" == "" ]]; then
	KUBECONFIG=/root/.kube/config
fi

rbd-provisioner -id=rbd-provisioner-1 -master=http://127.0.0.1:8080 -kubeconfig=${KUBECONFIG} -logtostderr
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Helper utilities copied from kubernetes/pkg/volume.
// TODO: Merge this into github.com/kubernetes-incubator/external-storage/lib/{util or helper} if possible.
package provision
import "k8s.io/client-go/pkg/api/v1"
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for i := range modes {
		if modes[i] == mode {
			return true
		}
	}
	return false
}
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
	for _, want := range requestedModes {
		found := false
		for _, have := range indexedModes {
			if have == want {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provision
import (
"errors"
"fmt"
"strings"
"github.com/golang/glog"
"github.com/kubernetes-incubator/external-storage/lib/controller"
"github.com/kubernetes-incubator/external-storage/lib/helper"
"github.com/pborman/uuid"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
const (
	// ProvisionerName is a unique string to represent this volume provisioner. This value will be
	// added in PV annotations under 'pv.kubernetes.io/provisioned-by' key.
	ProvisionerName = "ceph.com/rbd"
	// Each provisioner has an identity string to distinguish it from others. This
	// identity string will be added in PV annotations under this key.
	provisionerIDAnn = "rbdProvisionerIdentity"

	// secretKeyName is the entry preferred when extracting a keyring value
	// from a Kubernetes secret (see parsePVSecret).
	secretKeyName = "key" // key name used in secret

	// Valid values for the "imageformat" StorageClass parameter.
	rbdImageFormat1 = "1"
	rbdImageFormat2 = "2"
)

var (
	// supportedFeatures lists the RBD image format 2 features accepted via
	// the "imagefeatures" StorageClass parameter.
	supportedFeatures = sets.NewString("layering")
)
// rbdProvisionOptions is the parsed form of the StorageClass parameters
// accepted by this provisioner; see parseParameters for defaults/validation.
type rbdProvisionOptions struct {
	// Ceph monitor addresses, e.g. "172.16.118.132:6789".
	monitors []string
	// pool to create images in (default "rbd").
	pool string
	// admin keyring value used to create/delete images.
	adminSecret string
	// Ceph admin user ID (default "admin").
	adminID string
	// Ceph user recorded on the PV (RadosUser); defaults to adminID.
	userID string
	// name of the Kubernetes secret holding userID's key.
	userSecretName string
	// RBD image format, rbdImageFormat1 or rbdImageFormat2.
	imageFormat string
	// image format 2 features to enable, e.g. "layering".
	imageFeatures []string
}

type rbdProvisioner struct {
	// Kubernetes Client. Use to retrieve Ceph admin secret
	client kubernetes.Interface
	// Identity of this rbdProvisioner, generated. Used to identify "this"
	// provisioner's PVs.
	identity string
	// rbdUtil shells out to the `rbd` CLI to create/delete images.
	rbdUtil *RBDUtil
}
// NewRBDProvisioner creates a Provisioner that provisions Ceph RBD PVs backed by Ceph RBD images.
func NewRBDProvisioner(client kubernetes.Interface, id string) controller.Provisioner {
	p := &rbdProvisioner{
		client:   client,
		identity: id,
		rbdUtil:  &RBDUtil{},
	}
	return p
}
var _ controller.Provisioner = &rbdProvisioner{}
// getAccessModes returns the access modes an RBD volume supports:
// writable from a single node, or read-only from many.
func (p *rbdProvisioner) getAccessModes() []v1.PersistentVolumeAccessMode {
	modes := make([]v1.PersistentVolumeAccessMode, 0, 2)
	modes = append(modes, v1.ReadWriteOnce, v1.ReadOnlyMany)
	return modes
}
// Provision creates a storage asset and returns a PV object representing it.
// It validates the claim (only supported access modes, no selector), parses
// the StorageClass parameters, creates a randomly-named RBD image sized from
// the claim's storage request, and wraps it in a PersistentVolume.
func (p *rbdProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
	if !AccessModesContainedInAll(p.getAccessModes(), options.PVC.Spec.AccessModes) {
		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", options.PVC.Spec.AccessModes, p.getAccessModes())
	}
	if options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("claim Selector is not supported")
	}
	opts, err := p.parseParameters(options.Parameters)
	if err != nil {
		return nil, err
	}
	// create random image name
	image := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
	rbd, sizeMB, err := p.rbdUtil.CreateImage(image, opts, options)
	if err != nil {
		glog.Errorf("rbd: create volume failed, err: %v", err)
		return nil, err
	}
	glog.Infof("successfully created rbd image %q", image)

	// Record the (non-admin) user and its secret on the volume source so the
	// kubelet can mount with those credentials.
	rbd.SecretRef = new(v1.LocalObjectReference)
	rbd.SecretRef.Name = opts.userSecretName
	rbd.RadosUser = opts.userID

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: options.PVName,
			Annotations: map[string]string{
				// Tag the PV so Delete can verify this provisioner owns it.
				provisionerIDAnn: p.identity,
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
			AccessModes:                   options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				// sizeMB is the image size after CreateImage rounds the
				// request up to whole MiB.
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				RBD: rbd,
			},
		},
	}
	// use default access modes if missing
	if len(pv.Spec.AccessModes) == 0 {
		glog.Warningf("no access modes specified, use default: %v", p.getAccessModes())
		pv.Spec.AccessModes = p.getAccessModes()
	}
	return pv, nil
}
// Delete removes the storage asset that was created by Provision represented
// by the given PV. It only acts on PVs carrying this provisioner's identity
// annotation; monitors and credentials for deletion are re-read from the
// PV's StorageClass parameters.
func (p *rbdProvisioner) Delete(volume *v1.PersistentVolume) error {
	// TODO: Should we check `pv.kubernetes.io/provisioned-by` key too?
	ann, ok := volume.Annotations[provisionerIDAnn]
	if !ok {
		return errors.New("identity annotation not found on PV")
	}
	if ann != p.identity {
		// Another provisioner instance owns this PV; leave it for them.
		return &controller.IgnoredError{Reason: "identity annotation on PV does not match ours"}
	}
	// Fetch the StorageClass the volume was provisioned from to recover the
	// same connection options used at provision time.
	class, err := p.client.StorageV1beta1().StorageClasses().Get(helper.GetPersistentVolumeClass(volume), metav1.GetOptions{})
	if err != nil {
		return err
	}
	opts, err := p.parseParameters(class.Parameters)
	if err != nil {
		return err
	}
	image := volume.Spec.PersistentVolumeSource.RBD.RBDImage
	return p.rbdUtil.DeleteImage(image, opts)
}
// parseParameters converts StorageClass parameters into rbdProvisionOptions,
// validating required fields ("monitors", "adminsecretname", "usersecretname")
// and applying defaults compatible with the in-tree provisioner: pool "rbd",
// admin ID "admin", image format 1, and userID falling back to adminID.
// It also resolves the admin secret value via the Kubernetes API.
func (p *rbdProvisioner) parseParameters(parameters map[string]string) (*rbdProvisionOptions, error) {
	// options with default values
	opts := &rbdProvisionOptions{
		pool:        "rbd",
		adminID:     "admin",
		imageFormat: rbdImageFormat1,
	}

	var (
		err                  error
		adminSecretName      = ""
		adminSecretNamespace = "default"
	)

	for k, v := range parameters {
		switch strings.ToLower(k) {
		case "monitors":
			// Skip empty entries. strings.Split always returns at least one
			// element, so without this filtering an empty or malformed value
			// ("", "a,,b") would yield bogus "" monitors and the
			// missing-monitors check below could never fire.
			for _, m := range strings.Split(v, ",") {
				if m = strings.TrimSpace(m); m != "" {
					opts.monitors = append(opts.monitors, m)
				}
			}
			if len(opts.monitors) < 1 {
				return nil, fmt.Errorf("missing Ceph monitors")
			}
		case "adminid":
			if v == "" {
				// keep consistent behavior with in-tree rbd provisioner, which uses
				// the default value if user provides an empty string
				// TODO: treat empty string as invalid value?
				v = "admin"
			}
			opts.adminID = v
		case "adminsecretname":
			adminSecretName = v
		case "adminsecretnamespace":
			adminSecretNamespace = v
		case "userid":
			opts.userID = v
		case "pool":
			if v == "" {
				// keep consistent behavior with in-tree rbd provisioner, which uses
				// the default value if user provides an empty string
				// TODO: treat empty string as invalid value?
				v = "rbd"
			}
			opts.pool = v
		case "usersecretname":
			if v == "" {
				return nil, fmt.Errorf("missing user secret name")
			}
			opts.userSecretName = v
		case "imageformat":
			if v != rbdImageFormat1 && v != rbdImageFormat2 {
				return nil, fmt.Errorf("invalid ceph imageformat %s, expecting %s or %s", v, rbdImageFormat1, rbdImageFormat2)
			}
			opts.imageFormat = v
		case "imagefeatures":
			for _, f := range strings.Split(v, ",") {
				if !supportedFeatures.Has(f) {
					return nil, fmt.Errorf("invalid feature %q for %s provisioner, supported features are: %v", f, ProvisionerName, supportedFeatures)
				}
				opts.imageFeatures = append(opts.imageFeatures, f)
			}
		default:
			return nil, fmt.Errorf("invalid option %q for %s provisioner", k, ProvisionerName)
		}
	}

	// find adminSecret
	var secret string
	if adminSecretName == "" {
		return nil, fmt.Errorf("missing Ceph admin secret name")
	}
	if secret, err = p.parsePVSecret(adminSecretNamespace, adminSecretName); err != nil {
		return nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
	}
	opts.adminSecret = secret

	// set user ID to admin ID if empty
	if opts.userID == "" {
		opts.userID = opts.adminID
	}
	return opts, nil
}
// parsePVSecret retrieves the secret value for a given namespace and name.
// The value stored under the "key" entry wins; otherwise the last entry seen
// is returned (note: map iteration order is not deterministic, so this only
// gives a stable result for single-entry secrets).
func (p *rbdProvisioner) parsePVSecret(namespace, secretName string) (string, error) {
	if p.client == nil {
		return "", fmt.Errorf("Cannot get kube client")
	}
	sec, err := p.client.Core().Secrets(namespace).Get(secretName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// TODO: Should we check secret.Type, like `k8s.io/kubernetes/pkg/volume/util.GetSecretForPV` function?
	fallback := ""
	for name, value := range sec.Data {
		if name == secretKeyName {
			return string(value), nil
		}
		fallback = string(value)
	}
	// If not found, the last secret in the map wins as done before
	return fallback, nil
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provision
import (
"fmt"
"math/rand"
"os/exec"
"strings"
"github.com/golang/glog"
"github.com/kubernetes-incubator/external-storage/lib/controller"
"github.com/kubernetes-incubator/external-storage/lib/util"
"k8s.io/client-go/pkg/api/v1"
)
const (
	// imageWatcherStr is the substring searched for in `rbd status` output
	// to detect that a client still watches (i.e. uses) the image.
	imageWatcherStr = "watcher="
)

// RBDUtil is the utility structure to interact with the RBD.
type RBDUtil struct{}
// CreateImage creates a new ceph image with provision and volume options.
// The image size is the claim's storage request rounded up to whole MiB.
// Monitors are tried round-robin from a random starting point until
// `rbd create` succeeds. It returns the RBD volume source, the size in MiB,
// and the error from the last failed attempt, if any.
func (u *RBDUtil) CreateImage(image string, pOpts *rbdProvisionOptions, options controller.VolumeOptions) (*v1.RBDVolumeSource, int, error) {
	var output []byte
	var err error

	capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	volSizeBytes := capacity.Value()
	// convert to MB that rbd defaults on
	sz := int(util.RoundUpSize(volSizeBytes, 1024*1024))
	volSz := fmt.Sprintf("%d", sz)
	// rbd create
	l := len(pOpts.monitors)
	// pick a mon randomly
	start := rand.Int() % l
	// iterate all monitors until create succeeds.
	for i := start; i < start+l; i++ {
		mon := pOpts.monitors[i%l]
		if pOpts.imageFormat == rbdImageFormat2 {
			glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSz, pOpts.imageFormat, pOpts.imageFeatures, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
		} else {
			glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSz, pOpts.imageFormat, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
		}
		args := []string{"create", image, "--size", volSz, "--pool", pOpts.pool, "--id", pOpts.adminID, "-m", mon, "--key=" + pOpts.adminSecret, "--image-format", pOpts.imageFormat}
		if pOpts.imageFormat == rbdImageFormat2 {
			// if no image features is provided, it results in empty string
			// which disable all RBD image format 2 features as we expected
			features := strings.Join(pOpts.imageFeatures, ",")
			args = append(args, "--image-feature", features)
		}
		output, err = u.execCommand("rbd", args)
		if err == nil {
			break
		} else {
			glog.Warningf("failed to create rbd image, output %v", string(output))
		}
	}

	if err != nil {
		return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
	}

	return &v1.RBDVolumeSource{
		CephMonitors: pOpts.monitors,
		RBDImage:     image,
		RBDPool:      pOpts.pool,
	}, sz, nil
}
// rbdStatus checks if there is a watcher on the image.
// It returns true if there is a watcher on the image, otherwise returns false.
// Monitors are tried round-robin from a random starting point; a monitor
// failure moves on to the next one, and the last error is returned when
// every monitor fails (previously the first monitor's result ended the loop
// unconditionally, so the remaining monitors were never tried — see the old
// TODO in this function).
func (u *RBDUtil) rbdStatus(image string, pOpts *rbdProvisionOptions) (bool, error) {
	var err error
	var output string
	var cmd []byte

	l := len(pOpts.monitors)
	start := rand.Int() % l
	// iterate all hosts until one responds.
	for i := start; i < start+l; i++ {
		mon := pOpts.monitors[i%l]
		// cmd "rbd status" list the rbd client watch with the following output:
		// Watchers:
		//   watcher=10.16.153.105:0/710245699 client.14163 cookie=1
		glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
		args := []string{"status", image, "--pool", pOpts.pool, "-m", mon, "--id", pOpts.adminID, "--key=" + pOpts.adminSecret}
		cmd, err = u.execCommand("rbd", args)
		output = string(cmd)
		if err != nil {
			// This monitor failed; try the next one.
			glog.Warningf("failed to execute rbd status on mon %s", mon)
			continue
		}
		if strings.Contains(output, imageWatcherStr) {
			glog.V(4).Infof("rbd: watchers on %s: %s", image, output)
			return true, nil
		}
		glog.Warningf("rbd: no watchers on %s", image)
		return false, nil
	}
	// Every monitor failed; surface the last error so callers do not treat
	// an unreachable cluster as "no watchers".
	return false, err
}
// DeleteImage removes the given ceph RBD image, refusing to do so while the
// image still has watchers (i.e. is in use). Monitors are tried round-robin
// from a random starting point until `rbd rm` succeeds; the last error is
// returned when all attempts fail.
func (u *RBDUtil) DeleteImage(image string, pOpts *rbdProvisionOptions) error {
	found, err := u.rbdStatus(image, pOpts)
	if err != nil {
		return err
	}
	if found {
		glog.Info("rbd is still being used ", image)
		return fmt.Errorf("rbd %s is still being used", image)
	}

	// rbd rm
	var out []byte
	numMons := len(pOpts.monitors)
	offset := rand.Int() % numMons
	for n := 0; n < numMons; n++ {
		mon := pOpts.monitors[(offset+n)%numMons]
		glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret)
		args := []string{"rm", image, "--pool", pOpts.pool, "--id", pOpts.adminID, "-m", mon, "--key=" + pOpts.adminSecret}
		out, err = u.execCommand("rbd", args)
		if err == nil {
			return nil
		}
		glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(out))
	}
	return err
}
// execCommand runs the given command and returns its combined stdout/stderr.
func (u *RBDUtil) execCommand(command string, args []string) ([]byte, error) {
	return exec.Command(command, args...).CombinedOutput()
}
apiVersion: v1
kind: Secret
metadata:
name: ceph-admin-secret
namespace: kube-system
type: "kubernetes.io/rbd"
data:
# ceph auth get-key client.admin | base64
key: QVFCaUpWdFo5NW40TnhBQWVBZGU1M3NOeVd5UTExRTJ4bEZkOFE9PQ==
---
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret
type: "kubernetes.io/rbd"
data:
# ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kube'
# ceph auth get-key client.kube | base64
key: QVFDNkpWdFpLNCtSTEJBQUFLM2hCSTA0eU13ODZUd3hjRzlzK0E9PQ==
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
containers:
- name: test-pod
image: gcr.io/google_containers/busybox:1.24
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/SUCCESS && exit 0 || exit 1"
volumeMounts:
- name: pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: pvc
persistentVolumeClaim:
claimName: claim1
......@@ -84,6 +84,7 @@ elif [ "$TEST_SUITE" = "everything-else" ]; then
make aws/efs
make test-aws/efs
make ceph/cephfs
make ceph/rbd
make flex
make gluster/block
make nfs-client
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment