Skip to content

Commit

Permalink
Adding KMS support to OCS Operator
Browse files Browse the repository at this point in the history
OCS Operator will check whether KMS (Key Management System) feature is enabled or not through
StorageCluster Spec (under 'Encryption' -> 'KeyManagementService' -> 'Enable' = true).
If KMS is enabled, operator will look for a configmap, named "ocs-kms-connection-details",
fetch details from it and update the 'CephCluster' Spec. Once successfully updated,
rook will access the necessary info through the CephCluster Spec and in turn try to store
the LUKS's (Linux Unified Key Setup) Key Encryption Key (KEK) inside a KMS.

The main prerequisite for this feature is that a valid KMS service provider must be
running and reachable from all nodes in the cluster.

More internal details can be found at: rook/rook#6474

Signed-off-by: Arun Kumar Mohan <[email protected]>
  • Loading branch information
aruniiird committed Dec 16, 2020
1 parent 84805b6 commit 89b3bac
Show file tree
Hide file tree
Showing 8 changed files with 87 additions and 171 deletions.
7 changes: 7 additions & 0 deletions deploy/crds/ocs.openshift.io_storageclusters_crd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,13 @@ spec:
properties:
enable:
type: boolean
kms:
description: KeyManagementServiceSpec provides a way to enable
KMS
type: object
properties:
enable:
type: boolean
externalStorage:
description: External Storage is optional and defaults to false. When
set to true, OCS will connect to an external OCS Storage Cluster
Expand Down
163 changes: 0 additions & 163 deletions go.sum

Large diffs are not rendered by default.

9 changes: 8 additions & 1 deletion pkg/apis/ocs/v1/storagecluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,12 @@ type StorageClusterSpec struct {
FlexibleScaling bool `json:"flexibleScaling,omitempty"`
}

// KeyManagementServiceSpec provides a way to enable KMS
type KeyManagementServiceSpec struct {
// +optional
Enable bool `json:"enable,omitempty"`
}

// ManagedResourcesSpec defines how to reconcile auxiliary resources
type ManagedResourcesSpec struct {
CephBlockPools ManageCephBlockPools `json:"cephBlockPools,omitempty"`
Expand Down Expand Up @@ -156,7 +162,8 @@ type MultiCloudGatewaySpec struct {
// It is optional and defaults to false.
type EncryptionSpec struct {
// +optional
Enable bool `json:"enable,omitempty"`
Enable bool `json:"enable,omitempty"`
KeyManagementService KeyManagementServiceSpec `json:"kms,omitempty"`
}

// StorageClusterStatus defines the observed state of StorageCluster
Expand Down
17 changes: 17 additions & 0 deletions pkg/apis/ocs/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 12 additions & 2 deletions pkg/controller/storagecluster/cephcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,12 @@ func (r *ReconcileStorageCluster) ensureCephCluster(sc *ocsv1.StorageCluster, re
if sc.Spec.ExternalStorage.Enable {
cephCluster = newExternalCephCluster(sc, r.images.Ceph, r.monitoringIP)
} else {
cephCluster = newCephCluster(sc, r.images.Ceph, r.nodeCount, r.serverVersion, reqLogger)
kmsConfigMap, err := getKMSConfigMap(sc, r.client)
if err != nil {
reqLogger.Error(err, "failed to procure KMS config")
return err
}
cephCluster = newCephCluster(sc, r.images.Ceph, r.nodeCount, r.serverVersion, kmsConfigMap, reqLogger)
}

// Set StorageCluster instance as the owner and controller
Expand Down Expand Up @@ -199,7 +204,7 @@ func (r *ReconcileStorageCluster) ensureCephCluster(sc *ocsv1.StorageCluster, re
}

// newCephCluster returns a CephCluster object.
func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, serverVersion *version.Info, reqLogger logr.Logger) *cephv1.CephCluster {
func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, serverVersion *version.Info, kmsConfigMap *corev1.ConfigMap, reqLogger logr.Logger) *cephv1.CephCluster {
labels := map[string]string{
"app": sc.Name,
}
Expand Down Expand Up @@ -279,6 +284,11 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, nodeCount int, s
if isMultus(sc.Spec.Network) {
cephCluster.Spec.Network.NetworkSpec = *sc.Spec.Network
}
// if kmsConfig is not 'nil', add the KMS details to CephCluster spec
if kmsConfigMap != nil {
cephCluster.Spec.Security.KeyManagementService.ConnectionDetails = kmsConfigMap.Data
cephCluster.Spec.Security.KeyManagementService.TokenSecretName = KMSTokenSecretName
}
return cephCluster
}

Expand Down
8 changes: 4 additions & 4 deletions pkg/controller/storagecluster/cephcluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func TestEnsureCephCluster(t *testing.T) {
c.cc.ObjectMeta.Name = "doesn't exist"
}
} else {
c.cc = newCephCluster(mockStorageCluster, "", 3, serverVersion, log)
c.cc = newCephCluster(mockStorageCluster, "", 3, serverVersion, nil, log)
c.cc.ObjectMeta.SelfLink = "/api/v1/namespaces/ceph/secrets/pvc-ceph-client-key"
if c.condition == "negativeCondition" {
c.cc.Status.State = rookCephv1.ClusterStateCreated
Expand All @@ -69,8 +69,8 @@ func TestEnsureCephCluster(t *testing.T) {
err := reconciler.ensureCephCluster(sc, reconciler.reqLogger)
assert.NoError(t, err)
if c.condition == "" {
expected := newCephCluster(sc, "", 3, reconciler.serverVersion, log)
actual := newCephCluster(sc, "", 3, reconciler.serverVersion, log)
expected := newCephCluster(sc, "", 3, reconciler.serverVersion, nil, log)
actual := newCephCluster(sc, "", 3, reconciler.serverVersion, nil, log)
err = reconciler.client.Get(context.TODO(), mockCephClusterNamespacedName, actual)
assert.NoError(t, err)
assert.Equal(t, expected.ObjectMeta.Name, actual.ObjectMeta.Name)
Expand Down Expand Up @@ -151,7 +151,7 @@ func TestNewCephClusterMonData(t *testing.T) {
c.sc.Spec.MonDataDirHostPath = c.monDataPath
c.sc.Status.Images.Ceph = &api.ComponentImageStatus{}

actual := newCephCluster(c.sc, "", 3, serverVersion, log)
actual := newCephCluster(c.sc, "", 3, serverVersion, nil, log)
assert.Equal(t, generateNameForCephCluster(c.sc), actual.Name)
assert.Equal(t, c.sc.Namespace, actual.Namespace)
assert.Equal(t, c.expectedMonDataPath, actual.Spec.DataDirHostPath)
Expand Down
38 changes: 38 additions & 0 deletions pkg/controller/storagecluster/kms_resources.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package storagecluster

import (
	"context"
	"fmt"

	ocsv1 "github.com/openshift/ocs-operator/pkg/apis/ocs/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
// KMSConfigMapName is the name configmap which has KMS config details
KMSConfigMapName = "ocs-kms-connection-details"
// KMSTokenSecretName is the name of the secret which has KMS token details
KMSTokenSecretName = "ocs-kms-token"
// KMSProviderKey is the key in config map to get the KMS provider name
KMSProviderKey = "KMS_PROVIDER"
// VaultKMSProvider a constant to represent 'vault' KMS provider
VaultKMSProvider = "vault"
)

func getKMSConfigMap(instance *ocsv1.StorageCluster, client client.Client) (*corev1.ConfigMap, error) {
// if 'KMS' is not enabled, nothing to fetch
if !instance.Spec.Encryption.KeyManagementService.Enable {
return nil, nil
}
kmsConfigMap := corev1.ConfigMap{}
err := client.Get(context.TODO(),
types.NamespacedName{
Name: KMSConfigMapName,
Namespace: instance.ObjectMeta.Namespace,
},
&kmsConfigMap,
)
return &kmsConfigMap, err
}
Original file line number Diff line number Diff line change
Expand Up @@ -983,7 +983,7 @@ func TestStorageClusterOnMultus(t *testing.T) {
func assertCephClusterNetwork(t assert.TestingT, reconciler ReconcileStorageCluster, cr *api.StorageCluster, request reconcile.Request) {
serverVersion := &k8sVersion.Info{}
request.Name = "ocsinit-cephcluster"
cephCluster := newCephCluster(cr, "", 3, serverVersion, log)
cephCluster := newCephCluster(cr, "", 3, serverVersion, nil, log)
err := reconciler.client.Get(context.TODO(), request.NamespacedName, cephCluster)
assert.NoError(t, err)
if cr.Spec.Network == nil {
Expand Down

0 comments on commit 89b3bac

Please sign in to comment.