Change conditions and add events (#155)
Change conditions to support waiting for status, and add events.
Also check dependencies before checking whether an apply is needed.
Paul Carlton authored Nov 20, 2020
1 parent fe3a082 commit c1132df
Showing 5 changed files with 63 additions and 159 deletions.
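The heart of the controller change is standard client-go event-recorder wiring: a broadcaster records to the cluster's Event sink and hands back a recorder stamped with a component name. A minimal standalone sketch of the same pattern follows (the newRecorder name and component string are illustrative, not part of this commit):

	import (
		corev1 "k8s.io/api/core/v1"
		"k8s.io/client-go/kubernetes"
		kscheme "k8s.io/client-go/kubernetes/scheme"
		typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
		"k8s.io/client-go/rest"
		"k8s.io/client-go/tools/record"
	)

	// newRecorder wires a broadcaster to the cluster's Event sink and returns
	// a recorder that stamps every event with the given component name.
	func newRecorder(cfg *rest.Config, component string) (record.EventRecorder, error) {
		kubeClient, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			return nil, err
		}
		broadcaster := record.NewBroadcaster()
		broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
			Interface: kubeClient.CoreV1().Events(""), // "" posts to each object's own namespace
		})
		return broadcaster.NewRecorder(kscheme.Scheme,
			corev1.EventSource{Component: component}), nil
	}

The recorder this produces is what the reconciler stores in its new Recorder field and passes into each layer below.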
34 changes: 32 additions & 2 deletions controllers/addons_controller.go
@@ -31,7 +31,10 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
kscheme "k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -177,6 +180,22 @@ type AddonsLayerReconciler struct {
Applier apply.LayerApplier
Repos repos.Repos
Metrics metrics.Metrics
Recorder record.EventRecorder
}

// eventRecorder returns an EventRecorder that can be
// used to post Events to different objects' lifecycles.
func eventRecorder(
kubeClient kubernetes.Interface) record.EventRecorder {
eventBroadcaster := record.NewBroadcaster()
//eventBroadcaster.StartLogging(setupLog.Infof)
eventBroadcaster.StartRecordingToSink(
&typedcorev1.EventSinkImpl{
Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(
kscheme.Scheme,
corev1.EventSource{Component: "kraan-controller"})
return recorder
}

// NewReconciler returns an AddonsLayerReconciler instance
@@ -193,6 +212,7 @@ func NewReconciler(config *rest.Config, client client.Client, logger logr.Logger
if err != nil {
return nil, errors.WithMessagef(err, "%s - failed to create reconciler", logging.CallerStr(logging.Me))
}
reconciler.Recorder = eventRecorder(reconciler.k8client)
reconciler.Context = context.Background()
reconciler.Applier, err = apply.NewApplier(client, logger.WithName("applier"), scheme)
if err != nil {
@@ -246,6 +266,11 @@ func (r *AddonsLayerReconciler) processApply(l layers.Layer) (statusReconciled b
ctx := r.Context
applier := r.Applier

if !l.DependenciesDeployed() {
l.SetRequeue()
return true, nil
}

applyIsRequired, err := applier.ApplyIsRequired(ctx, l)
if err != nil {
return false, errors.WithMessagef(err, "%s - check if apply is required failed", logging.CallerStr(logging.Me))
@@ -496,13 +521,18 @@ func (r *AddonsLayerReconciler) Reconcile(req ctrl.Request) (res ctrl.Result, er

log := r.Log.WithValues("layer", req.NamespacedName.Name)

-	l := layers.CreateLayer(ctx, r.Client, r.k8client, log, addonsLayer)
+	l := layers.CreateLayer(ctx, r.Client, r.k8client, log, r.Recorder, r.Scheme, addonsLayer)
deployedRevision, err := r.processAddonLayer(l)
if err != nil {
l.StatusUpdate(kraanv1alpha1.FailedCondition, kraanv1alpha1.AddonsLayerFailedReason, errors.Cause(err).Error())
log.Error(err, "failed to process addons layer", logging.GetFunctionAndSource(logging.MyCaller)...)
}

if l.GetSpec().Version != l.GetFullStatus().Version {
l.SetUpdated()
l.GetFullStatus().Version = l.GetSpec().Version
}

if l.GetAddonsLayer().Generation != l.GetFullStatus().ObservedGeneration {
l.SetUpdated()
l.GetFullStatus().ObservedGeneration = l.GetAddonsLayer().Generation
@@ -565,7 +595,7 @@ func (r *AddonsLayerReconciler) repoMapperFunc(a handler.MapObject) []reconcile.
layerList := []layers.Layer{}
addons := []reconcile.Request{}
for _, addon := range addonsList.Items {
-	layer := layers.CreateLayer(r.Context, r.Client, r.k8client, r.Log, &addon) //nolint:scopelint // ok
+	layer := layers.CreateLayer(r.Context, r.Client, r.k8client, r.Log, r.Recorder, r.Scheme, &addon) //nolint:scopelint // ok
if layer.GetSpec().Source.Name == srcRepo.Name && layer.GetSpec().Source.NameSpace == srcRepo.Namespace {
r.Log.V(1).Info("layer is using this source", append(logging.GetGitRepoInfo(srcRepo), append(logging.GetFunctionAndSource(logging.MyCaller), "layers", addons)...)...)
layerList = append(layerList, layer)
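processApply now refuses to compute an apply plan until every dependency layer reports deployed; the layer is simply flagged for requeue and retried later. A sketch of the guard in isolation, against a narrowed interface (the applyGate name is illustrative; DependenciesDeployed and SetRequeue are the layer methods used above):

	// applyGate reports whether the caller should stop and requeue because
	// dependency layers have not finished deploying yet.
	func applyGate(l interface {
		DependenciesDeployed() bool
		SetRequeue()
	}) bool {
		if !l.DependenciesDeployed() {
			l.SetRequeue() // retry after the layer's requeue interval
			return true
		}
		return false
	}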
3 changes: 3 additions & 0 deletions go.sum
@@ -290,8 +290,10 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo=
github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
@@ -789,6 +791,7 @@ k8s.io/apimachinery v0.18.9/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEX
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=
k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0=
k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
k8s.io/apiserver v0.18.8/go.mod h1:12u5FuGql8Cc497ORNj79rhPdiXQC4bf53X/skR/1YM=
k8s.io/apiserver v0.18.9/go.mod h1:vXQzMtUCLsGg1Bh+7Jo2mZKHpHZFCZn8eTNSepcIA1M=
28 changes: 14 additions & 14 deletions pkg/layers/layers.go
@@ -16,8 +16,11 @@ import (
"golang.org/x/mod/semver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
"sigs.k8s.io/controller-runtime/pkg/client"

kraanv1alpha1 "github.com/fidelity/kraan/api/v1alpha1"
@@ -83,12 +86,15 @@ type KraanLayer struct {
client client.Client
k8client kubernetes.Interface
log logr.Logger
recorder record.EventRecorder
ref *corev1.ObjectReference
Layer `json:"-"`
addonsLayer *kraanv1alpha1.AddonsLayer
}

// CreateLayer creates a layer object.
-	func CreateLayer(ctx context.Context, client client.Client, k8client kubernetes.Interface, log logr.Logger, addonsLayer *kraanv1alpha1.AddonsLayer) Layer {
+	func CreateLayer(ctx context.Context, client client.Client, k8client kubernetes.Interface, log logr.Logger,
+	recorder record.EventRecorder, scheme *runtime.Scheme, addonsLayer *kraanv1alpha1.AddonsLayer) Layer {
l := &KraanLayer{
requeue: false,
delayed: false,
@@ -97,9 +103,14 @@ func CreateLayer(ctx context.Context, client client.Client, k8client kubernetes.
log: log,
client: client,
k8client: k8client,
recorder: recorder,
addonsLayer: addonsLayer,
}
l.delay = l.addonsLayer.Spec.Interval.Duration
var err error
if l.ref, err = reference.GetReference(scheme, addonsLayer); err != nil {
log.Error(err, "failed to get reference")
}
return l
}

@@ -166,17 +177,6 @@ func (l *KraanLayer) CheckK8sVersion() bool {
return semver.Compare(versionInfo.String(), l.GetRequiredK8sVersion()) >= 0
}

func (l *KraanLayer) trimConditions() {
logging.TraceCall(l.GetLogger())
defer logging.TraceExit(l.GetLogger())
length := len(l.addonsLayer.Status.Conditions)
if length < MaxConditions {
return
}
trimedCond := l.addonsLayer.Status.Conditions[length-MaxConditions:]
l.addonsLayer.Status.Conditions = trimedCond
}

func (l *KraanLayer) setStatus(status, reason, message string) {
logging.TraceCall(l.GetLogger())
defer logging.TraceExit(l.GetLogger())
@@ -186,7 +186,7 @@ func (l *KraanLayer) setStatus(status, reason, message string) {
if last.Reason == reason && last.Message == message && last.Type == status {
return
}
-	last.Status = corev1.ConditionFalse
+	l.addonsLayer.Status.Conditions = []kraanv1alpha1.Condition{}
}

l.addonsLayer.Status.Conditions = append(l.addonsLayer.Status.Conditions, kraanv1alpha1.Condition{
@@ -197,14 +197,14 @@ func (l *KraanLayer) setStatus(status, reason, message string) {
Reason: reason,
Message: message,
})
l.trimConditions()
l.addonsLayer.Status.State = status
l.addonsLayer.Status.Version = l.addonsLayer.Spec.Version
l.updated = true
l.requeue = true
if l.addonsLayer.Status.Resources == nil {
l.addonsLayer.Status.Resources = []kraanv1alpha1.Resource{}
}
l.recorder.Event(l.ref, corev1.EventTypeNormal, reason, message)
}

// SetStatusK8sVersion sets the addon layer's status to waiting for required K8s Version.
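Two behavioural changes land here: setStatus now resets the conditions list to just the latest condition rather than appending and trimming to MaxConditions (hence the removal of trimConditions), and every transition is also published as a Kubernetes Event against the AddonsLayer, using an object reference resolved once in CreateLayer. A sketch of that resolve-then-record pairing (emitStatusEvent is an illustrative name, not a function in this commit):

	import (
		corev1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/client-go/tools/record"
		"k8s.io/client-go/tools/reference"
	)

	// emitStatusEvent resolves an object reference once and posts a
	// Normal event for it under the supplied reason and message.
	func emitStatusEvent(recorder record.EventRecorder, scheme *runtime.Scheme,
		obj runtime.Object, reason, message string) error {
		ref, err := reference.GetReference(scheme, obj)
		if err != nil {
			return err // the object's type must be registered in the scheme
		}
		recorder.Event(ref, corev1.EventTypeNormal, reason, message)
		return nil
	}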
86 changes: 14 additions & 72 deletions pkg/layers/layers_test.go
@@ -16,8 +16,10 @@ import (
fakediscovery "k8s.io/client-go/discovery/fake"
fakeK8s "k8s.io/client-go/kubernetes/fake"
fakeTest "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"

//k8sscheme "k8s.io/client-go/kubernetes/scheme"
extv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

kraanv1alpha1 "github.com/fidelity/kraan/api/v1alpha1"
@@ -29,8 +31,17 @@ var (
testScheme = runtime.NewScheme()
// testCtx = context.Background()
fakeK8sClient *fakeK8s.Clientset
scheme = runtime.NewScheme()
)

func init() {
_ = corev1.AddToScheme(scheme) // nolint:errcheck // ok
//_ = helmctlv2.AddToScheme(scheme) // nolint:errcheck // ok
_ = kraanv1alpha1.AddToScheme(scheme) // nolint:errcheck // ok
_ = sourcev1.AddToScheme(scheme) // nolint:errcheck // ok
_ = extv1b1.AddToScheme(scheme) // nolint:errcheck // ok
}

const (
holdSet = "hold-set"
k8sPending = "k8s-pending"
@@ -119,7 +130,8 @@ func getLayer(layerName, testDataFileName, reposDataFileName string) (layers.Lay
if data == nil {
return nil, fmt.Errorf("failed to find item: %s in test data", layerName)
}
-	return layers.CreateLayer(context.Background(), client, fakeK8sClient, logger, data), nil
+	fakeRecorder := record.NewFakeRecorder(1000)
+	return layers.CreateLayer(context.Background(), client, fakeK8sClient, logger, fakeRecorder, scheme, data), nil
}

func testDelayedRequeue(t *testing.T, l layers.Layer) bool {
@@ -504,77 +516,7 @@ func TestHold(t *testing.T) {
Reason: kraanv1alpha1.AddonsLayerK8sVersionReason,
Message: kraanv1alpha1.AddonsLayerK8sVersionMsg},
},
}}, {
name: "set status when maximum number of conditions already",
layerName: maxConditions,
status: kraanv1alpha1.PruningCondition,
reason: kraanv1alpha1.AddonsLayerPruningReason,
message: kraanv1alpha1.AddonsLayerPruningMsg,
expected: &kraanv1alpha1.AddonsLayerStatus{
State: kraanv1alpha1.PruningCondition,
Version: versionOne,
Conditions: []kraanv1alpha1.Condition{
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.PruningCondition,
Reason: kraanv1alpha1.AddonsLayerPruningReason,
Message: kraanv1alpha1.AddonsLayerPruningMsg},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.ApplyPendingCondition,
Reason: "waiting for layer: test-layer2, version: 0.1.01 to be applied.",
Message: "Layer: test-layer2, current state: Applying."},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.ApplyingCondition,
Reason: kraanv1alpha1.AddonsLayerApplyingReason,
Message: kraanv1alpha1.AddonsLayerApplyingMsg},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.DeployedCondition,
Reason: kraanv1alpha1.AddonsLayerDeployedReason,
Message: ""},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.K8sVersionCondition,
Reason: kraanv1alpha1.AddonsLayerK8sVersionReason,
Message: kraanv1alpha1.AddonsLayerK8sVersionMsg},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.PruningCondition,
Reason: kraanv1alpha1.AddonsLayerPruningReason,
Message: kraanv1alpha1.AddonsLayerPruningMsg},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.ApplyPendingCondition,
Reason: "waiting for layer: test-layer2, version: 0.1.01 to be applied.",
Message: "Layer: test-layer2, current state: Applying."},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.ApplyingCondition,
Reason: kraanv1alpha1.AddonsLayerApplyingReason,
Message: kraanv1alpha1.AddonsLayerApplyingMsg},
{
Status: corev1.ConditionFalse,
Version: versionOne,
Type: kraanv1alpha1.DeployedCondition,
Reason: kraanv1alpha1.AddonsLayerDeployedReason,
Message: ""},
{
Status: corev1.ConditionTrue,
Version: versionOne,
Type: kraanv1alpha1.PruningCondition,
Reason: kraanv1alpha1.AddonsLayerPruningReason,
Message: kraanv1alpha1.AddonsLayerPruningMsg},
-	}}}}
+	}}}
for _, test := range tests {
l, e := getLayer(test.layerName, layersData, reposData)
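Because the tests now construct layers with record.NewFakeRecorder, every event posted by setStatus lands on the recorder's buffered Events channel as a formatted "Type Reason Message" string, so tests can assert on it directly. A sketch of such an assertion helper (assertEvent is illustrative, not part of this commit):

	import (
		"strings"
		"testing"

		"k8s.io/client-go/tools/record"
	)

	// assertEvent fails the test unless an event mentioning want was recorded.
	func assertEvent(t *testing.T, r *record.FakeRecorder, want string) {
		t.Helper()
		select {
		case got := <-r.Events: // events arrive as "Type Reason Message" strings
			if !strings.Contains(got, want) {
				t.Fatalf("event %q does not mention %q", got, want)
			}
		default:
			t.Fatalf("no event recorded, expected one mentioning %q", want)
		}
	}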
71 changes: 0 additions & 71 deletions pkg/layers/testdata/layersdata.json
@@ -133,77 +133,6 @@
},
"status": {
"conditions": [
{
"type": "K8sVersion",
"status": "False",
"version": "0.1.01",
"lastTransitionTime": null,
"reason": "AddonsLayer is waiting for the required K8sVersion",
"message": "The k8sVersion status means the manager has detected that the AddonsLayer needs a higher version of the Kubernetes API than the current version running on the cluster."
},
{
"type": "Pruning",
"status": "False",
"version": "0.1.01",
"lastTransitionTime": null,
"reason": "AddonsLayer is being pruned",
"message": "The pruning status means the manager is pruning objects removed from this layer"
},
{
"lastTransitionTime": null,
"message": "Layer: test-layer2, current state: Applying.",
"reason": "waiting for layer: test-layer2, version: 0.1.01 to be applied.",
"status": "False",
"type": "ApplyPending",
"version": "0.1.01"
},
{
"lastTransitionTime": null,
"message": "The applying status means the manager is either applying the yaml files or waiting for the HelmReleases to successfully deploy.",
"reason": "AddonsLayer is being applied",
"status": "False",
"type": "Applying",
"version": "0.1.01"
},
{
"lastTransitionTime": null,
"reason": "AddonsLayer is Deployed",
"status": "False",
"type": "Deployed",
"version": "0.1.01"
},
{
"type": "K8sVersion",
"status": "False",
"version": "0.1.01",
"lastTransitionTime": null,
"reason": "AddonsLayer is waiting for the required K8sVersion",
"message": "The k8sVersion status means the manager has detected that the AddonsLayer needs a higher version of the Kubernetes API than the current version running on the cluster."
},
{
"type": "Pruning",
"status": "False",
"version": "0.1.01",
"lastTransitionTime": null,
"reason": "AddonsLayer is being pruned",
"message": "The pruning status means the manager is pruning objects removed from this layer"
},
{
"lastTransitionTime": null,
"message": "Layer: test-layer2, current state: Applying.",
"reason": "waiting for layer: test-layer2, version: 0.1.01 to be applied.",
"status": "False",
"type": "ApplyPending",
"version": "0.1.01"
},
{
"lastTransitionTime": null,
"message": "The applying status means the manager is either applying the yaml files or waiting for the HelmReleases to successfully deploy.",
"reason": "AddonsLayer is being applied",
"status": "False",
"type": "Applying",
"version": "0.1.01"
},
{
"lastTransitionTime": null,
"reason": "AddonsLayer is Deployed",
