Skip to content

Commit 32646a7

Browse files
committed
Improve logs, errors and conditions
Signed-off-by: Stefan Büringer <buringerst@vmware.com>
1 parent 27e2b18 commit 32646a7

25 files changed

+158
-118
lines changed

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 22 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,7 @@ package controllers
1919
import (
2020
"context"
2121
"fmt"
22+
"slices"
2223
"sort"
2324
"strings"
2425
"time"
@@ -456,6 +457,11 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl
456457
}
457458

458459
if err := r.syncMachines(ctx, controlPlane); err != nil {
460+
// Note: If any of the calls got a NotFound error, it means that at least one Machine got deleted.
461+
// Let's return here so that the next Reconcile will get the updated list of Machines.
462+
if apierrors.IsNotFound(err) {
463+
return ctrl.Result{}, nil // Note: Requeue is not needed, changes to Machines trigger another reconcile.
464+
}
459465
return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines")
460466
}
461467

@@ -504,7 +510,9 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl
504510
// Note: We have to wait here even if there are no more Machines that need rollout (in-place update in
505511
// progress is not counted as needs rollout).
506512
if machines := controlPlane.MachinesToCompleteInPlaceUpdate(); machines.Len() > 0 {
507-
log.Info("Waiting for in-place update to complete", "machines", strings.Join(machines.Names(), ", "))
513+
for _, machine := range machines {
514+
log.Info("Waiting for in-place update to complete", "Machine", klog.KObj(machine))
515+
}
508516
return ctrl.Result{}, nil // Note: Changes to Machines trigger another reconcile.
509517
}
510518

@@ -513,10 +521,12 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl
513521
switch {
514522
case len(machinesNeedingRollout) > 0:
515523
var allMessages []string
516-
for machine, machineUpToDateResult := range machinesUpToDateResults {
517-
allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", machine, strings.Join(machineUpToDateResult.LogMessages, ",")))
524+
machinesNeedingRolloutNames := machinesNeedingRollout.Names()
525+
slices.Sort(machinesNeedingRolloutNames)
526+
for _, name := range machinesNeedingRolloutNames {
527+
allMessages = append(allMessages, fmt.Sprintf("Machine %s needs rollout: %s", name, strings.Join(machinesUpToDateResults[name].LogMessages, ", ")))
518528
}
519-
log.Info(fmt.Sprintf("Rolling out Control Plane machines: %s", strings.Join(allMessages, ",")), "machinesNeedingRollout", machinesNeedingRollout.Names())
529+
log.Info(fmt.Sprintf("Machines need rollout: %s", machinesNeedingRolloutNames), "reason", strings.Join(allMessages, ", "))
520530
v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateV1Beta1Condition, controlplanev1.RollingUpdateInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(machinesNeedingRollout), len(controlPlane.Machines)-len(machinesNeedingRollout))
521531
return r.updateControlPlane(ctx, controlPlane, machinesNeedingRollout, machinesUpToDateResults)
522532
default:
@@ -1042,10 +1052,15 @@ func reconcileMachineUpToDateCondition(_ context.Context, controlPlane *internal
10421052
}
10431053

10441054
if inplace.IsUpdateInProgress(machine) {
1055+
msg := "* In-place update in progress"
1056+
if c := conditions.Get(machine, clusterv1.MachineUpdatingCondition); c != nil && c.Status == metav1.ConditionTrue && c.Message != "" {
1057+
msg = fmt.Sprintf("* %s", c.Message)
1058+
}
10451059
conditions.Set(machine, metav1.Condition{
1046-
Type: clusterv1.MachineUpToDateCondition,
1047-
Status: metav1.ConditionFalse,
1048-
Reason: clusterv1.MachineUpToDateUpdatingReason,
1060+
Type: clusterv1.MachineUpToDateCondition,
1061+
Status: metav1.ConditionFalse,
1062+
Reason: clusterv1.MachineUpToDateUpdatingReason,
1063+
Message: msg,
10491064
})
10501065
continue
10511066
}

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2400,9 +2400,10 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition
24002400
Message: "Waiting for a Node with spec.providerID foo to exist",
24012401
},
24022402
{
2403-
Type: clusterv1.MachineUpToDateCondition,
2404-
Status: metav1.ConditionFalse,
2405-
Reason: clusterv1.MachineUpToDateUpdatingReason,
2403+
Type: clusterv1.MachineUpToDateCondition,
2404+
Status: metav1.ConditionFalse,
2405+
Reason: clusterv1.MachineUpToDateUpdatingReason,
2406+
Message: "* In-place update in progress",
24062407
},
24072408
},
24082409
},

controlplane/kubeadm/internal/controllers/inplace_canupdatemachine.go

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ func (r *KubeadmControlPlaneReconciler) canUpdateMachine(ctx context.Context, ma
4545
return r.overrideCanUpdateMachineFunc(ctx, machine, machineUpToDateResult)
4646
}
4747

48-
log := ctrl.LoggerFrom(ctx)
48+
log := ctrl.LoggerFrom(ctx).WithValues("Machine", klog.KObj(machine))
4949

5050
// Machine cannot be updated in-place if the feature gate is not enabled.
5151
if !feature.Gates.Enabled(feature.InPlaceUpdates) {
@@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) canUpdateMachine(ctx context.Context, ma
7979
return false, err
8080
}
8181
if !canUpdateMachine {
82-
log.Info(fmt.Sprintf("Machine cannot be updated in-place by extensions: %s", strings.Join(reasons, ",")), "Machine", klog.KObj(machine))
82+
log.Info(fmt.Sprintf("Machine %s cannot be updated in-place by extensions", machine.Name), "reason", strings.Join(reasons, ","))
8383
return false, nil
8484
}
8585
return true, nil

controlplane/kubeadm/internal/controllers/inplace_trigger.go

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -40,8 +40,8 @@ func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context
4040
return r.overrideTriggerInPlaceUpdate(ctx, machine, machineUpToDateResult)
4141
}
4242

43-
log := ctrl.LoggerFrom(ctx)
44-
log.Info("Triggering in-place update", "Machine", klog.KObj(machine))
43+
log := ctrl.LoggerFrom(ctx).WithValues("Machine", klog.KObj(machine))
44+
log.Info(fmt.Sprintf("Triggering in-place update for Machine %s", machine.Name))
4545

4646
// Mark Machine for in-place update.
4747
// Note: Once we write UpdateInProgressAnnotation we will always continue with the in-place update.
@@ -133,7 +133,7 @@ func (r *KubeadmControlPlaneReconciler) triggerInPlaceUpdate(ctx context.Context
133133
return errors.Wrapf(err, "failed to complete triggering in-place update for Machine %s", klog.KObj(machine))
134134
}
135135

136-
log.Info("Completed triggering in-place update", "Machine", klog.KObj(machine))
136+
log.Info(fmt.Sprintf("Completed triggering in-place update for Machine %s", machine.Name))
137137
r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulStartInPlaceUpdate", "Machine starting in-place update")
138138

139139
// Wait until the cache observed the Machine with PendingHooksAnnotation to ensure subsequent reconciles

controlplane/kubeadm/internal/controllers/scale.go

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -53,7 +53,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
5353
}
5454

5555
log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...).
56-
Info("Machine created (init)",
56+
Info(fmt.Sprintf("Machine %s created (init)", newMachine.Name),
5757
"Machine", klog.KObj(newMachine),
5858
newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name),
5959
newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name))
@@ -87,7 +87,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,
8787
}
8888

8989
log.WithValues(controlPlane.StatusToLogKeyAndValues(newMachine, nil)...).
90-
Info("Machine created (scale up)",
90+
Info(fmt.Sprintf("Machine %s created (scale up)", newMachine.Name),
9191
"Machine", klog.KObj(newMachine),
9292
newMachine.Spec.InfrastructureRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.InfrastructureRef.Name),
9393
newMachine.Spec.Bootstrap.ConfigRef.Kind, klog.KRef(newMachine.Namespace, newMachine.Spec.Bootstrap.ConfigRef.Name))
@@ -144,7 +144,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
144144
// Note: We intentionally log after Delete because we want this log line to show up only after DeletionTimestamp has been set.
145145
// Also, setting DeletionTimestamp doesn't mean the Machine is actually deleted (deletion takes some time).
146146
log.WithValues(controlPlane.StatusToLogKeyAndValues(nil, machineToDelete)...).
147-
Info("Deleting Machine (scale down)", "Machine", klog.KObj(machineToDelete))
147+
Info(fmt.Sprintf("Machine %s deleting (scale down)", machineToDelete.Name), "Machine", klog.KObj(machineToDelete))
148148

149149
// Requeue the control plane, in case there are additional operations to perform
150150
return ctrl.Result{Requeue: true}, nil

internal/controllers/machine/machine_controller.go

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -668,7 +668,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
668668
// We only delete the node after the underlying infrastructure is gone.
669669
// https://github.com/kubernetes-sigs/cluster-api/issues/2565
670670
if isDeleteNodeAllowed {
671-
log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name))
671+
log.Info("Deleting Node", "Node", klog.KRef("", m.Status.NodeRef.Name))
672672

673673
var deleteNodeErr error
674674
waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, r.nodeDeletionRetryTimeout, true, func(ctx context.Context) (bool, error) {
@@ -678,9 +678,9 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
678678
return true, nil
679679
})
680680
if waitErr != nil {
681-
log.Error(deleteNodeErr, "Timed out deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name))
681+
log.Error(deleteNodeErr, "Timed out deleting Node", "Node", klog.KRef("", m.Status.NodeRef.Name))
682682
v1beta1conditions.MarkFalse(m, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.DeletionFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "")
683-
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr)
683+
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's Node: %v", deleteNodeErr)
684684

685685
// If the node deletion timeout is not expired yet, requeue the Machine for reconciliation.
686686
if m.Spec.Deletion.NodeDeletionTimeoutSeconds == nil || *m.Spec.Deletion.NodeDeletionTimeoutSeconds == 0 || m.DeletionTimestamp.Add(time.Duration(*m.Spec.Deletion.NodeDeletionTimeoutSeconds)*time.Second).After(time.Now()) {

internal/controllers/machine/machine_controller_inplace_update.go

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -66,7 +66,7 @@ func (r *Reconciler) reconcileInPlaceUpdate(ctx context.Context, s *scope) (ctrl
6666

6767
// If hook is not pending, we're waiting for the owner controller to mark it as pending.
6868
if !hasUpdateMachinePending {
69-
log.Info("In-place update annotation is set, waiting for UpdateMachine hook to be marked as pending")
69+
log.Info("Machine marked for in-place update, waiting for owning controller to mark UpdateMachine hook as pending")
7070
return ctrl.Result{}, nil
7171
}
7272

@@ -110,7 +110,6 @@ func (r *Reconciler) reconcileInPlaceUpdate(ctx context.Context, s *scope) (ctrl
110110
return result, nil
111111
}
112112

113-
log.Info("In-place update completed successfully")
114113
if err := r.completeInPlaceUpdate(ctx, s); err != nil {
115114
return ctrl.Result{}, errors.Wrap(err, "failed to complete in-place update")
116115
}
@@ -144,11 +143,9 @@ func (r *Reconciler) callUpdateMachineHook(ctx context.Context, s *scope) (ctrl.
144143
if err != nil {
145144
return ctrl.Result{}, "", err
146145
}
147-
148146
if len(extensions) == 0 {
149147
return ctrl.Result{}, "", errors.New("no extensions registered for UpdateMachine hook")
150148
}
151-
152149
if len(extensions) > 1 {
153150
return ctrl.Result{}, "", errors.Errorf("found multiple UpdateMachine hooks (%s): only one hook is supported", strings.Join(extensions, ","))
154151
}
@@ -208,7 +205,7 @@ func (r *Reconciler) completeInPlaceUpdate(ctx context.Context, s *scope) error
208205
return err
209206
}
210207

211-
log.Info("In-place update completed!")
208+
log.Info("Completed in-place update")
212209
return nil
213210
}
214211

@@ -224,6 +221,8 @@ func (r *Reconciler) removeInPlaceUpdateAnnotation(ctx context.Context, obj clie
224221
return errors.Wrapf(err, "failed to remove %s annotation from object %s", clusterv1.UpdateInProgressAnnotation, klog.KObj(obj))
225222
}
226223

224+
// Note: DeepCopy object to not modify the passed-in object which can lead to conflict errors later on.
225+
obj = obj.DeepCopyObject().(client.Object)
227226
orig := obj.DeepCopyObject().(client.Object)
228227
delete(annotations, clusterv1.UpdateInProgressAnnotation)
229228
obj.SetAnnotations(annotations)

internal/controllers/machine/machine_controller_noderef.go

Lines changed: 2 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -70,7 +70,6 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,
7070

7171
// Check that the Machine has a valid ProviderID.
7272
if machine.Spec.ProviderID == "" {
73-
log.Info("Waiting for infrastructure provider to report spec.providerID", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name))
7473
v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.WaitingForNodeRefV1Beta1Reason, clusterv1.ConditionSeverityInfo, "")
7574
return ctrl.Result{}, nil
7675
}
@@ -96,7 +95,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,
9695
return ctrl.Result{}, errors.Wrapf(err, "no matching Node for Machine %q in namespace %q", machine.Name, machine.Namespace)
9796
}
9897
v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.NodeProvisioningV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Waiting for a node with matching ProviderID to exist")
99-
log.Info("Infrastructure provider reporting spec.providerID, matching Kubernetes node is not yet available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID)
98+
log.Info("Infrastructure provider reporting spec.providerID, matching Kubernetes Node is not yet available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID)
10099
// No need to requeue here. Nodes emit an event that triggers reconciliation.
101100
return ctrl.Result{}, nil
102101
}
@@ -112,7 +111,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result,
112111
machine.Status.NodeRef = clusterv1.MachineNodeReference{
113112
Name: s.node.Name,
114113
}
115-
log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID, "Node", klog.KRef("", machine.Status.NodeRef.Name))
114+
log.Info("Infrastructure provider reporting spec.providerID, Kubernetes Node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID, "Node", klog.KRef("", machine.Status.NodeRef.Name))
116115
r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name)
117116
}
118117

internal/controllers/machine/machine_controller_phases.go

Lines changed: 20 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -37,6 +37,7 @@ import (
3737
capierrors "sigs.k8s.io/cluster-api/errors"
3838
"sigs.k8s.io/cluster-api/internal/contract"
3939
"sigs.k8s.io/cluster-api/util"
40+
"sigs.k8s.io/cluster-api/util/conditions"
4041
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
4142
"sigs.k8s.io/cluster-api/util/patch"
4243
"sigs.k8s.io/cluster-api/util/predicates"
@@ -212,9 +213,11 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Res
212213

213214
// If the data secret was not created yet, return.
214215
if !dataSecretCreated {
215-
log.Info(fmt.Sprintf("Waiting for bootstrap provider to generate data secret and set %s",
216-
contract.Bootstrap().DataSecretCreated(contractVersion).Path().String()),
217-
s.bootstrapConfig.GetKind(), klog.KObj(s.bootstrapConfig))
216+
if util.IsControlPlaneMachine(m) || conditions.IsTrue(s.cluster, clusterv1.ClusterControlPlaneInitializedCondition) {
217+
log.Info(fmt.Sprintf("Waiting for bootstrap provider to generate data secret and set %s",
218+
contract.Bootstrap().DataSecretCreated(contractVersion).Path().String()),
219+
s.bootstrapConfig.GetKind(), klog.KObj(s.bootstrapConfig))
220+
}
218221
return ctrl.Result{}, nil
219222
}
220223

@@ -309,21 +312,26 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr
309312

310313
// If the InfrastructureMachine is not provisioned (and it wasn't already provisioned before), return.
311314
if !provisioned && !ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false) {
312-
log.Info(fmt.Sprintf("Waiting for infrastructure provider to create machine infrastructure and set %s",
313-
contract.InfrastructureMachine().Provisioned(contractVersion).Path().String()),
314-
s.infraMachine.GetKind(), klog.KObj(s.infraMachine))
315+
if util.IsControlPlaneMachine(m) || conditions.IsTrue(s.cluster, clusterv1.ClusterControlPlaneInitializedCondition) {
316+
log.Info(fmt.Sprintf("Waiting for infrastructure provider to set %s on %s",
317+
contract.InfrastructureMachine().Provisioned(contractVersion).Path().String(), s.infraMachine.GetKind()),
318+
s.infraMachine.GetKind(), klog.KObj(s.infraMachine))
319+
}
315320
return ctrl.Result{}, nil
316321
}
317322

318323
// Get providerID from the InfrastructureMachine (intentionally not setting it on the Machine yet).
319-
var providerID *string
320-
if providerID, err = contract.InfrastructureMachine().ProviderID().Get(s.infraMachine); err != nil {
321-
return ctrl.Result{}, errors.Wrapf(err, "failed to read providerID from %s %s",
322-
s.infraMachine.GetKind(), klog.KObj(s.infraMachine))
323-
} else if *providerID == "" {
324-
return ctrl.Result{}, errors.Errorf("got empty %s field from %s %s",
324+
providerID, err := contract.InfrastructureMachine().ProviderID().Get(s.infraMachine)
325+
switch {
326+
case err != nil && !errors.Is(err, contract.ErrFieldNotFound):
327+
return ctrl.Result{}, errors.Wrapf(err, "failed to read %s from %s %s",
325328
contract.InfrastructureMachine().ProviderID().Path().String(),
326329
s.infraMachine.GetKind(), klog.KObj(s.infraMachine))
330+
case ptr.Deref(providerID, "") == "":
331+
log.Info(fmt.Sprintf("Waiting for infrastructure provider to set %s on %s",
332+
contract.InfrastructureMachine().ProviderID().Path().String(), s.infraMachine.GetKind()),
333+
s.infraMachine.GetKind(), klog.KObj(s.infraMachine))
334+
return ctrl.Result{}, nil // Note: Requeue is not needed, changes to InfraMachine trigger another reconcile.
327335
}
328336

329337
// Get and set addresses from the InfrastructureMachine.

internal/controllers/machine/machine_controller_status.go

Lines changed: 12 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -642,6 +642,9 @@ func setUpdatingCondition(_ context.Context, machine *clusterv1.Machine, updatin
642642
if updatingReason == "" {
643643
updatingReason = clusterv1.MachineInPlaceUpdatingReason
644644
}
645+
if updatingMessage == "" {
646+
updatingMessage = "In-place update in progress"
647+
}
645648
conditions.Set(machine, metav1.Condition{
646649
Type: clusterv1.MachineUpdatingCondition,
647650
Status: metav1.ConditionTrue,
@@ -714,11 +717,16 @@ func setUpToDateCondition(_ context.Context, m *clusterv1.Machine, ms *clusterv1
714717
return
715718
}
716719

717-
if conditions.IsTrue(m, clusterv1.MachineUpdatingCondition) {
720+
if c := conditions.Get(m, clusterv1.MachineUpdatingCondition); c != nil && c.Status == metav1.ConditionTrue {
721+
msg := "* In-place update in progress"
722+
if c.Message != "" {
723+
msg = fmt.Sprintf("* %s", c.Message)
724+
}
718725
conditions.Set(m, metav1.Condition{
719-
Type: clusterv1.MachineUpToDateCondition,
720-
Status: metav1.ConditionFalse,
721-
Reason: clusterv1.MachineUpToDateUpdatingReason,
726+
Type: clusterv1.MachineUpToDateCondition,
727+
Status: metav1.ConditionFalse,
728+
Reason: clusterv1.MachineUpToDateUpdatingReason,
729+
Message: msg,
722730
})
723731
return
724732
}

0 commit comments

Comments (0)