@@ -46,8 +46,10 @@ import (
 	"github.com/Azure/azure-container-networking/store"
 	"github.com/avast/retry-go/v3"
 	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -863,13 +865,12 @@ func initCNS(ctx context.Context, cli nodeNetworkConfigGetter, ncReconciler ncSt
 	}
 	podInfoByIP, err := podInfoByIPProvider.PodInfoByIP()
 	if err != nil {
-		return errors.Wrap(err, "err in CNS initialization")
+		return errors.Wrap(err, "provider failed to provide PodInfoByIP")
 	}
 
-	// errors.Wrap provides additional context, and return nil if the err input arg is nil
 	// Call cnsclient init cns passing those two things.
 	err = restserver.ResponseCodeToError(ncReconciler.ReconcileNCState(&ncRequest, podInfoByIP, nnc))
-	return errors.Wrap(err, "err in CNS reconciliation")
+	return errors.Wrap(err, "failed to reconcile NC state")
 }
 
 // InitializeCRDState builds and starts the CRD controllers.
@@ -945,6 +946,7 @@ func InitializeCRDState(ctx context.Context, httpRestService cns.HTTPService, cn
 			},
 		},
 	})
+
 	manager, err := ctrl.NewManager(kubeConfig, ctrl.Options{
 		Scheme:             nodenetworkconfig.Scheme,
 		MetricsBindAddress: cnsconfig.MetricsBindAddress,
954956if  err  !=  nil  {
955957return  errors .Wrap (err , "failed to create manager" )
956958}
959+ 
960+ clientset , err  :=  kubernetes .NewForConfig (kubeConfig )
961+ if  err  !=  nil  {
962+ return  errors .Wrap (err , "failed to build clientset" )
963+ }
964+ 
+	// get our Node so that we can xref it against the NodeNetworkConfig to make sure that the
+	// NNC is not stale and represents the Node we're running on.
+	node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+	if err != nil {
+		return errors.Wrapf(err, "failed to get node %s", nodeName)
+	}
+
 	reconciler := kubecontroller.NewReconciler(nnccli, httpRestServiceImplementation, httpRestServiceImplementation.IPAMPoolMonitor)
-	if err := reconciler.SetupWithManager(manager, nodeName); err != nil {
-		return err
+	// pass Node to the Reconciler for Controller xref
+	if err := reconciler.SetupWithManager(manager, node); err != nil {
+		return errors.Wrapf(err, "failed to setup reconciler with manager")
 	}
 
 	// Start the RequestController which starts the reconcile loop
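The new comment in the last hunk describes cross-referencing the fetched Node against the NodeNetworkConfig so that a stale NNC is not reconciled. As a minimal illustrative sketch only (not the actual `kubecontroller` reconciler, and assuming the NNC carries an OwnerReference to its Node), such a check could compare the Node's UID against the NNC's owner references; the helper `nncMatchesNode` below is hypothetical.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nncMatchesNode reports whether any OwnerReference on the NodeNetworkConfig
// points at the given Node's UID, i.e. the NNC was generated for this exact
// Node and is not a stale object left behind for a previous Node of the same name.
// (Hypothetical helper for illustration; the real reconciler may check differently.)
func nncMatchesNode(nncOwners []metav1.OwnerReference, node *corev1.Node) bool {
	for _, ref := range nncOwners {
		if ref.Kind == "Node" && ref.UID == node.UID {
			return true
		}
	}
	return false
}

func main() {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "aks-nodepool1-0", UID: "abc-123"}}
	owners := []metav1.OwnerReference{{Kind: "Node", Name: "aks-nodepool1-0", UID: "abc-123"}}
	fmt.Println(nncMatchesNode(owners, node)) // prints: true
}
```

Matching on UID rather than name guards against the case where a Node object was deleted and recreated under the same name, which is exactly when an NNC named after the Node can go stale.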