108 changes: 108 additions & 0 deletions test/integration/lrp/lrp_fqdn_test.go
@@ -0,0 +1,108 @@
//go:build lrp

package lrp

import (
"context"
"testing"

"github.com/Azure/azure-container-networking/test/internal/kubernetes"
ciliumClientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/stretchr/testify/require"
)

var (
fqdnCNPPath = ciliumManifestsDir + "fqdn-cnp.yaml"
enableFQDNFlag = "enable-l7-proxy"
)

// TestLRPFQDN tests whether the local redirect policy in a Cilium cluster functions alongside an
// FQDN Cilium Network Policy; for that to work, enable-l7-proxy must be set to true in the Cilium config.
// The test assumes the current kubeconfig points to a cluster with Cilium, CNS,
// and kube-dns already installed, and that the LRP feature flag is also enabled in the Cilium config.
// It does not check whether the cluster is in a stable state.
// Resources created are automatically cleaned up.
// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRPFQDN$
func TestLRPFQDN(t *testing.T) {
ctx := context.Background()

selectedPod, cleanupFn := setupLRP(t, ctx)
defer cleanupFn()
require.NotNil(t, selectedPod)

cs := kubernetes.MustGetClientset()
config := kubernetes.MustGetRestConfig()
ciliumCS, err := ciliumClientset.NewForConfig(config)
require.NoError(t, err)

// ensure the enable-l7-proxy flag is enabled in the Cilium config
ciliumCM, err := kubernetes.GetConfigmap(ctx, cs, kubeSystemNamespace, ciliumConfigmapName)
require.NoError(t, err)
require.Equal(t, "true", ciliumCM.Data[enableFQDNFlag], "enable-l7-proxy not set to true in cilium-config")

_, cleanupCNP := kubernetes.MustSetupCNP(ctx, ciliumCS, fqdnCNPPath)
defer cleanupCNP()

tests := []struct {
name string
command []string
expectedMsgContains string
expectedErrMsgContains string
shouldError bool
countIncreases bool
}{
{
name: "nslookup google succeeds",
command: []string{"nslookup", "www.google.com", "10.0.0.10"},
countIncreases: true,
shouldError: false,
},
{
name: "nslookup google succeeds without explicit dns server",
command: []string{"nslookup", "www.google.com"},
countIncreases: true,
shouldError: false,
},
{
name: "wget google succeeds",
command: []string{"wget", "-O", "index.html", "www.google.com", "--timeout=5"},
expectedErrMsgContains: "saved",
countIncreases: true,
shouldError: false,
},
{
name: "nslookup cloudflare succeeds",
command: []string{"nslookup", "www.cloudflare.com", "10.0.0.10"},
countIncreases: true,
shouldError: false,
},
{
name: "wget cloudflare fails but dns succeeds",
command: []string{"wget", "-O", "index.html", "www.cloudflare.com", "--timeout=5"},
expectedErrMsgContains: "timed out",
countIncreases: true,
shouldError: true,
},
{
name: "nslookup example fails",
command: []string{"nslookup", "www.example.com", "10.0.0.10"},
expectedMsgContains: "REFUSED",
countIncreases: false,
shouldError: true,
},
{
// won't be able to nslookup, let alone query the website
name: "wget example fails",
command: []string{"wget", "-O", "index.html", "www.example.com", "--timeout=5"},
expectedErrMsgContains: "bad address",
countIncreases: false,
shouldError: true,
},
}
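// Expected results mirror fqdn-cnp.yaml (added below): DNS queries are allowed only for
// *.google.com and *.cloudflare.com (anything else, e.g. www.example.com, is REFUSED),
// and toFQDNs permits egress only to *.google.com, so the cloudflare wget resolves but times out.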
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
testLRPCase(t, ctx, *selectedPod, tt.command, tt.expectedMsgContains, tt.expectedErrMsgContains, tt.shouldError, tt.countIncreases)
})
}
}
106 changes: 74 additions & 32 deletions test/integration/lrp/lrp_test.go
@@ -17,6 +17,7 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"golang.org/x/exp/rand"
corev1 "k8s.io/api/core/v1"
)

const (
@@ -46,15 +47,22 @@ var (
clientPath = ciliumManifestsDir + "client-ds.yaml"
)

// TestLRP tests whether the local redirect policy in a Cilium cluster is functioning.
// The test assumes the current kubeconfig points to a cluster with Cilium (1.16+), CNS,
// and kube-dns already installed, and that the LRP feature flag is enabled in the Cilium config.
// Resources created are automatically cleaned up.
// From the lrp folder, run: go test ./lrp_test.go -v -tags "lrp" -run ^TestLRP$
func TestLRP(t *testing.T) {
config := kubernetes.MustGetRestConfig()
ctx := context.Background()
func setupLRP(t *testing.T, ctx context.Context) (*corev1.Pod, func()) {
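// Teardown callbacks accumulate in cleanUpFns and run in LIFO order, mirroring defer
// semantics; if setup fails before success is set, the deferred block below unwinds them.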
var cleanUpFns []func()
success := false
cleanupFn := func() {
for len(cleanUpFns) > 0 {
cleanUpFns[len(cleanUpFns)-1]()
cleanUpFns = cleanUpFns[:len(cleanUpFns)-1]
}
}
defer func() {
if !success {
cleanupFn()
}
}()

config := kubernetes.MustGetRestConfig()
cs := kubernetes.MustGetClientset()

ciliumCS, err := ciliumClientset.NewForConfig(config)
@@ -90,14 +98,14 @@ func TestLRP(t *testing.T) {

// deploy node-local-dns prerequisites and pods
_, cleanupConfigMap := kubernetes.MustSetupConfigMap(ctx, cs, nodeLocalDNSConfigMapPath)
defer cleanupConfigMap()
cleanUpFns = append(cleanUpFns, cleanupConfigMap)
_, cleanupServiceAccount := kubernetes.MustSetupServiceAccount(ctx, cs, nodeLocalDNSServiceAccountPath)
defer cleanupServiceAccount()
cleanUpFns = append(cleanUpFns, cleanupServiceAccount)
_, cleanupService := kubernetes.MustSetupService(ctx, cs, nodeLocalDNSServicePath)
defer cleanupService()
cleanUpFns = append(cleanUpFns, cleanupService)
nodeLocalDNSDS, cleanupNodeLocalDNS := kubernetes.MustSetupDaemonset(ctx, cs, tempNodeLocalDNSDaemonsetPath)
defer cleanupNodeLocalDNS()
err = kubernetes.WaitForPodsRunning(ctx, cs, nodeLocalDNSDS.Namespace, nodeLocalDNSLabelSelector)
cleanUpFns = append(cleanUpFns, cleanupNodeLocalDNS)
kubernetes.WaitForPodDaemonset(ctx, cs, nodeLocalDNSDS.Namespace, nodeLocalDNSDS.Name, nodeLocalDNSLabelSelector)
require.NoError(t, err)
// select a local dns pod after they start running
pods, err := kubernetes.GetPodsByNode(ctx, cs, nodeLocalDNSDS.Namespace, nodeLocalDNSLabelSelector, selectedNode)
Expand All @@ -106,19 +114,19 @@ func TestLRP(t *testing.T) {

// deploy lrp
_, cleanupLRP := kubernetes.MustSetupLRP(ctx, ciliumCS, lrpPath)
defer cleanupLRP()
cleanUpFns = append(cleanUpFns, cleanupLRP)

// create client pods
clientDS, cleanupClient := kubernetes.MustSetupDaemonset(ctx, cs, clientPath)
defer cleanupClient()
err = kubernetes.WaitForPodsRunning(ctx, cs, clientDS.Namespace, clientLabelSelector)
cleanUpFns = append(cleanUpFns, cleanupClient)
kubernetes.WaitForPodDaemonset(ctx, cs, clientDS.Namespace, clientDS.Name, clientLabelSelector)
require.NoError(t, err)
// select a client pod after they start running
clientPods, err := kubernetes.GetPodsByNode(ctx, cs, clientDS.Namespace, clientLabelSelector, selectedNode)
require.NoError(t, err)
selectedClientPod := TakeOne(clientPods.Items).Name
selectedClientPod := TakeOne(clientPods.Items)

t.Logf("Selected node: %s, node local dns pod: %s, client pod: %s\n", selectedNode, selectedLocalDNSPod, selectedClientPod)
t.Logf("Selected node: %s, node local dns pod: %s, client pod: %s\n", selectedNode, selectedLocalDNSPod, selectedClientPod.Name)

// port forward to local dns pod on same node (separate thread)
pf, err := k8s.NewPortForwarder(config, k8s.PortForwardingOpts{
Expand All @@ -130,17 +138,27 @@ func TestLRP(t *testing.T) {
require.NoError(t, err)
pctx := context.Background()
portForwardCtx, cancel := context.WithTimeout(pctx, (retryAttempts+1)*retryDelay)
defer cancel()
cleanUpFns = append(cleanUpFns, cancel)

err = defaultRetrier.Do(portForwardCtx, func() error {
t.Logf("attempting port forward to a pod with label %s, in namespace %s...", nodeLocalDNSLabelSelector, nodeLocalDNSDS.Namespace)
return errors.Wrap(pf.Forward(portForwardCtx), "could not start port forward")
})
require.NoError(t, err, "could not start port forward within %d", (retryAttempts+1)*retryDelay)
defer pf.Stop()
cleanUpFns = append(cleanUpFns, pf.Stop)

t.Log("started port forward")

success = true
return &selectedClientPod, cleanupFn
}
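
setupLRP calls kubernetes.WaitForPodDaemonset, which replaces WaitForPodsRunning but is not part of this diff. A minimal sketch of what such a helper could look like, assuming it polls the DaemonSet until every scheduled pod is ready (the name and parameters come from the call sites; the body, and imports of metav1, wait, and client-go, are assumptions):

// Hypothetical sketch, not the repo's actual implementation: poll until the
// daemonset reports every desired pod ready, or the context times out.
func WaitForPodDaemonset(ctx context.Context, cs *kubernetes.Clientset, namespace, name, labelSelector string) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			ds, err := cs.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, nil // tolerate transient API errors and retry
			}
			// labelSelector is kept to mirror the call sites; a fuller version could
			// additionally list pods by label and check their readiness.
			return ds.Status.DesiredNumberScheduled > 0 &&
				ds.Status.NumberReady == ds.Status.DesiredNumberScheduled, nil
		})
}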

func testLRPCase(t *testing.T, ctx context.Context, clientPod corev1.Pod, clientCmd []string, expectResponse, expectErrMsg string,
shouldError, countShouldIncrease bool) {

config := kubernetes.MustGetRestConfig()
cs := kubernetes.MustGetClientset()

// labels for target lrp metric
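// (family "1" selects IPv4 requests in CoreDNS's request-count metric)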
metricLabels := map[string]string{
"family": "1",
@@ -153,24 +171,48 @@ func TestLRP(t *testing.T) {
beforeMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels)
require.NoError(t, err)

t.Log("calling nslookup from client")
// nslookup to 10.0.0.10 (coredns)
val, err := kubernetes.ExecCmdOnPod(ctx, cs, clientDS.Namespace, selectedClientPod, clientContainer, []string{
"nslookup", "google.com", "10.0.0.10",
}, config)
require.NoError(t, err, string(val))
// can connect
require.Contains(t, string(val), "Server:")
t.Log("calling command from client")

val, errMsg, err := kubernetes.ExecCmdOnPod(ctx, cs, clientPod.Namespace, clientPod.Name, clientContainer, clientCmd, config, false)
if shouldError {
require.Error(t, err, "stdout: %s, stderr: %s", string(val), string(errMsg))
} else {
require.NoError(t, err, "stdout: %s, stderr: %s", string(val), string(errMsg))
}

require.Contains(t, string(val), expectResponse)
require.Contains(t, string(errMsg), expectErrMsg)
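// require.Contains matches the empty string trivially, so callers passing "" skip these checks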

// give the metric time to propagate
time.Sleep(1 * time.Second)
time.Sleep(500 * time.Millisecond)

// curl again and see count increases
// query the metric again and check the count diff
afterMetric, err := prometheus.GetMetric(promAddress, coreDNSRequestCountTotal, metricLabels)
require.NoError(t, err)

// count should go up
require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after nslookup")
if countShouldIncrease {
require.Greater(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count did not increase after command")
} else {
require.Equal(t, afterMetric.GetCounter().GetValue(), beforeMetric.GetCounter().GetValue(), "dns metric count increased after command")
}
}

// TestLRP tests whether the local redirect policy in a Cilium cluster is functioning.
// The test assumes the current kubeconfig points to a cluster with Cilium (1.16+), CNS,
// and kube-dns already installed, and that the LRP feature flag is enabled in the Cilium config.
// It does not check whether the cluster is in a stable state.
// Resources created are automatically cleaned up.
// From the lrp folder, run: go test ./ -v -tags "lrp" -run ^TestLRP$
func TestLRP(t *testing.T) {
ctx := context.Background()

selectedPod, cleanupFn := setupLRP(t, ctx)
defer cleanupFn()
require.NotNil(t, selectedPod)

testLRPCase(t, ctx, *selectedPod, []string{
"nslookup", "google.com", "10.0.0.10",
}, "", "", false, true)
}

// TakeOne takes one item from the slice randomly; if empty, it returns the empty value for the type
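
TakeOne's body is collapsed in this view; a minimal generic implementation consistent with the doc comment (an illustrative sketch, not necessarily the repo's actual code) could be:

func TakeOne[T any](items []T) T {
	var empty T
	if len(items) == 0 {
		return empty
	}
	// rand comes from golang.org/x/exp/rand, imported above
	return items[rand.Intn(len(items))]
}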
24 changes: 24 additions & 0 deletions test/integration/manifests/cilium/lrp/fqdn-cnp.yaml
@@ -0,0 +1,24 @@
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
name: "to-fqdn"
namespace: "default"
spec:
endpointSelector:
matchLabels:
lrp-test: "true"
egress:
- toEndpoints:
- matchLabels:
"k8s:io.kubernetes.pod.namespace": kube-system
"k8s:k8s-app": node-local-dns
toPorts:
- ports:
- port: "53"
protocol: UDP
rules:
dns:
- matchPattern: "*.google.com"
- matchPattern: "*.cloudflare.com"
- toFQDNs:
- matchPattern: "*.google.com"
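
Note: the egress toEndpoints/toPorts stanza restricts DNS to the node-local-dns pods that the LRP redirects to, and the dns.matchPattern rules are enforced by Cilium's L7 DNS proxy, which is why the test requires enable-l7-proxy=true in cilium-config.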
2 changes: 1 addition & 1 deletion test/internal/datapath/datapath_win.go
@@ -19,7 +19,7 @@ var ipv6PrefixPolicy = []string{"powershell", "-c", "curl.exe", "-6", "-v", "www

func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error {
logrus.Infof("podTest() - %v %v", srcPod.Name, cmd)
output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc)
output, _, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc, true)
if err != nil {
return errors.Wrapf(err, "failed to execute command on pod: %v", srcPod.Name)
}
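
Both updated call sites (testLRPCase above and podTest here) expect an ExecCmdOnPod that returns stdout and stderr separately and takes a trailing bool; the helper itself is not shown in this diff. Below is a sketch of such a helper built on client-go's remotecommand package, treating the bool as a log-output toggle (that interpretation, and all names beyond the call sites, are assumptions):

// Hypothetical sketch, not the repo's actual helper.
package kubernetes

import (
	"bytes"
	"context"
	"log"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// ExecCmdOnPod runs cmd in the named container and returns stdout and stderr separately.
func ExecCmdOnPod(ctx context.Context, cs *kubernetes.Clientset, namespace, podName, container string,
	cmd []string, config *restclient.Config, logOutput bool) ([]byte, []byte, error) {
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").Name(podName).Namespace(namespace).SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Container: container,
			Command:   cmd,
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return nil, nil, err
	}
	var stdout, stderr bytes.Buffer
	err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})
	if logOutput {
		log.Printf("stdout: %s, stderr: %s", stdout.String(), stderr.String())
	}
	return stdout.Bytes(), stderr.Bytes(), err
}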