Skip to content

K3s

Since testcontainers-go v0.21.0

Introduction

The Testcontainers module for K3s.

Adding this module to your project dependencies

Please run the following command to add the K3s module to your Go dependencies:

go get github.com/testcontainers/testcontainers-go/modules/k3s

Usage example

package k3s_test

import (
    "context"
    "testing"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/modules/k3s"
)

// Test_LoadImages verifies that an image already pulled on the host can be
// loaded into a running K3s cluster and used by a pod with
// ImagePullPolicy: Never, and that loading a non-existing image fails.
func Test_LoadImages(t *testing.T) {
    ctx := context.Background()

    k3sContainer, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
    )
    if err != nil {
        t.Fatal(err)
    }

    // Clean up the container
    defer func() {
        if err := k3sContainer.Terminate(ctx); err != nil {
            t.Fatal(err)
        }
    }()

    kubeConfigYaml, err := k3sContainer.GetKubeConfig(ctx)
    if err != nil {
        t.Fatal(err)
    }

    restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml)
    if err != nil {
        t.Fatal(err)
    }

    k8s, err := kubernetes.NewForConfig(restcfg)
    if err != nil {
        t.Fatal(err)
    }

    provider, err := testcontainers.ProviderDocker.GetProvider()
    if err != nil {
        t.Fatal(err)
    }

    // ensure nginx image is available locally before loading it into the cluster
    if err := provider.PullImage(ctx, "nginx"); err != nil {
        t.Fatal(err)
    }

    t.Run("Test load image not available", func(t *testing.T) {
        // Loading an image that is not present on the host must fail.
        err := k3sContainer.LoadImages(ctx, "fake.registry/fake:non-existing")
        if err == nil {
            t.Fatal("should have failed")
        }
    })

    t.Run("Test load image in cluster", func(t *testing.T) {
        if err := k3sContainer.LoadImages(ctx, "nginx"); err != nil {
            t.Fatal(err)
        }

        pod := &corev1.Pod{
            TypeMeta: metav1.TypeMeta{
                Kind:       "Pod",
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: "test-pod",
            },
            Spec: corev1.PodSpec{
                Containers: []corev1.Container{
                    {
                        Name:            "nginx",
                        Image:           "nginx",
                        ImagePullPolicy: corev1.PullNever, // use image only if already present
                    },
                },
            },
        }

        _, err = k8s.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
        if err != nil {
            t.Fatal(err)
        }

        // give the kubelet a moment to attempt starting the container
        time.Sleep(1 * time.Second)
        pod, err = k8s.CoreV1().Pods("default").Get(ctx, "test-pod", metav1.GetOptions{})
        if err != nil {
            t.Fatal(err)
        }
        // Guard against an empty status list before indexing it.
        if len(pod.Status.ContainerStatuses) == 0 {
            t.Fatal("no container statuses reported for test-pod")
        }
        waiting := pod.Status.ContainerStatuses[0].State.Waiting
        if waiting != nil && waiting.Reason == "ErrImageNeverPull" {
            t.Fatal("Image was not loaded")
        }
    })
}

// Test_APIServerReady verifies that the K3s API server accepts requests as
// soon as the container is started, by creating a pod through the Kubernetes
// client built from the container's kubeconfig.
func Test_APIServerReady(t *testing.T) {
    ctx := context.Background()

    k3sContainer, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
    )
    if err != nil {
        t.Fatal(err)
    }

    // Clean up the container
    defer func() {
        if err := k3sContainer.Terminate(ctx); err != nil {
            t.Fatal(err)
        }
    }()

    kubeConfigYaml, err := k3sContainer.GetKubeConfig(ctx)
    if err != nil {
        t.Fatal(err)
    }

    restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml)
    if err != nil {
        t.Fatal(err)
    }

    k8s, err := kubernetes.NewForConfig(restcfg)
    if err != nil {
        t.Fatal(err)
    }

    pod := &corev1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-pod",
        },
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{
                {
                    Name:  "nginx",
                    Image: "nginx",
                },
            },
        },
    }

    // A successful Create proves the API server is reachable and ready.
    _, err = k8s.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
    if err != nil {
        t.Fatalf("failed to create pod %v", err)
    }
}

Module reference

The K3s module exposes one entrypoint function to create the K3s container, and this function receives two parameters:

func RunContainer(ctx context.Context, opts ...testcontainers.ContainerCustomizer) (*K3sContainer, error)
  • context.Context, the Go context.
  • testcontainers.ContainerCustomizer, a variadic argument for passing options.

Container Ports

These are the ports used by the K3s container:

defaultKubeSecurePort     = "6443/tcp"
defaultRancherWebhookPort = "8443/tcp"

Container Options

When starting the K3s container, you can pass options in a variadic way to configure it.

Image

If you need to set a different K3s Docker image, you can use testcontainers.WithImage with a valid Docker image for K3s. E.g. testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1").

Image Substitutions

In more locked down / secured environments, it can be problematic to pull images from Docker Hub and run them without additional precautions.

An image name substitutor converts a Docker image name, as may be specified in code, to an alternative name. This is intended to provide a way to override image names, for example to enforce pulling of images from a private registry.

Testcontainers for Go exposes an interface to perform these operations: ImageSubstitutor, and a no-operation implementation to be used as a reference for custom implementations:

// ImageSubstitutor represents a way to substitute container image names
type ImageSubstitutor interface {
    // Description returns the name of the type and a short description of how it modifies the image.
    // Useful to be printed in logs
    Description() string
    // Substitute takes an image name and returns the name to use in its place,
    // or an error if the substitution cannot be performed.
    Substitute(image string) (string, error)
}
// NoopImageSubstitutor is a no-operation substitutor: it leaves every image
// name untouched. It serves as a reference for custom implementations.
type NoopImageSubstitutor struct{}

// Description identifies this substitutor in log output.
func (n NoopImageSubstitutor) Description() string {
    return "NoopImageSubstitutor (noop)"
}

// Substitute returns the given image name unchanged and never fails.
func (n NoopImageSubstitutor) Substitute(image string) (string, error) {
    return image, nil
}

Using the WithImageSubstitutors options, you could define your own substitutions to the container images. E.g. adding a prefix to the images so that they can be pulled from a Docker registry other than Docker Hub. This is the usual mechanism for using Docker image proxies, caches, etc.

Wait Strategies

If you need to set a different wait strategy for the container, you can use testcontainers.WithWaitStrategy with a valid wait strategy.

Info

The default deadline for the wait strategy is 60 seconds.

At the same time, it's possible to set a wait strategy and a custom deadline with testcontainers.WithWaitStrategyAndDeadline.

Startup Commands

Testcontainers exposes the WithStartupCommand(e ...Executable) option to run arbitrary commands in the container right after it's started.

Info

To better understand how this feature works, please read the Create containers: Lifecycle Hooks documentation.

It also exports an Executable interface, defining one single method: AsCommand(), which returns a slice of strings to represent the command and positional arguments to be executed in the container.

You could use this feature to run a custom script, or to run a command that is not supported by the module right after the container is started.

Docker type modifiers

If you need an advanced configuration for the container, you can leverage the following Docker type modifiers:

  • testcontainers.WithConfigModifier
  • testcontainers.WithHostConfigModifier
  • testcontainers.WithEndpointSettingsModifier

Please read the Create containers: Advanced Settings documentation for more information.

Container Methods

The K3s container exposes the following methods:

GetKubeConfig

The GetKubeConfig method returns the K3s cluster's kubeconfig, including the server URL, to be used for connecting to the Kubernetes Rest Client API using a Kubernetes client. It is returned as a []byte.

package k3s_test

import (
    "context"
    "fmt"

    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/modules/k3s"
)

// ExampleRunContainer starts a K3s container, builds a Kubernetes client from
// its kubeconfig, and lists the cluster nodes.
func ExampleRunContainer() {
    // runK3sContainer {
    ctx := context.Background()

    k3sContainer, err := k3s.RunContainer(ctx,
        testcontainers.WithImage("docker.io/rancher/k3s:v1.27.1-k3s1"),
    )
    if err != nil {
        panic(err)
    }

    // Clean up the container
    defer func() {
        if err := k3sContainer.Terminate(ctx); err != nil {
            panic(err)
        }
    }()
    // }

    // Confirm the container is running before talking to the API server.
    state, err := k3sContainer.State(ctx)
    if err != nil {
        panic(err)
    }

    fmt.Println(state.Running)

    kubeConfig, err := k3sContainer.GetKubeConfig(ctx)
    if err != nil {
        panic(err)
    }

    restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig)
    if err != nil {
        panic(err)
    }

    clientset, err := kubernetes.NewForConfig(restConfig)
    if err != nil {
        panic(err)
    }

    // A single-node cluster is expected.
    nodeList, err := clientset.CoreV1().Nodes().List(ctx, v1.ListOptions{})
    if err != nil {
        panic(err)
    }

    fmt.Println(len(nodeList.Items))

    // Output:
    // true
    // 1
}

LoadImages

The LoadImages method loads a list of images into the kubernetes cluster and makes them available to pods.

This is useful for testing images generated locally without having to push them to a public docker registry or having to configure k3s to use a private registry.

The images must already be present on the node running the test. DockerProvider offers a method for pulling images, which can be used from the test code to ensure the image is present locally before loading them into the cluster.