测试使用 operator-sdk 搭建的操作符时,Scheme 为零

问题描述

我正在研究一个操作符的早期迭代,我使用 operator-sdk 搭建了它。我已尽力遵循 Operator SDK Golang TutorialKubebuilder book 中的示例。我发现我可以将我的操作员部署和运行到本地集群,但我无法运行测试套件。我的测试总是产生一个 panic: runtime error: invalid memory address or nil pointer dereference,我已经追踪到 Scheme 始终为零的事实。但到目前为止,我一直无法弄清楚为什么会这样。

理论上,我可以跳过测试,只测试本地集群中的操作符,但从长远来看,这将非常脆弱。我希望能够进行 TDD,更重要的是,我希望有一个测试套件与操作员一起使用,以帮助在维护模式下保持质量。

这是我的 suite_test.go,我从脚手架版本中对其进行了尽可能少的修改(我所做的更改来自 Kubebuilder Book):

package controllers

import (
    "path/filepath"
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    mybatch "mycorp.com/mybatch-operator/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment

func TestAPIs(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecsWithDefaultAndCustomreporters(t,"Controller Suite",[]Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func(done Done) {
    logf.SetLogger(zap.New(zap.Writeto(GinkgoWriter),zap.UseDevMode(true)))

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths: []string{filepath.Join("..","config","crd","bases")},}

    cfg,err := testEnv.Start()
    Expect(err).Notto(HaveOccurred())
    Expect(cfg).Notto(BeNil())

    err = mybatch.AddToScheme(scheme.Scheme)
    Expect(err).Notto(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sManager,err := ctrl.NewManager(cfg,ctrl.Options{
        Scheme: scheme.Scheme,})
    Expect(err).ToNot(HaveOccurred())

    err = (&MyBatchReconciler{
        Client: k8sManager.GetClient(),Log:    ctrl.Log.WithName("controllers").WithName("MyBatch"),}).SetupWithManager(k8sManager)
    Expect(err).ToNot(HaveOccurred())

    go func() {
        err = k8sManager.Start(ctrl.SetupSignalHandler())
        Expect(err).ToNot(HaveOccurred())
    }()

    k8sClient = k8sManager.GetClient()
    Expect(k8sClient).ToNot(BeNil())

    close(done)
},60)

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    err := testEnv.Stop()
    Expect(err).Notto(HaveOccurred())
})

这是导致它失败的测试块。我有第二个 Describe 块(此处未显示),用于测试 Reconcile 函数之外的一些业务逻辑,并且工作正常。

package controllers

import (
    "context"
    "time"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    Metav1 "k8s.io/apimachinery/pkg/apis/Meta/v1"
    "k8s.io/apimachinery/pkg/types"

    "github.com/jarcoal/httpmock"
    mybatch "mycorp.com/mybatch-operator/api/v1alpha1"
)

var _ = Describe("BatchController",func() {
    Describe("Reconcile",func() {
        // Define utility constants for object names and testing timeouts/durations and intervals.
        const (
            BatchName      = "test-batch"
            BatchNamespace = "default"
            BatchImage     = "mycorp/mockserver:latest"

            timeout  = time.Second * 10
            duration = time.Second * 10
            interval = time.Millisecond * 250
        )

        Context("When deploying MyBatch",func() {
            It("Should create a new Batch instance",func() {
                ctx := context.Background()

                // Define stub Batch
                testCR := &mybatch.MyBatch{
                    TypeMeta: Metav1.TypeMeta{
                        APIVersion: "mybatch.mycorp.com/v1alpha1",Kind:       "MyBatch",},ObjectMeta: Metav1.ObjectMeta{
                        Name:      BatchName,Namespace: BatchNamespace,Spec: mybatch.MyBatchSpec{
                        Replicas: 1,StatusCheck: mybatch.StatusCheck{
                            Url:         "http://mycorp.com",Endpoint:    "/rest/jobs/jobexecutions/active",PollSeconds: 20,Image: BatchImage,PodSpec: corev1.PodSpec{
                            // For simplicity,we only fill out the required fields.
                            Containers: []corev1.Container{
                                {
                                    Name:  "test-container",RestartPolicy: corev1.RestartPolicyAlways,}

                Expect(k8sClient.Create(ctx,testCR)).Should(Succeed())

                lookupKey := types.Namespacedname{Name: BatchName,Namespace: BatchNamespace}
                createdBatch := &mybatch.MyBatch{}

                // We'll need to retry getting this newly created Batch,given that creation may not immediately happen.
                Eventually(func() bool {
                    err := k8sClient.Get(ctx,lookupKey,createdBatch)
                    if err != nil {
                        return false
                    }
                    return true
                },timeout,interval).Should(BeTrue())
                // Check the container name
                Expect(createdBatch.Spec.PodSpec.Containers[0].Name).Should(Equal(BatchName))
            })
        })
    })
})

在这里遗漏了什么导致 Scheme 无法正确初始化?我不得不承认,我不太了解 Scheme 周围的大部分代码。如果有帮助,我很乐意展示额外的代码

解决方法

暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)