diff --git a/test/benchmarks/tools/parser_util.go b/test/benchmarks/tools/parser_util.go index dac7aa5a44..22bffd3150 100644 --- a/test/benchmarks/tools/parser_util.go +++ b/test/benchmarks/tools/parser_util.go @@ -48,26 +48,80 @@ func ParametersToName(params ...Parameter) (string, error) { } // NameToParameters parses the string created by ParametersToName and returns -// it as a set of Parameters. -// Example: BenchmarkRuby/server_threads.1/doc_size.16KB-6 -// The parameter part of this benchmark is: -// "server_threads.1/doc_size.16KB" (BenchmarkRuby is the name, and 6 is GOMAXPROCS) -// This function will return a slice with two parameters -> -// {Name: server_threads, Value: 1}, {Name: doc_size, Value: 16KB} -func NameToParameters(name string) ([]*Parameter, error) { +// the name components and parameters contained within. +// The separator between a parameter's name and its value may be either '.' or '='. +// +// Example: "BenchmarkRuby/SubTest/LevelTwo/server_threads.1/doc_size.16KB-6" +// The parameter part of this benchmark is "server_threads.1/doc_size.16KB", +// whereas "BenchmarkRuby/SubTest/LevelTwo" is the name, and the "-6" suffix is +// GOMAXPROCS (optional, may be omitted). +// This function will return a slice of the name components of the benchmark: +// +// [ +// "BenchmarkRuby", +// "SubTest", +// "LevelTwo", +// ] +// +// and a slice of the parameters: +// +// [ +// {Name: "server_threads", Value: "1"}, +// {Name: "doc_size", Value: "16KB"}, +// {Name: "GOMAXPROCS", Value: "6"}, +// ] +// +// (and a nil error). +func NameToParameters(name string) ([]string, []*Parameter, error) { var params []*Parameter - for _, cond := range strings.Split(name, "/") { - cs := strings.Split(cond, ".") + var separator string + switch { + case strings.IndexRune(name, '.') != -1 && strings.IndexRune(name, '=') != -1: + return nil, nil, fmt.Errorf("ambiguity while parsing parameters from benchmark name %q: multiple types of parameter separators are present", name) + case strings.IndexRune(name, '.') != -1: + separator = "." + case strings.IndexRune(name, '=') != -1: + separator = "=" + default: + // No separator; use '=' which we know is not present in the name, + // but we still need to process the name (even if unparameterized) in + // order to possibly extract GOMAXPROCS. + separator = "=" + } + var nameComponents []string + var firstParameterCond string + var goMaxProcs *Parameter + split := strings.Split(name, "/") + for i, cond := range split { + if isLast := i == len(split)-1; isLast { + // If the last component contains a dash, the suffix after the final dash is the GOMAXPROCS value.
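+ // For example, in "doc_size.16KB-6" (from the example above), the parameter is "doc_size.16KB" and GOMAXPROCS is "6".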
+ if dashSplit := strings.Split(cond, "-"); len(dashSplit) >= 2 { + goMaxProcs = &Parameter{Name: "GOMAXPROCS", Value: dashSplit[len(dashSplit)-1]} + cond = strings.Join(dashSplit[:len(dashSplit)-1], "-") + } + } + cs := strings.Split(cond, separator) switch len(cs) { case 1: - params = append(params, &Parameter{Name: cond, Value: cond}) + if firstParameterCond != "" { + return nil, nil, fmt.Errorf("failed to parse params from %q: a non-parametrized component %q was found after a parametrized one %q", name, cond, firstParameterCond) + } + nameComponents = append(nameComponents, cond) case 2: + if firstParameterCond == "" { + firstParameterCond = cond + } params = append(params, &Parameter{Name: cs[0], Value: cs[1]}) default: - return nil, fmt.Errorf("failed to parse param: %s", cond) + return nil, nil, fmt.Errorf("failed to parse params from %q: %s", name, cond) } } - return params, nil + if goMaxProcs != nil { + // GOMAXPROCS should always be last in order to match the ordering of the + // benchmark name. + params = append(params, goMaxProcs) + } + return nameComponents, params, nil } // ReportCustomMetric reports a metric in a set format for parsing. @@ -93,9 +147,52 @@ func ParseCustomMetric(value, metric string) (*Metric, error) { if err != nil { return nil, fmt.Errorf("failed to parse value: %v", err) } - nameUnit := strings.Split(metric, ".") - if len(nameUnit) != 2 { - return nil, fmt.Errorf("failed to parse metric: %s", metric) + separators := []rune{'-', '.'} + var separator string + for _, sep := range separators { + if strings.ContainsRune(metric, sep) { + if separator != "" { + return nil, fmt.Errorf("failed to parse metric: ambiguous unit separator: %q (is the separator %q or %q?)", metric, separator, string(sep)) + } + separator = string(sep) + } + } + var name, unit string + switch separator { + case "": + unit = metric + default: + components := strings.Split(metric, separator) + name, unit = strings.Join(components[:len(components)-1], ""), components[len(components)-1] + } + // Normalize some unit names to benchstat defaults. + switch unit { + case "": + return nil, fmt.Errorf("failed to parse metric %q: no unit specified", metric) + case "s": + unit = "sec" + case "nanos": + unit = "ns" + case "byte": + unit = "B" + case "bit": + unit = "b" + default: + // Otherwise, leave unit as-is. + } + // If the metric name is unspecified, it can sometimes be inferred from + // the unit. 
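+ // For example, a metric given as just "s" is normalized to unit "sec" above, and the name "duration" is then inferred below.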
+ if name == "" { + switch unit { + case "sec": + name = "duration" + case "req/sec", "tok/sec": + name = "throughput" + case "B/sec": + name = "bandwidth" + default: + return nil, fmt.Errorf("failed to parse metric %q: ambiguous metric name, please format the unit as 'name.unit' or 'name-unit'", metric) + } } - return &Metric{Name: nameUnit[0], Unit: nameUnit[1], Sample: sample}, nil + return &Metric{Name: name, Unit: unit, Sample: sample}, nil } diff --git a/test/kubernetes/benchmarks/BUILD b/test/kubernetes/benchmarks/BUILD index e77e535b72..5d2fd2ea3b 100644 --- a/test/kubernetes/benchmarks/BUILD +++ b/test/kubernetes/benchmarks/BUILD @@ -58,6 +58,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -88,6 +89,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -120,6 +122,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -152,6 +155,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -184,6 +188,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -217,6 +222,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -248,6 +254,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -280,6 +287,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -312,6 +320,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -343,6 +352,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -382,6 +392,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -415,6 +426,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -446,6 +458,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) @@ -478,6 +491,7 @@ go_test( ], deps = [ "//test/kubernetes/k8sctx", + "//test/kubernetes/k8sctx/kubectlctx", "//test/kubernetes/testcluster", ], ) diff --git a/test/kubernetes/benchmarks/abslbuild_test.go b/test/kubernetes/benchmarks/abslbuild_test.go index 7e69da6f63..2095789b00 100644 --- a/test/kubernetes/benchmarks/abslbuild_test.go +++ b/test/kubernetes/benchmarks/abslbuild_test.go @@ -19,26 +19,21 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) // TestABSLBuild benchmarks building various Abseil C++ targets. 
func TestABSLBuild(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("ABSL", func(t *testing.T) { t.Parallel() BuildABSL(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestABSLBuild": TestABSLBuild, - }) -} diff --git a/test/kubernetes/benchmarks/ffmpeg_test.go b/test/kubernetes/benchmarks/ffmpeg_test.go index bc6e295dc8..ec00cb10e1 100644 --- a/test/kubernetes/benchmarks/ffmpeg_test.go +++ b/test/kubernetes/benchmarks/ffmpeg_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestFfmpeg(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("ffmpeg", func(t *testing.T) { t.Parallel() RunFFMPEG(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestFfmpeg": TestFfmpeg, - }) -} diff --git a/test/kubernetes/benchmarks/grpc_test.go b/test/kubernetes/benchmarks/grpc_test.go index 85b82dd98f..023ca33d87 100644 --- a/test/kubernetes/benchmarks/grpc_test.go +++ b/test/kubernetes/benchmarks/grpc_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestGRPCBuild(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("gRPC", func(t *testing.T) { t.Parallel() BuildGRPC(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestGRPCBuild": TestGRPCBuild, - }) -} diff --git a/test/kubernetes/benchmarks/gsutil_test.go b/test/kubernetes/benchmarks/gsutil_test.go index fe93a6a54c..ba687dcd61 100644 --- a/test/kubernetes/benchmarks/gsutil_test.go +++ b/test/kubernetes/benchmarks/gsutil_test.go @@ -21,25 +21,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestGSUtil(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("GSUtil", func(t *testing.T) { t.Parallel() RunGSUtil(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestGSUtil": TestGSUtil, - }) -} diff 
--git a/test/kubernetes/benchmarks/nginx_test.go b/test/kubernetes/benchmarks/nginx_test.go index 2d15bd6309..28c9d56c32 100644 --- a/test/kubernetes/benchmarks/nginx_test.go +++ b/test/kubernetes/benchmarks/nginx_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestNginx(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("nginx", func(t *testing.T) { t.Parallel() BenchmarkNginx(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestNginx": TestNginx, - }) -} diff --git a/test/kubernetes/benchmarks/ollama_test.go b/test/kubernetes/benchmarks/ollama_test.go index 0dc2d2f623..8dd7205c11 100644 --- a/test/kubernetes/benchmarks/ollama_test.go +++ b/test/kubernetes/benchmarks/ollama_test.go @@ -22,6 +22,7 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) @@ -29,20 +30,14 @@ func TestOllama(t *testing.T) { fmt.Fprint(os.Stderr, "HEADS UP: This test uses a huge container image which may take up to 30 minutes to download onto nodes the first time you run it.\n") ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("Ollama", func(t *testing.T) { t.Parallel() BenchmarkOllama(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestOllama": TestOllama, - }) -} diff --git a/test/kubernetes/benchmarks/postgresql_test.go b/test/kubernetes/benchmarks/postgresql_test.go index 0c32aa56c6..4062eca4e9 100644 --- a/test/kubernetes/benchmarks/postgresql_test.go +++ b/test/kubernetes/benchmarks/postgresql_test.go @@ -20,26 +20,21 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) // TestPostgresPGBench benchmarks a PostgreSQL database with pgbench. 
func TestPostgresPGBench(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("PostgresPGBench", func(t *testing.T) { t.Parallel() BenchmarkPostgresPGBench(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestPostgresPGBench": TestPostgresPGBench, - }) -} diff --git a/test/kubernetes/benchmarks/pytorch_test.go b/test/kubernetes/benchmarks/pytorch_test.go index d1787aba7b..1b1f37513e 100644 --- a/test/kubernetes/benchmarks/pytorch_test.go +++ b/test/kubernetes/benchmarks/pytorch_test.go @@ -19,6 +19,7 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) @@ -48,24 +49,14 @@ func TestMobileNetV2(t *testing.T) { } func runTests(ctx context.Context, t *testing.T, tests []pytorchTest) { - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("PyTorch", func(t *testing.T) { t.Parallel() RunPytorch(ctx, t, k8sCtx, cluster, tests) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestFastNLPBert": TestFastNLPBert, - "TestBigBird": TestBigBird, - "TestSpeechTransformer": TestSpeechTransformer, - "TestLearningToPaint": TestLearningToPaint, - "TestMobileNetV2": TestMobileNetV2, - }) -} diff --git a/test/kubernetes/benchmarks/redis_test.go b/test/kubernetes/benchmarks/redis_test.go index d5062a26b8..9561b3459f 100644 --- a/test/kubernetes/benchmarks/redis_test.go +++ b/test/kubernetes/benchmarks/redis_test.go @@ -19,26 +19,21 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) // TestRedis benchmarks redis servers on k8s clusters. func TestRedis(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("Redis", func(t *testing.T) { t.Parallel() BenchmarkRedis(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestRedis": TestRedis, - }) -} diff --git a/test/kubernetes/benchmarks/rubydev_test.go b/test/kubernetes/benchmarks/rubydev_test.go index 60b6bfd3e9..4c8ed0a011 100644 --- a/test/kubernetes/benchmarks/rubydev_test.go +++ b/test/kubernetes/benchmarks/rubydev_test.go @@ -19,26 +19,21 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) // TestRubyDev benchmarks a build job on k8s clusters. 
func TestRubyDev(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("RubyDev", func(t *testing.T) { t.Parallel() RunRubyDev(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestRubyDev": TestRubyDev, - }) -} diff --git a/test/kubernetes/benchmarks/stablediffusion_test.go b/test/kubernetes/benchmarks/stablediffusion_test.go index 0c082b9da4..625da4a132 100644 --- a/test/kubernetes/benchmarks/stablediffusion_test.go +++ b/test/kubernetes/benchmarks/stablediffusion_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestStableDiffusionXL(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("stable_diffusion_xl", func(t *testing.T) { t.Parallel() RunStableDiffusionXL(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestStableDiffusionXL": TestStableDiffusionXL, - }) -} diff --git a/test/kubernetes/benchmarks/startup_test.go b/test/kubernetes/benchmarks/startup_test.go index 421bb4e3a6..f0c378d261 100644 --- a/test/kubernetes/benchmarks/startup_test.go +++ b/test/kubernetes/benchmarks/startup_test.go @@ -19,16 +19,17 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestStartup(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run(benchName, func(t *testing.T) { cluster := cluster t.Parallel() @@ -36,9 +37,3 @@ func TestStartup(t *testing.T) { }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestStartup": TestStartup, - }) -} diff --git a/test/kubernetes/benchmarks/tensorflow_test.go b/test/kubernetes/benchmarks/tensorflow_test.go index 240c10458a..80e2d21f79 100644 --- a/test/kubernetes/benchmarks/tensorflow_test.go +++ b/test/kubernetes/benchmarks/tensorflow_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestTensorflowOnCPU(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("TensorflowOnCPU", func(t *testing.T) { t.Parallel() 
RunTensorflowOnCPU(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestTensorflowOnCPU": TestTensorflowOnCPU, - }) -} diff --git a/test/kubernetes/benchmarks/wordpress_test.go b/test/kubernetes/benchmarks/wordpress_test.go index 93c2d9ca01..7338fb6744 100644 --- a/test/kubernetes/benchmarks/wordpress_test.go +++ b/test/kubernetes/benchmarks/wordpress_test.go @@ -19,25 +19,20 @@ import ( "testing" "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) func TestWordpress(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - k8sCtx.ForEachCluster(ctx, t, func(cluster *testcluster.TestCluster) { + k8sctx.ForEachCluster(ctx, t, k8sCtx, func(cluster *testcluster.TestCluster) { t.Run("wordpress", func(t *testing.T) { t.Parallel() BenchmarkWordpress(ctx, t, k8sCtx, cluster) }) }) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestWordpress": TestWordpress, - }) -} diff --git a/test/kubernetes/benchmetric/benchmetric.go b/test/kubernetes/benchmetric/benchmetric.go index 3f66414423..2281b5dccc 100644 --- a/test/kubernetes/benchmetric/benchmetric.go +++ b/test/kubernetes/benchmetric/benchmetric.go @@ -148,7 +148,7 @@ func Count(numberOfTimes uint64, thingBeingCounted string) MetricValue { if !strings.HasSuffix(thingBeingCounted, "s") { panic("`thingBeingCounted` must be plural") } - return value(float64(numberOfTimes), thingBeingCounted) + return value(float64(numberOfTimes), fmt.Sprintf("%s-num", thingBeingCounted)) } // Checksum is a MetricValue for a checksum that is not expected to change diff --git a/test/kubernetes/k8sctx/BUILD b/test/kubernetes/k8sctx/BUILD index ce6180326f..66ea639b6a 100644 --- a/test/kubernetes/k8sctx/BUILD +++ b/test/kubernetes/k8sctx/BUILD @@ -7,20 +7,14 @@ package( go_library( name = "k8sctx", - testonly = True, srcs = [ "k8sctx.go", - "k8sctx_impl.go", ], nogo = False, visibility = [ "//visibility:public", ], deps = [ - "//runsc/flag", "//test/kubernetes/testcluster", - "//tools/gvisor_k8s_tool/provider/kubectl", - "@org_golang_google_protobuf//encoding/prototext:go_default_library", - "@org_golang_google_protobuf//types/known/anypb:go_default_library", ], ) diff --git a/test/kubernetes/k8sctx/k8sctx.go b/test/kubernetes/k8sctx/k8sctx.go index 554bd705c7..338a5b59f1 100644 --- a/test/kubernetes/k8sctx/k8sctx.go +++ b/test/kubernetes/k8sctx/k8sctx.go @@ -21,9 +21,6 @@ package k8sctx import ( "context" - "errors" - "fmt" - "sync" "testing" "gvisor.dev/gvisor/test/kubernetes/testcluster" ) @@ -34,29 +31,11 @@ -// Tests are expected to call `RegisterTest` for every of their test function, -// then `TestMain`. +// Tests are expected to use the package-level `ForEachCluster` helper to +// iterate over the clusters available to them. type KubernetesContext interface { - // TestMain should be called inside tests' `TestMain` function, after having - // registered all tests with `RegisterTest`. - TestMain(m *testing.M) - - // RegisterTest registers a test. - // It should be called for every `Test*(*testing.T)` function in the test. - // Note that the `k8sctx.TestMain` helper function below will call this for - // you given a map of tests. - RegisterTest(name string, fn TestFunc) - - // AcquireCluster returns a single cluster for the test or benchmark to use. + // Cluster returns a single cluster for the test or benchmark to use.
// The cluster is guaranteed to not be in use by other tests or benchmarks - // until the `ReleaseCluster` method is called. - // This method should block if there are no available clusters. - AcquireCluster(ctx context.Context, t *testing.T) *testcluster.TestCluster - - // ReleaseCluster unlocks the given cluster for use by other tests or - // benchmarks. - ReleaseCluster(ctx context.Context, t *testing.T, cluster *testcluster.TestCluster) - - // ForEachCluster reserves as many test clusters as are available, calls - // `fn` on each of them, and releases each of them when `fn` finishes. - ForEachCluster(ctx context.Context, t *testing.T, fn func(cluster *testcluster.TestCluster)) + // until the returned function is called. + // If there are no available clusters, it returns a nil TestCluster. + Cluster(ctx context.Context, t *testing.T) (*testcluster.TestCluster, func()) // ResolveImage resolves a container image name (possibly with a label) // to a fully-qualified image name. It can also return an `image:label` @@ -65,47 +44,54 @@ type KubernetesContext interface { ResolveImage(ctx context.Context, imageName string) (string, error) } -// TestFunc is a test function that is expected to call `Context` and run a -// test or benchmark within a Kubernetes context. -type TestFunc func(t *testing.T) +// ForEachCluster calls the given function for each available cluster +// sequentially. +// In order to run per-cluster subtests in parallel, call `t.Run` inside +// `fn` and then `t.Parallel` inside that. +func ForEachCluster(ctx context.Context, t *testing.T, k8sCtx KubernetesContext, fn func(cluster *testcluster.TestCluster)) { + var clusterFns []func() + for { + cluster, releaseFn := k8sCtx.Cluster(ctx, t) + if cluster == nil { + break + } + clusterFns = append(clusterFns, func() { + defer releaseFn() + fn(cluster) + }) + } + for _, clusterFn := range clusterFns { + clusterFn() + } +} -var ( - kubernetesCtxMu sync.Mutex - kubernetesCtxOnce sync.Once - kubernetesCtxFn func(context.Context) (KubernetesContext, error) - kubernetesCtx KubernetesContext - kubernetesCtxErr error -) +// clusters implements KubernetesContext using a set of clusters. +type clusters struct { + clustersCh chan *testcluster.TestCluster +} -// Context gets the global Kubernetes context. -// It must be called after SetContext has already been called. -func Context(ctx context.Context) (KubernetesContext, error) { - kubernetesCtxMu.Lock() - defer kubernetesCtxMu.Unlock() - if kubernetesCtxFn == nil { - return nil, errors.New("k8sctx.Context called prior to k8sctx.SetContextConstructor") +// Cluster implements KubernetesContext.Cluster. +func (cs *clusters) Cluster(ctx context.Context, t *testing.T) (*testcluster.TestCluster, func()) { + select { + case cluster := <-cs.clustersCh: + return cluster, func() { + cs.clustersCh <- cluster + } + default: + return nil, func() {} } - kubernetesCtxOnce.Do(func() { - kubernetesCtx, kubernetesCtxErr = kubernetesCtxFn(ctx) - }) - return kubernetesCtx, kubernetesCtxErr } -// SetContextConstructor sets the global Kubernetes context constructor. -func SetContextConstructor(fn func(context.Context) (KubernetesContext, error)) { - kubernetesCtxMu.Lock() - defer kubernetesCtxMu.Unlock() - kubernetesCtxFn = fn +// ResolveImage implements KubernetesContext.ResolveImage. +func (*clusters) ResolveImage(ctx context.Context, imageName string) (string, error) { + return imageName, nil } -// TestMain is a helper to write the TestMain function of tests. 
-func TestMain(m *testing.M, testFuncs map[string]TestFunc) { - k8sCtx, err := Context(context.Background()) - if err != nil { - panic(fmt.Sprintf("failed to get k8sctx: %v", err)) - } - for name, fn := range testFuncs { - k8sCtx.RegisterTest(name, fn) +// New creates a KubernetesContext backed by the given set of test clusters. +func New(testClusters ...*testcluster.TestCluster) KubernetesContext { + clustersCh := make(chan *testcluster.TestCluster, len(testClusters)) + for _, cluster := range testClusters { + clustersCh <- cluster } - k8sCtx.TestMain(m) + return &clusters{clustersCh: clustersCh} } diff --git a/test/kubernetes/k8sctx/k8sctx_impl.go b/test/kubernetes/k8sctx/k8sctx_impl.go deleted file mode 100644 index ed67d9308a..0000000000 --- a/test/kubernetes/k8sctx/k8sctx_impl.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2024 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !false -// +build !false - -package k8sctx - -import ( - "context" - "errors" - "fmt" - "os" - "testing" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/types/known/anypb" - "gvisor.dev/gvisor/runsc/flag" - "gvisor.dev/gvisor/test/kubernetes/testcluster" - "gvisor.dev/gvisor/tools/gvisor_k8s_tool/provider/kubectl" -) - -var ( - kubectlContextName = flag.String("kubectl-context-name", "", "Name of the kubectl context to use within the kubectl config") - clusterProtoPath = flag.String("cluter-proto-path", "", "Path to a `google.container.v1.Cluster` textproto file") - testNodepoolRuntime = flag.String("test-nodepool-runtime", "", "if set, override the runtime used for pods scheduled on the 'test' nodepool. If unset, the nodepool default is used") -) - -// kubectlContext implements KubernetesContext using a named `kubectl` context -// from the user's kubectl config.
-type kubectlContext struct { - cluster *testcluster.TestCluster -} - -func newKubectlContext(ctx context.Context) (KubernetesContext, error) { - if *kubectlContextName == "" { - return nil, errors.New("no kubectl context name specified") - } - if *clusterProtoPath == "" { - return nil, errors.New("no cluster proto path specified") - } - cluster, err := kubectl.NewCluster(*kubectlContextName) - if err != nil { - return nil, fmt.Errorf("cannot initialize cluster %q: %w", *kubectlContextName, err) - } - var clusterPB anypb.Any - clusterBytes, err := os.ReadFile(*clusterProtoPath) - if err != nil { - return nil, fmt.Errorf("cannot read cluster textproto file %q: %w", *clusterProtoPath, err) - } - if err = prototext.Unmarshal(clusterBytes, &clusterPB); err != nil { - return nil, fmt.Errorf("cannot unmarshal cluster textproto file %q: %w", *clusterProtoPath, err) - } - testCluster := testcluster.NewTestClusterFromClient(*kubectlContextName, cluster.Client()) - if *testNodepoolRuntime != "" { - testCluster.OverrideTestNodepoolRuntime(testcluster.RuntimeType(*testNodepoolRuntime)) - } - return &kubectlContext{cluster: testCluster}, nil -} - -func (c *kubectlContext) AcquireCluster(ctx context.Context, t *testing.T) *testcluster.TestCluster { - return c.cluster -} - -func (c *kubectlContext) ReleaseCluster(ctx context.Context, t *testing.T, cluster *testcluster.TestCluster) { - // Nothing to do. -} - -func (c *kubectlContext) ForEachCluster(ctx context.Context, t *testing.T, fn func(cluster *testcluster.TestCluster)) { - fn(c.cluster) -} - -func (c *kubectlContext) ResolveImage(ctx context.Context, imageName string) (string, error) { - return imageName, nil -} - -func (c *kubectlContext) RegisterTest(name string, fn TestFunc) { - // Nothing to do here, we use the regular testing library. -} - -func (c *kubectlContext) TestMain(m *testing.M) { - os.Exit(m.Run()) -} - -func init() { - SetContextConstructor(newKubectlContext) -} diff --git a/test/kubernetes/k8sctx/kubectlctx/BUILD b/test/kubernetes/k8sctx/kubectlctx/BUILD new file mode 100644 index 0000000000..6675af376a --- /dev/null +++ b/test/kubernetes/k8sctx/kubectlctx/BUILD @@ -0,0 +1,26 @@ +load("//tools:defs.bzl", "go_library") + +package( + default_applicable_licenses = ["//:license"], + licenses = ["notice"], +) + +go_library( + name = "kubectlctx", + srcs = ["kubectlctx.go"], + nogo = False, + visibility = [ + "//visibility:public", + ], + deps = [ + "//runsc/flag", + "//test/kubernetes:test_range_config_go_proto", + "//test/kubernetes/k8sctx", + "//test/kubernetes/testcluster", + "//tools/gvisor_k8s_tool/provider/kubectl", + "@io_k8s_client_go//kubernetes:go_default_library", + "@io_k8s_client_go//tools/clientcmd:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", + "@org_golang_x_sync//errgroup:go_default_library", + ], +) diff --git a/test/kubernetes/k8sctx/kubectlctx/kubectlctx.go b/test/kubernetes/k8sctx/kubectlctx/kubectlctx.go new file mode 100644 index 0000000000..f2288de8b2 --- /dev/null +++ b/test/kubernetes/k8sctx/kubectlctx/kubectlctx.go @@ -0,0 +1,142 @@ +// Copyright 2024 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kubectlctx provides a KubernetesContext that uses one or more +// kubectl configs to determine the cluster(s) to use for tests and benchmarks. +// See parent package (`k8sctx`) for more info. +package kubectlctx + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sync/errgroup" + "google.golang.org/protobuf/encoding/prototext" + "gvisor.dev/gvisor/runsc/flag" + "gvisor.dev/gvisor/test/kubernetes/k8sctx" + testpb "gvisor.dev/gvisor/test/kubernetes/test_range_config_go_proto" + "gvisor.dev/gvisor/test/kubernetes/testcluster" + "gvisor.dev/gvisor/tools/gvisor_k8s_tool/provider/kubectl" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + rangeDir = flag.String("range-dir", "", "A directory containing a test_range.textproto file describing multiple clusters to use for tests and benchmarks; takes precedence over --kubectl-context") + kubectlContext = flag.String("kubectl-context", "", "The name of the kubectl context to use; if unset, use the default context within the kubectl config at KUBECONFIG") + testNodepoolRuntime = flag.String("test-nodepool-runtime", "", "if set, override the runtime used for pods scheduled on the 'test' nodepool. If unset, the nodepool default is used") +) + +// New creates a KubernetesContext using flags to determine which clusters +// to use for tests and benchmarks. +func New(ctx context.Context) (k8sctx.KubernetesContext, error) { + if *rangeDir != "" && *kubectlContext != "" { + return nil, fmt.Errorf("cannot use --range-dir and --kubectl-context at the same time") + } + var clusters []*testcluster.TestCluster + var err error + if *rangeDir != "" { + clusters, err = NewFromRangeDir(ctx, *rangeDir) + } else { + clusters, err = NewFromKubectlContext(ctx, *kubectlContext) + } + if err != nil { + return nil, fmt.Errorf("cannot initialize test clusters: %w", err) + } + if *testNodepoolRuntime != "" { + overriddenRuntime := testcluster.RuntimeType(*testNodepoolRuntime) + if !overriddenRuntime.IsValid() { + return nil, fmt.Errorf("invalid runtime type %q", *testNodepoolRuntime) + } + for _, cluster := range clusters { + cluster.OverrideTestNodepoolRuntime(overriddenRuntime) + } + } + if err := verifyClusters(ctx, clusters); err != nil { + return nil, fmt.Errorf("cannot verify clusters are working: %w", err) + } + return k8sctx.New(clusters...), nil +} + +// NewFromRangeDir creates a set of test clusters from a test range directory.
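+// +// The directory is expected to contain a file named "test_range.textproto" +// holding a `TestRange` message; one test cluster is constructed per cluster +// entry in that file.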
+func NewFromRangeDir(ctx context.Context, rangeDir string) ([]*testcluster.TestCluster, error) { + rangeFile := filepath.Join(rangeDir, "test_range.textproto") + rangeFileData, err := os.ReadFile(rangeFile) + if err != nil { + return nil, fmt.Errorf("cannot read range file %q: %w", rangeFile, err) + } + var testRange testpb.TestRange + if err := prototext.Unmarshal(rangeFileData, &testRange); err != nil { + return nil, fmt.Errorf("error unmarshalling range file %q: %v", rangeFile, err) + } + if len(testRange.GetClusters()) == 0 { + return nil, fmt.Errorf("range file %q has no clusters", rangeFile) + } + clusters := make([]*testcluster.TestCluster, len(testRange.GetClusters())) + for i, cluster := range testRange.GetClusters() { + configPath := cluster.GetKubectlConfig() + if configPath == "" { + return nil, fmt.Errorf("cluster %q has no kubectl config path", cluster.GetCluster()) + } + cfg, err := clientcmd.LoadFromFile(configPath) + if err != nil { + return nil, fmt.Errorf("cannot load kubectl config at %q for cluster %q: %w", configPath, cluster.GetCluster(), err) + } + contextName := cluster.GetKubectlContext() + if contextName == "" { + contextName = cfg.CurrentContext + } + restConfig, err := clientcmd.NewNonInteractiveClientConfig(*cfg, contextName, nil, clientcmd.NewDefaultClientConfigLoadingRules()).ClientConfig() + if err != nil { + return nil, fmt.Errorf("cannot load REST client config for cluster %q: %w", cluster.GetCluster(), err) + } + kubeClient, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("cannot create Kubernetes client for cluster %q: %w", cluster.GetCluster(), err) + } + clusters[i] = testcluster.NewTestClusterFromClient(cluster.GetCluster(), kubeClient) + } + return clusters, nil +} + +// NewFromKubectlContext creates a test cluster from a kubectl config. +// +// If the kubectl config is not specified, the default kubectl config is used. +// If the kubectl context is not specified, the default context within the +// kubectl config is used. +func NewFromKubectlContext(ctx context.Context, kubectlContext string) ([]*testcluster.TestCluster, error) { + cluster, err := kubectl.NewCluster(kubectlContext) + if err != nil { + return nil, fmt.Errorf("cannot initialize cluster from kubectl config: %w", err) + } + clusterName := "test-cluster" // Default name. + if kubectlContext != "" { + clusterName = kubectlContext + } + return []*testcluster.TestCluster{testcluster.NewTestClusterFromClient(clusterName, cluster.Client())}, nil +} + +// verifyClusters verifies that all clusters are working. +func verifyClusters(ctx context.Context, clusters []*testcluster.TestCluster) error { + var g errgroup.Group + for _, cluster := range clusters { + c := cluster + g.Go(func() error { + return c.SanityCheck(ctx) + }) + } + return g.Wait() +} diff --git a/test/kubernetes/test_range_config.proto b/test/kubernetes/test_range_config.proto index a0ea255f9b..2f6136fa68 100644 --- a/test/kubernetes/test_range_config.proto +++ b/test/kubernetes/test_range_config.proto @@ -64,10 +64,24 @@ message TestRange { // Cluster holds the created cluster and its credential file. message Cluster { - // Created cluster proto. - google.protobuf.Any cluster = 1; + // Cluster name. + string cluster = 1; - // The setup step will create individual credential files for each created - // cluster. - string credential_file = 2; + // A kubectl config file that can be used to connect to the cluster. 
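+ // The file is loaded with clientcmd.LoadFromFile when the test cluster + // client is constructed.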
+ string kubectl_config = 2; + + // The kubectl context name within `kubectl_config` to use to connect + // to the cluster. + // If empty, the default context will be used. + string kubectl_context = 3; + + // The GCP project ID that the cluster is in. + // Optional; only used for cluster management tasks (e.g. deletion). + // Leave empty for non-GCP clusters. + string gcp_project_id = 4; + + // The GCP location that the cluster is created in. + // Optional; only used for cluster management tasks (e.g. deletion). + // Leave empty for non-GCP clusters. + string gcp_location = 5; } diff --git a/test/kubernetes/testcluster/objects.go b/test/kubernetes/testcluster/objects.go index aa1e8908c6..e5390669a7 100644 --- a/test/kubernetes/testcluster/objects.go +++ b/test/kubernetes/testcluster/objects.go @@ -310,6 +310,47 @@ func (n *Namespace) WaitForResources(ctx context.Context, requests ContainerReso } } +// SanityCheck ensures that the cluster is working. +// It does so by creating a sanity-check pod in the sanity namespace and +// ensuring that it runs successfully. +func (t *TestCluster) SanityCheck(ctx context.Context) error { + sanityNS := t.Namespace(NamespaceSanity) + resetCtx, resetCancel := context.WithTimeout(ctx, 6*time.Minute) + defer resetCancel() + defer sanityNS.Cleanup(resetCtx) + sanityCtx, sanityCancel := context.WithTimeout(ctx, 5*time.Minute) + defer sanityCancel() + var lastErr error + for sanityCtx.Err() == nil { + err := func() error { + if err := sanityNS.Reset(ctx); err != nil { + return fmt.Errorf("cannot reset %v namespace: %w", NamespaceSanity, err) + } + defer sanityNS.Cleanup(ctx) + sanityPod := sanityNS.NewAlpinePod("check", "alpine", []string{"/bin/sh", "-c", "echo hello"}) + sanityPod, err := t.CreatePod(ctx, sanityPod) + if err != nil { + return fmt.Errorf("cannot create sanity check pod: %w", err) + } + if err := t.WaitForPodCompleted(ctx, sanityPod); err != nil { + _ = t.DeletePod(ctx, sanityPod) + return fmt.Errorf("failed waiting for sanity check pod to complete: %w", err) + } + if err := t.DeletePod(ctx, sanityPod); err != nil { + return fmt.Errorf("cannot delete sanity check pod: %w", err) + } + return nil + }() + if err == nil { + return nil + } + if sanityCtx.Err() == nil { + lastErr = err + } + } + return fmt.Errorf("cannot ensure cluster %v is working: %w", t.GetName(), lastErr) +} + // RuntimeType is a supported runtime for the test nodepool. type RuntimeType string @@ -321,6 +362,21 @@ const ( RuntimeTypeUnsandboxedTPU = RuntimeType("runc-tpu") ) +// IsValid returns true if the runtime type is valid. +func (t RuntimeType) IsValid() bool { + switch t { + case RuntimeTypeGVisor, RuntimeTypeUnsandboxed, RuntimeTypeGVisorTPU, RuntimeTypeUnsandboxedTPU: + return true + default: + return false + } +} + +// IsGVisor returns true if the runtime is a gVisor-based runtime. +func (t RuntimeType) IsGVisor() bool { + return t == RuntimeTypeGVisor || t == RuntimeTypeGVisorTPU +} + // ApplyNodepool modifies the nodepool to configure it to use the runtime.
func (t RuntimeType) ApplyNodepool(nodepool *cspb.NodePool) { if nodepool.GetConfig().GetLabels() == nil { diff --git a/test/kubernetes/testcluster/testcluster.go b/test/kubernetes/testcluster/testcluster.go index a21dee7869..80319ef339 100644 --- a/test/kubernetes/testcluster/testcluster.go +++ b/test/kubernetes/testcluster/testcluster.go @@ -25,7 +25,6 @@ import ( "time" "golang.org/x/sync/errgroup" - cspb "google.golang.org/genproto/googleapis/container/v1" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sync" testpb "gvisor.dev/gvisor/test/kubernetes/test_range_config_go_proto" @@ -192,20 +191,23 @@ type NodePool struct { // NewTestClusterFromProto returns a new TestCluster client from a proto. func NewTestClusterFromProto(ctx context.Context, cluster *testpb.Cluster) (*TestCluster, error) { - config, err := clientcmd.BuildConfigFromFlags("" /*masterURL*/, cluster.GetCredentialFile()) + kubeCfg, err := clientcmd.LoadFromFile(cluster.GetKubectlConfig()) if err != nil { - return nil, fmt.Errorf("BuildConfigFromFlags: %w", err) + return nil, fmt.Errorf("cannot load kubectl config at %q: %w", cluster.GetKubectlConfig(), err) } - client, err := kubernetes.NewForConfig(config) + kubeContext := cluster.GetKubectlContext() + if kubeContext == "" { + kubeContext = kubeCfg.CurrentContext + } + restCfg, err := clientcmd.NewNonInteractiveClientConfig(*kubeCfg, kubeContext, nil, clientcmd.NewDefaultClientConfigLoadingRules()).ClientConfig() if err != nil { - return nil, fmt.Errorf("kubernetes.NewForConfig: %w", err) + return nil, fmt.Errorf("kubectl config: %w", err) } - var clusterPB cspb.Cluster - if err := cluster.GetCluster().UnmarshalTo(&clusterPB); err != nil { - return nil, fmt.Errorf("cannot unmarshal cluster: %w", err) + client, err := kubernetes.NewForConfig(restCfg) + if err != nil { + return nil, fmt.Errorf("kubernetes.NewForConfig: %w", err) } - clusterName := clusterPB.GetName() - return NewTestClusterFromClient(clusterName, client), nil + return NewTestClusterFromClient(cluster.GetCluster(), client), nil } // NewTestClusterFromClient returns a new TestCluster client with a given client. diff --git a/test/kubernetes/tests/BUILD b/test/kubernetes/tests/BUILD index 35bb8a5e4d..8f3937816a 100644 --- a/test/kubernetes/tests/BUILD +++ b/test/kubernetes/tests/BUILD @@ -27,5 +27,5 @@ go_test( "noguitar", "notap", ], - deps = ["//test/kubernetes/k8sctx"], + deps = ["//test/kubernetes/k8sctx/kubectlctx"], ) diff --git a/test/kubernetes/tests/hello_test.go b/test/kubernetes/tests/hello_test.go index 8caa1f0a90..5910500cb2 100644 --- a/test/kubernetes/tests/hello_test.go +++ b/test/kubernetes/tests/hello_test.go @@ -18,23 +18,17 @@ import ( "context" "testing" - "gvisor.dev/gvisor/test/kubernetes/k8sctx" + "gvisor.dev/gvisor/test/kubernetes/k8sctx/kubectlctx" ) // TestHello tests that a trivial alpine container runs correctly. 
func TestHello(t *testing.T) { ctx := context.Background() - k8sCtx, err := k8sctx.Context(ctx) + k8sCtx, err := kubectlctx.New(ctx) if err != nil { t.Fatalf("Failed to get kubernetes context: %v", err) } - cluster := k8sCtx.AcquireCluster(ctx, t) - defer k8sCtx.ReleaseCluster(ctx, t, cluster) + cluster, releaseFn := k8sCtx.Cluster(ctx, t) + defer releaseFn() RunHello(ctx, t, k8sCtx, cluster) } - -func TestMain(m *testing.M) { - k8sctx.TestMain(m, map[string]k8sctx.TestFunc{ - "TestHello": TestHello, - }) -} diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go index 21ffb71bf1..ec27fc0a54 100644 --- a/tools/bigquery/bigquery.go +++ b/tools/bigquery/bigquery.go @@ -85,9 +85,12 @@ func (s *Suite) debugString(sb *strings.Builder, prefix string) { // Benchstat returns a benchstat-formatted output string. // See https://pkg.go.dev/golang.org/x/perf/cmd/benchstat -// `includeConditions` contains names of `Condition`s that should be included -// as part of the benchmark name. -func (s *Suite) Benchstat(includeConditions []string) string { +// `includeCondition` returns whether a `Condition` name should be included +// as part of the benchmark name. If nil, all conditions are included. +func (s *Suite) Benchstat(includeCondition func(string) bool) string { + if includeCondition == nil { + includeCondition = func(string) bool { return true } + } var sb strings.Builder benchmarkNames := make([]string, 0, len(s.Benchmarks)) benchmarks := make(map[string]*Benchmark, len(s.Benchmarks)) @@ -98,12 +101,8 @@ func (s *Suite) Benchstat(includeConditions []string) string { } } sort.Strings(benchmarkNames) - includeConditionsMap := make(map[string]bool, len(includeConditions)) - for _, condName := range includeConditions { - includeConditionsMap[condName] = true - } for _, bmName := range benchmarkNames { - benchmarks[bmName].benchstat(&sb, s.Name, includeConditionsMap, s.Conditions) + benchmarks[bmName].benchstat(&sb, s.Name, includeCondition, s.Conditions) } return sb.String() } @@ -153,20 +152,20 @@ func noSpace(s string) string { } // benchstat produces benchmark-formatted output for this Benchmark. 
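+// A condition name is folded into the emitted benchmark name only when +// `includeCondition` returns true for it; all other conditions are omitted.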
-func (bm *Benchmark) benchstat(sb *strings.Builder, suiteName string, includeConditions map[string]bool, suiteConditions []*Condition) { +func (bm *Benchmark) benchstat(sb *strings.Builder, suiteName string, includeCondition func(string) bool, suiteConditions []*Condition) { var conditionsStr string conditionNames := make([]string, 0, len(suiteConditions)+len(bm.Condition)) conditionMap := make(map[string]string, len(suiteConditions)+len(bm.Condition)) for _, c := range suiteConditions { cName := noSpace(c.Name) - if _, found := conditionMap[cName]; !found && includeConditions[cName] { + if _, found := conditionMap[cName]; !found && includeCondition(cName) { conditionNames = append(conditionNames, cName) conditionMap[cName] = noSpace(c.Value) } } for _, c := range bm.Condition { cName := noSpace(c.Name) - if _, found := conditionMap[cName]; !found && includeConditions[cName] { + if _, found := conditionMap[cName]; !found && includeCondition(cName) { conditionNames = append(conditionNames, cName) conditionMap[cName] = noSpace(c.Value) } diff --git a/tools/parsers/go_parser.go b/tools/parsers/go_parser.go index cef869416d..d57a361fe5 100644 --- a/tools/parsers/go_parser.go +++ b/tools/parsers/go_parser.go @@ -53,8 +53,8 @@ func ParseOutput(output string, name string, official bool) (*bigquery.Suite, er // *bigquery.Benchmark{ // Name: BenchmarkRuby // []*bigquery.Condition{ -// {Name: GOMAXPROCS, 6} // {Name: server_threads, 1} +// {Name: GOMAXPROCS, 6} // } // []*bigquery.Metric{ // {Name: ns/op, Unit: ns/op, Sample: 1397875880} @@ -74,12 +74,17 @@ func parseLine(line string) (*bigquery.Benchmark, error) { return nil, fmt.Errorf("expecting number of runs, got %s: %v", fields[1], err) } - name, params, err := parseNameParams(fields[0]) + nameComponents, params, err := tools.NameToParameters(fields[0]) if err != nil { return nil, fmt.Errorf("parse name/params: %v", err) } - bm := bigquery.NewBenchmark(name, iters) + // Treat the first name component as the benchmark name, and all other + // components as conditions with key = value. + bm := bigquery.NewBenchmark(nameComponents[0], iters) + for _, c := range nameComponents[1:] { + bm.AddCondition(c, c) + } for _, p := range params { bm.AddCondition(p.Name, p.Value) } @@ -94,40 +99,6 @@ func parseLine(line string) (*bigquery.Benchmark, error) { return bm, nil } -// parseNameParams parses the Name, GOMAXPROCS, and Params from the test. -// Field here should be of the format TESTNAME/PARAMS-GOMAXPROCS. -// Parameters will be separated by a "/" with individual params being -// "name.value". -func parseNameParams(field string) (string, []*tools.Parameter, error) { - var params []*tools.Parameter - // Remove GOMAXPROCS from end. - maxIndex := strings.LastIndex(field, "-") - if maxIndex < 0 { - return "", nil, fmt.Errorf("GOMAXPROCS not found: %s", field) - } - maxProcs := field[maxIndex+1:] - params = append(params, &tools.Parameter{ - Name: "GOMAXPROCS", - Value: maxProcs, - }) - - remainder := field[0:maxIndex] - index := strings.Index(remainder, "/") - if index == -1 { - return remainder, params, nil - } - - name := remainder[0:index] - p := remainder[index+1:] - - ps, err := tools.NameToParameters(p) - if err != nil { - return "", nil, fmt.Errorf("NameToParameters %s: %v", field, err) - } - params = append(params, ps...) - return name, params, nil -} - // makeMetric parses metrics and adds them to the passed Benchmark. 
func makeMetric(bm *bigquery.Benchmark, value, metric string) error { switch metric { diff --git a/tools/parsers/go_parser_test.go b/tools/parsers/go_parser_test.go index 39a13b4afe..84fae05818 100644 --- a/tools/parsers/go_parser_test.go +++ b/tools/parsers/go_parser_test.go @@ -23,9 +23,10 @@ import ( func TestParseLine(t *testing.T) { testCases := []struct { - name string - data string - want *bigquery.Benchmark + name string + data string + want *bigquery.Benchmark + wantErr bool }{ { name: "Iperf", @@ -37,14 +38,14 @@ func TestParseLine(t *testing.T) { Name: "iterations", Value: "1", }, - { - Name: "GOMAXPROCS", - Value: "6", - }, { Name: "Upload", Value: "Upload", }, + { + Name: "GOMAXPROCS", + Value: "6", + }, }, Metric: []*bigquery.Metric{ { @@ -70,14 +71,60 @@ func TestParseLine(t *testing.T) { Name: "iterations", Value: "1", }, + { + Name: "server_threads", + Value: "1", + }, { Name: "GOMAXPROCS", Value: "6", }, + }, + Metric: []*bigquery.Metric{ + { + Name: "ns/op", + Unit: "ns/op", + Sample: 1397875880.0, + }, + { + Name: "average_latency", + Unit: "sec", + Sample: 0.00710, + }, + { + Name: "requests_per_second", + Unit: "QPS", + Sample: 140.0, + }, + }, + }, + }, + { + name: "Ruby with alternate parameter syntax", + data: "BenchmarkRuby/SubTest/server_threads=1/clients=8-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS", + want: &bigquery.Benchmark{ + Name: "BenchmarkRuby", + Condition: []*bigquery.Condition{ + { + Name: "iterations", + Value: "1", + }, + { + Name: "SubTest", + Value: "SubTest", + }, { Name: "server_threads", Value: "1", }, + { + Name: "clients", + Value: "8", + }, + { + Name: "GOMAXPROCS", + Value: "6", + }, }, Metric: []*bigquery.Metric{ { @@ -87,7 +134,7 @@ func TestParseLine(t *testing.T) { }, { Name: "average_latency", - Unit: "s", + Unit: "sec", Sample: 0.00710, }, { @@ -98,16 +145,57 @@ func TestParseLine(t *testing.T) { }, }, }, + { + name: "No GOMAXPROCS is allowed", + data: "BenchmarkRuby/server_threads.1/clients.8 1 1397875880 ns/op 0.00710 average_latency.s", + want: &bigquery.Benchmark{ + Name: "BenchmarkRuby", + Condition: []*bigquery.Condition{ + { + Name: "iterations", + Value: "1", + }, + { + Name: "server_threads", + Value: "1", + }, + { + Name: "clients", + Value: "8", + }, + }, + Metric: []*bigquery.Metric{ + { + Name: "ns/op", + Unit: "ns/op", + Sample: 1397875880.0, + }, + { + Name: "average_latency", + Unit: "sec", + Sample: 0.00710, + }, + }, + }, + }, + { + name: "Ambiguous parameter separator", + data: "BenchmarkRuby/server_threads.4/clients=8 1 1397875880 ns/op 0.00710 average_latency.s", + wantErr: true, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { got, err := parseLine(tc.data) - if err != nil { + if err != nil && !tc.wantErr { t.Fatalf("parseLine failed with: %v", err) } + if err == nil && tc.wantErr { + t.Fatal("parseLine unexpectedly succeeded") + } - if !cmp.Equal(tc.want, got, nil) { + if err == nil && !cmp.Equal(tc.want, got, nil) { for i := range got.Condition { t.Logf("Metric: want: %+v got:%+v", got.Condition[i], tc.want.Condition[i]) } @@ -146,13 +234,13 @@ func TestParseOutput(t *testing.T) { { name: "Ruby", data: `BenchmarkRuby -BenchmarkRuby/server_threads.1 -BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS -BenchmarkRuby/server_threads.5 -BenchmarkRuby/server_threads.5-6 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`, +BenchmarkRuby/server_threads=1 +BenchmarkRuby/server_threads=1 
1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS +BenchmarkRuby/server_threads=5 +BenchmarkRuby/server_threads=5 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`, numBenchmarks: 2, numMetrics: 3, - numConditions: 3, + numConditions: 2, }, }
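As a minimal illustration of the two API changes above (hypothetical caller code; `suite` stands in for any *bigquery.Suite, and error handling is elided):

	// Parse a parameterized benchmark name with the new signature.
	nameComponents, params, _ := tools.NameToParameters("BenchmarkRuby/server_threads=1-6")
	// nameComponents is ["BenchmarkRuby"]; params is
	// [{server_threads 1} {GOMAXPROCS 6}].

	// Benchstat now takes a predicate instead of a slice of names; for
	// example, to fold every condition except GOMAXPROCS into the name:
	out := suite.Benchstat(func(condName string) bool { return condName != "GOMAXPROCS" })
	fmt.Print(out)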