Spinning up services is one of the slower parts of running a test. Wherever possible, perform this setup once in your suite's Setup
to avoid long test times.
var (
	a, b echo.Instance
)

func TestMain(m *testing.M) {
	framework.
		NewSuite(m).
		Setup(istio.Setup(nil, nil)).
		Setup(func(ctx resource.Context) (err error) {
			// Build the shared echo instances once for the entire suite.
			_, err = echoboot.NewBuilder(ctx).
				With(&a, cfgA).
				With(&b, cfgB).
				Build()
			return err
		}).
		Run()
}
func TestFeatureA(t *testing.T) {
	framework.
		NewTest(t).
		Run(func(ctx framework.TestContext) {
			ctx.Config().ApplyYAMLOrFail(...)
			defer ctx.Config().DeleteYAMLOrFail(...) // make sure to clean up test-specific config
			/* test the config on the existing echo services instead of creating/deleting new ones just for this test */
		})
}
The test framework supports running with a variable number of clusters, with different control-plane and network topologies. These environmental concerns are configured outside the tests themselves. New tests should take this into account and exercise whatever environment they are running in. Currently, plenty of existing tests don't follow this and are skipped using RequireSingleCluster
or other mechanisms. In the future, we should avoid this to make sure features work everywhere.
Supporting multicluster requires:
- Avoiding RequireSingleCluster or RequireMaxClusters on the suite/test (see the sketch after this list).
- Deploying services in all clusters in the environment (using ctx.Clusters() from the test context).
- Ensuring all interactions between services are tested between every pair of clusters.
- Using ParsedResponse from echo calls to investigate where traffic actually went.
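For reference, a suite-level restriction looks roughly like the sketch below. This is a minimal, hypothetical example (the exact builder methods may vary between framework versions); new tests should avoid such restrictions unless the feature genuinely cannot work in a multicluster environment.

func TestMain(m *testing.M) {
	framework.
		NewSuite(m).
		RequireSingleCluster(). // restricts the suite to single-cluster environments; avoid in new tests
		Setup(istio.Setup(nil, nil)).
		Run()
}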
Set up echos in all clusters:
func TestMain(m *testing.M) {
	framework.
		NewSuite(m).
		Setup(istio.Setup(nil, nil)).
		Setup(func(ctx resource.Context) (err error) {
			builder := echoboot.NewBuilder(ctx)
			// Deploy "a", "b", and a uniquely named "c-<index>" in every cluster in the environment.
			for _, cluster := range ctx.Clusters() {
				builder.
					With(nil, echo.Config{Service: "a", Cluster: cluster}).
					With(nil, echo.Config{Service: "b", Cluster: cluster}).
					With(nil, echo.Config{Service: fmt.Sprintf("c-%d", cluster.Index()), Cluster: cluster})
			}
			echos, err := builder.Build()
			if err != nil {
				return err
			}
			// package-level echos!
			a = echos.Match(echo.Service("a"))
			b = echos.Match(echo.Service("b"))
			c = echos.Match(echo.ServicePrefix("c-"))
			return nil
		}).
		Run()
}
Test cross-cluster reachability with a service that is the same across clusters:
func TestCrossClusterLoadBalancing(t *testing.T) {
	framework.NewTest(t).Run(func(ctx framework.TestContext) {
		// loop through the source services
		for _, a := range a {
			a := a
			ctx.NewSubTest(a.Config().Cluster.Name()).Run(func(ctx framework.TestContext) {
				// we only need to target b[0]; all `b` services should be hit since they have the same name
				// set Count to something proportional to the number of possible targets to give load-balancing a chance to work
				res := a.CallOrFail(ctx, echo.CallOptions{Target: b[0], Count: 2 * len(b)})
				// ensure 100% success
				res.CheckOKOrFail(ctx)
				// verify we reached every cluster by inspecting the ParsedResponse for each request
				clusterHits := map[string]int{}
				for _, r := range res {
					clusterHits[r.Cluster]++
				}
				if len(clusterHits) < len(b) {
					ctx.Fatal("did not hit all clusters")
				}
			})
		}
	})
}
Test cross-cluster reachability with services that are unique per cluster:
func TestCrossClusterReachability(t *testing.T) {
	framework.NewTest(t).Run(func(ctx framework.TestContext) {
		// loop through all sources and destinations
		for _, a := range a {
			for _, c := range c {
				a, c := a, c
				ctx.NewSubTest(a.Config().Cluster.Name() + "->" + c.Config().Service).Run(func(ctx framework.TestContext) {
					// no need to set Count or verify that responses went to all instances
					a.CallOrFail(ctx, echo.CallOptions{Target: c}).CheckOKOrFail(ctx)
				})
			}
		}
	})
}