From f1ce9fa9fda972067aa813f87d09a29cbebce94b Mon Sep 17 00:00:00 2001 From: "Masih H. Derkani" Date: Thu, 30 Jan 2025 20:29:21 +0000 Subject: [PATCH] Rework time dilution in F3 tests and refine assertions Progress the clock during tests independently of condition check. Rework the assertions to use built-in eventual assertion. Use consistent names for the asserting functions. Fixes #818 Fixes #847 --- f3_test.go | 407 +++++++++++++++++++++++++------------------------ partial_msg.go | 4 +- 2 files changed, 207 insertions(+), 204 deletions(-) diff --git a/f3_test.go b/f3_test.go index 60094047..0471a161 100644 --- a/f3_test.go +++ b/f3_test.go @@ -3,9 +3,8 @@ package f3_test import ( "context" "fmt" - "os" "path/filepath" - "reflect" + "strings" "sync/atomic" "testing" "time" @@ -30,32 +29,36 @@ import ( "golang.org/x/sync/errgroup" ) +const ( + conditionCheckInterval = 500 * time.Millisecond + manifestPublishInterval = time.Second + advanceClockEvery = 15 * time.Millisecond + advanceClockBy = 500 * time.Millisecond +) + func init() { // Hash-based deduplication breaks fast rebroadcast, even if we set the time window to be // really short because gossipsub has a minimum 1m cache scan interval. psutil.GPBFTMessageIdFn = pubsub.DefaultMsgIdFn psutil.ManifestMessageIdFn = pubsub.DefaultMsgIdFn + psutil.ChainExchangeMessageIdFn = pubsub.DefaultMsgIdFn } -var manifestSenderTimeout = 10 * time.Second - func TestF3Simple(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).start() - env.waitForInstanceNumber(5, 10*time.Second, false) + env.requireInstanceEventually(5, 30*time.Second, true) } func TestF3WithLookback(t *testing.T) { - env := newTestEnvironment(t).withNodes(2).withManifest(func(m *manifest.Manifest) { - m.EC.HeadLookback = 20 - }).start() - - env.waitForInstanceNumber(5, 10*time.Second, false) - - // Wait a second to let everything settle. - time.Sleep(10 * time.Millisecond) + t.Parallel() + env := newTestEnvironment(t). + withNodes(2). + withManifest(func(m *manifest.Manifest) { m.EC.HeadLookback = 20 }). + start() + env.requireInstanceEventually(5, 20*time.Second, true) headEpoch := env.ec.GetCurrentHead() - cert, err := env.nodes[0].f3.GetLatestCert(env.testCtx) require.NoError(t, err) require.NotNil(t, cert) @@ -68,7 +71,6 @@ func TestF3WithLookback(t *testing.T) { // Advance by 100 periods. for i := 0; i < 200; i++ { env.clock.Add(env.manifest.EC.Period / 2) - time.Sleep(time.Millisecond) } // Now make sure we've advanced by less than 100 instances. We want to make sure we're not @@ -83,54 +85,60 @@ func TestF3WithLookback(t *testing.T) { env.ec.Resume() for i := 0; i < 10; i++ { env.clock.Add(env.manifest.EC.Period / 10) - time.Sleep(time.Millisecond) } - env.waitForInstanceNumber(5, 10*time.Second, true) + env.requireInstanceEventually(5, 10*time.Second, true) } func TestF3PauseResumeCatchup(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(3).start() - if _, set := os.LookupEnv("GO_TEST_DEBUG"); set { - env.logStatus() - } - - env.waitForInstanceNumber(1, 30*time.Second, true) + env.requireInstanceEventually(1, 30*time.Second, true) // Pausing two nodes should pause the network. 
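+	// With two of the three nodes paused, the remaining power is below the >2/3 quorum GPBFT needs to decide, so instance progress should stall.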
env.pauseNode(1) env.pauseNode(2) - // Wait until node 0 stops receiving new instances - env.waitForCondition(func() bool { - oldInstance := env.nodes[0].currentGpbftInstance() - env.clock.Add(10 * env.manifest.EC.Period) - newInstance := env.nodes[0].currentGpbftInstance() - return oldInstance == newInstance - }, 30*time.Second) + env.requireF3NotRunningEventually(10*time.Second, func(node *testNode) bool { + return node.id == 1 || node.id == 2 + }) + + // Wait until node 0 stops progressing + env.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(t, func() bool { + oldInstance := env.nodes[0].currentGpbftInstance() + env.clock.Add(10 * env.manifest.EC.Period) + newInstance := env.nodes[0].currentGpbftInstance() + return oldInstance == newInstance + }, 30*time.Second, conditionCheckInterval) + }) // Resuming node 1 should continue agreeing on instances. env.resumeNode(1) + env.requireF3RunningEventually(10*time.Second, func(node *testNode) bool { return node.id == 1 }) // Wait until we're far enough that pure GPBFT catchup should be impossible. targetInstance := env.nodes[1].currentGpbftInstance() + env.manifest.CommitteeLookback + 1 - env.waitForInstanceNumber(targetInstance, 90*time.Second, false) + env.requireInstanceEventually(targetInstance, 30*time.Second, false) env.resumeNode(2) + env.requireF3RunningEventually(10*time.Second, func(node *testNode) bool { return node.id == 2 }) // Everyone should catch up eventually - env.waitForInstanceNumber(targetInstance, 30*time.Second, true) + env.requireInstanceEventually(targetInstance, 30*time.Second, true) // Pause the "good" node. node0failInstance := env.nodes[0].currentGpbftInstance() env.pauseNode(0) + env.requireF3NotRunningEventually(10*time.Second, func(node *testNode) bool { return node.id == 0 }) // We should be able to make progress with the remaining nodes. - env.waitForInstanceNumber(node0failInstance+3, 60*time.Second, false) + env.requireInstanceEventually(node0failInstance+3, time.Minute, false) } func TestF3FailRecover(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2) // Make it possible to fail a single write for node 0. @@ -149,7 +157,7 @@ func TestF3FailRecover(t *testing.T) { env.injectDatastoreFailures(0, dsFailureFunc) env.start() - env.waitForInstanceNumber(1, 10*time.Second, true) + env.requireInstanceEventually(1, 10*time.Second, true) // Inject a single write failure. This should prevent us from storing a single decision // decision. @@ -157,29 +165,30 @@ func TestF3FailRecover(t *testing.T) { // We should proceed anyways (catching up via the certificate exchange protocol). 
oldInstance := env.nodes[0].currentGpbftInstance() - env.waitForInstanceNumber(oldInstance+3, 10*time.Second, true) + env.requireInstanceEventually(oldInstance+3, 20*time.Second, true) } func TestF3DynamicManifest_WithoutChanges(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).withDynamicManifest() env.start() prev := env.nodes[0].f3.Manifest() - env.waitForInstanceNumber(5, 10*time.Second, false) + env.requireInstanceEventually(5, 20*time.Second, false) // no changes in manifest require.Equal(t, prev, env.nodes[0].f3.Manifest()) - env.requireEqualManifests(false) + env.requireConsistentManifest(true) } func TestF3DynamicManifest_WithRebootstrap(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).withDynamicManifest().start() - prev := env.nodes[0].f3.Manifest() - env.waitForInstanceNumber(3, 15*time.Second, true) + prevManifest := env.nodes[0].f3.Manifest() + env.requireInstanceEventually(3, 15*time.Second, true) - env.manifest.BootstrapEpoch = 1253 - for i := 0; i < 2; i++ { + for range 2 { nd := env.addNode() pubkey, _ := env.signingBackend.GenerateKey() env.manifest.ExplicitPower = append(env.manifest.ExplicitPower, gpbft.PowerEntry{ @@ -190,21 +199,23 @@ func TestF3DynamicManifest_WithRebootstrap(t *testing.T) { } env.updateManifest() - env.waitForManifest() + env.requireManifestPropagatedEventually(30 * time.Second) // check that it rebootstrapped and has a new base epoch. targetBaseEpoch := env.manifest.BootstrapEpoch - env.manifest.EC.Finality - env.waitForCondition(func() bool { - env.clock.Add(env.manifest.EC.Period) - c, err := env.nodes[0].f3.GetCert(env.testCtx, 0) - if err != nil || c == nil { - return false - } - return c.ECChain.Base().Epoch == targetBaseEpoch - }, 20*time.Second) - env.waitForInstanceNumber(3, 15*time.Second, false) - require.NotEqual(t, prev, env.nodes[0].f3.Manifest()) - env.requireEqualManifests(false) + env.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(t, func() bool { + env.clock.Add(env.manifest.EC.Period) + c, err := env.nodes[0].f3.GetCert(env.testCtx, 0) + if err != nil || c == nil { + return false + } + return c.ECChain.Base().Epoch == targetBaseEpoch + }, 30*time.Second, conditionCheckInterval, "Base epoch on node 0 not reached in time. 
Environment: %s", env) + }) + env.requireInstanceEventually(3, 20*time.Second, false) + require.NotEqual(t, prevManifest, env.nodes[0].f3.Manifest()) + env.requireConsistentManifest(false) // check that the power table is updated ts, err := env.ec.GetTipsetByEpoch(env.testCtx, int64(env.nodes[0].currentGpbftInstance())) @@ -215,25 +226,26 @@ func TestF3DynamicManifest_WithRebootstrap(t *testing.T) { } func TestF3DynamicManifest_WithPauseAndRebootstrap(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).withDynamicManifest().start() - env.waitForInstanceNumber(10, 30*time.Second, true) + env.requireInstanceEventually(10, time.Minute, true) env.manifest.Pause = true env.updateManifest() // check that it paused - env.waitForNodesStopped() + env.requireF3NotRunningEventually(30*time.Second, matchers.allNodes) env.manifest.BootstrapEpoch = 956 env.manifest.Pause = false env.updateManifest() - env.waitForManifest() + env.requireManifestPropagatedEventually(30 * time.Second) env.clock.Add(1 * time.Minute) - env.waitForInstanceNumber(3, 30*time.Second, true) - env.requireEqualManifests(true) + env.requireInstanceEventually(3, 30*time.Second, true) + env.requireConsistentManifest(true) // Now check that we have the correct base for certificate 0. cert0, err := env.nodes[0].f3.GetCert(env.testCtx, 0) @@ -242,24 +254,25 @@ func TestF3DynamicManifest_WithPauseAndRebootstrap(t *testing.T) { } func TestF3DynamicManifest_RebootstrapWithCompression(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).withDynamicManifest().start() - env.waitForInstanceNumber(10, 30*time.Second, true) + env.requireInstanceEventually(10, time.Minute, true) env.manifest.Pause = true env.updateManifest() - env.waitForNodesStopped() + env.requireF3NotRunningEventually(30*time.Second, matchers.allNodes) env.manifest.BootstrapEpoch = 956 env.manifest.PubSub.CompressionEnabled = true env.manifest.Pause = false env.updateManifest() - env.waitForManifest() + env.requireManifestPropagatedEventually(30 * time.Second) env.clock.Add(1 * time.Minute) - env.waitForInstanceNumber(3, 30*time.Second, true) - env.requireEqualManifests(true) + env.requireInstanceEventually(3, 30*time.Second, true) + env.requireConsistentManifest(true) cert0, err := env.nodes[0].f3.GetCert(env.testCtx, 0) require.NoError(t, err) @@ -267,16 +280,18 @@ func TestF3DynamicManifest_RebootstrapWithCompression(t *testing.T) { } func TestF3LateBootstrap(t *testing.T) { + t.Parallel() env := newTestEnvironment(t).withNodes(2).start() // Wait till we're "caught up". bootstrapInstances := uint64(env.manifest.EC.Finality/(gpbft.ChainDefaultLen-1)) + 1 - env.waitForInstanceNumber(bootstrapInstances, 30*time.Second, true) + env.requireInstanceEventually(bootstrapInstances, 30*time.Second, true) // Wait until we've finalized a distant epoch. Once we do, our EC will forget the historical // chain (importantly, forget the bootstrap power table). targetEpoch := 2*env.manifest.EC.Finality + env.manifest.BootstrapEpoch - env.waitForEpochFinalized(targetEpoch) + env.clock.Add(time.Duration(targetEpoch/3*2) * env.manifest.EC.Period) // Fast-forward to 2/3 of the target epoch for faster test e2e time. + env.requireEpochFinalizedEventually(time.Second, advanceClockEvery, targetEpoch, time.Minute) // Now update the manifest with the initial power-table CID. cert0, err := env.nodes[0].f3.GetCert(env.testCtx, 0) @@ -293,19 +308,18 @@ func TestF3LateBootstrap(t *testing.T) { }) // Wait for it to finish starting. 
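+	// Keep the mock clock advancing in the background so the late-started node's timers can fire while we poll for f3.IsRunning.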
- env.waitForCondition(func() bool { - env.clock.Add(env.manifest.EC.Period) - // This step takes time, give it time. - time.Sleep(10 * time.Millisecond) - return f3.IsRunning() - }, 10*time.Second) + env.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(t, f3.IsRunning, 10*time.Second, conditionCheckInterval) + }) // It should eventually catch up. - env.waitForCondition(func() bool { - cert, err := f3.GetLatestCert(env.testCtx) - require.NoError(t, err) - return cert != nil && cert.ECChain.Head().Epoch > targetEpoch - }, 10*time.Second) + env.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(t, func() bool { + cert, err := f3.GetLatestCert(env.testCtx) + require.NoError(t, err) + return cert != nil && cert.ECChain.Head().Epoch > targetEpoch + }, 10*time.Second, conditionCheckInterval) + }) } var base = manifest.Manifest{ @@ -402,9 +416,9 @@ func (s testNodeStatus) String() string { case !s.running: return fmt.Sprint("node ", s.id, ": paused") case s.latestCert == nil: - return fmt.Sprint("node ", s.id, ": at", s.progress, " with no finality certs") + return fmt.Sprint("node ", s.id, ": progress ", s.progress, ", with no finality certs") default: - return fmt.Sprint("node ", s.id, ": at ", s.progress, ", finalized epoch ", s.latestCert.ECChain.Head().Epoch, " {", s.latestCert.GPBFTInstance, "}") + return fmt.Sprint("node ", s.id, ": progress ", s.progress, ", finalized epoch ", s.latestCert.ECChain.Head().Epoch, " at instance ", s.latestCert.GPBFTInstance) } } @@ -459,114 +473,103 @@ func (e *testEnv) updateManifest() { e.manifestSender.UpdateManifest(e.currentManifest()) } -func (e *testEnv) waitForManifest() { +func (e *testEnv) requireManifestPropagatedEventually(timeout time.Duration) { e.t.Helper() newManifest := e.currentManifest() - e.waitForCondition(func() bool { - for _, n := range e.nodes { - if n.f3 == nil { - continue - } - - m := n.f3.Manifest() - if m == nil { - return false - } - if !newManifest.Equal(m) { - return false + e.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(e.t, func() bool { + for _, n := range e.nodes { + if n.f3 == nil { + continue + } + m := n.f3.Manifest() + if m == nil { + return false + } + if !newManifest.Equal(m) { + return false + } } - } - return true - }, 60*time.Second) -} - -func (e *testEnv) waitForCondition(condition func() bool, timeout time.Duration) { - e.t.Helper() - start := time.Now() - for !condition() { - if time.Since(start) > timeout { - e.t.Fatalf("test took too long (more than %s)", timeout) - } - e.advance() - } + return true + }, timeout, conditionCheckInterval, "Manifest did not propagate in time. Environment: %s", e) + }) } // waits for all nodes to reach a specific instance number. 
// If the `strict` flag is enabled the check also applies to the non-running nodes
-func (e *testEnv) waitForInstanceNumber(instanceNumber uint64, timeout time.Duration, strict bool) {
+func (e *testEnv) requireInstanceEventually(instanceNumber uint64, timeout time.Duration, strict bool) {
 	e.t.Helper()
-	e.waitForCondition(func() bool {
-		for _, n := range e.nodes {
-			// nodes that are not running are not required to reach the instance
-			// (it will actually panic if we try to fetch it because there is no
-			// runner initialized)
-			if n.f3 == nil || !n.f3.IsRunning() {
-				if strict {
-					return false
+	e.whileAdvancingClock(advanceClockBy, advanceClockEvery,
+		func() {
+			require.Eventually(e.t, func() bool {
+				for _, n := range e.nodes {
+					// nodes that are not running are not required to reach the instance
+					// (it will actually panic if we try to fetch it because there is no
+					// runner initialized)
+					if n.f3 == nil || !n.f3.IsRunning() {
+						if strict {
+							return false
+						}
+						continue
+					}
+					if n.currentGpbftInstance() < instanceNumber {
+						return false
+					}
 				}
-				continue
-			}
-			if n.currentGpbftInstance() < instanceNumber {
-				return false
-			}
-		}
-		return true
-	}, timeout)
-}
-
-func (e *testEnv) advance() {
-	e.clock.Add(500 * time.Millisecond)
+				return true
+			}, timeout, conditionCheckInterval, "Instance %d not reached in time. Environment: %s", instanceNumber, e)
+		},
+	)
 }

 func (e *testEnv) withNodes(n int) *testEnv {
-	for i := 0; i < n; i++ {
+	for range n {
 		e.addNode()
 	}
-
 	return e
 }

-// waits for all nodes to reach a specific instance number.
-// If the `strict` flag is enabled the check also applies to the non-running nodes
-func (e *testEnv) waitForEpochFinalized(epoch int64) {
+func (e *testEnv) whileAdvancingClock(advanceClockBy, advanceEvery time.Duration, do func()) {
+	e.t.Helper()
+	ctx, cancel := context.WithCancel(e.testCtx)
+	defer cancel()
+	go func() {
+		ticker := time.NewTicker(advanceEvery)
+
+		defer ticker.Stop()
+		for ctx.Err() == nil {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				e.clock.Add(advanceClockBy)
+			}
+		}
+	}()
+	do()
+}
+
+func (e *testEnv) requireEpochFinalizedEventually(advanceClockBy, advanceEvery time.Duration, epoch int64, timeout time.Duration) {
 	e.t.Helper()
-	for {
-		head := e.ec.GetCurrentHead()
-		if head > e.manifest.BootstrapEpoch {
-			e.waitForCondition(func() bool {
-				// TODO: the advancing logic relative to condition check and specially the way
-				// waitForEpochFinalized advances the clock needs rework. Under race detector
-				// the current logic advances too fast, where the QUALITY phase is likely to
-				// timeout resulting in repeated decision to base. For now, increase the wait
-				// here and reduce the clock advance to give messages a chance of being
-				// delivered in time. 
See: - // - https://github.com/filecoin-project/go-f3/issues/818 - for _, nd := range e.nodes { - if nd.f3 == nil || !nd.f3.IsRunning() { + e.whileAdvancingClock(advanceClockBy, advanceEvery, + func() { + require.Eventually(e.t, func() (_met bool) { + for _, n := range e.nodes { + if n.f3 == nil || !n.f3.IsRunning() { continue } - cert, err := nd.f3.GetLatestCert(e.testCtx) + cert, err := n.f3.GetLatestCert(e.testCtx) if cert == nil || err != nil { continue } - if cert.ECChain.Head().Epoch >= head { + if cert.ECChain.Head().Epoch >= epoch { return true } } return false - }, 120*time.Second) - } - - if head < epoch-100 { - e.clock.Add(100 * e.manifest.EC.Period) - } else if head < epoch-10 { - e.clock.Add(10 * e.manifest.EC.Period) - } else if head < epoch { - e.clock.Add(1 * e.manifest.EC.Period) - } else { - break - } - } + }, timeout, conditionCheckInterval, "Epoch %d not reached in time. Environment: %s", epoch, e) + }, + ) } func newTestEnvironment(t *testing.T) *testEnv { @@ -609,7 +612,7 @@ func (e *testEnv) addNode() *testNode { return n } -func (e *testEnv) requireEqualManifests(strict bool) { +func (e *testEnv) requireConsistentManifest(strict bool) { m := e.nodes[0].f3.Manifest() for _, n := range e.nodes { // only check running nodes @@ -619,30 +622,46 @@ func (e *testEnv) requireEqualManifests(strict bool) { } } -func (e *testEnv) waitFor(f func(n *testNode) bool, timeout time.Duration) { - e.t.Helper() - e.waitForCondition(func() bool { - for _, n := range e.nodes { - if !f(n) { - return false - } - } - return true - }, timeout) +type matcher func(*testNode) bool + +var matchers = struct { + allNodes matcher +}{ + allNodes: func(*testNode) bool { return true }, } -func (e *testEnv) waitForNodesInitialization() { +func (e *testEnv) requireF3RunningEventually(timeout time.Duration, matcher matcher) { e.t.Helper() - e.waitFor(func(n *testNode) bool { - return n.f3.IsRunning() - }, 30*time.Second) + e.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(e.t, func() bool { + for _, n := range e.nodes { + if !matcher(n) { + continue + } + if !n.f3.IsRunning() { + return false + } + } + return true + }, timeout, conditionCheckInterval, "F3 not running in time. Environment: %s", e) + }) } -func (e *testEnv) waitForNodesStopped() { +func (e *testEnv) requireF3NotRunningEventually(timeout time.Duration, matcher matcher) { e.t.Helper() - e.waitFor(func(n *testNode) bool { - return !n.f3.IsRunning() - }, 30*time.Second) + e.whileAdvancingClock(advanceClockBy, advanceClockEvery, func() { + require.Eventually(e.t, func() bool { + for _, n := range e.nodes { + if !matcher(n) { + continue + } + if n.f3.IsRunning() { + return false + } + } + return true + }, timeout, conditionCheckInterval, "F3 still running after timeout. Environment: %s", e) + }) } // Connect & link all nodes. This operation is idempotent. 
@@ -695,7 +714,7 @@ func (e *testEnv) start() *testEnv { } // wait for nodes to initialize - e.waitForNodesInitialization() + e.requireF3RunningEventually(30*time.Second, matchers.allNodes) // If it exists, start the manifest sender if e.manifestSender != nil { @@ -725,8 +744,7 @@ func (e *testEnv) withDynamicManifest() *testEnv { ps, err := pubsub.NewGossipSub(e.testCtx, h, pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) require.NoError(e.t, err) - e.manifestSender, err = manifest.NewManifestSender(e.testCtx, h, ps, - e.currentManifest(), manifestSenderTimeout) + e.manifestSender, err = manifest.NewManifestSender(e.testCtx, h, ps, e.currentManifest(), manifestPublishInterval) require.NoError(e.t, err) return e @@ -736,28 +754,13 @@ func (e *testEnv) injectDatastoreFailures(i int, fn func(op string) error) { e.nodes[i].dsErrFunc = fn } -func (e *testEnv) logStatus() { - e.t.Helper() - - ctx, cancel := context.WithCancel(e.testCtx) - e.t.Cleanup(cancel) - - go func() { - latest := make([]testNodeStatus, len(e.nodes)) - for ctx.Err() == nil { - for i, n := range e.nodes { - current := n.status() - if len(latest) == i { - latest = append(latest, testNodeStatus{}) - } - if !reflect.DeepEqual(current, latest[i]) { - latest[i] = current - e.t.Log(current.String()) - } - } - time.Sleep(5 * time.Millisecond) - } - }() +func (e *testEnv) String() string { + var out strings.Builder + for _, n := range e.nodes { + out.WriteString(fmt.Sprintln()) + out.WriteString(n.status().String()) + } + return out.String() } // TODO: This code is copy-pasta from cmd/f3/run.go, consider taking it out into a shared testing lib. diff --git a/partial_msg.go b/partial_msg.go index 5155db8c..775025da 100644 --- a/partial_msg.go +++ b/partial_msg.go @@ -202,7 +202,7 @@ func (pmm *partialMessageManager) Start(ctx context.Context) (<-chan *PartiallyV if current != nil { current.Timestamp = roundDownToUnixTime(t, pmm.rebroadcastInterval) if err := pmm.chainex.Broadcast(ctx, *current); err != nil { - log.Errorw("Failed to re-broadcast chain.", "instance", current.Instance, "chain", current.Chain, "error", err) + log.Debugw("Failed to re-broadcast chain.", "instance", current.Instance, "chain", current.Chain, "error", err) } } case pending, ok := <-pmm.pendingChainBroadcasts: @@ -217,7 +217,7 @@ func (pmm *partialMessageManager) Start(ctx context.Context) (<-chan *PartiallyV current = &pending current.Timestamp = roundDownToUnixTime(time.Now(), pmm.rebroadcastInterval) if err := pmm.chainex.Broadcast(ctx, *current); err != nil { - log.Errorw("Failed to immediately re-broadcast chain.", "instance", current.Instance, "chain", current.Chain, "error", err) + log.Debugw("Failed to immediately re-broadcast chain.", "instance", current.Instance, "chain", current.Chain, "error", err) } ticker.Reset(pmm.rebroadcastInterval) case pending.Instance == current.Instance: