tbcd: fix startup/shutdown messages #378

Merged · 2 commits · Jan 16, 2025
service/tbc/peermanager.go (9 additions, 6 deletions)
@@ -1,4 +1,4 @@
-// Copyright (c) 2024 Hemi Labs, Inc.
+// Copyright (c) 2024-2025 Hemi Labs, Inc.
 // Use of this source code is governed by the MIT License,
 // which can be found in the LICENSE file.
 
@@ -393,7 +393,7 @@ func (pm *PeerManager) Run(ctx context.Context) error {
     defer log.Tracef("Run")
 
     if len(pm.seeds) == 0 {
-        log.Infof("Starting DNS seeder")
+        log.Infof("DNS seeder started")
         minW := 5
         maxW := 59
         for {
@@ -415,8 +415,8 @@ func (pm *PeerManager) Run(ctx context.Context) error {
     }
     pm.HandleAddr(pm.seeds) // Add all seeds to good list
 
-    log.Infof("Starting peer manager")
-    defer log.Infof("Peer manager stopped")
+    log.Debugf("Peer manager starting")
+    defer log.Debugf("Peer manager stopped")
 
     // Start connecting "want" number of peers.
     pm.slotsC = make(chan int, pm.want)
@@ -436,8 +436,11 @@ func (pm *PeerManager) Run(ctx context.Context) error {
             go pm.connectSlot(ctx, p)
 
         case <-ctx.Done():
-            log.Infof("exit")
-            return ctx.Err()
+            err := ctx.Err()
+            if errors.Is(err, context.Canceled) {
+                return nil
+            }
+            return err
         }
     }
 }
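
The heart of the peermanager.go change is the last hunk: a cancellation delivered through the context is now treated as an orderly shutdown and reported as nil rather than logged and returned as an error. Below is a minimal, self-contained sketch of that pattern; the runLoop helper and its timer are illustrative only and are not part of this PR.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runLoop blocks until its context ends. A plain cancellation is treated
// as an orderly shutdown and reported as nil; any other context error
// (for example a deadline) is propagated to the caller.
func runLoop(ctx context.Context) error {
	for {
		select {
		case <-time.After(50 * time.Millisecond):
			// periodic work would go here
		case <-ctx.Done():
			err := ctx.Err()
			if errors.Is(err, context.Canceled) {
				return nil // clean shutdown, nothing to report
			}
			return err
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel() // simulate a SIGTERM-driven shutdown
	}()
	fmt.Println("runLoop returned:", runLoop(ctx))
}

With this shape, a caller only sees a non-nil error when something other than a plain cancel ended the loop, which is what lets the caller in tbc.go below distinguish a clean shutdown from a failure.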
service/tbc/tbc.go (23 additions, 3 deletions)
@@ -862,7 +862,8 @@ func (s *Server) downloadBlock(ctx context.Context, p *rawpeer.RawPeer, ch *chai
     err := p.Write(defaultCmdTimeout, getData)
     if err != nil {
         if !errors.Is(err, net.ErrClosed) &&
-            !errors.Is(err, os.ErrDeadlineExceeded) {
+            !errors.Is(err, os.ErrDeadlineExceeded) &&
+            !errors.Is(err, rawpeer.ErrNoConn) {
             log.Errorf("download block write: %v %v", p, err)
         }
     }
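
This hunk widens the set of errors treated as a normal consequence of a peer disconnect or shutdown so they are filtered out before logging. A small sketch of the same filtering idea follows; errNoConn is only a stand-in for rawpeer.ErrNoConn, whose definition is not shown in this diff.

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
)

// errNoConn stands in for rawpeer.ErrNoConn; the real sentinel lives in
// the rawpeer package and is only assumed here for illustration.
var errNoConn = errors.New("no connection")

// expectedDisconnect reports whether err is one of the errors that are
// normal when a peer goes away or the node is shutting down and which
// therefore should not be logged as failures.
func expectedDisconnect(err error) bool {
	return errors.Is(err, net.ErrClosed) ||
		errors.Is(err, os.ErrDeadlineExceeded) ||
		errors.Is(err, errNoConn)
}

func main() {
	errs := []error{net.ErrClosed, os.ErrDeadlineExceeded, errNoConn, errors.New("boom")}
	for _, err := range errs {
		if expectedDisconnect(err) {
			continue // quiet: expected during shutdown or peer loss
		}
		fmt.Println("download block write:", err)
	}
}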
@@ -912,6 +913,14 @@ func (s *Server) handleBlockExpired(ctx context.Context, key any, value any) err
     log.Tracef("handleBlockExpired")
     defer log.Tracef("handleBlockExpired exit")
 
+    // handleBlockExpired is called numerous times after SIGTERM. This call
+    // will fail with a database closed error and is very loud.
+    select {
+    case <-ctx.Done():
+        return nil
+    default:
+    }
+
     p, ok := value.(*rawpeer.RawPeer)
     if !ok {
         // this really should not happen
@@ -1116,6 +1125,11 @@ func (s *Server) syncBlocks(ctx context.Context) {
             // This can happen during startup or when the network
             // is starved.
             // XXX: Probably too loud, remove later.
+            select {
+            case <-ctx.Done():
+                return
+            default:
+            }
             log.Errorf("random peer %v: %v", hashS, err)
             return
         }
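
Both hunks above add the same guard: a non-blocking select on ctx.Done() that quietly bails out once shutdown has begun, so callbacks that keep firing after SIGTERM do not flood the log with errors from an already-closed database. Here is a self-contained sketch of that guard, with a hypothetical doWork callback standing in for handleBlockExpired and the syncBlocks error path.

package main

import (
	"context"
	"fmt"
)

// doWork stands in for a callback that may keep firing after shutdown has
// started. The non-blocking select at the top turns those late
// invocations into quiet no-ops.
func doWork(ctx context.Context, n int) error {
	select {
	case <-ctx.Done():
		return nil // shutting down: skip the work and stay quiet
	default:
	}
	fmt.Println("processing item", n)
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	_ = doWork(ctx, 1) // runs normally
	cancel()           // simulate SIGTERM
	_ = doWork(ctx, 2) // silently skipped
}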
@@ -2470,7 +2484,9 @@ func (s *Server) Run(pctx context.Context) error {
         defer s.wg.Done()
         err := s.promPoll(ctx)
         if err != nil {
-            log.Errorf("prometheus poll terminated with error: %v", err)
+            if !errors.Is(err, context.Canceled) {
+                log.Errorf("prometheus poll terminated with error: %v", err)
+            }
             return
         }
     }()
@@ -2480,11 +2496,15 @@ func (s *Server) Run(pctx context.Context) error {
     s.wg.Add(1)
     go func() {
         defer s.wg.Done()
-        if err := s.pm.Run(ctx); err != nil {
+        err := s.pm.Run(ctx)
+        log.Infof("Peer manager shutting down")
+        if err != nil {
             select {
             case errC <- err:
             default:
             }
+        } else {
+            log.Infof("Peer manager clean shutdown")
         }
     }()
 
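The final hunk splits the peer manager shutdown into two distinct outcomes: a real error is forwarded through errC with a non-blocking send, while a nil return (the clean case produced by the peermanager.go change) is logged as a clean shutdown. A rough, self-contained sketch of that wiring follows; worker, errC, and the printed messages are illustrative stand-ins rather than the actual server code.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// worker stands in for a long-running service such as the peer manager:
// it returns nil on a cancel-driven shutdown and the context error
// otherwise (for example a deadline).
func worker(ctx context.Context) error {
	<-ctx.Done()
	if errors.Is(ctx.Err(), context.Canceled) {
		return nil
	}
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	errC := make(chan error, 1)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := worker(ctx)
		fmt.Println("worker shutting down")
		if err != nil {
			// Non-blocking send: only the first fatal error needs to
			// reach the supervisor; later ones are dropped here.
			select {
			case errC <- err:
			default:
			}
		} else {
			fmt.Println("worker clean shutdown")
		}
	}()

	cancel() // simulate a SIGTERM-driven shutdown
	wg.Wait()

	select {
	case err := <-errC:
		fmt.Println("fatal:", err)
	default:
		fmt.Println("no fatal error reported")
	}
}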