Skip to content

Commit

Permalink
Merge pull request #229 from bitcoin-sv/fix/BUX-668-synch-stuck-on-stale-tip
Browse files Browse the repository at this point in the history

fix(BUX-668): fix the synchronisation when we choose a tip that is later considered stale by the network.
  • Loading branch information
dorzepowski authored Apr 2, 2024
2 parents 1717fd5 + d2887b6 commit 1899f55
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 8 deletions.
15 changes: 15 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,18 @@ services:
- DB_PREPAREDDBFILEPATH=./data/blockheaders.csv.gz
- HTTP_AUTHTOKEN=admin_only_afUMlv5iiDgQtj22O9n5fADeSb
restart: unless-stopped

db:
container_name: p2p-headers-db
image: postgres:13
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=postgres
volumes:
- db-data:/var/lib/postgresql/data
restart: unless-stopped

volumes:
db-data:
driver: local
18 changes: 10 additions & 8 deletions transports/p2p/p2psync/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -606,14 +606,16 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
// from the block after this one up to the end of the chain (zero hash).
sm.log.Info().Msgf("Reached the final checkpoint -- switching to normal mode")
sm.log.Info().Msgf("Reached the final checkpoint -- lastHash: %#v", finalHash.String())
sm.sendGetHeadersWithPassedParams([]*chainhash.Hash{finalHash}, &zeroHash, peer)
locator := sm.Services.Headers.LatestHeaderLocator()
sm.sendGetHeadersWithPassedParams(locator, &zeroHash, peer)
return
}

// This header is not a checkpoint, so request the next batch of
// headers starting from the latest known header and ending with the
// next checkpoint.
sm.sendGetHeadersWithPassedParams([]*chainhash.Hash{finalHash}, sm.nextCheckpoint.Hash, peer)
locator := sm.Services.Headers.LatestHeaderLocator()
sm.sendGetHeadersWithPassedParams(locator, sm.nextCheckpoint.Hash, peer)
}

func (sm *SyncManager) requestForNextHeaderBatch(prevHash *chainhash.Hash, peer *peerpkg.Peer, prevHeight int32) {
Expand Down Expand Up @@ -676,9 +678,6 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
}
sm.log.Info().Msgf("[Headers] handleInvMsg, peer.ID: %d, invType: %s", imsg.peer.ID(), typeMap[imsg.inv.InvList[0].Type])

lastHeader := sm.Services.Headers.GetTip()
sm.log.Info().Msgf("[Manager] handleInvMsg lastHeaderNode.height : %d", lastHeader.Height)

peer := imsg.peer
_, exists := sm.peerStates[peer]
if !exists {
Expand Down Expand Up @@ -712,15 +711,18 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
blkHeight, err := sm.Services.Headers.GetHeightByHash(&invVects[lastBlock].Hash)
if err == nil {
peer.UpdateLastBlockHeight(blkHeight)
// we know about the block, no need to fetch it
return
}
}

if lastBlock != -1 {
sm.log.Info().Msgf("[Manager] handleInvMsg lastConfirmedHeaderNode.hash : %s", lastHeader.Hash)
sm.log.Info().Msgf("[Manager] handleInvMsg lastConfirmedHeaderNode.height : %d", lastHeader.Height)
locator := sm.Services.Headers.LatestHeaderLocator()
sm.log.Info().Msgf("[Manager] handleInvMsg tip hash : %s", locator[0])
sm.log.Info().Msgf("[Manager] handleInvMsg &invVects[lastBlock].Hash : %v", &invVects[lastBlock].Hash)
sm.log.Info().Msg("[Manager] handleInvMsg requesting for all the headers since our tip")

sm.sendGetHeadersWithPassedParams([]*chainhash.Hash{&lastHeader.Hash}, &invVects[lastBlock].Hash, peer)
sm.sendGetHeadersWithPassedParams(locator, &zeroHash, peer)
}
}

Expand Down

0 comments on commit 1899f55

Please sign in to comment.