From 2182f9f24a09614e610a9b76b3558febc0c13109 Mon Sep 17 00:00:00 2001 From: alainjr10 Date: Fri, 28 Jun 2024 13:15:11 +0100 Subject: [PATCH 1/4] feat: add new getcfilters message draft --- blockchain/chainio.go | 26 ++++++++ blockchain/indexers/cfindex.go | 5 +- blockchain/indexers/utreexoproofindex.go | 84 +++++++++++++++++++++++- btcutil/gcs/builder/builder.go | 16 +++++ server.go | 2 + wire/msgcfilter.go | 1 + 6 files changed, 132 insertions(+), 2 deletions(-) diff --git a/blockchain/chainio.go b/blockchain/chainio.go index fdcbb4c3..3a86fcd6 100644 --- a/blockchain/chainio.go +++ b/blockchain/chainio.go @@ -1080,6 +1080,32 @@ func SerializeUtreexoRoots(numLeaves uint64, roots []utreexo.Hash) ([]byte, erro return w.Bytes(), nil } +// SerializeUtreexoRootsHash serializes the numLeaves and the roots into a byte slice. +// it takes in a slice of chainhash.Hash instead of utreexo.Hash. chainhash.Hash is the hashed +// value of the utreexo.Hash. +func SerializeUtreexoRootsHash(numLeaves uint64, roots []*chainhash.Hash) ([]byte, error) { + // 8 byte NumLeaves + (32 byte roots * len(roots)) + w := bytes.NewBuffer(make([]byte, 0, 8+(len(roots)*chainhash.HashSize))) + + // Write the NumLeaves first. + var buf [8]byte + byteOrder.PutUint64(buf[:], numLeaves) + _, err := w.Write(buf[:]) + if err != nil { + return nil, err + } + + // Then write the roots. + for _, root := range roots { + _, err = w.Write(root[:]) + if err != nil { + return nil, err + } + } + + return w.Bytes(), nil +} + // DeserializeUtreexoRoots deserializes the provided byte slice into numLeaves and roots. func DeserializeUtreexoRoots(serializedUView []byte) (uint64, []utreexo.Hash, error) { totalLen := len(serializedUView) diff --git a/blockchain/indexers/cfindex.go b/blockchain/indexers/cfindex.go index a1394ccd..044a1734 100644 --- a/blockchain/indexers/cfindex.go +++ b/blockchain/indexers/cfindex.go @@ -33,19 +33,22 @@ var ( // cfIndexKeys is an array of db bucket names used to house indexes of // block hashes to cfilters. cfIndexKeys = [][]byte{ - []byte("cf0byhashidx"), + []byte("cf0byhashidx"), // bucket for basic filter indexes + []byte("cf1byhashidx"), // bucket for UtreexoCFilter } // cfHeaderKeys is an array of db bucket names used to house indexes of // block hashes to cf headers. cfHeaderKeys = [][]byte{ []byte("cf0headerbyhashidx"), + []byte("cf1headerbyhashidx"), } // cfHashKeys is an array of db bucket names used to house indexes of // block hashes to cf hashes. cfHashKeys = [][]byte{ []byte("cf0hashbyhashidx"), + []byte("cf1hashbyhashidx"), } maxFilterType = uint8(len(cfHeaderKeys) - 1) diff --git a/blockchain/indexers/utreexoproofindex.go b/blockchain/indexers/utreexoproofindex.go index 9b7d7338..00c13fcf 100644 --- a/blockchain/indexers/utreexoproofindex.go +++ b/blockchain/indexers/utreexoproofindex.go @@ -6,12 +6,14 @@ package indexers import ( "bytes" + "errors" "fmt" "sync" "github.com/utreexo/utreexo" "github.com/utreexo/utreexod/blockchain" "github.com/utreexo/utreexod/btcutil" + "github.com/utreexo/utreexod/btcutil/gcs/builder" "github.com/utreexo/utreexod/chaincfg" "github.com/utreexo/utreexod/chaincfg/chainhash" "github.com/utreexo/utreexod/database" @@ -236,6 +238,58 @@ func (idx *UtreexoProofIndex) Create(dbTx database.Tx) error { return nil } +// storeFilter stores a given filter, and performs the steps needed to +// generate the filter's header. 
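For reference, the layout written above (an 8-byte numLeaves prefix followed by one 32-byte hash per root) can be decoded with a matching reader. The sketch below is illustrative only: the helper name is hypothetical, it assumes the package-level byteOrder used by chainio.go is binary.LittleEndian, and it needs the encoding/binary and fmt imports in addition to chainhash.

// deserializeUtreexoRootsHash is a hypothetical inverse of
// SerializeUtreexoRootsHash, sketched for illustration. It parses the 8-byte
// numLeaves prefix and then one 32-byte chainhash.Hash per root.
func deserializeUtreexoRootsHash(b []byte) (uint64, []*chainhash.Hash, error) {
	if len(b) < 8 || (len(b)-8)%chainhash.HashSize != 0 {
		return 0, nil, fmt.Errorf("unexpected serialized length %d", len(b))
	}
	numLeaves := binary.LittleEndian.Uint64(b[:8])

	roots := make([]*chainhash.Hash, 0, (len(b)-8)/chainhash.HashSize)
	for off := 8; off < len(b); off += chainhash.HashSize {
		root, err := chainhash.NewHash(b[off : off+chainhash.HashSize])
		if err != nil {
			return 0, nil, err
		}
		roots = append(roots, root)
	}
	return numLeaves, roots, nil
}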
+func storeUtreexoCFilter(dbTx database.Tx, block *btcutil.Block, filterData []byte, + filterType wire.FilterType) error { + if uint8(filterType) > maxFilterType { + return errors.New("unsupported filter type") + } + + // Figure out which buckets to use. + fkey := cfIndexKeys[filterType] + hkey := cfHeaderKeys[filterType] + hashkey := cfHashKeys[filterType] + + // Start by storing the filter. + h := block.Hash() + err := dbStoreFilterIdxEntry(dbTx, fkey, h, filterData) + if err != nil { + return err + } + + // Next store the filter hash. + filterHash := chainhash.DoubleHashH(filterData) + err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:]) + if err != nil { + return err + } + + // Then fetch the previous block's filter header. + var prevHeader *chainhash.Hash + ph := &block.MsgBlock().Header.PrevBlock + if ph.IsEqual(&zeroHash) { + prevHeader = &zeroHash + } else { + pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph) + if err != nil { + return err + } + + // Construct the new block's filter header, and store it. + prevHeader, err = chainhash.NewHash(pfh) + if err != nil { + return err + } + } + + fh, err := builder.MakeHeaderForUtreexoCFilter(filterData, *prevHeader) + if err != nil { + return err + } + return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:]) +} + // ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. // @@ -299,8 +353,36 @@ func (idx *UtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Bloc if err != nil { return err } + blockHash := block.Hash() + var serializedUtreexo []byte + isCSN := true + var leaves uint64 + var roots []*chainhash.Hash - return nil + // For compact state nodes + if isCSN { + viewPoint, err := idx.chain.FetchUtreexoViewpoint(blockHash) + if err != nil { + return err + } + roots = viewPoint.GetRoots() + leaves = viewPoint.NumLeaves() + } else { // for bridge nodes + uroots, uleaves, err := idx.FetchUtreexoState(dbTx, blockHash) + if err != nil { + return err + } + roots = uroots + leaves = uleaves + } + + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err = blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + return err + } + + return storeUtreexoCFilter(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) } // getUndoData returns the data needed for undo. For pruned nodes, we fetch the data from diff --git a/btcutil/gcs/builder/builder.go b/btcutil/gcs/builder/builder.go index fe11b80d..33bbd3e8 100644 --- a/btcutil/gcs/builder/builder.go +++ b/btcutil/gcs/builder/builder.go @@ -369,3 +369,19 @@ func MakeHeaderForFilter(filter *gcs.Filter, prevHeader chainhash.Hash) (chainha // above. return chainhash.DoubleHashH(filterTip), nil } + +// MakeHeaderForUtreexoCFilter makes a filter chain header for a utreexoc filter, given the +// filter data and the previous filter chain header. +func MakeHeaderForUtreexoCFilter(filterData []byte, prevHeader chainhash.Hash) (chainhash.Hash, error) { + filterTip := make([]byte, 2*chainhash.HashSize) + filterHash := chainhash.DoubleHashH(filterData) + + // In the buffer we created above we'll compute hash || prevHash as an + // intermediate value. + copy(filterTip, filterHash[:]) + copy(filterTip[chainhash.HashSize:], prevHeader[:]) + + // The final filter hash is the double-sha256 of the hash computed + // above. 
+ return chainhash.DoubleHashH(filterTip), nil +} diff --git a/server.go b/server.go index b62f71f1..a461d848 100644 --- a/server.go +++ b/server.go @@ -867,6 +867,7 @@ func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) { // filters that we actually currently maintain. switch msg.FilterType { case wire.GCSFilterRegular: + case wire.UtreexoCFilter: break default: @@ -923,6 +924,7 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { // headers for filters that we actually currently maintain. switch msg.FilterType { case wire.GCSFilterRegular: + case wire.UtreexoCFilter: break default: diff --git a/wire/msgcfilter.go b/wire/msgcfilter.go index 682e9fd2..1e33d6e2 100644 --- a/wire/msgcfilter.go +++ b/wire/msgcfilter.go @@ -17,6 +17,7 @@ type FilterType uint8 const ( // GCSFilterRegular is the regular filter type. GCSFilterRegular FilterType = iota + UtreexoCFilter ) const ( From 65cbe7cf6c64f8e55bbbff6b740cabe5f27fd5e5 Mon Sep 17 00:00:00 2001 From: alainjr10 Date: Fri, 28 Jun 2024 15:57:06 +0100 Subject: [PATCH 2/4] fix: change static isCSN variable to to actual check --- blockchain/indexers/utreexoproofindex.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/blockchain/indexers/utreexoproofindex.go b/blockchain/indexers/utreexoproofindex.go index 00c13fcf..f9820461 100644 --- a/blockchain/indexers/utreexoproofindex.go +++ b/blockchain/indexers/utreexoproofindex.go @@ -355,12 +355,11 @@ func (idx *UtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Bloc } blockHash := block.Hash() var serializedUtreexo []byte - isCSN := true var leaves uint64 var roots []*chainhash.Hash // For compact state nodes - if isCSN { + if idx.chain.IsUtreexoViewActive() { viewPoint, err := idx.chain.FetchUtreexoViewpoint(blockHash) if err != nil { return err From 90d347a791d2c6df761603edb8ad7ca895ba8207 Mon Sep 17 00:00:00 2001 From: alainjr10 Date: Fri, 5 Jul 2024 13:40:16 +0100 Subject: [PATCH 3/4] fix: rm redundant storing of roots --- blockchain/indexers/cfindex.go | 2 - blockchain/indexers/flatutreexoproofindex.go | 19 +- blockchain/indexers/utreexoproofindex.go | 26 +- server.go | 405 +++++++++++++------ 4 files changed, 311 insertions(+), 141 deletions(-) diff --git a/blockchain/indexers/cfindex.go b/blockchain/indexers/cfindex.go index 044a1734..72ea66cf 100644 --- a/blockchain/indexers/cfindex.go +++ b/blockchain/indexers/cfindex.go @@ -34,7 +34,6 @@ var ( // block hashes to cfilters. cfIndexKeys = [][]byte{ []byte("cf0byhashidx"), // bucket for basic filter indexes - []byte("cf1byhashidx"), // bucket for UtreexoCFilter } // cfHeaderKeys is an array of db bucket names used to house indexes of @@ -48,7 +47,6 @@ var ( // block hashes to cf hashes. cfHashKeys = [][]byte{ []byte("cf0hashbyhashidx"), - []byte("cf1hashbyhashidx"), } maxFilterType = uint8(len(cfHeaderKeys) - 1) diff --git a/blockchain/indexers/flatutreexoproofindex.go b/blockchain/indexers/flatutreexoproofindex.go index 72a77463..c60d6758 100644 --- a/blockchain/indexers/flatutreexoproofindex.go +++ b/blockchain/indexers/flatutreexoproofindex.go @@ -354,7 +354,24 @@ func (idx *FlatUtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil. 
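Given MakeHeaderForUtreexoCFilter above and the new wire.UtreexoCFilter type, a receiving peer can check a cfilter payload against a header it already trusts by recomputing the same double-SHA256 construction. A minimal client-side sketch, assuming only the chainhash package; the function name is illustrative:

// verifyUtreexoCFilter recomputes DoubleHashH(DoubleHashH(filterData) || prevHeader)
// and compares it to the expected filter header for the block.
func verifyUtreexoCFilter(filterData []byte, prevHeader, wantHeader chainhash.Hash) bool {
	filterHash := chainhash.DoubleHashH(filterData)

	buf := make([]byte, 2*chainhash.HashSize)
	copy(buf, filterHash[:])
	copy(buf[chainhash.HashSize:], prevHeader[:])

	got := chainhash.DoubleHashH(buf)
	return got.IsEqual(&wantHeader)
}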
} } - return nil + blockHash := block.Hash() + height, err := idx.chain.BlockHeightByHash(blockHash) + if err != nil { + return err + } + + roots, leaves, err := idx.FetchUtreexoState(height) + if err != nil { + return err + } + + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err := blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + return err + } + + return storeUtreexoCFilterHeader(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) } // calcProofOverhead calculates the overhead of the current utreexo accumulator proof diff --git a/blockchain/indexers/utreexoproofindex.go b/blockchain/indexers/utreexoproofindex.go index 67af564c..af333189 100644 --- a/blockchain/indexers/utreexoproofindex.go +++ b/blockchain/indexers/utreexoproofindex.go @@ -238,34 +238,18 @@ func (idx *UtreexoProofIndex) Create(dbTx database.Tx) error { return nil } -// storeFilter stores a given filter, and performs the steps needed to -// generate the filter's header. -func storeUtreexoCFilter(dbTx database.Tx, block *btcutil.Block, filterData []byte, +// storeUtreexoCFilter stores a given utreexocfilter header +func storeUtreexoCFilterHeader(dbTx database.Tx, block *btcutil.Block, filterData []byte, filterType wire.FilterType) error { if uint8(filterType) > maxFilterType { return errors.New("unsupported filter type") } - // Figure out which buckets to use. - fkey := cfIndexKeys[filterType] + // Figure out which header bucket to use. hkey := cfHeaderKeys[filterType] - hashkey := cfHashKeys[filterType] - - // Start by storing the filter. h := block.Hash() - err := dbStoreFilterIdxEntry(dbTx, fkey, h, filterData) - if err != nil { - return err - } - - // Next store the filter hash. - filterHash := chainhash.DoubleHashH(filterData) - err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:]) - if err != nil { - return err - } - // Then fetch the previous block's filter header. + // fetch the previous block's filter header. var prevHeader *chainhash.Hash ph := &block.MsgBlock().Header.PrevBlock if ph.IsEqual(&zeroHash) { @@ -381,7 +365,7 @@ func (idx *UtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Bloc return err } - return storeUtreexoCFilter(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) + return storeUtreexoCFilterHeader(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) } // getUndoData returns the data needed for undo. For pruned nodes, we fetch the data from diff --git a/server.go b/server.go index b3ed4793..ff37dddd 100644 --- a/server.go +++ b/server.go @@ -863,172 +863,343 @@ func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) { return } + var hashes []chainhash.Hash + var hashPtrs []*chainhash.Hash + // if the filter type is supported, we initialize variables to avoid duplicate code + if msg.FilterType == wire.GCSFilterRegular || msg.FilterType == wire.UtreexoCFilter { + var err error + // get the block hashes included in the getcfilters message + hashes, err = sp.server.chain.HeightToHashRange( + int32(msg.StartHeight), &msg.StopHash, wire.MaxGetCFiltersReqRange, + ) + if err != nil { + peerLog.Debugf("Invalid getcfilters request: %v", err) + return + } + + // Create []*chainhash.Hash from []chainhash.Hash to pass to + // FiltersByBlockHashes. + hashPtrs = make([]*chainhash.Hash, len(hashes)) + for i := range hashes { + hashPtrs[i] = &hashes[i] + } + } + // We'll also ensure that the remote party is requesting a set of // filters that we actually currently maintain. 
switch msg.FilterType { case wire.GCSFilterRegular: + filters, err := sp.server.cfIndex.FiltersByBlockHashes( + hashPtrs, msg.FilterType, + ) + if err != nil { + peerLog.Errorf("Error retrieving cfilters: %v", err) + return + } + + for i, filterBytes := range filters { + if len(filterBytes) == 0 { + peerLog.Warnf("Could not obtain cfilter for %v", + hashes[i]) + return + } + + filterMsg := wire.NewMsgCFilter( + msg.FilterType, &hashes[i], filterBytes, + ) + sp.QueueMessage(filterMsg, nil) + } + case wire.UtreexoCFilter: - break + for i, blockHash := range hashPtrs { + var serializedUtreexo []byte + + leaves, roots, err := sp.getUtreexoRoots(blockHash) + if err != nil { + return + } + + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err = blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + peerLog.Errorf("error serializing utreexoc filter: %v", err) + return + } + + if len(serializedUtreexo) == 0 { + peerLog.Warnf("Could not obtain utreexocfilter for %v", + hashes[i]) + return + } + + filterMsg := wire.NewMsgCFilter( + msg.FilterType, &hashes[i], serializedUtreexo, + ) + sp.QueueMessage(filterMsg, nil) + } default: peerLog.Debug("Filter request for unknown filter: %v", msg.FilterType) return } +} - hashes, err := sp.server.chain.HeightToHashRange( - int32(msg.StartHeight), &msg.StopHash, wire.MaxGetCFiltersReqRange, - ) - if err != nil { - peerLog.Debugf("Invalid getcfilters request: %v", err) +// OnGetCFHeaders is invoked when a peer receives a getcfheader bitcoin message. +func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { + // Ignore getcfilterheader requests if not in sync. + if !sp.server.syncManager.IsCurrent() { return } - // Create []*chainhash.Hash from []chainhash.Hash to pass to - // FiltersByBlockHashes. - hashPtrs := make([]*chainhash.Hash, len(hashes)) - for i := range hashes { - hashPtrs[i] = &hashes[i] - } + var startHeight int32 + var maxResults int + var hashList []chainhash.Hash + var hashPtrs []*chainhash.Hash + // if the filter type is supported, we initialize variables to avoid duplicate code + if msg.FilterType == wire.GCSFilterRegular || msg.FilterType == wire.UtreexoCFilter { - filters, err := sp.server.cfIndex.FiltersByBlockHashes( - hashPtrs, msg.FilterType, - ) - if err != nil { - peerLog.Errorf("Error retrieving cfilters: %v", err) - return - } + startHeight = int32(msg.StartHeight) + maxResults = wire.MaxCFHeadersPerMsg - for i, filterBytes := range filters { - if len(filterBytes) == 0 { - peerLog.Warnf("Could not obtain cfilter for %v", - hashes[i]) - return + // If StartHeight is positive, fetch the predecessor block hash so we + // can populate the PrevFilterHeader field. + if msg.StartHeight > 0 { + startHeight-- + maxResults++ } - filterMsg := wire.NewMsgCFilter( - msg.FilterType, &hashes[i], filterBytes, + // Fetch the hashes from the block index. + var err error + hashList, err = sp.server.chain.HeightToHashRange( + startHeight, &msg.StopHash, maxResults, ) - sp.QueueMessage(filterMsg, nil) - } -} + if err != nil { + peerLog.Debugf("Invalid getcfheaders request: %v", err) + } -// OnGetCFHeaders is invoked when a peer receives a getcfheader bitcoin message. -func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { - // Ignore getcfilterheader requests if not in sync. 
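On the requesting side the new filter type is used exactly like the regular GCS type: the client sends a getcfilters message and receives one cfilter message per block, carrying the serialized roots as the filter payload. A hedged sketch, assuming wire.NewMsgGetCFilters keeps its usual (filterType, startHeight, stopHash) signature; the wrapper name is illustrative:

// requestUtreexoCFilters asks a peer for utreexo cfilters covering at most
// wire.MaxGetCFiltersReqRange blocks ending at stopHash. Responses arrive as
// wire.MsgCFilter messages with FilterType == wire.UtreexoCFilter.
func requestUtreexoCFilters(p *peer.Peer, startHeight uint32, stopHash *chainhash.Hash) {
	msg := wire.NewMsgGetCFilters(wire.UtreexoCFilter, startHeight, stopHash)
	p.QueueMessage(msg, nil)
}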
- if !sp.server.syncManager.IsCurrent() { - return + // This is possible if StartHeight is one greater that the height of + // StopHash, and we pull a valid range of hashes including the previous + // filter header. + if len(hashList) == 0 || (msg.StartHeight > 0 && len(hashList) == 1) { + peerLog.Debug("No results for getcfheaders request") + return + } + + // Create []*chainhash.Hash from []chainhash.Hash to pass to + // FilterHeadersByBlockHashes. + hashPtrs = make([]*chainhash.Hash, len(hashList)) + for i := range hashList { + hashPtrs[i] = &hashList[i] + } } // We'll also ensure that the remote party is requesting a set of // headers for filters that we actually currently maintain. switch msg.FilterType { case wire.GCSFilterRegular: + + // Fetch the raw filter hash bytes from the database for all blocks. + filterHashes, err := sp.server.cfIndex.FilterHashesByBlockHashes( + hashPtrs, msg.FilterType, + ) + if err != nil { + peerLog.Errorf("Error retrieving cfilter hashes: %v", err) + return + } + + // Generate cfheaders message and send it. + headersMsg := wire.NewMsgCFHeaders() + + // Populate the PrevFilterHeader field. + if msg.StartHeight > 0 { + prevBlockHash := &hashList[0] + + // Fetch the raw committed filter header bytes from the + // database. + headerBytes, err := sp.server.cfIndex.FilterHeaderByBlockHash( + prevBlockHash, msg.FilterType) + if err != nil { + peerLog.Errorf("Error retrieving CF header: %v", err) + return + } + if len(headerBytes) == 0 { + peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash) + return + } + + // Deserialize the hash into PrevFilterHeader. + err = headersMsg.PrevFilterHeader.SetBytes(headerBytes) + if err != nil { + peerLog.Warnf("Committed filter header deserialize "+ + "failed: %v", err) + return + } + + hashList = hashList[1:] + filterHashes = filterHashes[1:] + } + + // Populate HeaderHashes. + for i, hashBytes := range filterHashes { + if len(hashBytes) == 0 { + peerLog.Warnf("Could not obtain CF hash for %v", hashList[i]) + return + } + + // Deserialize the hash. + filterHash, err := chainhash.NewHash(hashBytes) + if err != nil { + peerLog.Warnf("Committed filter hash deserialize "+ + "failed: %v", err) + return + } + + headersMsg.AddCFHash(filterHash) + } + + headersMsg.FilterType = msg.FilterType + headersMsg.StopHash = msg.StopHash + + sp.QueueMessage(headersMsg, nil) + + // handle custom utreexocfilter message case wire.UtreexoCFilter: - break - default: - peerLog.Debug("Filter request for unknown headers for "+ - "filter: %v", msg.FilterType) - return - } + // Generate cfheaders message and send it. + headersMsg := wire.NewMsgCFHeaders() - startHeight := int32(msg.StartHeight) - maxResults := wire.MaxCFHeadersPerMsg + // Populate the PrevFilterHeader field. + if msg.StartHeight > 0 { + prevBlockHash := &hashList[0] - // If StartHeight is positive, fetch the predecessor block hash so we - // can populate the PrevFilterHeader field. - if msg.StartHeight > 0 { - startHeight-- - maxResults++ - } + // Fetch the raw committed filter header bytes from the + // database. + headerBytes, err := sp.server.cfIndex.FilterHeaderByBlockHash( + prevBlockHash, msg.FilterType) + if err != nil { + peerLog.Errorf("Error retrieving CF header: %v", err) + return + } + if len(headerBytes) == 0 { + peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash) + return + } - // Fetch the hashes from the block index. 
- hashList, err := sp.server.chain.HeightToHashRange( - startHeight, &msg.StopHash, maxResults, - ) - if err != nil { - peerLog.Debugf("Invalid getcfheaders request: %v", err) - } + // Deserialize the hash into PrevFilterHeader. + err = headersMsg.PrevFilterHeader.SetBytes(headerBytes) + if err != nil { + peerLog.Warnf("Committed filter header deserialize "+ + "failed: %v", err) + return + } + } - // This is possible if StartHeight is one greater that the height of - // StopHash, and we pull a valid range of hashes including the previous - // filter header. - if len(hashList) == 0 || (msg.StartHeight > 0 && len(hashList) == 1) { - peerLog.Debug("No results for getcfheaders request") - return - } + // fetch filter hashes and add to cf hashes field + for i, blockHash := range hashPtrs { + var serializedUtreexo []byte + // skip the first index as this index was added so as to enable us + // to get the previous filter's header + if i == 0 { + continue + } - // Create []*chainhash.Hash from []chainhash.Hash to pass to - // FilterHeadersByBlockHashes. - hashPtrs := make([]*chainhash.Hash, len(hashList)) - for i := range hashList { - hashPtrs[i] = &hashList[i] - } + leaves, roots, err := sp.getUtreexoRoots(blockHash) + if err != nil { + return + } - // Fetch the raw filter hash bytes from the database for all blocks. - filterHashes, err := sp.server.cfIndex.FilterHashesByBlockHashes( - hashPtrs, msg.FilterType, - ) - if err != nil { - peerLog.Errorf("Error retrieving cfilter hashes: %v", err) + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err = blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + peerLog.Errorf("error serializing utreexoc filter: %v", err) + return + } + + if len(serializedUtreexo) == 0 { + peerLog.Warnf("Could not obtain utreexocfilter for %v", + hashList[i]) + return + } + hashBytes := chainhash.DoubleHashB(serializedUtreexo) + + if len(hashBytes) == 0 { + peerLog.Warnf("Could not obtain CF hash for %v", hashList[i]) + return + } + + // Deserialize the hash. + filterHash, err := chainhash.NewHash(hashBytes) + if err != nil { + peerLog.Warnf("Committed filter hash deserialize "+ + "failed: %v", err) + return + } + + headersMsg.AddCFHash(filterHash) + } + headersMsg.FilterType = msg.FilterType + headersMsg.StopHash = msg.StopHash + + sp.QueueMessage(headersMsg, nil) + + default: + peerLog.Debug("Filter request for unknown headers for "+ + "filter: %v", msg.FilterType) return } +} - // Generate cfheaders message and send it. - headersMsg := wire.NewMsgCFHeaders() +func (sp *serverPeer) getUtreexoRoots(blockHash *chainhash.Hash) (uint64, []*chainhash.Hash, error) { - // Populate the PrevFilterHeader field. - if msg.StartHeight > 0 { - prevBlockHash := &hashList[0] + var leaves uint64 + var roots []*chainhash.Hash - // Fetch the raw committed filter header bytes from the - // database. 
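Because the cfheaders reply only carries PrevFilterHeader plus one filter hash per block, the requesting side rebuilds the header chain itself before comparing it against a known checkpoint. A sketch of that reconstruction, using the same construction as MakeHeaderForUtreexoCFilter; the helper name is illustrative:

// cfHeaderChain recomputes the committed filter headers implied by a
// cfheaders response: header[i] = DoubleHashH(filterHash[i] || header[i-1]),
// seeded with PrevFilterHeader.
func cfHeaderChain(msg *wire.MsgCFHeaders) []chainhash.Hash {
	headers := make([]chainhash.Hash, 0, len(msg.FilterHashes))
	prev := msg.PrevFilterHeader

	for _, filterHash := range msg.FilterHashes {
		buf := make([]byte, 2*chainhash.HashSize)
		copy(buf, filterHash[:])
		copy(buf[chainhash.HashSize:], prev[:])

		prev = chainhash.DoubleHashH(buf)
		headers = append(headers, prev)
	}
	return headers
}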
- headerBytes, err := sp.server.cfIndex.FilterHeaderByBlockHash( - prevBlockHash, msg.FilterType) + // For compact state nodes + if !cfg.NoUtreexo { + viewPoint, err := sp.server.chain.FetchUtreexoViewpoint(blockHash) if err != nil { - peerLog.Errorf("Error retrieving CF header: %v", err) - return - } - if len(headerBytes) == 0 { - peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash) - return + peerLog.Errorf("could not obtain utreexo view: %v", err) + return 0, nil, err } + roots = viewPoint.GetRoots() + leaves = viewPoint.NumLeaves() + } + // for bridge nodes + if sp.server.utreexoProofIndex != nil { + var uleaves uint64 + var uroots []*chainhash.Hash + var err error + err = sp.server.db.View(func(dbTx database.Tx) error { + uroots, uleaves, err = sp.server.utreexoProofIndex.FetchUtreexoState(dbTx, blockHash) + if err != nil { + return err + } - // Deserialize the hash into PrevFilterHeader. - err = headersMsg.PrevFilterHeader.SetBytes(headerBytes) + return nil + }) if err != nil { - peerLog.Warnf("Committed filter header deserialize "+ - "failed: %v", err) - return + peerLog.Errorf("error fetching utreexo view for blockhash %s: error: %v", blockHash, err) + return 0, nil, err } - - hashList = hashList[1:] - filterHashes = filterHashes[1:] - } - - // Populate HeaderHashes. - for i, hashBytes := range filterHashes { - if len(hashBytes) == 0 { - peerLog.Warnf("Could not obtain CF hash for %v", hashList[i]) - return + roots = uroots + leaves = uleaves + } else if sp.server.flatUtreexoProofIndex != nil { + height, err := sp.server.chain.BlockHeightByHash(blockHash) + if err != nil { + peerLog.Errorf("couldn't fetch the block height for blockhash %s from "+ + "the blockindex. Error: %v", blockHash, err) + return 0, nil, err } - - // Deserialize the hash. - filterHash, err := chainhash.NewHash(hashBytes) + uroots, uleaves, err := sp.server.flatUtreexoProofIndex.FetchUtreexoState(height) if err != nil { - peerLog.Warnf("Committed filter hash deserialize "+ - "failed: %v", err) - return + peerLog.Errorf("error fetching utreexo view for blockhash: %s: error: %v", err) + return 0, nil, err } - - headersMsg.AddCFHash(filterHash) + roots = uroots + leaves = uleaves } - - headersMsg.FilterType = msg.FilterType - headersMsg.StopHash = msg.StopHash - - sp.QueueMessage(headersMsg, nil) + return leaves, roots, nil } // OnGetCFCheckpt is invoked when a peer receives a getcfcheckpt bitcoin message. From 296c58f6d89d1fd1f8313294c843343796852f0b Mon Sep 17 00:00:00 2001 From: alainjr10 Date: Wed, 17 Jul 2024 01:47:45 +0100 Subject: [PATCH 4/4] fix: add new indexer --- blockchain/indexers/cfindex.go | 1 - blockchain/indexers/flatutreexoproofindex.go | 19 +- blockchain/indexers/utreexocfindex.go | 301 +++++++++++++++++++ blockchain/indexers/utreexoproofindex.go | 67 +---- config.go | 2 + rpcserver.go | 18 +- server.go | 9 + utreexod.go | 33 ++ 8 files changed, 363 insertions(+), 87 deletions(-) create mode 100644 blockchain/indexers/utreexocfindex.go diff --git a/blockchain/indexers/cfindex.go b/blockchain/indexers/cfindex.go index 72ea66cf..a256df95 100644 --- a/blockchain/indexers/cfindex.go +++ b/blockchain/indexers/cfindex.go @@ -40,7 +40,6 @@ var ( // block hashes to cf headers. 
cfHeaderKeys = [][]byte{ []byte("cf0headerbyhashidx"), - []byte("cf1headerbyhashidx"), } // cfHashKeys is an array of db bucket names used to house indexes of diff --git a/blockchain/indexers/flatutreexoproofindex.go b/blockchain/indexers/flatutreexoproofindex.go index c60d6758..72a77463 100644 --- a/blockchain/indexers/flatutreexoproofindex.go +++ b/blockchain/indexers/flatutreexoproofindex.go @@ -354,24 +354,7 @@ func (idx *FlatUtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil. } } - blockHash := block.Hash() - height, err := idx.chain.BlockHeightByHash(blockHash) - if err != nil { - return err - } - - roots, leaves, err := idx.FetchUtreexoState(height) - if err != nil { - return err - } - - // serialize the hashes of the utreexo roots hash - serializedUtreexo, err := blockchain.SerializeUtreexoRootsHash(leaves, roots) - if err != nil { - return err - } - - return storeUtreexoCFilterHeader(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) + return nil } // calcProofOverhead calculates the overhead of the current utreexo accumulator proof diff --git a/blockchain/indexers/utreexocfindex.go b/blockchain/indexers/utreexocfindex.go new file mode 100644 index 00000000..c0ac2910 --- /dev/null +++ b/blockchain/indexers/utreexocfindex.go @@ -0,0 +1,301 @@ +package indexers + +import ( + "errors" + + "github.com/utreexo/utreexod/blockchain" + "github.com/utreexo/utreexod/btcutil" + "github.com/utreexo/utreexod/btcutil/gcs/builder" + "github.com/utreexo/utreexod/chaincfg" + "github.com/utreexo/utreexod/chaincfg/chainhash" + "github.com/utreexo/utreexod/database" + "github.com/utreexo/utreexod/wire" +) + +// utreexoProofIndexName is the human-readable name for the index. +const ( + utreexoCFIndexName = "utreexo custom cfilter index" +) + +// utreexocfilter is a custom commited filter which serves utreexo roots +// these roots are already present, so they need not be created/stored, their +// headers could be stored though +var ( + // utreexoCFIndexParentBucketKey is the name of the parent bucket used to + // house the index. The rest of the buckets live below this bucket. + utreexoCFIndexParentBucketKey = []byte("utreexocfindexparentbucket") + + // utreexoCfHeaderKeys is an array of db bucket names used to house indexes of + // block hashes to cf headers. + utreexoCfHeaderKeys = [][]byte{ + []byte("utreexocfheaderbyhashidx"), + } +) + +// dbFetchFilterIdxEntry retrieves a data blob from the filter index database. +// An entry's absence is not considered an error. +func dbFetchUtreexoCFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) { + idx := dbTx.Metadata().Bucket(utreexoCFIndexParentBucketKey).Bucket(key) + return idx.Get(h[:]), nil +} + +// dbStoreFilterIdxEntry stores a data blob in the filter index database. +func dbStoreUtreexoCFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error { + idx := dbTx.Metadata().Bucket(utreexoCFIndexParentBucketKey).Bucket(key) + return idx.Put(h[:], f) +} + +// dbDeleteFilterIdxEntry deletes a data blob from the filter index database. 
+func dbDeleteUtreexoCFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) error { + idx := dbTx.Metadata().Bucket(utreexoCFIndexParentBucketKey).Bucket(key) + return idx.Delete(h[:]) +} + +var _ Indexer = (*UtreexoCFIndex)(nil) + +var _ NeedsInputser = (*UtreexoCFIndex)(nil) + +type UtreexoCFIndex struct { + db database.DB + chainParams *chaincfg.Params + + chain *blockchain.BlockChain + + utreexoProofIndex *UtreexoProofIndex + + flatUtreexoProofIndex *FlatUtreexoProofIndex +} + +func (idx *UtreexoCFIndex) NeedsInputs() bool { + return true +} + +// Init initializes the utreexo cf index. This is part of the Indexer +// interface. +func (idx *UtreexoCFIndex) Init(_ *blockchain.BlockChain) error { + return nil // Nothing to do. +} + +// Key returns the database key to use for the index as a byte slice. This is +// part of the Indexer interface. +func (idx *UtreexoCFIndex) Key() []byte { + return utreexoCFIndexParentBucketKey +} + +// Name returns the human-readable name of the index. This is part of the +// Indexer interface. +func (idx *UtreexoCFIndex) Name() string { + return utreexoCFIndexName +} + +// Create is invoked when the index manager determines the index needs to +// be created for the first time. It creates buckets for the custom utreexo +// filter index. +func (idx *UtreexoCFIndex) Create(dbTx database.Tx) error { + meta := dbTx.Metadata() + + utreexoCfIndexParentBucket, err := meta.CreateBucket(utreexoCFIndexParentBucketKey) + if err != nil { + return err + } + + for _, bucketName := range utreexoCfHeaderKeys { + _, err = utreexoCfIndexParentBucket.CreateBucket(bucketName) + if err != nil { + return err + } + } + + return nil +} + +// storeUtreexoCFilter stores a given utreexocfilter header +func storeUtreexoCFHeader(dbTx database.Tx, block *btcutil.Block, filterData []byte, + filterType wire.FilterType) error { + if filterType != wire.UtreexoCFilter { + return errors.New("invalid filter type") + } + + // Figure out which header bucket to use. + hkey := utreexoCfHeaderKeys[0] + h := block.Hash() + + // fetch the previous block's filter header. + var prevHeader *chainhash.Hash + ph := &block.MsgBlock().Header.PrevBlock + if ph.IsEqual(&zeroHash) { + prevHeader = &zeroHash + } else { + pfh, err := dbFetchUtreexoCFilterIdxEntry(dbTx, hkey, ph) + if err != nil { + return err + } + + // Construct the new block's filter header, and store it. + prevHeader, err = chainhash.NewHash(pfh) + if err != nil { + return err + } + } + + fh, err := builder.MakeHeaderForUtreexoCFilter(filterData, *prevHeader) + if err != nil { + return err + } + return dbStoreUtreexoCFilterIdxEntry(dbTx, hkey, h, fh[:]) +} + +// ConnectBlock is invoked by the index manager when a new block has been +// connected to the main chain. +// This is part of the Indexer interface. 
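Reads use the same parent/child bucket pair that Create sets up above. A small sketch of looking up the stored header for one block inside a read-only transaction; the wrapper name is illustrative:

// fetchUtreexoCFHeader returns the stored utreexo cfilter header for a block
// hash, or nil if no header has been indexed for that block yet.
func fetchUtreexoCFHeader(db database.DB, h *chainhash.Hash) (*chainhash.Hash, error) {
	var headerBytes []byte
	err := db.View(func(dbTx database.Tx) error {
		var err error
		headerBytes, err = dbFetchUtreexoCFilterIdxEntry(dbTx, utreexoCfHeaderKeys[0], h)
		return err
	})
	if err != nil || len(headerBytes) == 0 {
		return nil, err
	}
	return chainhash.NewHash(headerBytes)
}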
+func (idx *UtreexoCFIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block, + stxos []blockchain.SpentTxOut) error { + + blockHash := block.Hash() + roots, leaves, err := idx.fetchUtreexoRoots(dbTx, blockHash) + + if err != nil { + return err + } + + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err := blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + return err + } + + return storeUtreexoCFHeader(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) +} + +// fetches the utreexo roots for a given block hash +func (idx *UtreexoCFIndex) fetchUtreexoRoots(dbTx database.Tx, + blockHash *chainhash.Hash) ([]*chainhash.Hash, uint64, error) { + + var leaves uint64 + var roots []*chainhash.Hash + + // For compact state nodes + if idx.chain.IsUtreexoViewActive() && idx.chain.IsAssumeUtreexo() { + viewPoint, err := idx.chain.FetchUtreexoViewpoint(blockHash) + if err != nil { + return nil, 0, err + } + roots = viewPoint.GetRoots() + leaves = viewPoint.NumLeaves() + } + // for bridge nodes + if idx.utreexoProofIndex != nil { + roots, leaves, err := idx.utreexoProofIndex.FetchUtreexoState(dbTx, blockHash) + if err != nil { + return nil, 0, err + } + return roots, leaves, nil + } else if idx.flatUtreexoProofIndex != nil { + height, err := idx.chain.BlockHeightByHash(blockHash) + if err != nil { + return nil, 0, err + } + roots, leaves, err := idx.flatUtreexoProofIndex.FetchUtreexoState(height) + if err != nil { + return nil, 0, err + } + return roots, leaves, nil + } + + return roots, leaves, nil +} + +// DisconnectBlock is invoked by the index manager when a block has been +// disconnected from the main chain. This indexer removes the hash-to-cf +// mapping for every passed block. This is part of the Indexer interface. +func (idx *UtreexoCFIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block, + _ []blockchain.SpentTxOut) error { + + for _, key := range utreexoCfHeaderKeys { + err := dbDeleteUtreexoCFilterIdxEntry(dbTx, key, block.Hash()) + if err != nil { + return err + } + } + + return nil +} + +// PruneBlock is invoked when an older block is deleted after it's been +// processed. +// TODO (kcalvinalvin): Consider keeping the filters at a later date to help with +// reindexing as a pruned node. +// +// This is part of the Indexer interface. +func (idx *UtreexoCFIndex) PruneBlock(dbTx database.Tx, blockHash *chainhash.Hash) error { + + for _, key := range utreexoCfHeaderKeys { + err := dbDeleteUtreexoCFilterIdxEntry(dbTx, key, blockHash) + if err != nil { + return err + } + } + + return nil +} + +// entryByBlockHash fetches a filter index entry of a particular type +// (eg. filter, filter header, etc) for a filter type and block hash. +func (idx *UtreexoCFIndex) entryByBlockHash(dbTx database.Tx, + filterType wire.FilterType, h *chainhash.Hash) ([]byte, error) { + + if uint8(filterType) != uint8(wire.UtreexoCFilter) { + return nil, errors.New("unsupported filter type") + } + + roots, leaves, err := idx.fetchUtreexoRoots(dbTx, h) + + if err != nil { + return nil, err + } + + // serialize the hashes of the utreexo roots hash + serializedUtreexo, err := blockchain.SerializeUtreexoRootsHash(leaves, roots) + if err != nil { + return nil, err + } + + return serializedUtreexo, err +} + +// FilterByBlockHash returns the serialized contents of a block's utreexo +// cfilter. 
+func (idx *UtreexoCFIndex) FilterByBlockHash(dbTx database.Tx, h *chainhash.Hash, + filterType wire.FilterType) ([]byte, error) { + return idx.entryByBlockHash(dbTx, filterType, h) +} + +// NewCfIndex returns a new instance of an indexer that is used to create a +// mapping of the hashes of all blocks in the blockchain to their respective +// committed filters. +// +// It implements the Indexer interface which plugs into the IndexManager that +// in turn is used by the blockchain package. This allows the index to be +// seamlessly maintained along with the chain. +func NewUtreexoCfIndex(db database.DB, chainParams *chaincfg.Params, utreexoProofIndex *UtreexoProofIndex, + flatUtreexoProofIndex *FlatUtreexoProofIndex) *UtreexoCFIndex { + return &UtreexoCFIndex{db: db, chainParams: chainParams, utreexoProofIndex: utreexoProofIndex, + flatUtreexoProofIndex: flatUtreexoProofIndex} +} + +// DropCfIndex drops the CF index from the provided database if exists. +func DropUtreexoCfIndex(db database.DB, interrupt <-chan struct{}) error { + return dropIndex(db, utreexoCFIndexParentBucketKey, utreexoCFIndexName, interrupt) +} + +// CfIndexInitialized returns true if the cfindex has been created previously. +func UtreexoCfIndexInitialized(db database.DB) bool { + var exists bool + db.View(func(dbTx database.Tx) error { + bucket := dbTx.Metadata().Bucket(utreexoCFIndexParentBucketKey) + exists = bucket != nil + return nil + }) + + return exists +} diff --git a/blockchain/indexers/utreexoproofindex.go b/blockchain/indexers/utreexoproofindex.go index af333189..27224899 100644 --- a/blockchain/indexers/utreexoproofindex.go +++ b/blockchain/indexers/utreexoproofindex.go @@ -6,14 +6,12 @@ package indexers import ( "bytes" - "errors" "fmt" "sync" "github.com/utreexo/utreexo" "github.com/utreexo/utreexod/blockchain" "github.com/utreexo/utreexod/btcutil" - "github.com/utreexo/utreexod/btcutil/gcs/builder" "github.com/utreexo/utreexod/chaincfg" "github.com/utreexo/utreexod/chaincfg/chainhash" "github.com/utreexo/utreexod/database" @@ -238,42 +236,6 @@ func (idx *UtreexoProofIndex) Create(dbTx database.Tx) error { return nil } -// storeUtreexoCFilter stores a given utreexocfilter header -func storeUtreexoCFilterHeader(dbTx database.Tx, block *btcutil.Block, filterData []byte, - filterType wire.FilterType) error { - if uint8(filterType) > maxFilterType { - return errors.New("unsupported filter type") - } - - // Figure out which header bucket to use. - hkey := cfHeaderKeys[filterType] - h := block.Hash() - - // fetch the previous block's filter header. - var prevHeader *chainhash.Hash - ph := &block.MsgBlock().Header.PrevBlock - if ph.IsEqual(&zeroHash) { - prevHeader = &zeroHash - } else { - pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph) - if err != nil { - return err - } - - // Construct the new block's filter header, and store it. - prevHeader, err = chainhash.NewHash(pfh) - if err != nil { - return err - } - } - - fh, err := builder.MakeHeaderForUtreexoCFilter(filterData, *prevHeader) - if err != nil { - return err - } - return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:]) -} - // ConnectBlock is invoked by the index manager when a new block has been // connected to the main chain. 
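Since only the headers are persisted while the filter bytes are rebuilt from the accumulator on demand, the two can be cross-checked: hashing the regenerated filter together with the parent block's stored header must reproduce the block's own stored header. A consistency-check sketch under that assumption (the function name is hypothetical, the genesis case with an all-zero previous header is skipped, and the bytes package would need to be imported):

// checkUtreexoCFHeaderConsistency regenerates a block's utreexo cfilter and
// verifies that it still commits to the stored header chain.
func checkUtreexoCFHeaderConsistency(dbTx database.Tx, idx *UtreexoCFIndex,
	blockHash, prevBlockHash *chainhash.Hash) (bool, error) {

	// Rebuild the filter payload (serialized numLeaves and roots).
	filterData, err := idx.FilterByBlockHash(dbTx, blockHash, wire.UtreexoCFilter)
	if err != nil {
		return false, err
	}

	// Load the stored headers for the block and its parent.
	prevBytes, err := dbFetchUtreexoCFilterIdxEntry(dbTx, utreexoCfHeaderKeys[0], prevBlockHash)
	if err != nil {
		return false, err
	}
	wantBytes, err := dbFetchUtreexoCFilterIdxEntry(dbTx, utreexoCfHeaderKeys[0], blockHash)
	if err != nil {
		return false, err
	}

	prevHeader, err := chainhash.NewHash(prevBytes)
	if err != nil {
		return false, err
	}

	// Recompute the header and compare it to what the index stored.
	got, err := builder.MakeHeaderForUtreexoCFilter(filterData, *prevHeader)
	if err != nil {
		return false, err
	}
	return bytes.Equal(got[:], wantBytes), nil
}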
// @@ -337,35 +299,8 @@ func (idx *UtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Bloc if err != nil { return err } - blockHash := block.Hash() - var serializedUtreexo []byte - var leaves uint64 - var roots []*chainhash.Hash - // For compact state nodes - if idx.chain.IsUtreexoViewActive() { - viewPoint, err := idx.chain.FetchUtreexoViewpoint(blockHash) - if err != nil { - return err - } - roots = viewPoint.GetRoots() - leaves = viewPoint.NumLeaves() - } else { // for bridge nodes - uroots, uleaves, err := idx.FetchUtreexoState(dbTx, blockHash) - if err != nil { - return err - } - roots = uroots - leaves = uleaves - } - - // serialize the hashes of the utreexo roots hash - serializedUtreexo, err = blockchain.SerializeUtreexoRootsHash(leaves, roots) - if err != nil { - return err - } - - return storeUtreexoCFilterHeader(dbTx, block, serializedUtreexo, wire.UtreexoCFilter) + return nil } // getUndoData returns the data needed for undo. For pruned nodes, we fetch the data from diff --git a/config.go b/config.go index 59f21754..0a54de97 100644 --- a/config.go +++ b/config.go @@ -204,9 +204,11 @@ type config struct { FlatUtreexoProofIndex bool `long:"flatutreexoproofindex" description:"Maintain a utreexo proof for all blocks in flat files"` UtreexoProofIndexMaxMemory int64 `long:"utreexoproofindexmaxmemory" description:"The maxmimum memory in mebibytes (MiB) that the utreexo proof indexes will use up. Passing in 0 will make the entire proof index stay on disk. Passing in a negative value will make the entire proof index stay in memory. Default of 250MiB."` CFilters bool `long:"cfilters" description:"Enable committed filtering (CF) support"` + UtreexoCFilters bool `long:"utreexocfilters" description:"Enable committed filtering (CF) support serving utreexo roots."` NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."` DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."` + DropUtreexoCfIndex bool `long:"droputreexocfindex" description:"Deletes the index used for custom utreexo commited filter indexing support serving utreexo roots from the database on start up and then exits."` DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."` DropTTLIndex bool `long:"dropttlindex" description:"Deletes the time to live index from the database on start up and then exits."` DropUtreexoProofIndex bool `long:"droputreexoproofindex" description:"Deletes the utreexo proof index from the database on start up and then exits."` diff --git a/rpcserver.go b/rpcserver.go index 14b0cea3..a3bc1163 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -2381,7 +2381,7 @@ func handleGetChainTips(s *rpcServer, cmd interface{}, closeChan <-chan struct{} // handleGetCFilter implements the getcfilter command. 
func handleGetCFilter(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { - if s.cfg.CfIndex == nil { + if s.cfg.CfIndex == nil || s.cfg.UtreexoCfIndex != nil { return nil, &btcjson.RPCError{ Code: btcjson.ErrRPCNoCFIndex, Message: "The CF index must be enabled for this command", @@ -2394,7 +2394,20 @@ func handleGetCFilter(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) return nil, rpcDecodeHexError(c.Hash) } - filterBytes, err := s.cfg.CfIndex.FilterByBlockHash(hash, c.FilterType) + var filterBytes []byte + if c.FilterType == wire.UtreexoCFilter { + err = s.cfg.DB.View(func(dbTx database.Tx) error { + var err error + filterBytes, err = s.cfg.UtreexoCfIndex.FilterByBlockHash(dbTx, hash, c.FilterType) + return err + }) + if err != nil { + return nil, rpcNoTxInfoError(hash) + } + } else { + filterBytes, err = s.cfg.CfIndex.FilterByBlockHash(hash, c.FilterType) + } + if err != nil { rpcsLog.Debugf("Could not find committed filter for %v: %v", hash, err) @@ -5649,6 +5662,7 @@ type rpcserverConfig struct { TxIndex *indexers.TxIndex AddrIndex *indexers.AddrIndex CfIndex *indexers.CfIndex + UtreexoCfIndex *indexers.UtreexoCFIndex TTLIndex *indexers.TTLIndex UtreexoProofIndex *indexers.UtreexoProofIndex FlatUtreexoProofIndex *indexers.FlatUtreexoProofIndex diff --git a/server.go b/server.go index ff37dddd..aeaf88bc 100644 --- a/server.go +++ b/server.go @@ -252,6 +252,7 @@ type server struct { txIndex *indexers.TxIndex addrIndex *indexers.AddrIndex cfIndex *indexers.CfIndex + utreexoCfIndex *indexers.UtreexoCFIndex ttlIndex *indexers.TTLIndex utreexoProofIndex *indexers.UtreexoProofIndex flatUtreexoProofIndex *indexers.FlatUtreexoProofIndex @@ -1150,6 +1151,8 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) { } } +// getUtreexoRoots fetches utreexo roots from the appropriate locations, i.e fetches +// roots for CSN from a different location from utreexoviewpoint and flatfile func (sp *serverPeer) getUtreexoRoots(blockHash *chainhash.Hash) (uint64, []*chainhash.Hash, error) { var leaves uint64 @@ -3401,6 +3404,12 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, s.cfIndex = indexers.NewCfIndex(db, chainParams) indexes = append(indexes, s.cfIndex) } + if cfg.UtreexoCFilters { + indxLog.Info("Utreexo C filter index enabled") + s.utreexoCfIndex = indexers.NewUtreexoCfIndex(db, chainParams, + s.utreexoProofIndex, s.flatUtreexoProofIndex) + indexes = append(indexes, s.utreexoCfIndex) + } if cfg.TTLIndex { indxLog.Info("TTL index is enabled") s.ttlIndex = indexers.NewTTLIndex(db, chainParams) diff --git a/utreexod.go b/utreexod.go index 427e408a..0da65281 100644 --- a/utreexod.go +++ b/utreexod.go @@ -94,6 +94,14 @@ func pruneChecks(db database.DB) error { "and sync from the beginning to enable the desired index. You may "+ "start the node up without the --cfilters flag", cfg.DataDir) } + // If we've previously been pruned and the utreexocfindex isn't present, it means that the + // user wants to enable the utreexocfindex after the node has already synced up while being pruned. + if beenPruned && !indexers.UtreexoCfIndexInitialized(db) && cfg.UtreexoCFilters { + return fmt.Errorf("utreeco cfilters cannot be enabled as the node has been "+ + "previously pruned. You must delete the files in the datadir: \"%s\" "+ + "and sync from the beginning to enable the desired index. 
You may "+ + "start the node up without the --utreexocfilters flag", cfg.DataDir) + } // If the user wants to disable the cfindex and is pruned or has enabled pruning, force // the user to either drop the cfindex manually or restart the node without the --cfilters @@ -114,6 +122,25 @@ func pruneChecks(db database.DB) error { "To keep the compact filters, restart the node with the --cfilters "+ "flag", prunedStr) } + // If the user wants to disable the utreexocfindex and is pruned or has enabled pruning, force + // the user to either drop the utreexocfindex manually or restart the node without the + // --utreexocfilters flag. + if (beenPruned || cfg.Prune != 0) && indexers.UtreexoCfIndexInitialized(db) && !cfg.UtreexoCFilters { + var prunedStr string + if beenPruned { + prunedStr = "has been previously pruned" + } else { + prunedStr = fmt.Sprintf("was started with prune flag (--prune=%d)", cfg.Prune) + } + return fmt.Errorf("--utreexocfilters flag was not given but the utreexo cfilters have "+ + "previously been enabled on this node and the index data currently "+ + "exists in the database. The node %s and "+ + "the database would be left in an inconsistent state if the utreexo c "+ + "filters don't get indexed now. To disable utreeco cfilters, please drop the "+ + "index completely with the --droputreexocfindex flag and restart the node. "+ + "To keep the compact filters, restart the node with the --utreexocfilters "+ + "flag", prunedStr) + } // If the user wants to disable the utreexo proof index and is pruned or has enabled pruning, // force the user to either drop the utreexo proof index manually or restart the node without // the --utreexoproofindex flag. @@ -287,6 +314,12 @@ func btcdMain(serverChan chan<- *server) error { return nil } + if cfg.DropUtreexoCfIndex { + if err := indexers.DropUtreexoCfIndex(db, interrupt); err != nil { + btcdLog.Errorf("%v", err) + return err + } + } if cfg.DropTTLIndex { if err := indexers.DropTTLIndex(db, interrupt); err != nil { btcdLog.Errorf("%v", err)