Add new getcfilters message for utreexo nodes #192

Draft · wants to merge 5 commits into main

Changes from 3 commits
26 changes: 26 additions & 0 deletions blockchain/chainio.go
@@ -1080,6 +1080,32 @@ func SerializeUtreexoRoots(numLeaves uint64, roots []utreexo.Hash) ([]byte, erro
return w.Bytes(), nil
}

// SerializeUtreexoRootsHash serializes the numLeaves and the roots into a byte slice.
// It takes in a slice of chainhash.Hash instead of utreexo.Hash; each chainhash.Hash is the
// hashed value of the corresponding utreexo.Hash.
func SerializeUtreexoRootsHash(numLeaves uint64, roots []*chainhash.Hash) ([]byte, error) {
// 8 byte NumLeaves + (32 byte roots * len(roots))
w := bytes.NewBuffer(make([]byte, 0, 8+(len(roots)*chainhash.HashSize)))

// Write the NumLeaves first.
var buf [8]byte
byteOrder.PutUint64(buf[:], numLeaves)
_, err := w.Write(buf[:])
if err != nil {
return nil, err
}

// Then write the roots.
for _, root := range roots {
_, err = w.Write(root[:])
if err != nil {
return nil, err
}
}

return w.Bytes(), nil
}

// DeserializeUtreexoRoots deserializes the provided byte slice into numLeaves and roots.
func DeserializeUtreexoRoots(serializedUView []byte) (uint64, []utreexo.Hash, error) {
totalLen := len(serializedUView)
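For reference, the payload written by SerializeUtreexoRootsHash is an 8-byte numLeaves followed by a run of 32-byte root hashes. Below is a minimal decoding sketch (a hypothetical helper, not part of this PR; it assumes byteOrder above resolves to binary.LittleEndian, as in btcd's chainio.go):

package blockchain

import (
	"encoding/binary"
	"errors"

	"github.com/utreexo/utreexod/chaincfg/chainhash"
)

// deserializeUtreexoRootsHash is a hypothetical counterpart to
// SerializeUtreexoRootsHash: it reads an 8-byte numLeaves followed by a run
// of 32-byte chainhash.Hash roots. Little-endian is assumed for numLeaves.
func deserializeUtreexoRootsHash(b []byte) (uint64, []*chainhash.Hash, error) {
	if len(b) < 8 || (len(b)-8)%chainhash.HashSize != 0 {
		return 0, nil, errors.New("malformed serialized utreexo roots hash")
	}

	numLeaves := binary.LittleEndian.Uint64(b[:8])
	b = b[8:]

	roots := make([]*chainhash.Hash, 0, len(b)/chainhash.HashSize)
	for len(b) > 0 {
		root, err := chainhash.NewHash(b[:chainhash.HashSize])
		if err != nil {
			return 0, nil, err
		}
		roots = append(roots, root)
		b = b[chainhash.HashSize:]
	}

	return numLeaves, roots, nil
}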
5 changes: 4 additions & 1 deletion blockchain/indexers/cfindex.go
@@ -33,19 +33,22 @@ var (
// cfIndexKeys is an array of db bucket names used to house indexes of
// block hashes to cfilters.
cfIndexKeys = [][]byte{
[]byte("cf0byhashidx"),
[]byte("cf0byhashidx"), // bucket for basic filter indexes
[]byte("cf1byhashidx"), // bucket for UtreexoCFilter
}

// cfHeaderKeys is an array of db bucket names used to house indexes of
// block hashes to cf headers.
cfHeaderKeys = [][]byte{
[]byte("cf0headerbyhashidx"),
[]byte("cf1headerbyhashidx"),
}

// cfHashKeys is an array of db bucket names used to house indexes of
// block hashes to cf hashes.
cfHashKeys = [][]byte{
[]byte("cf0hashbyhashidx"),
[]byte("cf1hashbyhashidx"),
}

maxFilterType = uint8(len(cfHeaderKeys) - 1)
83 changes: 82 additions & 1 deletion blockchain/indexers/utreexoproofindex.go
@@ -6,12 +6,14 @@ package indexers

import (
"bytes"
"errors"
"fmt"
"sync"

"github.com/utreexo/utreexo"
"github.com/utreexo/utreexod/blockchain"
"github.com/utreexo/utreexod/btcutil"
"github.com/utreexo/utreexod/btcutil/gcs/builder"
"github.com/utreexo/utreexod/chaincfg"
"github.com/utreexo/utreexod/chaincfg/chainhash"
"github.com/utreexo/utreexod/database"
@@ -236,6 +238,58 @@ func (idx *UtreexoProofIndex) Create(dbTx database.Tx) error {
return nil
}

// storeUtreexoCFilter stores a given utreexo cfilter, and performs the steps needed to
// generate the filter's header.
func storeUtreexoCFilter(dbTx database.Tx, block *btcutil.Block, filterData []byte,
filterType wire.FilterType) error {
if uint8(filterType) > maxFilterType {
return errors.New("unsupported filter type")
}

// Figure out which buckets to use.
fkey := cfIndexKeys[filterType]
hkey := cfHeaderKeys[filterType]
hashkey := cfHashKeys[filterType]

// Start by storing the filter.
h := block.Hash()
err := dbStoreFilterIdxEntry(dbTx, fkey, h, filterData)
if err != nil {
return err
}

// Next store the filter hash.
filterHash := chainhash.DoubleHashH(filterData)
err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
if err != nil {
return err
}

// Then fetch the previous block's filter header.
var prevHeader *chainhash.Hash
ph := &block.MsgBlock().Header.PrevBlock
if ph.IsEqual(&zeroHash) {
prevHeader = &zeroHash
} else {
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
if err != nil {
return err
}

// Construct the new block's filter header, and store it.
prevHeader, err = chainhash.NewHash(pfh)
if err != nil {
return err
}
}

fh, err := builder.MakeHeaderForUtreexoCFilter(filterData, *prevHeader)
if err != nil {
return err
}
return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain.
//
@@ -299,8 +353,35 @@ func (idx *UtreexoProofIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Bloc
if err != nil {
return err
}
blockHash := block.Hash()
var serializedUtreexo []byte
var leaves uint64
var roots []*chainhash.Hash

return nil
// For compact state nodes
if idx.chain.IsUtreexoViewActive() {
viewPoint, err := idx.chain.FetchUtreexoViewpoint(blockHash)
if err != nil {
return err
}
roots = viewPoint.GetRoots()
leaves = viewPoint.NumLeaves()
} else { // for bridge nodes
uroots, uleaves, err := idx.FetchUtreexoState(dbTx, blockHash)
if err != nil {
return err
}
roots = uroots
leaves = uleaves
}

// Serialize the number of leaves and the hashes of the utreexo roots.
serializedUtreexo, err = blockchain.SerializeUtreexoRootsHash(leaves, roots)
if err != nil {
return err
}

return storeUtreexoCFilter(dbTx, block, serializedUtreexo, wire.UtreexoCFilter)
}

// getUndoData returns the data needed for undo. For pruned nodes, we fetch the data from
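Once ConnectBlock has run, the utreexo cfilter payload and its chained header sit in the cf1 buckets keyed by block hash. A read-back sketch (a hypothetical in-package helper reusing dbFetchFilterIdxEntry and the bucket keys from cfindex.go above; not part of this PR):

package indexers

import (
	"github.com/utreexo/utreexod/chaincfg/chainhash"
	"github.com/utreexo/utreexod/database"
	"github.com/utreexo/utreexod/wire"
)

// fetchUtreexoCFilterWithHeader is a hypothetical helper that loads the
// utreexo cfilter payload (serialized numLeaves plus root hashes) and the
// chained filter header stored by storeUtreexoCFilter for the given block.
func fetchUtreexoCFilterWithHeader(dbTx database.Tx,
	blockHash *chainhash.Hash) ([]byte, *chainhash.Hash, error) {

	fkey := cfIndexKeys[wire.UtreexoCFilter]
	hkey := cfHeaderKeys[wire.UtreexoCFilter]

	// Filter payload, as written by dbStoreFilterIdxEntry above.
	filterData, err := dbFetchFilterIdxEntry(dbTx, fkey, blockHash)
	if err != nil {
		return nil, nil, err
	}

	// Chained header, as built by MakeHeaderForUtreexoCFilter.
	rawHeader, err := dbFetchFilterIdxEntry(dbTx, hkey, blockHash)
	if err != nil {
		return nil, nil, err
	}
	header, err := chainhash.NewHash(rawHeader)
	if err != nil {
		return nil, nil, err
	}

	return filterData, header, nil
}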
16 changes: 16 additions & 0 deletions btcutil/gcs/builder/builder.go
@@ -369,3 +369,19 @@ func MakeHeaderForFilter(filter *gcs.Filter, prevHeader chainhash.Hash) (chainha
// above.
return chainhash.DoubleHashH(filterTip), nil
}

// MakeHeaderForUtreexoCFilter makes a filter chain header for a utreexo cfilter, given the
// filter data and the previous filter chain header.
func MakeHeaderForUtreexoCFilter(filterData []byte, prevHeader chainhash.Hash) (chainhash.Hash, error) {
filterTip := make([]byte, 2*chainhash.HashSize)
filterHash := chainhash.DoubleHashH(filterData)

// In the buffer we created above we'll compute hash || prevHash as an
// intermediate value.
copy(filterTip, filterHash[:])
copy(filterTip[chainhash.HashSize:], prevHeader[:])

// The final filter hash is the double-sha256 of the hash computed
// above.
return chainhash.DoubleHashH(filterTip), nil
}
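Each header therefore commits to the whole filter history: header_n = DoubleHashH(DoubleHashH(filterData_n) || header_{n-1}), anchored at the zero hash for the first block. A small sketch (hypothetical helper, not part of this PR) that recomputes a run of headers with the function above:

package builder

import (
	"github.com/utreexo/utreexod/chaincfg/chainhash"
)

// utreexoCFilterHeaderChain is a hypothetical helper that recomputes the
// filter header chain for consecutive utreexo cfilters by repeatedly calling
// MakeHeaderForUtreexoCFilter, feeding each header back in as the previous one.
func utreexoCFilterHeaderChain(prevHeader chainhash.Hash,
	filters [][]byte) ([]chainhash.Hash, error) {

	headers := make([]chainhash.Hash, 0, len(filters))
	for _, filterData := range filters {
		header, err := MakeHeaderForUtreexoCFilter(filterData, prevHeader)
		if err != nil {
			return nil, err
		}
		headers = append(headers, header)
		prevHeader = header
	}
	return headers, nil
}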
2 changes: 2 additions & 0 deletions server.go
@@ -867,6 +867,7 @@ func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
// filters that we actually currently maintain.
switch msg.FilterType {
case wire.GCSFilterRegular:
case wire.UtreexoCFilter:
break

default:
@@ -923,6 +924,7 @@ func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
// headers for filters that we actually currently maintain.
switch msg.FilterType {
case wire.GCSFilterRegular:
case wire.UtreexoCFilter:
break

default:
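With both handlers letting wire.UtreexoCFilter through, a peer can request the new filters the same way it requests regular cfilters. A hedged client-side sketch, assuming utreexod keeps btcd's wire.NewMsgGetCFilters constructor and MsgGetCFilters fields (the stop hash below is a placeholder):

package main

import (
	"fmt"

	"github.com/utreexo/utreexod/chaincfg/chainhash"
	"github.com/utreexo/utreexod/wire"
)

func main() {
	// Placeholder stop hash; in practice this is the hash of the last block
	// in the range being requested.
	stopHash, err := chainhash.NewHashFromStr(
		"0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		panic(err)
	}

	// Request utreexo cfilters for blocks 100 through stopHash. The serving
	// node is expected to answer with one MsgCFilter per block whose Data
	// field carries the serialized numLeaves and root hashes built above.
	msg := wire.NewMsgGetCFilters(wire.UtreexoCFilter, 100, stopHash)
	fmt.Printf("getcfilters: type=%d start=%d stop=%s\n",
		msg.FilterType, msg.StartHeight, msg.StopHash)
}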
1 change: 1 addition & 0 deletions wire/msgcfilter.go
@@ -17,6 +17,7 @@ type FilterType uint8
const (
// GCSFilterRegular is the regular filter type.
GCSFilterRegular FilterType = iota
// UtreexoCFilter is the filter type for utreexo nodes. Its filter data is
// the serialized number of leaves and accumulator root hashes for the block.
UtreexoCFilter
)

const (