diff --git a/blockchain/indexers/flatutreexoproofindex.go b/blockchain/indexers/flatutreexoproofindex.go index 7511670c..a62b516d 100644 --- a/blockchain/indexers/flatutreexoproofindex.go +++ b/blockchain/indexers/flatutreexoproofindex.go @@ -1243,12 +1243,14 @@ func loadFlatFileState(dataDir, name string) (*FlatFileState, error) { } // NewFlatUtreexoProofIndex returns a new instance of an indexer that is used to create a flat utreexo proof index. +// The passed in maxMemoryUsage should be in bytes and it determines how much memory the proof index will use up. +// A maxMemoryUsage of 0 will keep all the elements on disk and a negative maxMemoryUsage will keep all the elements in memory. // // It implements the Indexer interface which plugs into the IndexManager that in // turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain. -func NewFlatUtreexoProofIndex(dataDir string, pruned bool, chainParams *chaincfg.Params, - proofGenInterVal *int32) (*FlatUtreexoProofIndex, error) { +func NewFlatUtreexoProofIndex(pruned bool, chainParams *chaincfg.Params, + proofGenInterVal *int32, maxMemoryUsage int64, dataDir string) (*FlatUtreexoProofIndex, error) { // If the proofGenInterVal argument is nil, use the default value. var intervalToUse int32 @@ -1269,9 +1271,8 @@ func NewFlatUtreexoProofIndex(dataDir string, pruned bool, chainParams *chaincfg uState, err := InitUtreexoState(&UtreexoConfig{ DataDir: dataDir, Name: flatUtreexoProofIndexType, - // Default to ram for now. - Params: chainParams, - }) + Params: chainParams, + }, maxMemoryUsage) if err != nil { return nil, err } diff --git a/blockchain/indexers/indexers_test.go b/blockchain/indexers/indexers_test.go index 52e6f82d..989b05f2 100644 --- a/blockchain/indexers/indexers_test.go +++ b/blockchain/indexers/indexers_test.go @@ -75,12 +75,12 @@ func initIndexes(interval int32, dbPath string, db *database.DB, params *chaincf proofGenInterval := new(int32) *proofGenInterval = interval - flatUtreexoProofIndex, err := NewFlatUtreexoProofIndex(dbPath, false, params, proofGenInterval) + flatUtreexoProofIndex, err := NewFlatUtreexoProofIndex(false, params, proofGenInterval, 50*1024*1024, dbPath) if err != nil { return nil, nil, err } - utreexoProofIndex, err := NewUtreexoProofIndex(*db, false, dbPath, params) + utreexoProofIndex, err := NewUtreexoProofIndex(*db, false, 50*1024*1024, params, dbPath) if err != nil { return nil, nil, err } diff --git a/blockchain/indexers/utreexobackend.go b/blockchain/indexers/utreexobackend.go index 52828cbe..2a02e04e 100644 --- a/blockchain/indexers/utreexobackend.go +++ b/blockchain/indexers/utreexobackend.go @@ -7,6 +7,7 @@ package indexers import ( "bytes" "encoding/binary" + "fmt" "os" "path/filepath" @@ -63,11 +64,13 @@ func utreexoBasePath(cfg *UtreexoConfig) string { // InitUtreexoState returns an initialized utreexo state. If there isn't an // existing state on disk, it creates one and returns it. -func InitUtreexoState(cfg *UtreexoConfig) (*UtreexoState, error) { +// maxMemoryUsage of 0 will keep every element on disk. A negaive maxMemoryUsage will +// load every element to the memory. 
+func InitUtreexoState(cfg *UtreexoConfig, maxMemoryUsage int64) (*UtreexoState, error) { basePath := utreexoBasePath(cfg) log.Infof("Initializing Utreexo state from '%s'", basePath) defer log.Info("Utreexo state loaded") - return initUtreexoState(cfg, basePath) + return initUtreexoState(cfg, maxMemoryUsage, basePath) } // deleteUtreexoState removes the utreexo state directory and all the contents @@ -311,18 +314,24 @@ func deserializeUndoBlock(serialized []byte) (uint64, []uint64, []utreexo.Hash, return numAdds, targets, delHashes, nil } -// initUtreexoState creates a new utreexo state and returns it. -func initUtreexoState(cfg *UtreexoConfig, basePath string) (*UtreexoState, error) { +// initUtreexoState creates a new utreexo state and returns it. maxMemoryUsage of 0 will keep +// every element on disk and a negative maxMemoryUsage will load all the elemnts to memory. +func initUtreexoState(cfg *UtreexoConfig, maxMemoryUsage int64, basePath string) (*UtreexoState, error) { p := utreexo.NewMapPollard(true) + // 60% of the memory for the nodes map, 40% for the cache leaves map. + // TODO Totally arbitrary, it there's something better than change it to that. + maxNodesMem := maxMemoryUsage * 6 / 10 + maxCachedLeavesMem := maxMemoryUsage - maxNodesMem + nodesPath := filepath.Join(basePath, nodesDBDirName) - nodesDB, err := blockchain.InitNodesBackEnd(nodesPath) + nodesDB, err := blockchain.InitNodesBackEnd(nodesPath, maxNodesMem) if err != nil { return nil, err } cachedLeavesPath := filepath.Join(basePath, cachedLeavesDBDirName) - cachedLeavesDB, err := blockchain.InitCachedLeavesBackEnd(cachedLeavesPath) + cachedLeavesDB, err := blockchain.InitCachedLeavesBackEnd(cachedLeavesPath, maxCachedLeavesMem) if err != nil { return nil, err } @@ -341,20 +350,76 @@ func initUtreexoState(cfg *UtreexoConfig, basePath string) (*UtreexoState, error p.NumLeaves = binary.LittleEndian.Uint64(buf[:]) } - p.Nodes = nodesDB - p.CachedLeaves = cachedLeavesDB - closeDB := func() error { - err := nodesDB.Close() + var closeDB func() error + if maxMemoryUsage >= 0 { + p.Nodes = nodesDB + p.CachedLeaves = cachedLeavesDB + closeDB = func() error { + err := nodesDB.Close() + if err != nil { + return err + } + + err = cachedLeavesDB.Close() + if err != nil { + return err + } + + return nil + } + } else { + log.Infof("loading the utreexo state from disk...") + err = nodesDB.ForEach(func(k uint64, v utreexo.Leaf) error { + p.Nodes.Put(k, v) + return nil + }) if err != nil { - return err + return nil, err } - err = cachedLeavesDB.Close() + err = cachedLeavesDB.ForEach(func(k utreexo.Hash, v uint64) error { + p.CachedLeaves.Put(k, v) + return nil + }) if err != nil { - return err + return nil, err } - return nil + log.Infof("Finished loading the utreexo state from disk.") + + closeDB = func() error { + log.Infof("Flushing the utreexo state to disk. May take a while...") + + p.Nodes.ForEach(func(k uint64, v utreexo.Leaf) error { + nodesDB.Put(k, v) + return nil + }) + + p.CachedLeaves.ForEach(func(k utreexo.Hash, v uint64) error { + cachedLeavesDB.Put(k, v) + return nil + }) + + // We want to try to close both of the DBs before returning because of an error. + errStr := "" + err := nodesDB.Close() + if err != nil { + errStr += fmt.Sprintf("Error while closing nodes db. %v", err.Error()) + } + err = cachedLeavesDB.Close() + if err != nil { + errStr += fmt.Sprintf("Error while closing cached leaves db. %v", err.Error()) + } + + // If the err string isn't "", then return the error here. 
+ if errStr != "" { + return fmt.Errorf(errStr) + } + + log.Infof("Finished flushing the utreexo state to disk.") + + return nil + } } uState := &UtreexoState{ diff --git a/blockchain/indexers/utreexoproofindex.go b/blockchain/indexers/utreexoproofindex.go index 43edaea9..9b7d7338 100644 --- a/blockchain/indexers/utreexoproofindex.go +++ b/blockchain/indexers/utreexoproofindex.go @@ -559,12 +559,17 @@ func (idx *UtreexoProofIndex) PruneBlock(dbTx database.Tx, blockHash *chainhash. return nil } -// NewUtreexoProofIndex returns a new instance of an indexer that is used to create a +// NewUtreexoProofIndex returns a new instance of an indexer that is used to create a utreexo +// proof index using the database passed in. The passed in maxMemoryUsage should be in bytes and +// it determines how much memory the proof index will use up. A maxMemoryUsage of 0 will keep +// all the elements on disk and a negative maxMemoryUsage will keep all the elements in memory. // // It implements the Indexer interface which plugs into the IndexManager that in // turn is used by the blockchain package. This allows the index to be // seamlessly maintained along with the chain. -func NewUtreexoProofIndex(db database.DB, pruned bool, dataDir string, chainParams *chaincfg.Params) (*UtreexoProofIndex, error) { +func NewUtreexoProofIndex(db database.DB, pruned bool, maxMemoryUsage int64, + chainParams *chaincfg.Params, dataDir string) (*UtreexoProofIndex, error) { + idx := &UtreexoProofIndex{ db: db, chainParams: chainParams, @@ -575,7 +580,7 @@ func NewUtreexoProofIndex(db database.DB, pruned bool, dataDir string, chainPara DataDir: dataDir, Name: db.Type(), Params: chainParams, - }) + }, maxMemoryUsage) if err != nil { return nil, err } diff --git a/blockchain/utreexoio.go b/blockchain/utreexoio.go index 73012f2a..71da315a 100644 --- a/blockchain/utreexoio.go +++ b/blockchain/utreexoio.go @@ -6,6 +6,7 @@ package blockchain import ( "fmt" + "sync" "github.com/syndtr/goleveldb/leveldb" "github.com/utreexo/utreexo" @@ -38,65 +39,415 @@ func deserializeLeaf(serialized [leafLength]byte) utreexo.Leaf { return leaf } +// cachedFlag is the status of each of the cached elements in the NodesBackEnd. +type cachedFlag uint8 + +const ( + // fresh means it's never been in the database + fresh cachedFlag = 1 << iota + + // modified means it's been in the database and has been modified in the cache. + modified + + // removed means that the key it belongs to has been removed but it's still + // in the cache. + removed +) + +// cachedLeaf has the leaf and a flag for the status in the cache. +type cachedLeaf struct { + leaf utreexo.Leaf + flags cachedFlag +} + +// isFresh returns if the cached leaf has never been in the database. +func (c *cachedLeaf) isFresh() bool { + return c.flags&fresh == fresh +} + +// isModified returns if the cached leaf has been in the database and was modified in the cache. +func (c *cachedLeaf) isModified() bool { + return c.flags&modified == modified +} + +// isRemoved returns if the key for this cached leaf has been removed. +func (c *cachedLeaf) isRemoved() bool { + return c.flags&removed == removed +} + +const ( + // Calculated with unsafe.Sizeof(cachedLeaf{}). + cachedLeafSize = 34 + + // Bucket size for the node map. + nodesMapBucketSize = 16 + uint64Size*uint64Size + uint64Size*cachedLeafSize + + // Bucket size for the cached leaves map. + cachedLeavesMapBucketSize = 16 + uint64Size*chainhash.HashSize + uint64Size*uint64Size +) + +// nodesMapSlice is a slice of maps for utxo entries. 
The slice of maps are needed to +// guarantee that the map will only take up N amount of bytes. As of v1.20, the +// go runtime will allocate 2^N + few extra buckets, meaning that for large N, we'll +// allocate a lot of extra memory if the amount of entries goes over the previously +// allocated buckets. A slice of maps allows us to have a better control of how much +// total memory gets allocated by all the maps. +type nodesMapSlice struct { + // mtx protects against concurrent access for the map slice. + mtx *sync.Mutex + + // maps are the underlying maps in the slice of maps. + maps []map[uint64]cachedLeaf + + // maxEntries is the maximum amount of elemnts that the map is allocated for. + maxEntries []int + + // maxTotalMemoryUsage is the maximum memory usage in bytes that the state + // should contain in normal circumstances. + maxTotalMemoryUsage uint64 +} + +// length returns the length of all the maps in the map slice added together. +// +// This function is safe for concurrent access. +func (ms *nodesMapSlice) length() int { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var l int + for _, m := range ms.maps { + l += len(m) + } + + return l +} + +// get looks for the outpoint in all the maps in the map slice and returns +// the entry. nil and false is returned if the outpoint is not found. +// +// This function is safe for concurrent access. +func (ms *nodesMapSlice) get(k uint64) (cachedLeaf, bool) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var v cachedLeaf + var found bool + + for _, m := range ms.maps { + v, found = m[k] + if found { + return v, found + } + } + + return v, found +} + +// put puts the keys and the values into one of the maps in the map slice. If the +// existing maps are all full and it fails to put the entry in the cache, it will +// return false. +// +// This function is safe for concurrent access. +func (ms *nodesMapSlice) put(k uint64, v cachedLeaf) bool { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i := range ms.maxEntries { + m := ms.maps[i] + _, found := m[k] + if found { + m[k] = v + return true + } + } + + for i, maxNum := range ms.maxEntries { + m := ms.maps[i] + if len(m) >= maxNum { + // Don't try to insert if the map already at max since + // that'll force the map to allocate double the memory it's + // currently taking up. + continue + } + + m[k] = v + return true // Return as we were successful in adding the entry. + } + + // We only reach this code if we've failed to insert into the map above as + // all the current maps were full. + return false +} + +// delete attempts to delete the given outpoint in all of the maps. No-op if the +// key doesn't exist. +// +// This function is safe for concurrent access. +func (ms *nodesMapSlice) delete(k uint64) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i := 0; i < len(ms.maps); i++ { + delete(ms.maps[i], k) + } +} + +// deleteMaps deletes all maps and allocate new ones with the maxEntries defined in +// ms.maxEntries. +// +// This function is safe for concurrent access. +func (ms *nodesMapSlice) deleteMaps() { + for i := range ms.maxEntries { + ms.maps[i] = make(map[uint64]cachedLeaf, ms.maxEntries[i]) + } +} + +// calcNumEntries returns a list of ints that represent how much entries a map +// should allocate for to stay under the maxMemoryUsage and an int that's a sum +// of the returned list of ints. 
+func calcNumEntries(bucketSize uintptr, maxMemoryUsage int64) ([]int, int) { + entries := []int{} + + totalElemCount := 0 + totalMapSize := int64(0) + for maxMemoryUsage > totalMapSize { + numMaxElements := calculateMinEntries(int(maxMemoryUsage-totalMapSize), nodesMapBucketSize) + if numMaxElements == 0 { + break + } + + mapSize := int64(calculateRoughMapSize(numMaxElements, nodesMapBucketSize)) + if maxMemoryUsage <= totalMapSize+mapSize { + break + } + totalMapSize += mapSize + + entries = append(entries, numMaxElements) + totalElemCount += numMaxElements + } + + return entries, totalElemCount +} + +// createMaps creates a slice of maps and returns the total count that the maps +// can handle. maxEntries are also set along with the newly created maps. +func (ms *nodesMapSlice) createMaps(maxMemoryUsage int64) int64 { + if maxMemoryUsage <= 0 { + return 0 + } + + // Get the entry count for the maps we'll allocate. + var totalElemCount int + ms.maxEntries, totalElemCount = calcNumEntries(nodesMapBucketSize, maxMemoryUsage) + + // maxMemoryUsage that's smaller than the minimum map size will return a totalElemCount + // that's equal to 0. + if totalElemCount <= 0 { + return 0 + } + + // Create the maps. + ms.maps = make([]map[uint64]cachedLeaf, len(ms.maxEntries)) + for i := range ms.maxEntries { + ms.maps[i] = make(map[uint64]cachedLeaf, ms.maxEntries[i]) + } + + return int64(totalElemCount) +} + +// newNodesMapSlice returns a newNodesMapSlice and the total amount of elements +// that the map slice can accomodate. +func newNodesMapSlice(maxTotalMemoryUsage int64) (nodesMapSlice, int64) { + ms := nodesMapSlice{ + mtx: new(sync.Mutex), + maxTotalMemoryUsage: uint64(maxTotalMemoryUsage), + } + + totalCacheElem := ms.createMaps(maxTotalMemoryUsage) + return ms, totalCacheElem +} + var _ utreexo.NodesInterface = (*NodesBackEnd)(nil) -// NodesBackEnd implements the NodesInterface interface. It's really just the database. +// NodesBackEnd implements the NodesInterface interface. type NodesBackEnd struct { - db *leveldb.DB + db *leveldb.DB + maxCacheElem int64 + cache nodesMapSlice } // InitNodesBackEnd returns a newly initialized NodesBackEnd which implements // utreexo.NodesInterface. -func InitNodesBackEnd(datadir string) (*NodesBackEnd, error) { +func InitNodesBackEnd(datadir string, maxTotalMemoryUsage int64) (*NodesBackEnd, error) { db, err := leveldb.OpenFile(datadir, nil) if err != nil { return nil, err } - return &NodesBackEnd{db: db}, nil + cache, maxCacheElems := newNodesMapSlice(maxTotalMemoryUsage) + nb := NodesBackEnd{ + db: db, + maxCacheElem: maxCacheElems, + cache: cache, + } + + return &nb, nil } -// Get returns the leaf from the underlying map. -func (m *NodesBackEnd) Get(k uint64) (utreexo.Leaf, bool) { +// dbPut serializes and puts the key value pair into the database. +func (m *NodesBackEnd) dbPut(k uint64, v utreexo.Leaf) error { size := serializeSizeVLQ(k) buf := make([]byte, size) putVLQ(buf, k) - val, err := m.db.Get(buf[:], nil) + serialized := serializeLeaf(v) + return m.db.Put(buf[:], serialized[:], nil) +} + +// dbGet fetches the value from the database and deserializes it and returns +// the leaf value and a boolean for whether or not it was successful. +func (m *NodesBackEnd) dbGet(k uint64) (utreexo.Leaf, bool) { + size := serializeSizeVLQ(k) + buf := make([]byte, size) + putVLQ(buf, k) + + val, err := m.db.Get(buf, nil) if err != nil { return utreexo.Leaf{}, false } - // Must be leafLength bytes long. 
if len(val) != leafLength { return utreexo.Leaf{}, false } - return deserializeLeaf(*(*[leafLength]byte)(val)), true + leaf := deserializeLeaf(*(*[leafLength]byte)(val)) + return leaf, true } -// Put puts the given position and the leaf to the underlying map. -func (m *NodesBackEnd) Put(k uint64, v utreexo.Leaf) { +// dbDel removes the key from the database. +func (m *NodesBackEnd) dbDel(k uint64) error { size := serializeSizeVLQ(k) buf := make([]byte, size) putVLQ(buf, k) + return m.db.Delete(buf, nil) +} - serialized := serializeLeaf(v) - m.db.Put(buf, serialized[:], nil) +// Get returns the leaf from the underlying map. +func (m *NodesBackEnd) Get(k uint64) (utreexo.Leaf, bool) { + if m.maxCacheElem == 0 { + return m.dbGet(k) + } + + // Look it up on the cache first. + cLeaf, found := m.cache.get(k) + if found { + // The leaf might not have been cleaned up yet. + if cLeaf.isRemoved() { + return utreexo.Leaf{}, false + } + + // If the cache is full, flush the cache then put + // the leaf in. + if !m.cache.put(k, cLeaf) { + m.flush() + m.cache.put(k, cLeaf) + } + + // If we found it, return here. + return cLeaf.leaf, true + } + + // Since it's not in the cache, look it up in the database. + leaf, found := m.dbGet(k) + if !found { + // If it's not in the database and the cache, it + // doesn't exist. + return utreexo.Leaf{}, false + } + + // Cache the leaf before returning it. + if !m.cache.put(k, cachedLeaf{leaf: leaf}) { + m.flush() + m.cache.put(k, cachedLeaf{leaf: leaf}) + } + return leaf, true +} + +// Put puts the given position and the leaf to the underlying map. +func (m *NodesBackEnd) Put(k uint64, v utreexo.Leaf) { + if m.maxCacheElem == 0 { + err := m.dbPut(k, v) + if err != nil { + log.Warnf("NodesBackEnd dbPut fail. %v", err) + } + + return + } + + if int64(m.cache.length()) > m.maxCacheElem { + m.flush() + } + + leaf, found := m.cache.get(k) + if found { + leaf.flags &^= removed + l := cachedLeaf{ + leaf: v, + flags: leaf.flags | modified, + } + + // It shouldn't fail here but handle it anyways. + if !m.cache.put(k, l) { + m.flush() + m.cache.put(k, l) + } + } else { + // If the key isn't found, mark it as fresh. + l := cachedLeaf{ + leaf: v, + flags: fresh, + } + + // It shouldn't fail here but handle it anyways. + if !m.cache.put(k, l) { + m.flush() + m.cache.put(k, l) + } + } } // Delete removes the given key from the underlying map. No-op if the key // doesn't exist. func (m *NodesBackEnd) Delete(k uint64) { - size := serializeSizeVLQ(k) - buf := make([]byte, size) - putVLQ(buf, k) + if m.maxCacheElem == 0 { + err := m.dbDel(k) + if err != nil { + log.Warnf("NodesBackEnd dbDel fail. %v", err) + } - m.db.Delete(buf, nil) + return + } + + leaf, found := m.cache.get(k) + if !found { + if int64(m.cache.length()) >= m.maxCacheElem { + m.flush() + } + } + l := cachedLeaf{ + leaf: leaf.leaf, + flags: leaf.flags | removed, + } + if !m.cache.put(k, l) { + m.flush() + m.cache.put(k, l) + } } // Length returns the amount of items in the underlying database. func (m *NodesBackEnd) Length() int { + m.flush() + length := 0 iter := m.db.NewIterator(nil, nil) for iter.Next() { @@ -109,6 +460,8 @@ func (m *NodesBackEnd) Length() int { // ForEach calls the given function for each of the elements in the underlying map. 
func (m *NodesBackEnd) ForEach(fn func(uint64, utreexo.Leaf) error) error { + m.flush() + iter := m.db.NewIterator(nil, nil) for iter.Next() { // Remember that the contents of the returned slice should not be modified, and @@ -131,57 +484,268 @@ func (m *NodesBackEnd) ForEach(fn func(uint64, utreexo.Leaf) error) error { return iter.Error() } -// Close closes the underlying database. +// flush saves all the cached entries to disk and resets the cache map. +func (m *NodesBackEnd) flush() { + if m.maxCacheElem == 0 { + return + } + + for _, mm := range m.cache.maps { + for k, v := range mm { + if v.isRemoved() { + err := m.dbDel(k) + if err != nil { + log.Warnf("NodesBackEnd flush error. %v", err) + } + } else if v.isFresh() || v.isModified() { + err := m.dbPut(k, v.leaf) + if err != nil { + log.Warnf("NodesBackEnd flush error. %v", err) + } + } + } + } + + m.cache.deleteMaps() +} + +// Close flushes the cache and closes the underlying database. func (m *NodesBackEnd) Close() error { + m.flush() + return m.db.Close() } +// cachedLeavesMapSlice is a slice of maps for utxo entries. The slice of maps are needed to +// guarantee that the map will only take up N amount of bytes. As of v1.20, the +// go runtime will allocate 2^N + few extra buckets, meaning that for large N, we'll +// allocate a lot of extra memory if the amount of entries goes over the previously +// allocated buckets. A slice of maps allows us to have a better control of how much +// total memory gets allocated by all the maps. +type cachedLeavesMapSlice struct { + // mtx protects against concurrent access for the map slice. + mtx *sync.Mutex + + // maps are the underlying maps in the slice of maps. + maps []map[utreexo.Hash]uint64 + + // maxEntries is the maximum amount of elemnts that the map is allocated for. + maxEntries []int + + // maxTotalMemoryUsage is the maximum memory usage in bytes that the state + // should contain in normal circumstances. + maxTotalMemoryUsage uint64 +} + +// length returns the length of all the maps in the map slice added together. +// +// This function is safe for concurrent access. +func (ms *cachedLeavesMapSlice) length() int { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var l int + for _, m := range ms.maps { + l += len(m) + } + + return l +} + +// get looks for the outpoint in all the maps in the map slice and returns +// the entry. nil and false is returned if the outpoint is not found. +// +// This function is safe for concurrent access. +func (ms *cachedLeavesMapSlice) get(k utreexo.Hash) (uint64, bool) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var v uint64 + var found bool + + for _, m := range ms.maps { + v, found = m[k] + if found { + return v, found + } + } + + return 0, false +} + +// put puts the keys and the values into one of the maps in the map slice. If the +// existing maps are all full and it fails to put the entry in the cache, it will +// return false. +// +// This function is safe for concurrent access. +func (ms *cachedLeavesMapSlice) put(k utreexo.Hash, v uint64) bool { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i := range ms.maxEntries { + m := ms.maps[i] + _, found := m[k] + if found { + m[k] = v + return true + } + } + + for i, maxNum := range ms.maxEntries { + m := ms.maps[i] + if len(m) >= maxNum { + // Don't try to insert if the map already at max since + // that'll force the map to allocate double the memory it's + // currently taking up. + continue + } + + m[k] = v + return true // Return as we were successful in adding the entry. 
+ } + + // We only reach this code if we've failed to insert into the map above as + // all the current maps were full. + return false +} + +// delete attempts to delete the given outpoint in all of the maps. No-op if the +// outpoint doesn't exist. +// +// This function is safe for concurrent access. +func (ms *cachedLeavesMapSlice) delete(k utreexo.Hash) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i := 0; i < len(ms.maps); i++ { + delete(ms.maps[i], k) + } +} + +// createMaps creates a slice of maps and returns the total count that the maps +// can handle. maxEntries are also set along with the newly created maps. +func (ms *cachedLeavesMapSlice) createMaps(maxMemoryUsage int64) int64 { + if maxMemoryUsage <= 0 { + return 0 + } + + // Get the entry count for the maps we'll allocate. + var totalElemCount int + ms.maxEntries, totalElemCount = calcNumEntries(nodesMapBucketSize, maxMemoryUsage) + + // maxMemoryUsage that's smaller than the minimum map size will return a totalElemCount + // that's equal to 0. + if totalElemCount <= 0 { + return 0 + } + + // Create the maps. + ms.maps = make([]map[utreexo.Hash]uint64, len(ms.maxEntries)) + for i := range ms.maxEntries { + ms.maps[i] = make(map[utreexo.Hash]uint64, ms.maxEntries[i]) + } + + return int64(totalElemCount) +} + +// newCachedLeavesMapSlice returns a newCachedLeavesMapSlice and the total amount of elements +// that the map slice can accomodate. +func newCachedLeavesMapSlice(maxTotalMemoryUsage int64) (cachedLeavesMapSlice, int64) { + ms := cachedLeavesMapSlice{ + mtx: new(sync.Mutex), + maxTotalMemoryUsage: uint64(maxTotalMemoryUsage), + } + + totalCacheElem := ms.createMaps(maxTotalMemoryUsage) + return ms, totalCacheElem +} + var _ utreexo.CachedLeavesInterface = (*CachedLeavesBackEnd)(nil) -// CachedLeavesBackEnd implements the CachedLeavesInterface interface. It's really just a map. +// CachedLeavesBackEnd implements the CachedLeavesInterface interface. The cache assumes +// that anything in the cache doesn't exist in the db and vise-versa. type CachedLeavesBackEnd struct { - db *leveldb.DB + db *leveldb.DB + maxCacheElem int64 + cache cachedLeavesMapSlice +} + +// dbPut serializes and puts the key and the value into the database. +func (m *CachedLeavesBackEnd) dbPut(k utreexo.Hash, v uint64) error { + size := serializeSizeVLQ(v) + buf := make([]byte, size) + putVLQ(buf, v) + return m.db.Put(k[:], buf, nil) +} + +// dbGet fetches and deserializes the value from the database. +func (m *CachedLeavesBackEnd) dbGet(k utreexo.Hash) (uint64, bool) { + val, err := m.db.Get(k[:], nil) + if err != nil { + return 0, false + } + pos, _ := deserializeVLQ(val) + + return pos, true } // InitCachedLeavesBackEnd returns a newly initialized CachedLeavesBackEnd which implements // utreexo.CachedLeavesInterface. -func InitCachedLeavesBackEnd(datadir string) (*CachedLeavesBackEnd, error) { +func InitCachedLeavesBackEnd(datadir string, maxMemoryUsage int64) (*CachedLeavesBackEnd, error) { db, err := leveldb.OpenFile(datadir, nil) if err != nil { return nil, err } - return &CachedLeavesBackEnd{db: db}, nil + cache, maxCacheElem := newCachedLeavesMapSlice(maxMemoryUsage) + return &CachedLeavesBackEnd{maxCacheElem: maxCacheElem, db: db, cache: cache}, nil } -// Get returns the data from the underlying map. +// Get returns the data from the underlying cache or the database. 
func (m *CachedLeavesBackEnd) Get(k utreexo.Hash) (uint64, bool) { - val, err := m.db.Get(k[:], nil) - if err != nil { - return 0, false + if m.maxCacheElem == 0 { + return m.dbGet(k) } - pos, _ := deserializeVLQ(val) - return pos, true + pos, found := m.cache.get(k) + if !found { + pos, found = m.dbGet(k) + } + + return pos, found } -// Put puts the given data to the underlying map. +// Put puts the given data to the underlying cache. If the cache is full, it evicts +// the earliest entries to make room. func (m *CachedLeavesBackEnd) Put(k utreexo.Hash, v uint64) { - size := serializeSizeVLQ(v) - buf := make([]byte, size) - putVLQ(buf, v) + if m.maxCacheElem == 0 { + err := m.dbPut(k, v) + if err != nil { + log.Warnf("NodesBackEnd dbPut fail. %v", err) + } + + return + } - m.db.Put(k[:], buf, nil) + length := m.cache.length() + if int64(length) >= m.maxCacheElem { + m.flush() + } + + m.cache.put(k, v) } // Delete removes the given key from the underlying map. No-op if the key // doesn't exist. func (m *CachedLeavesBackEnd) Delete(k utreexo.Hash) { + m.cache.delete(k) m.db.Delete(k[:], nil) } -// Length returns the amount of items in the underlying db. +// Length returns the amount of items in the underlying db and the cache. func (m *CachedLeavesBackEnd) Length() int { + m.flush() + length := 0 iter := m.db.NewIterator(nil, nil) for iter.Next() { @@ -194,6 +758,8 @@ func (m *CachedLeavesBackEnd) Length() int { // ForEach calls the given function for each of the elements in the underlying map. func (m *CachedLeavesBackEnd) ForEach(fn func(utreexo.Hash, uint64) error) error { + m.flush() + iter := m.db.NewIterator(nil, nil) for iter.Next() { // Remember that the contents of the returned slice should not be modified, and @@ -210,7 +776,27 @@ func (m *CachedLeavesBackEnd) ForEach(fn func(utreexo.Hash, uint64) error) error return iter.Error() } -// Close closes the underlying database. +// Flush resets the cache and saves all the key values onto the database. +func (m *CachedLeavesBackEnd) flush() { + for i := range m.cache.maxEntries { + mp := m.cache.maps[i] + for k, v := range mp { + err := m.dbPut(k, v) + if err != nil { + log.Warnf("CachedLeavesBackEnd dbPut fail. %v", err) + } + } + } + + // Create the maps. + m.cache.maps = make([]map[utreexo.Hash]uint64, len(m.cache.maxEntries)) + for i := range m.cache.maxEntries { + m.cache.maps[i] = make(map[utreexo.Hash]uint64, m.cache.maxEntries[i]) + } +} + +// Close flushes all the cached entries and then closes the underlying database. func (m *CachedLeavesBackEnd) Close() error { + m.flush() return m.db.Close() } diff --git a/blockchain/utreexoio_test.go b/blockchain/utreexoio_test.go new file mode 100644 index 00000000..84d38d57 --- /dev/null +++ b/blockchain/utreexoio_test.go @@ -0,0 +1,443 @@ +package blockchain + +import ( + "crypto/sha256" + "encoding/binary" + "math" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/utreexo/utreexo" +) + +// isApproximate returns if a and b are within the given percentage of each other. 
+func isApproximate(a, b, percentage float64) bool { + // Calculate % of 'a' + percentageOfA := math.Abs(a) * percentage + + // Calculate the absolute difference between 'a' and 'b' + difference := math.Abs(a - b) + + // Check if the absolute difference is less than or equal to 1% of 'a' + return difference <= percentageOfA +} + +func TestCalcNumEntries(t *testing.T) { + tests := []struct { + maxSize int64 + bucketSize uintptr + }{ + {100 * 1024 * 1024, nodesMapBucketSize}, + {150 * 1024 * 1024, nodesMapBucketSize}, + {250 * 1024 * 1024, nodesMapBucketSize}, + {1000 * 1024 * 1024, nodesMapBucketSize}, + {10000 * 1024 * 1024, nodesMapBucketSize}, + + {100 * 1024 * 1024, cachedLeavesMapBucketSize}, + {150 * 1024 * 1024, cachedLeavesMapBucketSize}, + {250 * 1024 * 1024, cachedLeavesMapBucketSize}, + {1000 * 1024 * 1024, cachedLeavesMapBucketSize}, + {10000 * 1024 * 1024, cachedLeavesMapBucketSize}, + } + + for _, test := range tests { + entries, _ := calcNumEntries(test.bucketSize, test.maxSize) + + roughSize := 0 + for _, entry := range entries { + roughSize += calculateRoughMapSize(entry, nodesMapBucketSize) + } + + // Check if the roughSize is within 1% of test.maxSize. + if !isApproximate(float64(test.maxSize), float64(roughSize), 0.01) { + t.Fatalf("Expected value to be approximately %v but got %v", + test.maxSize, roughSize) + } + } +} + +func TestNodesMapSliceMaxCacheElems(t *testing.T) { + _, maxCacheElems := newNodesMapSlice(0) + if maxCacheElems != 0 { + t.Fatalf("expected %v got %v", 0, maxCacheElems) + } + + _, maxCacheElems = newNodesMapSlice(-1) + if maxCacheElems != 0 { + t.Fatalf("expected %v got %v", 0, maxCacheElems) + } + + _, maxCacheElems = newNodesMapSlice(8000) + if maxCacheElems <= 0 { + t.Fatalf("expected something bigger than 0 but got %v", maxCacheElems) + } + + _, maxCacheElems = newCachedLeavesMapSlice(0) + if maxCacheElems != 0 { + t.Fatalf("expected %v got %v", 0, maxCacheElems) + } + + _, maxCacheElems = newCachedLeavesMapSlice(-1) + if maxCacheElems != 0 { + t.Fatalf("expected %v got %v", 0, maxCacheElems) + } + + _, maxCacheElems = newCachedLeavesMapSlice(8000) + if maxCacheElems <= 0 { + t.Fatalf("expected something bigger than 0 but got %v", maxCacheElems) + } +} + +func TestNodesMapSliceDuplicates(t *testing.T) { + m, maxElems := newNodesMapSlice(8000) + for i := 0; i < 10; i++ { + for j := int64(0); j < maxElems; j++ { + if !m.put(uint64(j), cachedLeaf{}) { + t.Fatalf("unexpected error on m.put") + } + } + } + + if m.length() != int(maxElems) { + t.Fatalf("expected length of %v but got %v", + maxElems, m.length()) + } + + // Try inserting x which should be unique. Should fail as the map is full. + x := uint64(0) + x -= 1 + if m.put(x, cachedLeaf{}) { + t.Fatalf("expected error but successfully called put") + } + + // Remove the first element in the first map and then try inserting + // a duplicate element. + m.delete(0) + x = uint64(maxElems) - 1 + if !m.put(x, cachedLeaf{}) { + t.Fatalf("unexpected failure on put") + } + + // Make sure the length of the map is 1 less than the max elems. + if m.length() != int(maxElems)-1 { + t.Fatalf("expected length of %v but got %v", + maxElems-1, m.length()) + } + + // Put 0 back in and then compare the map. 
+ if !m.put(0, cachedLeaf{}) { + t.Fatalf("didn't expect error but unsuccessfully called put") + } + if m.length() != int(maxElems) { + t.Fatalf("expected length of %v but got %v", + maxElems, m.length()) + } +} + +func uint64ToHash(v uint64) utreexo.Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], v) + return sha256.Sum256(buf[:]) +} + +func TestCachedLeaveMapSliceDuplicates(t *testing.T) { + m, maxElems := newCachedLeavesMapSlice(8000) + for i := 0; i < 10; i++ { + for j := int64(0); j < maxElems; j++ { + if !m.put(uint64ToHash(uint64(j)), 0) { + t.Fatalf("unexpected error on m.put") + } + } + } + + if m.length() != int(maxElems) { + t.Fatalf("expected length of %v but got %v", + maxElems, m.length()) + } + + // Try inserting x which should be unique. Should fail as the map is full. + x := uint64(0) + x -= 1 + if m.put(uint64ToHash(x), 0) { + t.Fatalf("expected error but successfully called put") + } + + // Remove the first element in the first map and then try inserting + // a duplicate element. + m.delete(uint64ToHash(0)) + x = uint64(maxElems) - 1 + if !m.put(uint64ToHash(x), 0) { + t.Fatalf("unexpected failure on put") + } + + // Make sure the length of the map is 1 less than the max elems. + if m.length() != int(maxElems)-1 { + t.Fatalf("expected length of %v but got %v", + maxElems-1, m.length()) + } + + // Put 0 back in and then compare the map. + if !m.put(uint64ToHash(0), 0) { + t.Fatalf("didn't expect error but unsuccessfully called put") + } + if m.length() != int(maxElems) { + t.Fatalf("expected length of %v but got %v", + maxElems, m.length()) + } +} + +func TestCachedLeavesBackEnd(t *testing.T) { + tests := []struct { + tmpDir string + maxMemUsage int64 + }{ + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestCachedLeavesBackEnd0") + }(), + maxMemUsage: -1, + }, + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestCachedLeavesBackEnd1") + }(), + maxMemUsage: 0, + }, + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestCachedLeavesBackEnd2") + }(), + maxMemUsage: 1 * 1024 * 1024, + }, + } + + for _, test := range tests { + cachedLeavesBackEnd, err := InitCachedLeavesBackEnd(test.tmpDir, test.maxMemUsage) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(test.tmpDir) + + count := uint64(1000) + compareMap := make(map[utreexo.Hash]uint64) + for i := uint64(0); i < count/2; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + compareMap[hash] = i + cachedLeavesBackEnd.Put(hash, i) + } + + // Close and reopen the backend. + err = cachedLeavesBackEnd.Close() + if err != nil { + t.Fatal(err) + } + cachedLeavesBackEnd, err = InitCachedLeavesBackEnd(test.tmpDir, test.maxMemUsage) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(1) + // Delete every other element from the backend that we currently have. + go func() { + for i := uint64(0); i < count/2; i++ { + if i%2 != 0 { + continue + } + + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + cachedLeavesBackEnd.Delete(hash) + } + wg.Done() + }() + + wg.Add(1) + // Put the rest of the elements into the backend. + go func() { + for i := count / 2; i < count; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + cachedLeavesBackEnd.Put(hash, i) + } + wg.Done() + }() + + wg.Wait() + + // Do the same for the compare map. We do it here because a hashmap + // isn't concurrency safe. 
+ for i := uint64(0); i < count/2; i++ { + if i%2 != 0 { + continue + } + + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + delete(compareMap, hash) + } + for i := count / 2; i < count; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + compareMap[hash] = i + } + + if cachedLeavesBackEnd.Length() != len(compareMap) { + t.Fatalf("compareMap has %d elements but the backend has %d elements", + len(compareMap), cachedLeavesBackEnd.Length()) + } + + // Compare the map and the backend. + for k, v := range compareMap { + got, found := cachedLeavesBackEnd.Get(k) + if !found { + t.Fatalf("expected %v but it wasn't found", v) + } + + if got != v { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], got) + gotHash := sha256.Sum256(buf[:]) + + binary.LittleEndian.PutUint64(buf[:], v) + expectHash := sha256.Sum256(buf[:]) + + if gotHash != expectHash { + t.Fatalf("for key %v, expected %v but got %v", k.String(), v, got) + } + } + } + } +} + +func TestNodesBackEnd(t *testing.T) { + tests := []struct { + tmpDir string + maxMemUsage int64 + }{ + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestNodesBackEnd0") + }(), + maxMemUsage: -1, + }, + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestNodesBackEnd1") + }(), + maxMemUsage: 0, + }, + { + tmpDir: func() string { + return filepath.Join(os.TempDir(), "TestNodesBackEnd2") + }(), + maxMemUsage: 1 * 1024 * 1024, + }, + } + + for _, test := range tests { + nodesBackEnd, err := InitNodesBackEnd(test.tmpDir, test.maxMemUsage) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(test.tmpDir) + + count := uint64(1000) + compareMap := make(map[uint64]cachedLeaf) + for i := uint64(0); i < count/2; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + compareMap[i] = cachedLeaf{leaf: utreexo.Leaf{Hash: hash}} + nodesBackEnd.Put(i, utreexo.Leaf{Hash: hash}) + } + + // Close and reopen the backend. + err = nodesBackEnd.Close() + if err != nil { + t.Fatal(err) + } + nodesBackEnd, err = InitNodesBackEnd(test.tmpDir, test.maxMemUsage) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(1) + // Delete every other element from the backend that we currently have. + go func() { + for i := uint64(0); i < count/2; i++ { + if i%2 != 0 { + continue + } + + nodesBackEnd.Delete(i) + } + wg.Done() + }() + + wg.Add(1) + // Put the rest of the elements into the backend. + go func() { + for i := count / 2; i < count; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + nodesBackEnd.Put(i, utreexo.Leaf{Hash: hash}) + } + wg.Done() + }() + + wg.Wait() + + // Do the same for the compare map. We do it here because a hashmap + // isn't concurrency safe. + for i := uint64(0); i < count/2; i++ { + if i%2 != 0 { + continue + } + + delete(compareMap, i) + } + for i := count / 2; i < count; i++ { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], i) + hash := sha256.Sum256(buf[:]) + + compareMap[i] = cachedLeaf{leaf: utreexo.Leaf{Hash: hash}} + } + + if nodesBackEnd.Length() != len(compareMap) { + t.Fatalf("compareMap has %d elements but the backend has %d elements", + len(compareMap), nodesBackEnd.Length()) + } + + // Compare the map and the backend. 
+ for k, v := range compareMap { + got, found := nodesBackEnd.Get(k) + if !found { + t.Fatalf("expected %v but it wasn't found", v) + } + + if got.Hash != v.leaf.Hash { + if got.Hash != v.leaf.Hash { + t.Fatalf("for key %v, expected %v but got %v", k, v.leaf.Hash, got.Hash) + } + } + } + } +} diff --git a/config.go b/config.go index a1db8749..59f21754 100644 --- a/config.go +++ b/config.go @@ -197,19 +197,20 @@ type config struct { BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"` // Indexing options. - AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"` - TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` - TTLIndex bool `long:"ttlindex" description:"Maintain a full time to live index for all stxos available via the getttl RPC"` - UtreexoProofIndex bool `long:"utreexoproofindex" description:"Maintain a utreexo proof for all blocks"` - FlatUtreexoProofIndex bool `long:"flatutreexoproofindex" description:"Maintain a utreexo proof for all blocks in flat files"` - CFilters bool `long:"cfilters" description:"Enable committed filtering (CF) support"` - NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` - DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."` - DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."` - DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."` - DropTTLIndex bool `long:"dropttlindex" description:"Deletes the time to live index from the database on start up and then exits."` - DropUtreexoProofIndex bool `long:"droputreexoproofindex" description:"Deletes the utreexo proof index from the database on start up and then exits."` - DropFlatUtreexoProofIndex bool `long:"dropflatutreexoproofindex" description:"Deletes the flat utreexo proof index from the database on start up and then exits."` + AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"` + TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` + TTLIndex bool `long:"ttlindex" description:"Maintain a full time to live index for all stxos available via the getttl RPC"` + UtreexoProofIndex bool `long:"utreexoproofindex" description:"Maintain a utreexo proof for all blocks"` + FlatUtreexoProofIndex bool `long:"flatutreexoproofindex" description:"Maintain a utreexo proof for all blocks in flat files"` + UtreexoProofIndexMaxMemory int64 `long:"utreexoproofindexmaxmemory" description:"The maxmimum memory in mebibytes (MiB) that the utreexo proof indexes will use up. Passing in 0 will make the entire proof index stay on disk. Passing in a negative value will make the entire proof index stay in memory. 
Default of 250MiB."` + CFilters bool `long:"cfilters" description:"Enable committed filtering (CF) support"` + NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` + DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."` + DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."` + DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."` + DropTTLIndex bool `long:"dropttlindex" description:"Deletes the time to live index from the database on start up and then exits."` + DropUtreexoProofIndex bool `long:"droputreexoproofindex" description:"Deletes the utreexo proof index from the database on start up and then exits."` + DropFlatUtreexoProofIndex bool `long:"dropflatutreexoproofindex" description:"Deletes the flat utreexo proof index from the database on start up and then exits."` // Wallet options. WatchOnlyWallet bool `long:"watchonlywallet" description:"Enable the watch only wallet with utreexo proofs. Must have --noutreexo disabled"` @@ -465,35 +466,36 @@ func newConfigParser(cfg *config, so *serviceOptions, options flags.Options) *fl func loadConfig() (*config, []string, error) { // Default config. cfg := config{ - ConfigFile: defaultConfigFile, - DebugLevel: defaultLogLevel, - MaxPeers: defaultMaxPeers, - BanDuration: defaultBanDuration, - BanThreshold: defaultBanThreshold, - RPCMaxClients: defaultMaxRPCClients, - RPCMaxWebsockets: defaultMaxRPCWebsockets, - RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs, - DataDir: defaultDataDir, - LogDir: defaultLogDir, - DbType: defaultDbType, - RPCKey: defaultRPCKeyFile, - RPCCert: defaultRPCCertFile, - MinRelayTxFee: mempool.DefaultMinRelayTxFee.ToBTC(), - FreeTxRelayLimit: defaultFreeTxRelayLimit, - TrickleInterval: defaultTrickleInterval, - BlockMinSize: defaultBlockMinSize, - BlockMaxSize: defaultBlockMaxSize, - BlockMinWeight: defaultBlockMinWeight, - BlockMaxWeight: defaultBlockMaxWeight, - BlockPrioritySize: mempool.DefaultBlockPrioritySize, - MaxOrphanTxs: defaultMaxOrphanTransactions, - SigCacheMaxSize: defaultSigCacheMaxSize, - UtxoCacheMaxSizeMiB: defaultUtxoCacheMaxSizeMiB, - Generate: defaultGenerate, - TxIndex: defaultTxIndex, - TTLIndex: defaultTTLIndex, - AddrIndex: defaultAddrIndex, - Prune: pruneMinSize, + ConfigFile: defaultConfigFile, + DebugLevel: defaultLogLevel, + MaxPeers: defaultMaxPeers, + BanDuration: defaultBanDuration, + BanThreshold: defaultBanThreshold, + RPCMaxClients: defaultMaxRPCClients, + RPCMaxWebsockets: defaultMaxRPCWebsockets, + RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs, + DataDir: defaultDataDir, + LogDir: defaultLogDir, + DbType: defaultDbType, + RPCKey: defaultRPCKeyFile, + RPCCert: defaultRPCCertFile, + MinRelayTxFee: mempool.DefaultMinRelayTxFee.ToBTC(), + FreeTxRelayLimit: defaultFreeTxRelayLimit, + TrickleInterval: defaultTrickleInterval, + BlockMinSize: defaultBlockMinSize, + BlockMaxSize: defaultBlockMaxSize, + BlockMinWeight: defaultBlockMinWeight, + BlockMaxWeight: defaultBlockMaxWeight, + BlockPrioritySize: mempool.DefaultBlockPrioritySize, + MaxOrphanTxs: defaultMaxOrphanTransactions, + SigCacheMaxSize: defaultSigCacheMaxSize, + UtxoCacheMaxSizeMiB: defaultUtxoCacheMaxSizeMiB, + UtreexoProofIndexMaxMemory: defaultUtxoCacheMaxSizeMiB, + Generate: defaultGenerate, + TxIndex: 
defaultTxIndex, + TTLIndex: defaultTTLIndex, + AddrIndex: defaultAddrIndex, + Prune: pruneMinSize, } // Service options which are only added on Windows. diff --git a/server.go b/server.go index 24c18043..765614d7 100644 --- a/server.go +++ b/server.go @@ -3225,7 +3225,8 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, var err error s.utreexoProofIndex, err = indexers.NewUtreexoProofIndex( - db, cfg.Prune != 0, cfg.DataDir, chainParams) + db, cfg.Prune != 0, cfg.UtreexoProofIndexMaxMemory*1024*1024, + chainParams, cfg.DataDir) if err != nil { return nil, err } @@ -3241,7 +3242,8 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, var err error s.flatUtreexoProofIndex, err = indexers.NewFlatUtreexoProofIndex( - cfg.DataDir, cfg.Prune != 0, chainParams, interval) + cfg.Prune != 0, chainParams, interval, + cfg.UtreexoProofIndexMaxMemory*1024*1024, cfg.DataDir) if err != nil { return nil, err }
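
Reviewer note (not part of the patch): a quick usage sketch of the new constructor signatures and the memory knob they thread through. The argument order, the MiB-to-bytes conversion, and the "0 keeps everything on disk / negative keeps everything in memory" semantics are taken from the diff above; the helper name, the use of chaincfg.MainNetParams, and the github.com/utreexo/utreexod import paths are assumptions for illustration only.

package main

import (
    "log"

    "github.com/utreexo/utreexod/blockchain/indexers"
    "github.com/utreexo/utreexod/chaincfg"
    "github.com/utreexo/utreexod/database"
)

// openUtreexoIndexes is illustrative only. It shows the new argument order and
// the maxMemoryUsage semantics from this patch: the value is in bytes, 0 keeps
// every element on disk, and a negative value keeps everything in memory.
func openUtreexoIndexes(db database.DB, dataDir string, pruned bool, maxMemMiB int64) error {
    // The --utreexoproofindexmaxmemory option is denominated in MiB, so
    // server.go multiplies it out to bytes before handing it to the indexes.
    maxMem := maxMemMiB * 1024 * 1024

    // A nil proofGenInterVal falls back to the default proof generation interval.
    if _, err := indexers.NewFlatUtreexoProofIndex(pruned, &chaincfg.MainNetParams, nil, maxMem, dataDir); err != nil {
        return err
    }

    _, err := indexers.NewUtreexoProofIndex(db, pruned, maxMem, &chaincfg.MainNetParams, dataDir)
    return err
}

func main() {
    log.Println("see openUtreexoIndexes for the constructor wiring sketch")
}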
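
The nodesMapSlice and cachedLeavesMapSlice types introduced above both rely on the same trick: several pre-sized maps that are never allowed to grow past their allocation, so the Go runtime never doubles a huge map's bucket array when the cache fills up. Below is a stripped-down, standalone sketch of that insert path, using simplified types and hand-picked entry counts rather than the patch's calcNumEntries-derived sizing.

package main

import "fmt"

// boundedMapSlice is a stand-in for the patch's nodesMapSlice idea: a few
// fixed-size maps whose combined capacity bounds total memory use.
type boundedMapSlice struct {
    maps       []map[uint64]uint64
    maxEntries []int
}

// newBoundedMapSlice splits the element budget across a few pre-allocated maps.
// The patch derives these counts from a byte budget; here they are passed in
// directly to keep the example small.
func newBoundedMapSlice(entryCounts []int) *boundedMapSlice {
    bs := &boundedMapSlice{maxEntries: entryCounts}
    for _, n := range entryCounts {
        bs.maps = append(bs.maps, make(map[uint64]uint64, n))
    }
    return bs
}

// put mirrors the two-pass insert in the patch: update in place if the key
// already lives in some map, otherwise append to the first map with room.
// Returns false when every map is full, which is the caller's cue to flush.
func (bs *boundedMapSlice) put(k, v uint64) bool {
    for _, m := range bs.maps {
        if _, ok := m[k]; ok {
            m[k] = v
            return true
        }
    }
    for i, m := range bs.maps {
        if len(m) >= bs.maxEntries[i] {
            continue // full: inserting would force this map to grow
        }
        m[k] = v
        return true
    }
    return false
}

func main() {
    bs := newBoundedMapSlice([]int{2, 2})
    for i := uint64(0); i < 5; i++ {
        fmt.Printf("put(%d) ok=%v\n", i, bs.put(i, i))
    }
    // The first four inserts succeed; the fifth reports a full cache.
}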
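
The cachedLeaf flag bits decide what flush() does with each cached entry: fresh entries were never in the database, modified entries exist on disk but changed in the cache, and removed entries are tombstones awaiting the next flush. A small self-contained sketch of the bit manipulation NodesBackEnd.Put performs when a previously removed key is written again (illustrative only, mirroring the constants in the diff):

package main

import "fmt"

// cachedFlag mirrors the flag type added in blockchain/utreexoio.go.
type cachedFlag uint8

const (
    fresh cachedFlag = 1 << iota // never been in the database
    modified                     // in the database, changed in the cache
    removed                      // tombstone: delete from the database on flush
)

func main() {
    flags := fresh

    // Re-putting a removed entry clears the tombstone and marks it modified,
    // as NodesBackEnd.Put does with flags &^= removed and flags | modified.
    flags |= removed
    flags &^= removed
    flags |= modified

    fmt.Println("fresh:", flags&fresh == fresh)          // true
    fmt.Println("modified:", flags&modified == modified) // true
    fmt.Println("removed:", flags&removed == removed)    // false
}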