diff --git a/Cargo.lock b/Cargo.lock
index c0b59f0d..cce0c611 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -826,6 +826,18 @@ dependencies = [
  "spin",
 ]
 
+[[package]]
+name = "floresta-compact-filters"
+version = "0.1.0"
+dependencies = [
+ "bitcoin 0.29.2",
+ "bitcoin_hashes 0.11.0",
+ "floresta-chain",
+ "floresta-common",
+ "kv",
+ "log",
+]
+
 [[package]]
 name = "floresta-electrum"
 version = "0.1.0"
@@ -895,6 +907,7 @@ dependencies = [
  "dirs",
  "floresta-chain",
  "floresta-common",
+ "floresta-compact-filters",
  "floresta-electrum",
  "floresta-watch-only",
  "floresta-wire",
@@ -1507,9 +1520,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.19"
+version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 dependencies = [
  "value-bag",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index ac07897f..bd3b61f8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,6 +5,7 @@ members = [
     "crates/floresta-chain",
     "crates/floresta-cli",
     "crates/floresta-common",
+    "crates/floresta-compact-filters",
     "crates/floresta-electrum",
     "crates/floresta-watch-only",
     "crates/floresta-wire",
diff --git a/crates/floresta-cli/src/main.rs b/crates/floresta-cli/src/main.rs
index 93c737be..63eee4f5 100644
--- a/crates/floresta-cli/src/main.rs
+++ b/crates/floresta-cli/src/main.rs
@@ -56,6 +56,7 @@ fn get_req(cmd: &Cli) -> (Vec<Box<RawValue>>, String) {
         Methods::ListTransactions => "gettransactions",
         Methods::Stop => "stop",
         Methods::AddNode { .. } => "addnode",
+        Methods::GetFilters { .. } => "getblockfilter",
     };
     let params = match &cmd.methods {
         Methods::GetBlockchainInfo => Vec::new(),
@@ -92,6 +93,9 @@ fn get_req(cmd: &Cli) -> (Vec<Box<RawValue>>, String) {
         Methods::AddNode { node } => {
             vec![arg(node)]
         }
+        Methods::GetFilters { height } => {
+            vec![arg(height)]
+        }
     };
 
     (params, method.to_string())
@@ -182,4 +186,7 @@ pub enum Methods {
     /// Usage: addnode <ip:port>
     #[command(name = "addnode")]
     AddNode { node: String },
+    /// Returns the filter for a given block
+    #[command(name = "getfilter")]
+    GetFilters { height: u32 },
 }
diff --git a/crates/floresta-compact-filters/Cargo.toml b/crates/floresta-compact-filters/Cargo.toml
new file mode 100644
index 00000000..d1afd098
--- /dev/null
+++ b/crates/floresta-compact-filters/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "floresta-compact-filters"
+version = "0.1.0"
+authors = ["Davidson Sousa"]
+edition = "2018"
+homepage = "https://github.com/davidson-souza/floresta"
+repository = "https://github.com/davidson-souza/floresta"
+description = """
+    BIP158 compact filters for easy rescan of the blockchain in
+    light clients.
+"""
+
+[dependencies]
+bitcoin_hashes = "0.11.0"
+bitcoin = "0.29"
+floresta-common = { path = "../floresta-common" }
+floresta-chain = { path = "../floresta-chain", default-features = false }
+kv = "0.24.0"
+log = "0.4.20"
+
diff --git a/crates/floresta-compact-filters/src/lib.rs b/crates/floresta-compact-filters/src/lib.rs
new file mode 100644
index 00000000..9e84571b
--- /dev/null
+++ b/crates/floresta-compact-filters/src/lib.rs
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: MIT
+
+//! A library for building and querying BIP-158 compact block filters locally
+//!
+//! This library implements BIP-158 client-side Golomb-Rice block filters, without
+//! relying on p2p connections to retrieve them. We use this to speed up wallet
+//! resyncs and allow arbitrary UTXO retrieval for lightning nodes.
+//!
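+//! # Example
+//!
+//! A minimal sketch of the intended flow; `storage` is any [BlockFilterStore]
+//! implementation, and `block`/`height` come from whatever is driving block download:
+//!
+//! ```ignore
+//! let key = [0_u8; 32]; // in practice, a once-generated random key
+//! let filters = FilterBackendBuilder::default()
+//!     .use_storage(Box::new(storage))
+//!     .key_hash(key)
+//!     .index_txids(true)
+//!     .build();
+//! // index blocks as they are downloaded...
+//! filters.filter_block(&block, height)?;
+//! // ...then ask which heights may be of interest for a given txid
+//! let candidates = filters.match_any(0, height, &[QueryType::Txid(txid)]);
+//! ```
+//!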
+//! This module should receive blocks as we download them; it'll create a filter
+//! for each one. Therefore, you can't use this to speed up wallet sync **before**
+//! IBD, since we wouldn't have the filters for all blocks yet.
+
+use bitcoin::{
+    hashes::Hash,
+    util::bip158::{self, BlockFilter, GCSFilterWriter},
+    Block, BlockHash, OutPoint, Transaction, Txid,
+};
+use core::{fmt::Debug, ops::BitAnd};
+use floresta_chain::BlockConsumer;
+use kv::Integer;
+use log::error;
+use std::io::Write;
+
+/// A database that stores our compact filters
+pub trait BlockFilterStore {
+    /// Fetches a block filter
+    fn get_filter(&self, block_height: u64) -> Option<bip158::BlockFilter>;
+    /// Stores a new filter
+    fn put_filter(&self, block_height: u64, block_filter: bip158::BlockFilter);
+}
+
+/// All standard output types defined in the Bitcoin network
+#[derive(Debug, Hash)]
+pub enum OutputTypes {
+    /// public key hash
+    PKH,
+    /// script hash
+    SH,
+    /// witness public key hash
+    WPKH,
+    /// witness script hash
+    WSH,
+    /// Taproot (witness v1)
+    TR,
+}
+
+/// This means we track all supported address types
+const ALL_OUTPUTS: u8 = 0x1f; // 00011111
+/// The `M` parameter from BIP-158
+const FILTER_M: u64 = 784931;
+/// The `P` parameter from BIP-158
+const FILTER_P: u8 = 19;
+
+impl BitAnd<u8> for OutputTypes {
+    type Output = bool;
+    fn bitand(self, rhs: u8) -> Self::Output {
+        let is_set = match self {
+            Self::PKH => rhs & 1,
+            Self::SH => rhs & 2,
+            Self::WPKH => rhs & 4,
+            Self::WSH => rhs & 8,
+            Self::TR => rhs & 16,
+        };
+        is_set != 0
+    }
+}
+
+/// A backend capable of holding multiple BIP-158 compact block filters
+pub struct BlockFilterBackend {
+    /// Which output types we index
+    whitelisted_outputs: u8,
+    /// Whether we index inputs (if true, all outpoints being spent are added to the filter)
+    index_inputs: bool,
+    /// Whether we index txids. If true, all txids are added to the filter. This is useful
+    /// for using floresta as a lightning node backend
+    index_txids: bool,
+    /// A persistence engine for our filters
+    storage: Box<dyn BlockFilterStore>,
+    /// The first half of the siphash key
+    k0: u64,
+    /// The second half of the siphash key
+    k1: u64,
+    /// A block hash used to salt the siphash; we use a random hash instead of
+    /// an actual block hash
+    key: [u8; 32],
+}
+
+unsafe impl Sync for BlockFilterBackend {}
+unsafe impl Send for BlockFilterBackend {}
+
+/// Keeps track of an unfinished BIP-158 block filter. We use this to add new elements
+/// to the filter, until there's nothing more to add
+struct FilterBuilder<'a> {
+    writer: GCSFilterWriter<'a>,
+}
+
+impl<'a> FilterBuilder<'a> {
+    #[allow(non_snake_case)]
+    pub fn new(writer: &'a mut dyn Write, M: u64, P: u8, k0: u64, k1: u64) -> FilterBuilder {
+        let writer = GCSFilterWriter::new(writer, k0, k1, M, P);
+        FilterBuilder { writer }
+    }
+    /// Adds a new slice to the filter
+    pub fn put(&mut self, el: &[u8]) {
+        self.writer.add_element(el);
+    }
+    /// Marks a filter as finished, writing the filter content into the internal buffer
+    ///
+    /// This method should be called only once, and no more elements should be added after
+    /// calling it.
+    pub fn finish(&mut self) -> Result<usize, bip158::Error> {
+        Ok(self.writer.finish()?)
+    }
+}
+
+impl BlockFilterBackend {
+    /// Creates a new [BlockFilterBackend].
+    ///
+    /// Storage is a database used for storing and retrieving filters; it may be anything
+    /// that implements [BlockFilterStore].
+    /// Key is a 256-bit element that'll be used as the siphash key inside our filters.
+    /// BIP-158 defines this key as the hash of each block, since they need a public parameter,
+    /// and a fixed one may be exploited by bad actors trying to trick a wallet into having many
+    /// false positives. Since we don't need a public parameter here, a once-initialized random
+    /// slice is enough.
+    pub fn new(storage: Box<dyn BlockFilterStore>, key: [u8; 32]) -> BlockFilterBackend {
+        let mut k0 = [0_u8; 8];
+        let mut k1 = [0_u8; 8];
+
+        k0.copy_from_slice(&key[0..8]);
+        k1.copy_from_slice(&key[8..16]);
+
+        BlockFilterBackend {
+            whitelisted_outputs: ALL_OUTPUTS,
+            index_inputs: true,
+            index_txids: true,
+            storage,
+            key,
+            k0: u64::from_le_bytes(k0),
+            k1: u64::from_le_bytes(k1),
+        }
+    }
+    pub fn get_filter(&self, block_height: u32) -> Option<bip158::BlockFilter> {
+        self.storage.get_filter(block_height as u64)
+    }
+    /// Builds and stores the filter for a given block height
+    pub fn filter_block(&self, block: &Block, block_height: u64) -> Result<(), bip158::Error> {
+        let mut writer = Vec::new();
+        let mut filter = FilterBuilder::new(&mut writer, FILTER_M, FILTER_P, self.k0, self.k1);
+        if self.index_inputs {
+            self.write_inputs(&block.txdata, &mut filter);
+        }
+        if self.index_txids {
+            self.write_txids(&block.txdata, &mut filter);
+        }
+
+        self.write_outputs(&block.txdata, &mut filter);
+        filter.finish()?;
+
+        let filter = BlockFilter::new(writer.as_slice());
+        self.storage.put_filter(block_height, filter);
+
+        Ok(())
+    }
+    /// Matches a set of queries against our current set of filters
+    ///
+    /// This function runs over each filter inside the range `[start, end]` and checks
+    /// whether at least one query matches. It returns a vector with the heights of the
+    /// blocks that matched; you should download those blocks and check whether there's
+    /// anything interesting in them.
+    pub fn match_any(&self, start: u64, end: u64, query: &[QueryType]) -> Option<Vec<u64>> {
+        let mut blocks = Vec::new();
+
+        for i in start..=end {
+            // rebuild the query iterator on every round, since match_any consumes it
+            let mut values = query.iter().map(|filter| filter.as_slice());
+            if self
+                .storage
+                .get_filter(i)?
+                .match_any(&BlockHash::from_inner(self.key), &mut values)
+                .ok()?
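+            // a filter missing from storage short-circuits the whole query to None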
+            {
+                blocks.push(i);
+            }
+        }
+
+        Some(blocks)
+    }
+    fn write_txids(&self, txs: &Vec<Transaction>, filter: &mut FilterBuilder) {
+        for tx in txs {
+            filter.put(tx.txid().as_inner());
+        }
+    }
+    fn write_inputs(&self, txs: &Vec<Transaction>, filter: &mut FilterBuilder) {
+        for tx in txs {
+            tx.input.iter().for_each(|input| {
+                let mut ser_input = [0; 36];
+                ser_input[0..32].clone_from_slice(&input.previous_output.txid);
+                ser_input[32..].clone_from_slice(&input.previous_output.vout.to_be_bytes());
+                filter.put(&ser_input);
+            });
+        }
+    }
+    fn write_tx_outs(&self, tx: &Transaction, filter: &mut FilterBuilder) {
+        for output in tx.output.iter() {
+            let hash = floresta_common::get_spk_hash(&output.script_pubkey);
+            if OutputTypes::PKH & self.whitelisted_outputs && output.script_pubkey.is_p2pkh() {
+                filter.put(hash.as_inner());
+            }
+            if OutputTypes::SH & self.whitelisted_outputs && output.script_pubkey.is_p2sh() {
+                filter.put(hash.as_inner());
+            }
+            if OutputTypes::WPKH & self.whitelisted_outputs && output.script_pubkey.is_v0_p2wpkh() {
+                filter.put(hash.as_inner());
+            }
+            if OutputTypes::WSH & self.whitelisted_outputs && output.script_pubkey.is_v0_p2wsh() {
+                filter.put(hash.as_inner());
+            }
+            if OutputTypes::TR & self.whitelisted_outputs && output.script_pubkey.is_v1_p2tr() {
+                filter.put(hash.as_inner());
+            }
+        }
+    }
+    fn write_outputs(&self, txs: &Vec<Transaction>, filter: &mut FilterBuilder) {
+        for tx in txs {
+            self.write_tx_outs(tx, filter);
+        }
+    }
+}
+
+impl BlockConsumer for BlockFilterBackend {
+    fn consume_block(&self, block: &Block, height: u32) {
+        if let Err(e) = self.filter_block(block, height as u64) {
+            error!("while creating filter for block {height}: {e}");
+        }
+    }
+}
+
+/// Builds a block filter backend through a convenient builder interface.
+///
+/// The only required field is a database to save the filters.
+/// Fields have the same meaning as in the backend itself.
+#[derive(Default)]
+pub struct FilterBackendBuilder {
+    /// Where we should store our filters.
+    storage: Option<Box<dyn BlockFilterStore>>,
+    /// Which types of outputs we should store.
+    whitelisted_outputs: u8,
+    /// Whether we should add outpoints being spent to the filter
+    index_input: bool,
+    /// Whether we should add the id of each transaction in this block
+    index_txids: bool,
+    /// The siphash key we should use
+    key: [u8; 32],
+}
+
+impl FilterBackendBuilder {
+    /// Which storage we should use for our filters
+    ///
+    /// This is the only required field.
+    pub fn use_storage(mut self, s: Box<dyn BlockFilterStore>) -> Self {
+        self.storage.replace(s);
+        self
+    }
+    /// Adds one address type to the set of outputs we follow
+    ///
+    /// You can use any number of types, including all of them, but each new type
+    /// makes the filter larger.
+    pub fn add_address_type(mut self, out_type: OutputTypes) -> Self {
+        match out_type {
+            OutputTypes::PKH => self.whitelisted_outputs |= 1,
+            OutputTypes::SH => self.whitelisted_outputs |= 2,
+            OutputTypes::WPKH => self.whitelisted_outputs |= 4,
+            OutputTypes::WSH => self.whitelisted_outputs |= 8,
+            OutputTypes::TR => self.whitelisted_outputs |= 16,
+        };
+        self
+    }
+    /// Whether we should index txids
+    ///
+    /// You can use this index to fetch arbitrary outpoints, like a lightning
+    /// channel. In general, this should be used in combination with index_input,
+    /// or you can't be sure whether the outpoint is spent or not.
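+    ///
+    /// For example, a lightning node that knows its channel funding txids can
+    /// look them up here after IBD instead of rescanning whole blocks.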
+    pub fn index_txids(mut self, index: bool) -> Self {
+        self.index_txids = index;
+        self
+    }
+    /// Whether we index inputs
+    ///
+    /// If true, we add each prevout, but not the previous spk or scriptSig
+    pub fn index_input(mut self, index: bool) -> Self {
+        self.index_input = index;
+        self
+    }
+    /// A key used by siphash
+    ///
+    /// BIP-158 uses the block hash, but we use a fixed key here, so we don't
+    /// need to access chain data on query
+    pub fn key_hash(mut self, key: [u8; 32]) -> Self {
+        self.key = key;
+        self
+    }
+    /// Builds the final backend
+    ///
+    /// # Panics
+    /// Panics if we don't have a storage
+    pub fn build(self) -> BlockFilterBackend {
+        let mut k0 = [0_u8; 8];
+        let mut k1 = [0_u8; 8];
+
+        k0.copy_from_slice(&self.key[0..8]);
+        k1.copy_from_slice(&self.key[8..16]);
+
+        BlockFilterBackend {
+            key: self.key,
+            whitelisted_outputs: self.whitelisted_outputs,
+            index_inputs: self.index_input,
+            index_txids: self.index_txids,
+            storage: self.storage.expect("No filter storage specified"),
+            k0: u64::from_le_bytes(k0),
+            k1: u64::from_le_bytes(k1),
+        }
+    }
+}
+
+/// A serialized outpoint that can be queried against our filter
+pub struct QueriableOutpoint(pub(crate) [u8; 36]);
+
+impl From<OutPoint> for QueriableOutpoint {
+    fn from(value: OutPoint) -> Self {
+        let mut ser_input = [0; 36];
+        ser_input[0..32].clone_from_slice(value.txid.as_inner());
+        ser_input[32..].clone_from_slice(&value.vout.to_be_bytes());
+        QueriableOutpoint(ser_input)
+    }
+}
+
+/// The type of value we are looking for in a filter.
+pub enum QueryType {
+    /// We are looking for a specific outpoint being spent
+    Input(QueriableOutpoint),
+    /// We're looking for a script hash receiving money (not spending)
+    ScriptHash([u8; 32]),
+    /// A transaction with a specific id
+    Txid(Txid),
+}
+
+impl QueryType {
+    pub(crate) fn as_slice(&self) -> &[u8] {
+        match self {
+            QueryType::Txid(txid) => txid.as_inner().as_slice(),
+            QueryType::Input(outpoint) => &outpoint.0,
+            QueryType::ScriptHash(script) => script,
+        }
+    }
+}
+
+#[doc(hidden)]
+#[cfg(test)]
+use std::cell::RefCell;
+
+/// A volatile block filter store used for tests
+#[cfg(test)]
+#[doc(hidden)]
+#[derive(Debug, Default)]
+pub struct MemoryBlockFilterStorage {
+    filters: RefCell<Vec<bip158::BlockFilter>>,
+}
+
+#[doc(hidden)]
+#[cfg(test)]
+impl BlockFilterStore for MemoryBlockFilterStorage {
+    fn get_filter(&self, block_height: u64) -> Option<bip158::BlockFilter> {
+        self.filters.borrow().get(block_height as usize).cloned()
+    }
+    fn put_filter(&self, _block_height: u64, block_filter: bip158::BlockFilter) {
+        self.filters.borrow_mut().push(block_filter);
+    }
+}
+
+pub struct KvFiltersStore<'a> {
+    _store: kv::Store,
+    bucket: kv::Bucket<'a, Integer, Vec<u8>>,
+}
+
+impl<'a> KvFiltersStore<'a> {
+    pub fn new(path: String) -> KvFiltersStore<'a> {
+        let _store = kv::Store::new(kv::Config {
+            temporary: false,
+            path: path.into(),
+            segment_size: None,
+            flush_every_ms: None,
+            cache_capacity: None,
+            use_compression: false,
+        })
+        .expect("could not open the filters database");
+        let bucket = _store
+            .bucket(Some("cfilters"))
+            .expect("could not open the filters bucket");
+        KvFiltersStore { _store, bucket }
+    }
+}
+
+impl BlockFilterStore for KvFiltersStore<'_> {
+    fn get_filter(&self, block_height: u64) -> Option<bip158::BlockFilter> {
+        self.bucket
+            .get(&block_height.into())
+            .ok()?
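+            // a kv read error is treated the same way as a missing filter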
+            .map(|filter| bip158::BlockFilter::new(&filter))
+    }
+    fn put_filter(&self, block_height: u64, block_filter: bip158::BlockFilter) {
+        let inner = block_filter.content;
+        let _ = self.bucket.set(&block_height.into(), &inner);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use bitcoin::{
+        consensus::deserialize,
+        hashes::{hex::FromHex, Hash},
+        util::bip158,
+        Block, BlockHash, OutPoint, Script, Txid,
+    };
+
+    use crate::QueryType;
+
+    use super::{BlockFilterBackend, FilterBuilder, MemoryBlockFilterStorage};
+    #[test]
+    fn test_filter() {
+        let mut writer = Vec::new();
+        let value = [10_u8; 42].as_slice();
+        let mut filters = FilterBuilder::new(&mut writer, 784931, 19, 0, 0);
+
+        filters.put(value);
+        filters.finish().unwrap();
+
+        let filter = bip158::BlockFilter::new(&writer);
+        let res = filter
+            .match_any(
+                &BlockHash::from_inner([0; 32]),
+                &mut [value].iter().copied(),
+            )
+            .unwrap();
+        assert!(res);
+
+        let value = [11_u8; 42].as_slice();
+        let res = filter
+            .match_any(
+                &BlockHash::from_inner([0; 32]),
+                &mut [value].iter().copied(),
+            )
+            .unwrap();
+        assert!(!res);
+    }
+
+    #[test]
+    fn test_filter_block() {
+        let block = Vec::from_hex("000000204cc1f0cc52950600bc94fc482713c1f10174c85f20290168832552c1b900000039d96a9dbab5998c344c092583552e6ed5e2884c8a52a95b482f59413f39e8d55cf03f654454011e7d45dc0003020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff0f030b8d020a2f7369676e65743a332ffeffffff025503062a010000002251207099e4b23427fc40ba4777bbf52cfd0b7444d69a3e21ef281270723f54c0c14b0000000000000000776a24aa21a9edfb6e452f1cccc00da281067a88bed1d9ec6e02361d424dfd46dc46972b6c22284c4fecc7daa249004730440220435eaca92f93497d07825fe4587183272084e257c61cccab15a1dbc37a946699022000c18b0cba4f96ab5f741478da264b361ba95523a486126657b6f9e1045126a20100012000000000000000000000000000000000000000000000000000000000000000000000000002000000000101125b54a68d9a39561dd268e020e3f26dd900ded46a3dfdbae7ea95797022a0a80100000000fdffffff027082030000000000160014fabea557d8541249533fe281aac45c37b2dbf3422dab090000000000220020dfcc1d4c1fef729015420efed6202f7d1fd1a74813f7875fa235413bd36f73d20800483045022100dce01d603f84de4faeacd7bcdf1bc93fdc3cb7a28eff0544d88c1b64fa07f8d602206aefc00419126035222e5138b07b40d77c71feba78c26d4febd51f10f814dff101483045022100bf7c25066c5c4abb2d2733cf04c700b175d5e92d09e5931313adaef05e062758022001cf4bb4437a0a20b6ff9277c29a2b28259c1a6143fd4938920cae7a8a3658350147304402206f3d38a3f4db843a41f22583cfd344b935156fa0e82afe7212f8861fc04d979c0220726324a84cb6906df296416a00032e0c8238df0f4cf8e7204385f2cdd359056301483045022100fb3a30dc1103ba0b865d96bd9097a264c0e18aeffb2a4ee0875c0edea356cb6c022070e47aac3b0ed218a9afbc7f70d42935084a80f47af44941a6712314b3391b50010000fdea016376a914a2c0c43c1cbc4c3d1f5d197000414de73e76456388ac6b76a914bf51072415ece1527ac34d955924be96d07cbde788ac6c936b76a91425fb796fb6194fe09ab2b06344bace2b9320d45c88ac6c936b76a914a6cb8313859649a1b83dc9b77c3afe7671affe0b88ac6c93518803ffff00b2676376a914c209f7a523ffa69a7226c5a5167b2b37ddec58eb88ac6b76a914d61ff559b874d8833b75b7d6939b0df3e68b583d88ac6c936b76a91444b6c96c823cb70c130bd12ee39b5504d6be84cc88ac6c936b76a914f086890b09e618632f45a3511c5983dafb2e714a88ac6c93528854b267542102a31ccbcc7262f1cf9ee85bf04723d28d5cdebdaa98fa0b972fd5210610c6aaba2102a9b90ff89fa8a411c7e8f1af247ba6e32c6d4d4e682d643eb30a02841dae2a962103b8c2b5e5db7603cb0b3a5280941988588af0f71362321442f228fb2b4a38cbd921022da633f9e545cc1029055cafa583b4cd99b7a4bcfb1ea2a388fbefb11e0c8b9054ae736476a91443948748fc4e5a8300ea3eee7feafad9cdd4a3c588ac6b76a91447f5b0296d7b9dd38b64d45b3ef88490847d17ad88ac6c936b76a914f2b7716f6e11b214457c38c501b7a0f7706e06aa88ac6c936b76a91443587cfdb6e2bace0de537152e1ffa8e8d77d91a88ac6c93538852b268686800000000020000000001013b12458f060f8df025c0ab068695384bcc6c846aefed31b0d203122ee09c71100100000000feffffff02ef8b64d04a0600001600147f5eeef1c2a1c5c81ee1545876c8f8fc93d0c1e6a086010000000000160014a9c03ef086c4d0c9fe2d093a4e1f28dff63c0f700247304402205e1935e762a89e99b9185e540ad0a1800d26a9643599d28c4e8619432f04abc802200a3fedf0746f127884b74087561d3190c408afda770bd5aeb3928bb7a4d61832012103b5746579fa0f79ed83dfd6a2142d957156a89690fcb76ed90debdf287b9bc94bb98c0200").unwrap();
+
+        let block: Block = deserialize(&block).unwrap();
+        let storage = MemoryBlockFilterStorage::default();
+        let backend = BlockFilterBackend::new(Box::new(storage), block.block_hash().into_inner());
+
+        backend.filter_block(&block, 0).unwrap();
+
+        // One txid from this block
+        let txid =
+            Txid::from_hex("7e0ce903920704a79beb99beb05c6d5a01852907dccc0973f96eb295d1ad0557")
+                .unwrap();
+        let txid = QueryType::Txid(txid);
+
+        // One output being spent in this block
+        let prev_txid =
+            Txid::from_hex("a8a022707995eae7bafd3d6ad4de00d96df2e320e068d21d56399a8da6545b12")
+                .unwrap();
+        let prev_vout: u32 = 1;
+
+        let prevout = QueryType::Input(
+            OutPoint {
+                txid: prev_txid,
+                vout: prev_vout,
+            }
+            .into(),
+        );
+        // One spk from this block
+        let spck = Script::from_hex("0014fabea557d8541249533fe281aac45c37b2dbf342").unwrap();
+        let spck = QueryType::ScriptHash(floresta_common::get_spk_hash(&spck).into_inner());
+
+        let expected = Some(vec![0]);
+
+        assert_eq!(backend.match_any(0, 0, &[txid]), expected);
+        assert_eq!(backend.match_any(0, 0, &[prevout]), expected);
+        assert_eq!(backend.match_any(0, 0, &[spck]), expected);
+    }
+}
diff --git a/florestad/Cargo.toml b/florestad/Cargo.toml
index 04ddda5e..a7553888 100644
--- a/florestad/Cargo.toml
+++ b/florestad/Cargo.toml
@@ -27,8 +27,9 @@ floresta-common = { path = "../crates/floresta-common" }
 floresta-electrum = { path = "../crates/floresta-electrum" }
 floresta-watch-only = { path = "../crates/floresta-watch-only" }
 floresta-wire = { path = "../crates/floresta-wire" }
-anyhow = "1.0.40"
+floresta-compact-filters = { path = "../crates/floresta-compact-filters", optional = true }
+anyhow = "1.0.40"
 jsonrpc-http-server = { version = "18.0.0", optional = true }
 jsonrpc-derive = { version = "18.0.0", optional = true }
 jsonrpc-core = { version = "18.0.0", optional = true }
@@ -41,6 +42,7 @@ zmq = { version = "0.10.0", optional = true }
 pretty_assertions = "1"
 
 [features]
+compact-filters = ["floresta-compact-filters"]
 zmq-server = ["zmq"]
 experimental-p2p = []
 json-rpc = [
@@ -49,4 +51,4 @@ json-rpc = [
     "jsonrpc-core",
     "jsonrpc-core-client",
 ]
-default = ["experimental-p2p"]
+default = ["experimental-p2p", "compact-filters", "json-rpc"]
diff --git a/florestad/src/cli.rs b/florestad/src/cli.rs
index 5e1154d5..dee487f0 100644
--- a/florestad/src/cli.rs
+++ b/florestad/src/cli.rs
@@ -79,6 +79,17 @@ pub enum Commands {
         /// Where should we store data
         #[arg(long)]
         data_dir: Option<String>,
+        /// Whether to build Compact Block Filters
+        ///
+        /// These filters let you query for chain data after IBD, like doing a wallet
+        /// rescan, finding a UTXO, or finding specific txids.
+        /// They will use more disk space.
+        #[arg(long = "cfilters", short = 'c', default_value_t = false)]
+        cfilters: bool,
+        /// Which types of filters we should build. Keep in mind that each filter
+        /// type you add will use more disk space.
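+        ///
+        /// May be passed multiple times, once for each filter type.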
+ #[arg(long = "cfilter-types")] + cfilter_types: Option>, #[arg(long, short, default_value = None)] /// The url of a proxy we should open p2p connections through (e.g. 127.0.0.1:9050) proxy: Option, @@ -96,3 +107,14 @@ pub enum Commands { zmq_address: Option, }, } + +#[derive(Clone, Debug, ValueEnum)] +pub enum FilterType { + Inputs, + TxId, + SpkPKH, + SpkPSH, + SpkWPKH, + SpkWSH, + SpkTR, +} diff --git a/florestad/src/json_rpc/res.rs b/florestad/src/json_rpc/res.rs index 9573864e..a000ee5b 100644 --- a/florestad/src/json_rpc/res.rs +++ b/florestad/src/json_rpc/res.rs @@ -93,7 +93,7 @@ pub enum Error { TxNotFound, InvalidDescriptor, BlockNotFound, - ChainError, + Chain, InvalidPort, InvalidAddress, } @@ -103,7 +103,7 @@ impl Display for Error { Error::TxNotFound => "Transaction not found", Error::InvalidDescriptor => "Invalid descriptor", Error::BlockNotFound => "Block not found", - Error::ChainError => "Chain error", + Error::Chain => "Chain error", Error::InvalidPort => "Invalid port", Error::InvalidAddress => "Invalid address", }; @@ -114,7 +114,7 @@ impl From for i64 { fn from(val: Error) -> Self { match val { Error::BlockNotFound => 1, - Error::ChainError => 2, + Error::Chain => 2, Error::TxNotFound => 3, Error::InvalidDescriptor => 4, Error::InvalidPort => 5, diff --git a/florestad/src/json_rpc/server.rs b/florestad/src/json_rpc/server.rs index 4d0f8bf5..32e993a9 100644 --- a/florestad/src/json_rpc/server.rs +++ b/florestad/src/json_rpc/server.rs @@ -1,3 +1,7 @@ +use super::res::{ + BlockJson, Error, GetBlockchainInfoRes, RawTxJson, ScriptPubKeyJson, ScriptSigJson, TxInJson, + TxOutJson, +}; use async_std::sync::RwLock; use bitcoin::{ consensus::{deserialize, serialize}, @@ -11,6 +15,7 @@ use floresta_chain::{ pruned_utreexo::{BlockchainInterface, UpdatableChainstate}, ChainState, KvChainStore, }; +use floresta_compact_filters::BlockFilterBackend; use floresta_watch_only::{kv_database::KvDatabase, AddressCache, CachedTransaction}; use floresta_wire::node_interface::{NodeInterface, NodeMethods, PeerInfo}; use futures::executor::block_on; @@ -20,13 +25,10 @@ use jsonrpc_http_server::ServerBuilder; use serde_json::{json, Value}; use std::sync::Arc; -use super::res::{ - BlockJson, Error, GetBlockchainInfoRes, RawTxJson, ScriptPubKeyJson, ScriptSigJson, TxInJson, - TxOutJson, -}; - #[rpc] pub trait Rpc { + #[rpc(name = "getblockfilter")] + fn get_block_filter(&self, heigth: u32) -> Result; #[rpc(name = "getblockchaininfo")] fn get_blockchain_info(&self) -> Result; #[rpc(name = "getblockhash")] @@ -62,6 +64,7 @@ pub trait Rpc { } pub struct RpcImpl { + block_filter_storage: Option>, network: Network, chain: Arc>, wallet: Arc>>, @@ -73,6 +76,16 @@ impl Rpc for RpcImpl { Ok(self.chain.get_best_block().unwrap().0) } + fn get_block_filter(&self, heigth: u32) -> Result { + if let Some(ref cfilters) = self.block_filter_storage { + return Ok(cfilters + .get_filter(heigth) + .ok_or(Error::BlockNotFound)? 
+ .content + .to_hex()); + } + Err(jsonrpc_core::Error { code: 10.into(), message: String::from("You don't have block filters enabled in your node, change this by restarting with -cfilters"), data: None }) + } fn add_node(&self, node: String) -> Result { let node = node.split(':').collect::>(); let (ip, port) = if node.len() == 2 { @@ -173,7 +186,7 @@ impl Rpc for RpcImpl { fn rescan(&self, rescan: u32) -> Result { let result = self.chain.rescan(rescan); if result.is_err() { - return Err(Error::ChainError.into()); + return Err(Error::Chain.into()); } Ok(true) } @@ -192,7 +205,7 @@ impl Rpc for RpcImpl { if self.chain.broadcast(&tx).is_ok() { return Ok(tx.txid()); } - Err(Error::ChainError.into()) + Err(Error::Chain.into()) } fn get_tx_out(&self, tx_id: Txid, outpoint: usize) -> Result { @@ -214,14 +227,13 @@ impl Rpc for RpcImpl { let verbosity = verbosity.unwrap_or(1); if let Ok(Some(block)) = self.node.get_block(hash) { if verbosity == 1 { - let tip = self.chain.get_height().map_err(|_| Error::ChainError)?; + let tip = self.chain.get_height().map_err(|_| Error::Chain)?; let height = self .chain .get_block_height(&hash) - .map_err(|_| Error::ChainError)? + .map_err(|_| Error::Chain)? .unwrap(); let mut last_block_times: Vec<_> = ((height - 11)..height) - .into_iter() .map(|h| { self.chain .get_block_header(&self.chain.get_block_hash(h).unwrap()) @@ -239,7 +251,7 @@ impl Rpc for RpcImpl { confirmations: (tip - height) + 1, difficulty: block.header.difficulty(self.network), hash: block.header.block_hash().to_string(), - height: height, + height, merkleroot: block.header.merkle_root.to_string(), nonce: block.header.nonce, previousblockhash: block.header.prev_blockhash.to_string(), @@ -274,7 +286,7 @@ impl Rpc for RpcImpl { let block_hash = self .chain .get_block_hash(block_height) - .map_err(|_| Error::ChainError)?; + .map_err(|_| Error::Chain)?; let block = self .node .get_block(block_hash) @@ -347,8 +359,7 @@ impl RpcImpl { .map(|a| a.to_string()) .unwrap(), type_: Self::get_script_type(output.script_pubkey) - .or(Some("nonstandard")) - .unwrap() + .unwrap_or("nonstandard") .to_string(), }, } @@ -419,6 +430,7 @@ impl RpcImpl { node: Arc, kill_signal: Arc>, network: Network, + block_filter_storage: Option>, ) -> jsonrpc_http_server::Server { let mut io = jsonrpc_core::IoHandler::new(); let rpc_impl = RpcImpl { @@ -427,6 +439,7 @@ impl RpcImpl { wallet, node, kill_signal, + block_filter_storage, }; io.extend_with(rpc_impl.to_delegate()); diff --git a/florestad/src/main.rs b/florestad/src/main.rs index d6874bc6..b576eb19 100644 --- a/florestad/src/main.rs +++ b/florestad/src/main.rs @@ -31,21 +31,21 @@ use async_std::{ sync::RwLock, task::{self, block_on}, }; -use std::{path::PathBuf, sync::Arc}; - use bitcoin::{BlockHash, Network}; use clap::Parser; -use cli::{Cli, Commands}; +use cli::{Cli, Commands, FilterType}; use config_file::ConfigFile; use floresta_chain::{ pruned_utreexo::BlockchainInterface, BlockchainError, ChainState, KvChainStore, }; use floresta_common::constants::DIR_NAME; +use floresta_compact_filters::{FilterBackendBuilder, KvFiltersStore}; use floresta_electrum::electrum_protocol::{accept_loop, ElectrumServer}; use floresta_watch_only::{kv_database::KvDatabase, AddressCache, AddressCacheDatabase}; use floresta_wire::{mempool::Mempool, node::UtreexoNode}; use log::{debug, error, info}; use pretty_env_logger::env_logger::{Env, TimestampPrecision}; +use std::{path::PathBuf, sync::Arc}; #[cfg(feature = "zmq-server")] use zmq::ZMQServer; @@ -66,6 +66,8 @@ struct Ctx { 
     config_file: Option<String>,
     proxy: Option<String>,
     network: cli::Network,
+    cfilters: bool,
+    cfilter_types: Vec<FilterType>,
     #[cfg(feature = "zmq-server")]
     zmq_address: Option<String>,
 }
@@ -88,6 +90,8 @@ fn main() {
             rescan,
             proxy,
             zmq_address: _zmq_address,
+            cfilters,
+            cfilter_types,
         }) => {
             let ctx = Ctx {
                 data_dir,
@@ -98,6 +102,8 @@ fn main() {
                 proxy,
                 config_file: params.config_file,
                 network: params.network,
+                cfilters,
+                cfilter_types: cfilter_types.unwrap_or_default(),
                 #[cfg(feature = "zmq-server")]
                 zmq_address: _zmq_address,
             };
@@ -178,8 +184,54 @@ fn run_with_ctx(ctx: Ctx) {
             .rescan(height)
             .expect("Fail while setting rescan");
     }
-    debug!("Done loading database");
 
+    let cfilters = if ctx.cfilters {
+        // Block Filters
+        let key = if let Ok(file) = std::fs::read(format!("{data_dir}/cfilters_key")) {
+            let mut key = [0_u8; 32];
+            key.copy_from_slice(&file[0..32]);
+            key
+        } else {
+            let key = rand::random::<[u8; 32]>();
+            std::fs::write(format!("{data_dir}/cfilters_key"), key)
+                .expect("couldn't write to datadir");
+            key
+        };
+        let filters_dir = format!("{data_dir}/cfilters");
+        let cfilters_db = KvFiltersStore::new(filters_dir);
+        let mut filters = FilterBackendBuilder::default()
+            .key_hash(key)
+            .use_storage(Box::new(cfilters_db));
+        for filter_type in ctx.cfilter_types {
+            filters = match filter_type {
+                FilterType::TxId => filters.index_txids(true),
+                FilterType::Inputs => filters.index_input(true),
+
+                FilterType::SpkPKH => {
+                    filters.add_address_type(floresta_compact_filters::OutputTypes::PKH)
+                }
+                FilterType::SpkPSH => {
+                    filters.add_address_type(floresta_compact_filters::OutputTypes::SH)
+                }
+                FilterType::SpkWPKH => {
+                    filters.add_address_type(floresta_compact_filters::OutputTypes::WPKH)
+                }
+                FilterType::SpkWSH => {
+                    filters.add_address_type(floresta_compact_filters::OutputTypes::WSH)
+                }
+                FilterType::SpkTR => {
+                    filters.add_address_type(floresta_compact_filters::OutputTypes::TR)
+                }
+            };
+        }
+        let cfilters = Arc::new(filters.build());
+        blockchain_state.subscribe(cfilters.clone());
+        Some(cfilters)
+    } else {
+        None
+    };
+
+    // Chain Provider (p2p)
     let chain_provider = UtreexoNode::new(
         blockchain_state.clone(),
         Arc::new(async_std::sync::RwLock::new(Mempool::new())),
@@ -188,6 +240,7 @@ fn run_with_ctx(ctx: Ctx) {
         ctx.proxy.map(|x| x.parse().expect("Invalid proxy address")),
     );
 
+    // ZMQ
     #[cfg(feature = "zmq-server")]
     {
         info!("Starting ZMQ server");
@@ -203,7 +256,8 @@ fn run_with_ctx(ctx: Ctx) {
     info!("Starting server");
     let wallet = Arc::new(RwLock::new(wallet));
 
-    // Setup the json-rpc if needed
+
+    // JSON-RPC
     #[cfg(feature = "json-rpc")]
     let _server = json_rpc::server::RpcImpl::create(
         blockchain_state.clone(),
@@ -211,10 +265,11 @@ fn run_with_ctx(ctx: Ctx) {
         &get_net(&ctx.network),
         chain_provider.get_handle(),
         kill_signal.clone(),
-        get_net(&ctx.network).into(),
+        get_net(&ctx.network),
+        cfilters,
     );
-    // Create a new electrum server, we need to block_on because `ElectrumServer::new` is `async`
-    // but our main isn't, so we can't `.await` on it.
+
+    // Electrum
     let electrum_server = block_on(ElectrumServer::new(
         "0.0.0.0:50001",
         wallet,
@@ -222,6 +277,9 @@ fn run_with_ctx(ctx: Ctx) {
     ))
     .expect("Could not create an Electrum Server");
 
+    // Spawn all services
+
+    // Electrum accept loop
     task::spawn(accept_loop(
         electrum_server
             .listener
             .expect("Listener can't be none by this far"),
         electrum_server.notify_tx.clone(),
     ));
+    // Electrum main loop
     task::spawn(electrum_server.main_loop());
     info!("Server running on: 0.0.0.0:50001");
+
     let _kill_signal = kill_signal.clone();
     ctrlc::set_handler(move || {
         if *block_on(_kill_signal.write()) {
@@ -239,6 +299,8 @@ fn run_with_ctx(ctx: Ctx) {
         *block_on(_kill_signal.write()) = true;
     })
     .expect("Error setting Ctrl-C handler");
 
+    // Chain provider
+    // If chain provider dies, we die too
     task::block_on(chain_provider.run(&kill_signal));
 }
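For integrators, the only contract a persistence engine has to satisfy is the `BlockFilterStore` trait added above; the diff ships a test-only `MemoryBlockFilterStorage` and the `kv`-backed `KvFiltersStore`, but any backend works. A minimal sketch of a custom store (the `HashMapFilterStore` name is hypothetical, not part of this diff); a `Mutex` gives the interior mutability the `&self` methods need, since the backend is shared across threads:

```rust
use std::{collections::HashMap, sync::Mutex};

use bitcoin::util::bip158::BlockFilter;
use floresta_compact_filters::BlockFilterStore;

/// Hypothetical in-memory store, keyed by block height.
#[derive(Default)]
pub struct HashMapFilterStore {
    filters: Mutex<HashMap<u64, BlockFilter>>,
}

impl BlockFilterStore for HashMapFilterStore {
    fn get_filter(&self, block_height: u64) -> Option<BlockFilter> {
        // clone the filter out, so the lock is held as briefly as possible
        self.filters.lock().unwrap().get(&block_height).cloned()
    }

    fn put_filter(&self, block_height: u64, block_filter: BlockFilter) {
        self.filters
            .lock()
            .unwrap()
            .insert(block_height, block_filter);
    }
}
```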