diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index c44f832d27..fe873e4530 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1870,12 +1870,8 @@ struct controller_impl {
          fork_db_reset_root_to_chain_head();
       } else if( !except_ptr && !check_shutdown() && !irreversible_mode() ) {
          if (auto fork_db_head = fork_db.head()) {
-            // applies all blocks up to fork_db head from fork_db, shouldn't return incomplete, but if it does loop until complete
-            ilog("applying ${n} fork database blocks from ${ch} to ${fh}",
+            ilog("fork database contains ${n} blocks after head from ${ch} to ${fh}",
                  ("n", fork_db_head->block_num() - chain_head.block_num())("ch", chain_head.block_num())("fh", fork_db_head->block_num()));
-            while (maybe_apply_blocks(forked_callback_t{}, trx_meta_cache_lookup{}) == controller::apply_blocks_result::incomplete)
-               ;
-            ilog( "reversible blocks replayed to ${bn} : ${id}", ("bn", fork_db_head->block_num())("id", fork_db_head->id()) );
          }
       }
@@ -2042,42 +2038,6 @@ struct controller_impl {
       // Furthermore, fork_db.root()->block_num() <= lib_num.
       // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1.
 
-      auto finish_init = [&](auto& fork_db) {
-         if( read_mode != db_read_mode::IRREVERSIBLE ) {
-            auto pending_head = fork_db.head();
-            if ( pending_head && pending_head->id() != chain_head.id() ) {
-               // chain_head equal to root means that read_mode was changed from irreversible mode to head/speculative
-               bool chain_head_is_root = chain_head.id() == fork_db.root()->id();
-               if (chain_head_is_root) {
-                  ilog( "read_mode has changed from irreversible: applying best branch from fork database" );
-               }
-
-               // See comment below about pause-at-block for why `|| conf.num_configured_p2p_peers > 0`
-               if (chain_head_is_root || conf.num_configured_p2p_peers > 0) {
-                  ilog("applying branch from fork database ending with block: ${id}", ("id", pending_head->id()));
-                  // applies all blocks up to forkdb head from forkdb, shouldn't return incomplete, but if it does loop until complete
-                  while (maybe_apply_blocks(forked_callback_t{}, trx_meta_cache_lookup{}) == controller::apply_blocks_result::incomplete)
-                     ;
-               }
-            }
-         } else {
-            // It is possible that the node was shutdown with blocks to process in the fork database. For example, if
-            // it was syncing and had processed blocks into the fork database but not yet applied them. In general,
-            // it makes sense to process those blocks on startup. However, if the node was shutdown via
-            // terminate-at-block, the current expectation is that the node can be restarted to examine the state at
-            // which it was shutdown. For now, we will only process these blocks if there are peers configured. This
-            // is a bit of a hack for Spring 1.0.0 until we can add a proper pause-at-block (issue #570) which could
-            // be used to explicitly request a node to not process beyond a specified block.
-            if (conf.num_configured_p2p_peers > 0) {
-               ilog("Process blocks out of fork_db if needed");
-               log_irreversible();
-               transition_to_savanna_if_needed();
-            }
-         }
-      };
-
-      fork_db_.apply(finish_init);
-
       // At Leap startup, we want to provide to our local finalizers the correct safety information
       // to use if they don't already have one.
       // If we start at a block prior to the IF transition, that information will be provided when
@@ -4232,11 +4192,11 @@ struct controller_impl {
          assert(!verify_qc_future.valid());
       }
 
-      bool best_head = fork_db.add(bsp, ignore_duplicate_t::yes);
+      fork_db_add_t add_result = fork_db.add(bsp, ignore_duplicate_t::yes);
 
       if constexpr (is_proper_savanna_block)
         vote_processor.notify_new_block(async_aggregation);
 
-      return controller::accepted_block_result{best_head, block_handle{std::move(bsp)}};
+      return controller::accepted_block_result{add_result, block_handle{std::move(bsp)}};
   }
 
   // thread safe, expected to be called from thread other than the main thread
@@ -4440,7 +4400,6 @@ struct controller_impl {
      const auto start_apply_blocks_loop = fc::time_point::now();
 
      for( auto ritr = new_head_branch.rbegin(); ritr != new_head_branch.rend(); ++ritr ) {
-        const auto start_apply_block = fc::time_point::now();
         auto except = std::exception_ptr{};
         const auto& bsp = *ritr;
         try {
@@ -4465,11 +4424,9 @@ struct controller_impl {
            throw;
         } catch (const fc::exception& e) {
            if (e.code() == interrupt_exception::code_value) {
-              if (fc::time_point::now() - start_apply_block < fc::milliseconds(2 * config::block_interval_ms)) {
-                 ilog("interrupt while applying block ${bn} : ${id}", ("bn", bsp->block_num())("id", bsp->id()));
-                 throw; // do not want to remove block from fork_db if not interrupting a long, maybe infinite, block
-              }
-              ilog("interrupt while applying block, removing block ${bn} : ${id}", ("bn", bsp->block_num())("id", bsp->id()));
+              // do not want to remove block from fork_db if interrupted
+              ilog("interrupt while applying block ${bn} : ${id}", ("bn", bsp->block_num())("id", bsp->id()));
+              throw;
            } else {
               elog("exception thrown while applying block ${bn} : ${id}, previous ${p}, error: ${e}",
                    ("bn", bsp->block_num())("id", bsp->id())("p", bsp->previous())("e", e.to_detail_string()));
@@ -4538,7 +4495,7 @@ struct controller_impl {
      return applied_trxs;
   }
 
-  void interrupt_transaction() {
+  void interrupt_apply_block_transaction() {
      // Only interrupt transaction if applying a block. Speculative trxs already have a deadline set so they
      // have limited run time already. This is to allow killing a long-running transaction in a block being validated.
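With the startup-time application removed in the hunks above, reversible blocks left in the fork database are no longer applied inside controller startup; callers now drive this through controller::apply_blocks until it reports complete (the tester and net_plugin changes below do exactly this). A minimal sketch of that drive loop, assuming a chain::controller& named `chain` (the empty `{}` arguments stand in for the forked-transaction callback and the trx_metadata lookup):

    // not part of this change; illustrates the retry loop the callers below use
    while (chain.apply_blocks({}, {}) == eosio::chain::controller::apply_blocks_result::incomplete) {
       // incomplete: more reversible blocks remain (or application was interrupted), so keep going
    }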
@@ -5308,8 +5265,8 @@ deque controller::abort_block() {
    return my->abort_block();
 }
 
-void controller::interrupt_transaction() {
-   my->interrupt_transaction();
+void controller::interrupt_apply_block_transaction() {
+   my->interrupt_apply_block_transaction();
 }
 
 boost::asio::io_context& controller::get_thread_pool() {
diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp
index 706398f7d0..fa4229a323 100644
--- a/libraries/chain/fork_database.cpp
+++ b/libraries/chain/fork_database.cpp
@@ -94,7 +94,7 @@ namespace eosio::chain {
      void open_impl( const char* desc, const std::filesystem::path& fork_db_file, fc::cfile_datastream& ds, validator_t& validator );
      void close_impl( std::ofstream& out );
-     bool add_impl( const bsp_t& n, ignore_duplicate_t ignore_duplicate, bool validate, validator_t& validator );
+     fork_db_add_t add_impl( const bsp_t& n, ignore_duplicate_t ignore_duplicate, bool validate, validator_t& validator );
      bool is_valid() const;
 
      bsp_t get_block_impl( const block_id_type& id, include_root_t include_root = include_root_t::no ) const;
@@ -241,8 +241,8 @@ namespace eosio::chain {
   }
 
   template
-  bool fork_database_impl::add_impl(const bsp_t& n, ignore_duplicate_t ignore_duplicate,
-                                    bool validate, validator_t& validator) {
+  fork_db_add_t fork_database_impl::add_impl(const bsp_t& n, ignore_duplicate_t ignore_duplicate,
+                                             bool validate, validator_t& validator) {
     EOS_ASSERT( root, fork_database_exception, "root not yet set" );
     EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" );
 
@@ -278,15 +278,25 @@
        EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" )
     }
 
+    auto prev_head = head_impl(include_root_t::yes);
+
     auto inserted = index.insert(n);
     EOS_ASSERT(ignore_duplicate == ignore_duplicate_t::yes || inserted.second, fork_database_exception,
                "duplicate block added: ${id}", ("id", n->id()));
-    return inserted.second && n == head_impl(include_root_t::no);
+    if (!inserted.second)
+       return fork_db_add_t::duplicate;
+    const bool new_head = n == head_impl(include_root_t::no);
+    if (new_head && n->previous() == prev_head->id())
+       return fork_db_add_t::appended_to_head;
+    if (new_head)
+       return fork_db_add_t::fork_switch;
+
+    return fork_db_add_t::added;
  }
 
  template
- bool fork_database_t::add( const bsp_t& n, ignore_duplicate_t ignore_duplicate ) {
+ fork_db_add_t fork_database_t::add( const bsp_t& n, ignore_duplicate_t ignore_duplicate ) {
    std::lock_guard g( my->mtx );
    return my->add_impl(n, ignore_duplicate, false,
                        [](block_timestamp_type timestamp,
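The new fork_db_add_t return value lets callers distinguish why a block did or did not become the new best head. A hypothetical caller (the names `fork_db`, `bsp`, and `on_new_best_head` are placeholders, not part of this change) might branch on it roughly like this:

    fork_db_add_t r = fork_db.add(bsp, ignore_duplicate_t::yes);
    switch (r) {
       case fork_db_add_t::duplicate:        break; // already known; nothing to do
       case fork_db_add_t::added:            break; // linked in, but not on the best branch
       case fork_db_add_t::appended_to_head: // extends the current best branch
       case fork_db_add_t::fork_switch:      // a different branch just became best
          on_new_best_head(r);               // e.g. schedule apply_blocks / interrupt current validation
          break;
       case fork_db_add_t::failure:          break; // add() throws rather than returning this
    }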
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index daf151ae2a..fc485486a0 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -96,6 +96,7 @@ namespace eosio::chain {
    using resource_limits::resource_limits_manager;
 
    using apply_handler = std::function;
+   enum class fork_db_add_t;
    using forked_callback_t = std::function;
 
    // lookup transaction_metadata via supplied function to avoid re-creation
@@ -207,8 +208,8 @@
          */
         deque abort_block();
 
-        /// Expected to be called from signal handler
-        void interrupt_transaction();
+        /// Expected to be called from signal handler, or producer_plugin
+        void interrupt_apply_block_transaction();
 
         /**
          *
@@ -235,7 +236,7 @@
         void set_async_aggregation(async_t val);
 
         struct accepted_block_result {
-           const bool is_new_best_head = false; // true if new best head
+           const fork_db_add_t add_result;
            std::optional block; // empty optional if block is unlinkable
         };
         // thread-safe
diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp
index fef3b12b68..7373628bf4 100644
--- a/libraries/chain/include/eosio/chain/fork_database.hpp
+++ b/libraries/chain/include/eosio/chain/fork_database.hpp
@@ -12,6 +12,13 @@ namespace eosio::chain {
    using block_branch_t = std::vector;
    enum class ignore_duplicate_t { no, yes };
    enum class include_root_t { no, yes };
+   enum class fork_db_add_t {
+      failure,          // add failed
+      duplicate,        // already added and ignore_duplicate=true
+      added,            // inserted into an existing branch or started a new branch, but not best branch
+      appended_to_head, // new best head of current best branch; no fork switch
+      fork_switch       // new best head of new branch, fork switch to new branch
+   };
 
    // Used for logging of comparison values used for best fork determination
    std::string log_fork_comparison(const block_state& bs);
@@ -67,9 +74,11 @@
      /**
       * Add block state to fork database.
       * Must link to existing block in fork database or the root.
-      * @return true if n becomes the new best head (and was not the best head before)
+      * @returns fork_db_add_t - result of the add
+      * @throws unlinkable_block_exception - unlinkable to any branch
+      * @throws fork_database_exception - no root, n is nullptr, protocol feature error, duplicate when ignore_duplicate=false
       */
-     bool add( const bsp_t& n, ignore_duplicate_t ignore_duplicate );
+     fork_db_add_t add( const bsp_t& n, ignore_duplicate_t ignore_duplicate );
 
      void remove( const block_id_type& id );
 
@@ -306,3 +315,6 @@
      static constexpr uint32_t max_supported_version = 3;
   };
 } /// eosio::chain
+
+FC_REFLECT_ENUM( eosio::chain::fork_db_add_t,
+                 (failure)(duplicate)(added)(appended_to_head)(fork_switch) )
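Callers of controller::accept_block see the same information through the reworked accepted_block_result. A sketch of the consuming side (placeholder names `cc`, `id`, `ptr`; the full version is in the net_plugin change below):

    controller::accepted_block_result res = cc.accept_block(id, ptr);
    if (!res.block) {
       // unlinkable block: nothing was added to the fork database
    } else if (res.add_result == fork_db_add_t::appended_to_head ||
               res.add_result == fork_db_add_t::fork_switch) {
       // new best head: worth scheduling block application and notifying the producer_plugin
    }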
diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 4cc3a29ffe..822a96dcc9 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -218,6 +218,7 @@ namespace eosio::testing {
         // producer become inactive
         void produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(const fc::microseconds target_elapsed_time = fc::microseconds());
         void push_block(const signed_block_ptr& b);
+        void apply_blocks();
 
         /**
          * These transaction IDs represent transactions available in the head chain state as scheduled
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index fef57536de..96011aa39f 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -340,8 +340,9 @@ namespace eosio::testing {
           case block_signal::accepted_block:
              // should get accepted_block signal after accepted_block_header signal
              // or after accepted_block (on fork switch, accepted block signaled when block re-applied)
-             return present && (itr->second == block_signal::accepted_block_header ||
-                                itr->second == block_signal::accepted_block);
+             // or first thing on restart if applying out of the forkdb
+             return !present || (present && (itr->second == block_signal::accepted_block_header ||
+                                             itr->second == block_signal::accepted_block));
 
           case block_signal::irreversible_block:
              // can be signaled on restart as the first thing since other signals happened before shutdown
@@ -423,6 +424,7 @@ namespace eosio::testing {
      open(std::move(pfs), snapshot_chain_id, [&snapshot,&control=this->control]() {
         control->startup( [](){}, []() { return false; }, snapshot );
      });
+     apply_blocks();
   }
 
   void base_tester::open( protocol_feature_set&& pfs, const genesis_state& genesis, call_startup_t call_startup ) {
@@ -430,6 +432,7 @@ namespace eosio::testing {
        open(std::move(pfs), genesis.compute_chain_id(), [&genesis,&control=this->control]() {
           control->startup( [](){}, []() { return false; }, genesis );
        });
+       apply_blocks();
     } else {
        open(std::move(pfs), genesis.compute_chain_id(), nullptr);
     }
@@ -439,6 +442,7 @@ namespace eosio::testing {
      open(std::move(pfs), expected_chain_id, [&control=this->control]() {
         control->startup( [](){}, []() { return false; } );
      });
+     apply_blocks();
   }
 
   void base_tester::push_block(const signed_block_ptr& b) {
@@ -460,6 +464,11 @@ namespace eosio::testing {
      _check_for_vote_if_needed(*control, bh);
   }
 
+  void base_tester::apply_blocks() {
+     while (control->apply_blocks( {}, {} ) == controller::apply_blocks_result::incomplete)
+        ;
+  }
+
   signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs ) {
      auto res = _produce_block( skip_time, skip_pending_trxs, false );
      return res.block;
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 866401f617..a19ee0e6cb 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -1133,7 +1133,10 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
 void chain_plugin_impl::plugin_startup()
 { try {
    try {
-      auto shutdown = [](){ return app().quit(); };
+      auto shutdown = []() {
+         dlog("controller shutdown, quitting...");
+         return app().quit();
+      };
       auto check_shutdown = [](){ return app().is_quiting(); };
       if (snapshot_path)
          chain->startup(shutdown, check_shutdown, std::make_shared(*snapshot_path));
diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
index b58cad4c72..b6eb399387 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
@@ -94,6 +94,9 @@ namespace eosio {
        void register_increment_failed_p2p_connections(std::function&&);
        void register_increment_dropped_trxs(std::function&&);
 
+       // for testing
+       void broadcast_block(const signed_block_ptr& b, const block_id_type& id);
+
      private:
        std::shared_ptr my;
   };
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 8eab485ba6..0a49072766 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -456,6 +457,7 @@ namespace eosio {
       void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection);
       void start_expire_timer();
       void start_monitors();
+      void process_blocks();
 
       void expire();
       /** \name Peer Timestamps
@@ -3712,7 +3714,7 @@ namespace eosio {
 
        std::optional obh;
       bool exception = false;
-      bool best_head = false;
+      fork_db_add_t fork_db_add_result = fork_db_add_t::failure;
       bool unlinkable = false;
       sync_manager::closing_mode close_mode = sync_manager::closing_mode::immediately;
       try {
@@ -3722,7 +3724,7 @@ namespace eosio {
          }
          // this will return empty optional if block is not linkable
         controller::accepted_block_result abh = cc.accept_block( id, ptr );
-        best_head = abh.is_new_best_head;
+        fork_db_add_result = abh.add_result;
         obh = std::move(abh.block);
         unlinkable = !obh;
         close_mode = sync_manager::closing_mode::handshake;
@@ -3756,8 +3758,8 @@ namespace eosio {
         uint32_t block_num = obh->block_num();
         proper_svnn_block_seen = obh->header().is_proper_svnn_block();
-        fc_dlog( logger, "validated block header, best_head ${bt}, broadcasting immediately, connection - ${cid}, blk num = ${num}, id = ${id}",
-                 ("bt", best_head)("cid", cid)("num", block_num)("id", obh->id()) );
+        fc_dlog( logger, "validated block header, forkdb add ${bt}, broadcasting immediately, connection - ${cid}, blk num = ${num}, id = ${id}",
+                 ("bt", fork_db_add_result)("cid", cid)("num", block_num)("id", obh->id()) );
         my_impl->dispatcher.add_peer_block( obh->id(), cid ); // no need to send back to sender
         my_impl->dispatcher.bcast_block( obh->block(), obh->id() );
         c->block_status_monitor_.accepted();
@@ -3770,32 +3772,36 @@ namespace eosio {
         });
      }
 
-     if (best_head) {
+     if (fork_db_add_result == fork_db_add_t::appended_to_head || fork_db_add_result == fork_db_add_t::fork_switch) {
        ++c->unique_blocks_rcvd_count;
        fc_dlog(logger, "posting incoming_block to app thread, block ${n}", ("n", ptr->block_num()));
+       my_impl->process_blocks();
 
-       auto process_incoming_blocks = [](auto self) -> void {
-          try {
-             auto r = my_impl->producer_plug->on_incoming_block();
-             if (r == controller::apply_blocks_result::incomplete) {
-                app().executor().post(handler_id::process_incoming_block, priority::medium, exec_queue::read_write, [self]() {
-                   self(self);
-                });
-             }
-          } catch (...) {} // errors on applied blocks logged in controller
-       };
-
-       app().executor().post(handler_id::process_incoming_block, priority::medium, exec_queue::read_write,
-                             [process_incoming_blocks]() {
-                                process_incoming_blocks(process_incoming_blocks);
-                             });
        // ready to process immediately, so signal producer to interrupt start_block
-       my_impl->producer_plug->received_block(block_num);
+       my_impl->producer_plug->received_block(block_num, fork_db_add_result);
     }
   });
 }
 
+  void net_plugin_impl::process_blocks() {
+     auto process_incoming_blocks = [](auto self) -> void {
+        try {
+           auto r = my_impl->producer_plug->on_incoming_block();
+           if (r == controller::apply_blocks_result::incomplete) {
+              app().executor().post(handler_id::process_incoming_block, priority::medium, exec_queue::read_write, [self]() {
+                 self(self);
+              });
+           }
+        } catch (...) {} // errors on applied blocks logged in controller
+     };
+
+     app().executor().post(handler_id::process_incoming_block, priority::medium, exec_queue::read_write,
+                           [process_incoming_blocks]() {
+                              process_incoming_blocks(process_incoming_blocks);
+                           });
+  }
+
   // thread safe
   void net_plugin_impl::start_expire_timer() {
      fc::lock_guard g( expire_timer_mtx );
@@ -4458,6 +4464,11 @@ namespace eosio {
      my->increment_dropped_trxs = std::move(fun);
   }
 
+  void net_plugin::broadcast_block(const signed_block_ptr& b, const block_id_type& id) {
+     fc_dlog(logger, "broadcasting block ${n} ${id}", ("n", b->block_num())("id", id));
+     my->dispatcher.bcast_block(b, id);
+  }
+
   //----------------------------------------------------------------------------
 
   size_t connections_manager::number_connections() const {
@@ -4498,6 +4509,16 @@ namespace eosio {
      for (const auto& peer : peers) {
        resolve_and_connect(peer, p2p_address);
     }
+    if (!peers.empty()) {
+       // It is possible that the node was shutdown with blocks to process in the fork database. For example, if
+       // it was syncing and had processed blocks into the fork database but not yet applied them.
+       // If the node was shutdown via terminate-at-block, the current expectation is that the node can be restarted
+       // to examine the state at which it was shutdown. For now, we will only process these blocks if there are
+       // peers configured. This is a bit of a hack for Spring 1.0.0 until we can add a proper
+       // pause-at-block (issue #570) which could be used to explicitly request a node to not process beyond
+       // a specified block.
+       my_impl->process_blocks();
+    }
   }
 
   void connections_manager::add( connection_ptr c ) {
diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
index fa4f235684..ffe586cff7 100644
--- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
+++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
@@ -141,7 +141,7 @@ class producer_plugin : public appbase::plugin {
    void log_failed_transaction(const transaction_id_type& trx_id, const chain::packed_transaction_ptr& packed_trx_ptr, const char* reason) const;
 
    // thread-safe, called when a new block is received
-   void received_block(uint32_t block_num);
+   void received_block(uint32_t block_num, chain::fork_db_add_t fork_db_add_result);
 
    const std::set& producer_accounts() const;
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index c271654dee..a85e87f9cf 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -557,9 +558,8 @@ struct implicit_production_pause_vote_tracker {
 class producer_plugin_impl : public std::enable_shared_from_this {
 public:
    producer_plugin_impl()
-      : _timer(app().make_timer())
-      , _transaction_ack_channel(app().get_channel())
-      , _ro_timer(app().make_timer()) {}
+      : _transaction_ack_channel(app().get_channel())
+      {}
 
    void schedule_production_loop();
    void schedule_maybe_produce_block(bool exhausted);
@@ -683,12 +683,14 @@ class producer_plugin_impl : public std::enable_shared_from_this
+   _timer_thread;
+   boost::asio::deadline_timer _timer{_timer_thread.get_executor()};
+
    using signature_provider_type = signature_provider_plugin::signature_provider_type;
    std::map _signature_providers;
    chain::bls_pub_priv_key_map_t _finalizer_keys; // public, private
    std::set _producers;
    chain::db_read_mode _db_read_mode = db_read_mode::HEAD;
-   boost::asio::deadline_timer _timer;
    block_timing_util::producer_watermarks _producer_watermarks;
    pending_block_mode _pending_block_mode = pending_block_mode::speculating;
    unapplied_transaction_queue _unapplied_transactions;
@@ -801,7 +803,7 @@ class producer_plugin_impl : public std::enable_shared_from_this
      = pending_block_num);
   }
 
-producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
-   chain::controller& chain = chain_plug->chain();
-
-   if (!chain_plug->accept_transactions())
-      return start_block_result::waiting_for_block;
-
-   abort_block();
-
-   auto r = chain.apply_blocks([this](const transaction_metadata_ptr& trx) { _unapplied_transactions.add_forked(trx); },
-                               [this](const transaction_id_type& id) { return _unapplied_transactions.get_trx(id); });
-   if (r != controller::apply_blocks_result::complete)
-      return start_block_result::waiting_for_block;
-
-   if (chain.should_terminate()) {
-      app().quit();
-      return start_block_result::failed;
-   }
-
-   _time_tracker.clear(); // make sure we start tracking block time after `maybe_switch_forks()`
-
-   block_handle head = chain.head();
-   block_num_type head_block_num = head.block_num();
-   const fc::time_point now = fc::time_point::now();
-   const block_timestamp_type block_time = calculate_pending_block_time();
-   const uint32_t pending_block_num = head_block_num + 1;
+producer_plugin_impl::start_block_result
+producer_plugin_impl::determine_pending_block_mode(const fc::time_point& now,
+                                                   const block_handle& head,
+                                                   const block_timestamp_type& block_time,
+                                                   const producer_authority& scheduled_producer)
+{
+   block_num_type head_block_num = head.block_num();
 
   _pending_block_mode = pending_block_mode::producing;
 
-  // copy as reference is invalidated by abort_block() below
-  const producer_authority scheduled_producer = chain.head_active_producers(block_time).get_scheduled_producer(block_time);
-
   size_t num_relevant_signatures = 0;
   scheduled_producer.for_each_key([&](const public_key_type& key) {
      const auto& iter = _signature_providers.find(key);
@@ -2035,7 +2028,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
      }
   });
 
-  auto irreversible_block_age = get_irreversible_block_age();
+  auto irreversible_block_age = get_irreversible_block_age(now);
 
   bool not_producing_when_time = false;
   // If the next block production opportunity is in the present or future, we're synced.
@@ -2094,7 +2087,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
      static fc::time_point last_start_block_time = fc::time_point::maximum(); // always start with speculative block
      // Determine if we are syncing: if we have recently started an old block then assume we are syncing
      if (last_start_block_time < now + fc::microseconds(config::block_interval_us)) {
-        auto head_block_age = now - chain.head().block_time();
+        auto head_block_age = now - head.block_time();
         if (head_block_age > fc::minutes(5))
            return start_block_result::waiting_for_block; // if syncing no need to create a block just to immediately abort it
     }
@@ -2144,8 +2137,71 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
        _pending_block_deadline = now + fc::milliseconds(config::block_interval_ms);
     }
  }
+
+  return start_block_result::succeeded;
+}
+
+producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
+   chain::controller& chain = chain_plug->chain();
+
+   if (!chain_plug->accept_transactions())
+      return start_block_result::waiting_for_block;
+
+   abort_block();
+
+   auto apply_blocks = [&]() -> controller::apply_blocks_result {
+      try {
+         return chain.apply_blocks([this](const transaction_metadata_ptr& trx) { _unapplied_transactions.add_forked(trx); },
+                                   [this](const transaction_id_type& id) { return _unapplied_transactions.get_trx(id); });
+      } catch (...) {} // errors logged in apply_blocks
+      return controller::apply_blocks_result::incomplete;
+   };
+
+   // producers need to be able to start producing on schedule, do not apply blocks as it might take a long time to apply
+   if (!is_configured_producer()) {
+      auto r = apply_blocks();
+      if (r != controller::apply_blocks_result::complete)
+         return start_block_result::waiting_for_block;
+   }
+
+   if (chain.should_terminate()) {
+      app().quit();
+      return start_block_result::failed;
+   }
+
+   _time_tracker.clear(); // make sure we start tracking block time after `apply_blocks()`
+
+   block_handle head = chain.head();
+   fc::time_point now = fc::time_point::now();
+   block_timestamp_type block_time = calculate_pending_block_time();
+   producer_authority scheduled_producer = chain.head_active_producers(block_time).get_scheduled_producer(block_time);
+
+   start_block_result r = determine_pending_block_mode(now, head, block_time, scheduled_producer);
+   if (r != start_block_result::succeeded)
+      return r;
+
+   if (is_configured_producer() && in_speculating_mode()) {
+      // if not producing right now, see if any blocks have come in that need to be applied
+      const block_id_type head_id = head.id();
+      schedule_delayed_production_loop(weak_from_this(), _pending_block_deadline); // interrupt apply_blocks at deadline
+      apply_blocks();
+      head = chain.head();
+      if (head_id != head.id()) { // blocks were applied
+         now = fc::time_point::now();
+         block_time = calculate_pending_block_time();
+         scheduled_producer = chain.head_active_producers(block_time).get_scheduled_producer(block_time);
+
+         r = determine_pending_block_mode(now, head, block_time, scheduled_producer);
+         if (r != start_block_result::succeeded)
+            return r;
+      }
+   }
 
   const auto& preprocess_deadline = _pending_block_deadline;
+  const block_num_type head_block_num = head.block_num();
+  const uint32_t pending_block_num = head_block_num + 1;
 
   fc_dlog(_log, "Starting block #${n} ${bt} producer ${p}, deadline ${d}",
           ("n", pending_block_num)("bt", block_time)("p", scheduled_producer.producer_name)("d", _pending_block_deadline));
@@ -2756,13 +2812,14 @@ void producer_plugin_impl::schedule_production_loop() {
      _timer.expires_from_now(boost::posix_time::microseconds(config::block_interval_us / 10));
      // we failed to start a block, so try again later?
-     _timer.async_wait(
-        app().executor().wrap(priority::high, exec_queue::read_write,
-                              [this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
-                                 if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
-                                    schedule_production_loop();
-                                 }
-                              }));
+     _timer.async_wait([this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
+        if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
+           chain_plug->chain().interrupt_apply_block_transaction();
+           app().executor().post(priority::high, exec_queue::read_write, [this]() {
+              schedule_production_loop();
+           });
+        }
+     });
  } else if (result == start_block_result::waiting_for_block) {
     if (is_configured_producer() && !production_disabled_by_policy()) {
        chain::controller& chain = chain_plug->chain();
@@ -2822,16 +2879,17 @@ void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) {
              ("num", chain.head().block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded"));
  }
 
-  _timer.async_wait(app().executor().wrap(priority::high, exec_queue::read_write,
-                    [&chain, this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
-                       if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
+  _timer.async_wait([&chain, this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
+     if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
+        app().executor().post(priority::high, exec_queue::read_write, [&chain, this]() {
           // pending_block_state expected, but can't assert inside async_wait
           auto block_num = chain.is_building_block() ? chain.head().block_num() + 1 : 0;
           fc_dlog(_log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()));
           auto res = maybe_produce_block();
           fc_dlog(_log, "Producing Block #${num} returned: ${res}", ("num", block_num)("res", res));
-                       }
-                    }));
+        });
+     }
+  });
 }
 
 void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this,
@@ -2840,12 +2898,14 @@ void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr<
      fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time));
      static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
      _timer.expires_at(epoch + boost::posix_time::microseconds(wake_up_time->time_since_epoch().count()));
-     _timer.async_wait(app().executor().wrap(priority::high, exec_queue::read_write,
-                       [this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
-                          if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
+     _timer.async_wait([this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
+        if (ec != boost::asio::error::operation_aborted && cid == _timer_corelation_id) {
+           chain_plug->chain().interrupt_apply_block_transaction();
+           app().executor().post(priority::high, exec_queue::read_write, [this]() {
              schedule_production_loop();
-                          }
-                       }));
+           });
+        }
+     });
  } else {
     fc_dlog(_log, "Not Scheduling Speculative/Production, no local producers had valid wake up times");
  }
@@ -2938,8 +2998,14 @@ void producer_plugin_impl::produce_block() {
    _time_tracker.clear();
 }
 
-void producer_plugin::received_block(uint32_t block_num) {
+void producer_plugin::received_block(uint32_t block_num, chain::fork_db_add_t fork_db_add_result) {
    my->_received_block = block_num;
+   // fork_db_add_t::fork_switch means head block of best fork (different from the current branch) is received.
+   // Since a better fork is available, interrupt current block validation and allow a fork switch to the better branch.
+   if (fork_db_add_result == fork_db_add_t::fork_switch) {
+      fc_ilog(_log, "new best fork received");
+      my->chain_plug->chain().interrupt_apply_block_transaction();
+   }
 }
 
 void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id,
@@ -2985,13 +3051,14 @@ void producer_plugin_impl::start_write_window() {
    _ro_window_deadline = now + _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline
    auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count());
    _ro_timer.expires_from_now(expire_time);
-   _ro_timer.async_wait(app().executor().wrap( // stay on app thread
-      priority::high, exec_queue::read_write, // placed in read_write so only called from main thread
-      [this](const boost::system::error_code& ec) {
-         if (ec != boost::asio::error::operation_aborted) {
-            switch_to_read_window();
-         }
-      }));
+   _ro_timer.async_wait([this](const boost::system::error_code& ec) {
+      if (ec != boost::asio::error::operation_aborted) {
+         app().executor().post(priority::high, exec_queue::read_write, // placed in read_write so only called from main thread
+            [this]() {
+               switch_to_read_window();
+            });
+      }
+   });
 }
 
 // Called only from app thread
@@ -3031,8 +3098,8 @@ void producer_plugin_impl::switch_to_read_window() {
    auto expire_time = boost::posix_time::microseconds(_ro_read_window_time_us.count());
    _ro_timer.expires_from_now(expire_time);
    // Needs to be on read_only because that is what is being processed until switch_to_write_window().
-   _ro_timer.async_wait(
-      app().executor().wrap(priority::high, exec_queue::read_only, [this](const boost::system::error_code& ec) {
+   _ro_timer.async_wait([this](const boost::system::error_code& ec) {
+      app().executor().post(priority::high, exec_queue::read_only, [this, ec]() {
        if (ec != boost::asio::error::operation_aborted) {
          // use future to make sure all read-only tasks finished before switching to write window
          for (auto& task : _ro_exec_tasks_fut) {
@@ -3044,7 +3111,8 @@ void producer_plugin_impl::switch_to_read_window() {
       } else {
          _ro_exec_tasks_fut.clear();
       }
-    }));
+    });
+  });
 }
 
 // Called from a read only thread.  Run in parallel with app and other read only threads
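The timer changes above all follow one pattern: the async_wait handler now runs directly on the producer plugin's dedicated timer thread, calls interrupt_apply_block_transaction() so a block being applied on the main thread can be cut short, and only then posts the real work back to the main thread. A condensed sketch of that pattern (assumes the surrounding producer_plugin_impl members `_timer`, `_timer_corelation_id`, `chain_plug`, and appbase's `app()` executor):

    _timer.async_wait([this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) {
       if (ec == boost::asio::error::operation_aborted || cid != _timer_corelation_id)
          return;                                               // cancelled or stale timer
       chain_plug->chain().interrupt_apply_block_transaction(); // wake the main thread if it is applying a block
       app().executor().post(priority::high, exec_queue::read_write, [this]() {
          schedule_production_loop();                           // the actual work still runs on the main thread
       });
    });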
diff --git a/plugins/test_control_plugin/CMakeLists.txt b/plugins/test_control_plugin/CMakeLists.txt
index 7e2a79d6c5..ef1e059293 100644
--- a/plugins/test_control_plugin/CMakeLists.txt
+++ b/plugins/test_control_plugin/CMakeLists.txt
@@ -4,6 +4,6 @@ add_library( test_control_plugin
              test_control_plugin.cpp
              ${HEADERS} )
 
-target_link_libraries( test_control_plugin producer_plugin chain_plugin http_client_plugin appbase eosio_chain )
+target_link_libraries( test_control_plugin producer_plugin chain_plugin net_plugin http_client_plugin appbase eosio_chain )
 
 target_include_directories( test_control_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
diff --git a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp
index 3f772f4bc2..2fa1094fc3 100644
--- a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp
+++ b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp
@@ -1,6 +1,7 @@
 #pragma once
 #include
 #include
+#include
 #include
 #include
 
@@ -40,6 +41,7 @@ class read_write {
      chain::name to;
      fc::crypto::private_key trx_priv_key;
      fc::crypto::private_key blk_priv_key;
+     bool shutdown = false; // shutdown node before next block
   };
   empty swap_action(const swap_action_params& params) const;
 
@@ -53,7 +55,7 @@ class read_write {
 class test_control_plugin : public plugin {
 public:
-   APPBASE_PLUGIN_REQUIRES((chain_plugin))
+   APPBASE_PLUGIN_REQUIRES((chain_plugin)(net_plugin))
   test_control_plugin();
   test_control_plugin(const test_control_plugin&) = delete;
@@ -78,4 +80,4 @@ class test_control_plugin : public plugin {
 FC_REFLECT(eosio::test_control_apis::empty, )
 FC_REFLECT(eosio::test_control_apis::read_write::kill_node_on_producer_params, (producer)(where_in_sequence)(based_on_lib) )
 FC_REFLECT(eosio::test_control_apis::read_write::throw_on_params, (signal)(exception) )
-FC_REFLECT(eosio::test_control_apis::read_write::swap_action_params, (from)(to)(trx_priv_key)(blk_priv_key) )
+FC_REFLECT(eosio::test_control_apis::read_write::swap_action_params, (from)(to)(trx_priv_key)(blk_priv_key)(shutdown) )
diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp
index 8644e56fa9..4859403b7a 100644
--- a/plugins/test_control_plugin/test_control_plugin.cpp
+++ b/plugins/test_control_plugin/test_control_plugin.cpp
@@ -1,4 +1,5 @@
 #include
+#include
 
 namespace eosio {
 
@@ -160,9 +161,12 @@ void test_control_plugin_impl::swap_action_in_block(const chain::signed_block_pt
    copy_b->producer_signature = _swap_on_options.blk_priv_key.sign(copy_b->calculate_id());
 
    // will be processed on the next start_block if is_new_best_head
-   const auto&[is_new_best_head, bh] = _chain.accept_block(copy_b->calculate_id(), copy_b);
-   ilog("Swapped action ${f} to ${t}, is_new_best_head ${bh}, block ${bn}",
-        ("f", _swap_on_options.from)("t", _swap_on_options.to)("bh", is_new_best_head)("bn", bh ? bh->block_num() : 0));
+   const auto&[add_result, bh] = _chain.accept_block(copy_b->calculate_id(), copy_b);
+   ilog("Swapped action ${f} to ${t}, add_result ${a}, block ${bn}",
+        ("f", _swap_on_options.from)("t", _swap_on_options.to)("a", add_result)("bn", bh ? bh->block_num() : 0));
+   app().find_plugin()->broadcast_block(copy_b, copy_b->calculate_id());
+   if (_swap_on_options.shutdown)
+      app().quit();
 
    reset_swap_action();
 }
diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp
index bf161cf79a..eb904a1128 100644
--- a/programs/nodeos/main.cpp
+++ b/programs/nodeos/main.cpp
@@ -191,7 +191,7 @@ int main(int argc, char** argv)
         controller& chain = app->get_plugin().chain();
         app->set_stop_executor_cb([&app, &chain]() {
            ilog("appbase quit called");
-           chain.interrupt_transaction();
+           chain.interrupt_apply_block_transaction();
            app->get_io_context().stop();
         });
         if (auto resmon_plugin = app->find_plugin()) {
diff --git a/tests/interrupt_trx_test.py b/tests/interrupt_trx_test.py
index e680f49a79..7350631849 100755
--- a/tests/interrupt_trx_test.py
+++ b/tests/interrupt_trx_test.py
@@ -9,8 +9,8 @@
 ###############################################################
 # interrupt_trx_test
 #
-# Test applying a block with an infinite trx and verify SIGTERM kill
-# interrupts the transaction and aborts the block.
+# Verify an infinite trx in a block is auto recovered when a new
+# best head is received.
 #
 ###############################################################
 
@@ -32,18 +32,19 @@ try:
     TestHelper.printSystemInfo("BEGIN")
 
     assert cluster.launch(
-        pnodes=1,
-        prodCount=1,
-        totalProducers=1,
-        totalNodes=2,
+        pnodes=2,
+        prodCount=2,
+        totalProducers=2,
+        totalNodes=3,
         loadSystemContract=False,
         activateIF=True,
         extraNodeosArgs="--plugin eosio::test_control_api_plugin")
 
     prodNode = cluster.getNode(0)
-    validationNode = cluster.getNode(1)
+    prodNode2 = cluster.getNode(1)
+    validationNode = cluster.getNode(2)
 
-    # Create a transaction to create an account
+    # load payloadless contract
     Utils.Print("create a new account payloadless from the producer node")
     payloadlessAcc = Account("payloadless")
     payloadlessAcc.ownerPublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY
@@ -56,6 +57,7 @@
     Utils.Print("Publish payloadless contract")
     trans = prodNode.publishContract(payloadlessAcc, contractDir, wasmFile, abiFile, waitForTransBlock=True)
 
+    # test normal trx
     contract="payloadless"
     action="doit"
     data="{}"
@@ -63,30 +65,52 @@
     trans=prodNode.pushMessage(contract, action, data, opts)
     assert trans and trans[0], "Failed to push doit action"
 
+    # test trx that will be replaced later
     action="doitslow"
     trans=prodNode.pushMessage(contract, action, data, opts)
     assert trans and trans[0], "Failed to push doitslow action"
 
+    # infinite trx, will fail since it will hit trx exec limit
     action="doitforever"
     trans=prodNode.pushMessage(contract, action, data, opts, silentErrors=True)
     assert trans and not trans[0], "push doitforever action did not fail as expected"
 
+    # swap out doitslow action in block with doitforever action
+    prodNode.waitForProducer("defproducerb")
     prodNode.waitForProducer("defproducera")
     prodNode.processUrllibRequest("test_control", "swap_action",
                                   {"from":"doitslow", "to":"doitforever",
                                    "trx_priv_key":EOSIO_ACCT_PRIVATE_DEFAULT_KEY,
-                                   "blk_priv_key":cluster.defproduceraAccount.activePrivateKey})
+                                   "blk_priv_key":cluster.defproduceraAccount.activePrivateKey,
+                                   "shutdown":"true"})
 
+    # trx that will be swapped out for doitforever
     action="doitslow"
     trans=prodNode.pushMessage(contract, action, data, opts)
     assert trans and trans[0], "Failed to push doitslow action"
 
-    assert not prodNode.waitForHeadToAdvance(3), f"prodNode did advance head after doitforever action"
+    assert prodNode.waitForNodeToExit(5), f"prodNode did not exit after doitforever action and shutdown"
+    assert not prodNode.verifyAlive(), f"prodNode did not exit after doitforever action"
+
+    # relaunch and verify auto recovery
+    prodNode.relaunch(timeout=365) # large timeout to wait on other producer
+
+    prodNode.waitForProducer("defproducerb")
+    prodNode.waitForProducer("defproducera")
+
+    # verify auto recovery without any restart
+    prodNode.processUrllibRequest("test_control", "swap_action",
+                                  {"from":"doitslow", "to":"doitforever",
+                                   "trx_priv_key":EOSIO_ACCT_PRIVATE_DEFAULT_KEY,
+                                   "blk_priv_key":cluster.defproduceraAccount.activePrivateKey})
 
-    prodNode.interruptAndVerifyExitStatus()
+    action="doitslow"
+    trans=prodNode.pushMessage(contract, action, data, opts)
+    assert trans and trans[0], "Failed to push doitslow action"
 
-    assert not prodNode.verifyAlive(), "prodNode did not exit from SIGINT"
+    assert prodNode.waitForLibToAdvance(), "prodNode did not advance lib after doitforever action"
+    assert prodNode2.waitForLibToAdvance(), "prodNode2 did not advance lib after doitforever action"
 
     testSuccessful = True
 finally:
diff --git a/tests/trx_finality_status_forked_test.py b/tests/trx_finality_status_forked_test.py
index cfc67a123f..99e8d19785 100755
--- a/tests/trx_finality_status_forked_test.py
+++ b/tests/trx_finality_status_forked_test.py
@@ -212,6 +212,9 @@ def getBlockID(status):
         info = prodD.getInfo()
         retStatus = prodD.getTransactionStatus(transId)
         state = getState(retStatus)
+        if state == forkedOutState:
+            prodD.waitForNextBlock()
+            continue
         blockNum = getBlockNum(retStatus) + 2 # Add 2 to give time to move from locally applied to in-block
         if (state == inBlockState or state == irreversibleState) or ( info['head_block_producer'] == 'defproducerd' and info['last_irreversible_block_num'] > blockNum ):
             break
diff --git a/unittests/blocks_log_replay_tests.cpp b/unittests/blocks_log_replay_tests.cpp
index 273d595873..55c4a340e0 100644
--- a/unittests/blocks_log_replay_tests.cpp
+++ b/unittests/blocks_log_replay_tests.cpp
@@ -87,6 +87,7 @@ struct blog_replay_fixture {
      // Resume replay
      eosio::testing::tester replay_chain_1(copied_config_1, *genesis, call_startup_t::no);
      replay_chain_1.control->startup( [](){}, []()->bool{ return false; } );
+     replay_chain_1.apply_blocks();
 
      // Make sure new chain contain the account created by original chain
      BOOST_REQUIRE_NO_THROW(replay_chain_1.get_account("replay1"_n));
diff --git a/unittests/checktime_tests.cpp b/unittests/checktime_tests.cpp
index 66d0ef91df..b26da087fd 100644
--- a/unittests/checktime_tests.cpp
+++ b/unittests/checktime_tests.cpp
@@ -128,7 +128,7 @@ BOOST_AUTO_TEST_CASE( checktime_interrupt_test) { try {
    std::thread th( [&c=*other.control]() {
       std::this_thread::sleep_for( std::chrono::milliseconds(50) );
-      c.interrupt_transaction();
+      c.interrupt_apply_block_transaction();
   } );
 
   // apply block, caught in an "infinite" loop
diff --git a/unittests/fork_db_tests.cpp b/unittests/fork_db_tests.cpp
index 6ba25923b0..ac56827cec 100644
--- a/unittests/fork_db_tests.cpp
+++ b/unittests/fork_db_tests.cpp
@@ -7,8 +7,9 @@
 namespace eosio::chain {
 
+uint32_t nonce = 0;
+
 inline block_id_type make_block_id(block_num_type block_num) {
-   static uint32_t nonce = 0;
    ++nonce;
    block_id_type id = fc::sha256::hash(std::to_string(block_num) + "-" + std::to_string(nonce));
    id._hash[0] &= 0xffffffff00000000;
@@ -57,26 +58,38 @@ using namespace eosio::chain;
 struct generate_fork_db_state {
    generate_fork_db_state() {
      fork_db.reset_root(root);
-     fork_db.add(bsp11a, ignore_duplicate_t::no);
-     fork_db.add(bsp11b, ignore_duplicate_t::no);
-     fork_db.add(bsp11c, ignore_duplicate_t::no);
-     fork_db.add(bsp12a, ignore_duplicate_t::no);
-     fork_db.add(bsp13a, ignore_duplicate_t::no);
-     fork_db.add(bsp12b, ignore_duplicate_t::no);
-     fork_db.add(bsp12bb, ignore_duplicate_t::no);
-     fork_db.add(bsp12bbb, ignore_duplicate_t::no);
-     fork_db.add(bsp12c, ignore_duplicate_t::no);
-     fork_db.add(bsp13b, ignore_duplicate_t::no);
-     fork_db.add(bsp13bb, ignore_duplicate_t::no);
-     fork_db.add(bsp13bbb, ignore_duplicate_t::no);
-     fork_db.add(bsp14b, ignore_duplicate_t::no);
-     fork_db.add(bsp13c, ignore_duplicate_t::no);
+     BOOST_TEST((fork_db.add(bsp11a, ignore_duplicate_t::no) == fork_db_add_t::appended_to_head));
+     BOOST_TEST((fork_db.add(bsp11b, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp11c, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp12a, ignore_duplicate_t::no) == fork_db_add_t::appended_to_head));
+     BOOST_TEST((fork_db.add(bsp13a, ignore_duplicate_t::no) == fork_db_add_t::appended_to_head));
+     BOOST_TEST((fork_db.add(bsp12b, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp12bb, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp12bbb, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp12c, ignore_duplicate_t::no) == fork_db_add_t::added));
+     BOOST_TEST((fork_db.add(bsp13b, ignore_duplicate_t::no) == fork_db_add_t::fork_switch));
+
+     // no fork_switch, because id is less
+     BOOST_TEST(bsp13bb->latest_qc_block_timestamp() == bsp13b->latest_qc_block_timestamp());
+     BOOST_TEST(bsp13bb->timestamp() == bsp13b->timestamp());
+     BOOST_TEST(bsp13bb->id() < bsp13b->id());
+     BOOST_TEST((fork_db.add(bsp13bb, ignore_duplicate_t::no) == fork_db_add_t::added));
+
+     // fork_switch by id, everything else is the same
+     BOOST_TEST(bsp13bbb->latest_qc_block_timestamp() == bsp13b->latest_qc_block_timestamp());
+     BOOST_TEST(bsp13bbb->timestamp() == bsp13b->timestamp());
+     BOOST_TEST(bsp13bbb->id() > bsp13b->id());
+     BOOST_TEST((fork_db.add(bsp13bbb, ignore_duplicate_t::no) == fork_db_add_t::fork_switch));
+
+     BOOST_TEST((fork_db.add(bsp14b, ignore_duplicate_t::no) == fork_db_add_t::fork_switch));
+     BOOST_TEST((fork_db.add(bsp13c, ignore_duplicate_t::no) == fork_db_add_t::added));
   }
 
   fork_database_if_t fork_db;
 
   // Setup fork database with blocks based on a root of block 10
   // Add a number of forks in the fork database
+  bool reset_nonce = [&]() { nonce = 0; return true; }();
   block_state_ptr root = test_block_state_accessor::make_genesis_block_state();
   block_state_ptr bsp11a = test_block_state_accessor::make_unique_block_state(11, root);
   block_state_ptr bsp12a = test_block_state_accessor::make_unique_block_state(12, bsp11a);
@@ -112,12 +125,12 @@ BOOST_FIXTURE_TEST_CASE(add_remove_test, generate_fork_db_state) try {
   BOOST_TEST(!fork_db.get_block(bsp12b->id()));
   BOOST_TEST(!fork_db.get_block(bsp13b->id()));
   BOOST_TEST(!fork_db.get_block(bsp14b->id()));
-  BOOST_TEST(!fork_db.add(bsp12b, ignore_duplicate_t::no)); // will throw if already exists
+  BOOST_TEST((fork_db.add(bsp12b, ignore_duplicate_t::no) == fork_db_add_t::added)); // will throw if already exists
   // 13b not the best branch because 13c has higher timestamp
-  BOOST_TEST(!fork_db.add(bsp13b, ignore_duplicate_t::no)); // will throw if already exists
+  BOOST_TEST((fork_db.add(bsp13b, ignore_duplicate_t::no) == fork_db_add_t::added)); // will throw if already exists
   // 14b has a higher timestamp than 13c
-  BOOST_TEST(fork_db.add(bsp14b, ignore_duplicate_t::no)); // will throw if already exists
-  BOOST_TEST(!fork_db.add(bsp14b, ignore_duplicate_t::yes));
+  BOOST_TEST((fork_db.add(bsp14b, ignore_duplicate_t::no) == fork_db_add_t::fork_switch)); // will throw if already exists
+  BOOST_TEST((fork_db.add(bsp14b, ignore_duplicate_t::yes) == fork_db_add_t::duplicate));
 
   // test search
   BOOST_TEST(fork_db.search_on_branch( bsp13bb->id(), 11) == bsp11b);
@@ -143,7 +156,7 @@ BOOST_FIXTURE_TEST_CASE(add_remove_test, generate_fork_db_state) try {
   BOOST_TEST(branch[1] == bsp11a);
 
   auto bsp14c = test_block_state_accessor::make_unique_block_state(14, bsp13c); // should be best branch
-  BOOST_TEST(fork_db.add(bsp14c, ignore_duplicate_t::yes));
+  BOOST_TEST((fork_db.add(bsp14c, ignore_duplicate_t::yes) == fork_db_add_t::fork_switch));
 
   // test fetch branch when lib is greater than head
   branch = fork_db.fetch_branch(bsp13b->id(), bsp12a->id());
diff --git a/unittests/restart_chain_tests.cpp b/unittests/restart_chain_tests.cpp
index a183c3c8af..00ed4cf2fc 100644
--- a/unittests/restart_chain_tests.cpp
+++ b/unittests/restart_chain_tests.cpp
@@ -63,6 +63,7 @@ class replay_tester : public base_tester {
        control->applied_transaction().connect(on_applied_trx);
        control->startup( [](){}, []() { return false; }, genesis );
     });
+    apply_blocks();
   }
 
   using base_tester::produce_block;
@@ -222,7 +223,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_light_validation_restart_from_block_log, T,
     }
  });
 
-  BOOST_REQUIRE(other_trace);
   BOOST_REQUIRE(other_trace->receipt);
   BOOST_CHECK_EQUAL(other_trace->receipt->status, transaction_receipt::executed);
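For tests that reopen a chain with call_startup_t::no, the pattern added throughout these unittests is: start the controller, then call the tester's new apply_blocks() so reversible blocks still sitting in the fork database are applied before any assertions. A sketch of that pattern, with `copied_config` and `genesis` as placeholders for a previously captured config/genesis pair:

    eosio::testing::tester replay_chain(copied_config, *genesis, eosio::testing::call_startup_t::no);
    replay_chain.control->startup( [](){}, []() -> bool { return false; } );
    replay_chain.apply_blocks(); // drain the fork database before asserting on chain state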