From 4d8650f02655f0414a64d292b24339c15cc65073 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 27 Jul 2020 00:05:02 -0400 Subject: [PATCH 1/7] remove all calls to indexed_commit() --- src/chainstate/burn/db/sortdb.rs | 1 - src/chainstate/stacks/db/mod.rs | 4 ---- 2 files changed, 5 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index bb1f85df6..c2b4fe713 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -1911,7 +1911,6 @@ impl <'a> SortitionHandleTx <'a> { self.put_indexed_begin(&parent_snapshot.sortition_id, &snapshot.sortition_id)?; let root_hash = self.put_indexed_all(&keys, &values)?; - self.indexed_commit()?; self.context.chain_tip = snapshot.sortition_id.clone(); Ok(root_hash) } diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index 6ec1a8ce9..0feb62713 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -716,8 +716,6 @@ impl StacksChainState { .map_err(Error::DBError)?; let first_root_hash = headers_tx.put_indexed_all(&vec![], &vec![]) .map_err(Error::DBError)?; - headers_tx.indexed_commit() - .map_err(Error::DBError)?; test_debug!("Boot code headers index_commit {}-{}", &parent_hash, &first_index_hash); let first_tip_info = StacksHeaderInfo::genesis_block_header_info(first_root_hash); @@ -1096,8 +1094,6 @@ impl StacksChainState { .map_err(Error::DBError)?; let root_hash = headers_tx.put_indexed_all(&indexed_keys, &indexed_values) .map_err(Error::DBError)?; - headers_tx.indexed_commit() - .map_err(Error::DBError)?; test_debug!("Headers index_commit {}-{}", &parent_hash, &new_tip.index_block_hash(new_burn_block)); let new_tip_info = StacksHeaderInfo { From 2de0598a7c8768275b64cce20c9c1a7fb09bec6f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 27 Jul 2020 00:05:13 -0400 Subject: [PATCH 2/7] remove indexed_commit; let commit() take care of storing the MARF'ed data and then the index --- src/util/db.rs | 
11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/util/db.rs b/src/util/db.rs index b751560fc..75ac8d3d6 100644 --- a/src/util/db.rs +++ b/src/util/db.rs @@ -660,20 +660,13 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { Ok(root_hash) } - /// Commit the indexed data - pub fn indexed_commit(&mut self) -> Result<(), Error> { - if self.block_linkage.is_some() { - self.index.commit().map_err(Error::IndexError)?; - self.block_linkage = None; - } - Ok(()) - } - /// Commit the tx pub fn commit(mut self) -> Result<(), Error> { let tx = self._tx.take(); + test_debug!("Indexed-commit: storage"); tx.unwrap().commit().map_err(Error::SqliteError)?; if self.block_linkage.is_some() { + test_debug!("Indexed-commit: MARF index"); self.index.commit().map_err(Error::IndexError)?; self.block_linkage = None; } From 5ba2293a2ce72e21045e28ab1e22e01ffde0aaa5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 27 Jul 2020 01:13:25 -0400 Subject: [PATCH 3/7] note on how to use get_indexed --- src/chainstate/burn/db/sortdb.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index c2b4fe713..1e2c5484e 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -586,6 +586,11 @@ impl <'a> SortitionDBTx <'a> { /// Mark an existing snapshot's stacks block as accepted at a particular burn chain tip, and calculate and store its arrival index. /// If this Stacks block extends the canonical stacks chain tip, then also update the memoized canonical /// stacks chain tip metadata on the burn chain tip. + // TODO: this method's inner call to get_indexed() occurs within a MARF transaction, which + // means it will clone() the underlying TrieRAM. Until this is rectified, care should be taken + // to ensure that no keys are inserted until after this method is called. 
This should already + // be the case, since the only time keys are inserted into the sortition DB MARF is when the + // next snapshot is processed (whereas this method is called when a Stacks epoch is processed). fn set_stacks_block_accepted_at_tip(&mut self, burn_tip: &BlockSnapshot, burn_header_hash: &BurnchainHeaderHash, parent_stacks_block_hash: &BlockHeaderHash, stacks_block_hash: &BlockHeaderHash, stacks_block_height: u64) -> Result<(), db_error> { let arrival_index = SortitionDB::get_max_arrival_index(self)?; @@ -619,12 +624,13 @@ impl <'a> SortitionDBTx <'a> { let height_opt = match SortitionDB::get_accepted_stacks_block_pointer(self, &burn_tip.burn_header_hash, parent_stacks_block_hash)? { // this block builds on a block accepted _after_ this burn chain tip was processed? Some(accepted_header) => Some(accepted_header.height), - None => + None => { match self.get_indexed(&burn_tip.sortition_id, &parent_key)? { // this block builds on a block accepted _before_ this burn chain tip was processed? 
Some(height_str) => Some(height_str.parse::<u64>().expect(&format!("BUG: MARF stacks block key '{}' does not map to a u64", parent_key))), None => None } + } }; match height_opt { Some(height) => { From d569c0e6ef19c1d1ac56d9bce2e7a4d6dcceb70c Mon Sep 17 00:00:00 2001 From: Jude Nelson <jude@blockstack.com> Date: Mon, 27 Jul 2020 01:13:46 -0400 Subject: [PATCH 4/7] fix test so it can run more than once --- src/chainstate/stacks/index/marf.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/chainstate/stacks/index/marf.rs b/src/chainstate/stacks/index/marf.rs index c53fc9200..f5d24d30e 100644 --- a/src/chainstate/stacks/index/marf.rs +++ b/src/chainstate/stacks/index/marf.rs @@ -2358,6 +2358,10 @@ mod test { #[test] fn test_marf_unconfirmed() { + if fs::metadata("/tmp/test_marf_unconfirmed").is_ok() { + fs::remove_file("/tmp/test_marf_unconfirmed").unwrap(); + } + let f = TrieFileStorage::<StacksBlockId>::open_unconfirmed("/tmp/test_marf_unconfirmed").unwrap(); let mut marf = MARF::<StacksBlockId>::from_storage(f); From 2ebefe9603f20de19238a43b0a9a6ef06f00b6d5 Mon Sep 17 00:00:00 2001 From: Jude Nelson <jude@blockstack.com> Date: Mon, 27 Jul 2020 01:13:59 -0400 Subject: [PATCH 5/7] make it so you can reopen-readonly a trie file storage that has a trie in progress.
it's not the most efficient thing to do if there are keys pending, but it'll do for now --- src/chainstate/stacks/index/storage.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/chainstate/stacks/index/storage.rs b/src/chainstate/stacks/index/storage.rs index e8094054b..d6b4212e0 100644 --- a/src/chainstate/stacks/index/storage.rs +++ b/src/chainstate/stacks/index/storage.rs @@ -705,21 +705,17 @@ impl TrieFileStorage { } pub fn reopen_readonly(&self) -> Result<TrieFileStorage<T>, Error> { - if let Some((ref block_bhh, _)) = self.last_extended { - error!("MARF storage already opened to in-progress block {}", block_bhh); - return Err(Error::InProgressError); - } - let db = Connection::open_with_flags(&self.db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)?; db.busy_handler(Some(tx_busy_handler))?; trace!("Make read-only view of TrieFileStorage: {}", &self.db_path); - + + // TODO: borrow self.last_extended and self.block_hash_cache; don't copy them let ret = TrieFileStorage { db_path: self.db_path.clone(), db: db, - last_extended: None, + last_extended: self.last_extended.clone(), cur_block: self.cur_block.clone(), cur_block_id: self.cur_block_id.clone(), From b1dab6f1398633bc548e95d7f5471d0042c808b2 Mon Sep 17 00:00:00 2001 From: Jude Nelson <jude@blockstack.com> Date: Mon, 27 Jul 2020 01:14:25 -0400 Subject: [PATCH 6/7] raise debug level of p2p message validation --- src/net/p2p.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 17516a480..0fe372b79 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -2405,7 +2405,7 @@ impl PeerNetwork { } }; - test_debug!("{:?}: Process BlocksAvailable from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_blocks.available.len()); + debug!("{:?}: Process BlocksAvailable from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_blocks.available.len()); for (consensus_hash, burn_header_hash) in new_blocks.available.iter() { let block_sortition_height = 
match self.handle_unsolicited_inv_update(sortdb, event_id, &outbound_neighbor_key, consensus_hash, burn_header_hash, false) { @@ -2436,7 +2436,7 @@ impl PeerNetwork { } }; - test_debug!("{:?}: Process MicroblocksAvailable from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_mblocks.available.len()); + debug!("{:?}: Process MicroblocksAvailable from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_mblocks.available.len()); for (consensus_hash, burn_header_hash) in new_mblocks.available.iter() { let mblock_sortition_height = match self.handle_unsolicited_inv_update(sortdb, event_id, &outbound_neighbor_key, consensus_hash, burn_header_hash, true) { @@ -2467,7 +2467,7 @@ impl PeerNetwork { } }; - test_debug!("{:?}: Process BlocksData from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_blocks.blocks.len()); + debug!("{:?}: Process BlocksData from {:?} with {} entries", &self.local_peer, outbound_neighbor_key, new_blocks.blocks.len()); for (burn_header_hash, block) in new_blocks.blocks.iter() { let sortid = SortitionId::stubbed(burn_header_hash); @@ -2494,6 +2494,7 @@ impl PeerNetwork { /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. /// Return messages that we couldn't handle here, but key them by neighbor, not event. + /// Drop invalid messages. 
fn handle_unsolicited_messages(&mut self, sortdb: &SortitionDB, mut unsolicited: HashMap<usize, Vec<StacksMessage>>) -> Result<HashMap<NeighborKey, Vec<StacksMessage>>, net_error> { let mut unhandled : HashMap<NeighborKey, Vec<StacksMessage>> = HashMap::new(); for (event_id, messages) in unsolicited.drain() { From 72deb87f10044f13469f9d749ee1125b9e56ddcc Mon Sep 17 00:00:00 2001 From: Jude Nelson <jude@blockstack.com> Date: Mon, 27 Jul 2020 01:14:42 -0400 Subject: [PATCH 7/7] add documentation on the use of get_indexed() within a index db conn transaction --- src/util/db.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/util/db.rs b/src/util/db.rs index 75ac8d3d6..f272fec20 100644 --- a/src/util/db.rs +++ b/src/util/db.rs @@ -626,6 +626,10 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { } /// Get a value from the fork index + /// NOTE: until the TrieFileStorage implementation of reopen_readonly() is made zero-copy -- + /// namely, made so it doesn't just naively clone the underlying TrieRAM when reopening + /// read-only, the caller should make sure to only use the get_indexed() _before_ writing any + /// MARF key/value pairs. Doing so afterwards will clone all uncommitted trie state. pub fn get_indexed(&self, header_hash: &T, key: &str) -> Result<Option<String>, Error> { get_indexed(self.tx(), &self.index, header_hash, key) }