PR review feedback

Aaron Blankstein
2020-12-07 18:34:26 -06:00
parent af479f5ab7
commit b4c7b8878a
8 changed files with 109 additions and 88 deletions

View File

@@ -541,13 +541,13 @@ operations on Bitcoin as well as on the Stacks blockchain. The Stacks
chain also allows addresses to submit STX transfers on the Bitcoin
chain. Such operations are only evaluated by the miner of an anchor block
elected in the burn block that immediately follows the burn block that included the
-operations. For example, if a TransferStxOp occurs in burnchain block 100, then the
+operations. For example, if a `TransferStxOp` occurs in burnchain block 100, then the
Stacks block elected by burnchain block 101 will process that transfer.
In order to submit on the Bitcoin chain, stackers must submit two Bitcoin transactions:
* `PreStxOp`: this operation prepares the Stacks blockchain node to validate the subsequent
-StackStxOp or TransferStxOp.
+`StackStxOp` or `TransferStxOp`.
* `StackStxOp`: this operation executes the `stack-stx` operation.
* `TransferStxOp`: this operation transfers STX from a sender to a recipient
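A minimal sketch of the evaluation rule described above, using hypothetical stand-in types rather than the node's own: an operation observed in burn block N is evaluated by the Stacks block elected in burn block N + 1.

```rust
// Hedged sketch with placeholder types (not the node's real definitions).
#[derive(Debug)]
#[allow(dead_code)]
enum BurnchainOp {
    PreStxOp,
    StackStxOp { ustx: u128 },
    TransferStxOp { ustx: u128 },
}

/// Height of the burn block whose elected Stacks block evaluates an op
/// observed at `burn_height`.
fn evaluated_at(burn_height: u64) -> u64 {
    burn_height + 1
}

fn main() {
    let observed = vec![
        (100u64, BurnchainOp::PreStxOp),
        (100u64, BurnchainOp::TransferStxOp { ustx: 1_000 }),
    ];
    for (height, op) in &observed {
        // e.g. a TransferStxOp in burnchain block 100 is processed by the
        // Stacks block elected by burnchain block 101.
        println!(
            "{:?} in burnchain block {} -> evaluated at block {}",
            op,
            height,
            evaluated_at(*height)
        );
    }
}
```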
@@ -606,4 +606,6 @@ Where `op = $` (ascii encoded).
Where the unsigned integer is big-endian encoded.
-The second Bitcoin output is the B58-encoding of the recipient Stacks address.
+The second Bitcoin output is either a `p2pkh` or `p2sh` output such
+that the recipient Stacks address can be derived from the
+corresponding 20-byte hash (hash160).
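A hedged sketch of the mapping described above, with standalone types: the 8-byte integer width and the address-version bytes are illustrative assumptions for the example, not the node's actual wire definitions.

```rust
// Simplified stand-in for the recipient's Stacks address parts.
struct RecipientAddress {
    version: u8,
    hash160: [u8; 20], // the 20-byte hash carried by the p2pkh/p2sh output
}

/// Decode a big-endian unsigned integer field from the payload
/// (width shown as 8 bytes purely for illustration).
fn decode_amount_be(bytes: [u8; 8]) -> u64 {
    u64::from_be_bytes(bytes)
}

/// Derive the recipient's address parts from the second Bitcoin output: the
/// hash160 is reused directly, and the output type (p2pkh vs p2sh) selects
/// the address version. The version bytes here are placeholders.
fn recipient_from_second_output(hash160: [u8; 20], is_p2sh: bool) -> RecipientAddress {
    let version = if is_p2sh { 0x14 } else { 0x16 };
    RecipientAddress { version, hash160 }
}

fn main() {
    let amount = decode_amount_be([0, 0, 0, 0, 0, 0, 0x03, 0xe8]);
    let recipient = recipient_from_second_output([0u8; 20], false);
    println!(
        "amount = {}, address version = {}, hash160 = {:?}",
        amount, recipient.version, recipient.hash160
    );
}
```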

View File

@@ -520,6 +520,9 @@ impl BitcoinTxInput {
}
fn to_txid(txin: &BtcTxIn) -> (Txid, u32) {
+// bitcoin-rs library (which deps::bitcoin is based on)
+// operates in a different endian-ness for txids than the rest of
+// the codebase. so this method reverses the txid bits.
let mut bits = txin.previous_output.txid.0.clone();
bits.reverse();
(Txid(bits), txin.previous_output.vout)
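A standalone illustration of the byte-order flip the added comment describes, using plain byte arrays instead of library types; since reversal is its own inverse, the same helper converts in both directions.

```rust
// Illustrative only: reverse a 32-byte txid to switch between the
// bitcoin-rs-style byte order and the ordering used elsewhere.
fn flip_txid_order(txid: [u8; 32]) -> [u8; 32] {
    let mut bytes = txid;
    bytes.reverse();
    bytes
}

fn main() {
    let mut txid = [0u8; 32];
    txid[0] = 0xaa; // mark one end so the flip is visible
    let flipped = flip_txid_order(txid);
    assert_eq!(flipped[31], 0xaa);
    // Reversing is an involution, so a round trip recovers the original.
    assert_eq!(flip_txid_order(flipped), txid);
    println!("round trip ok");
}
```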

View File

@@ -641,7 +641,7 @@ impl Burnchain {
/// Try to parse a burnchain transaction into a Blockstack operation
/// `pre_stx_op_map` should contain any valid PreStxOps that occurred before
-/// the currently-being-evaluated tx.
+/// the currently-being-evaluated tx in the same burn block.
pub fn classify_transaction(
burnchain_db: &BurnchainDB,
block_header: &BurnchainBlockHeader,
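A hedged sketch, with simplified stand-in types, of how a caller might accumulate `pre_stx_op_map` while walking a burn block's transactions in order; keying by txid and the lookup shape are assumptions made for illustration only.

```rust
use std::collections::HashMap;

type Txid = [u8; 32];

struct PreStxOp {
    txid: Txid,
}

enum BlockTx {
    PreStx(PreStxOp),
    SpendsPreStx { pre_stx_txid: Txid },
}

fn classify_block(txs: Vec<BlockTx>) -> usize {
    // PreStxOps recorded so far in this burn block, keyed by txid.
    let mut pre_stx_op_map: HashMap<Txid, PreStxOp> = HashMap::new();
    let mut classified = 0;
    for tx in txs {
        match tx {
            // Record each PreStxOp as it is seen; only ops earlier in the
            // same burn block are visible to later transactions.
            BlockTx::PreStx(op) => {
                pre_stx_op_map.insert(op.txid, op);
            }
            // A later StackStx/TransferStx-style op is only classified if
            // its associated PreStxOp is present in the map.
            BlockTx::SpendsPreStx { pre_stx_txid } => {
                if pre_stx_op_map.contains_key(&pre_stx_txid) {
                    classified += 1;
                }
            }
        }
    }
    classified
}

fn main() {
    let txs = vec![
        BlockTx::PreStx(PreStxOp { txid: [1u8; 32] }),
        BlockTx::SpendsPreStx { pre_stx_txid: [1u8; 32] },
    ];
    assert_eq!(classify_block(txs), 1);
    println!("classified 1 op");
}
```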

View File

@@ -146,7 +146,6 @@ impl TransferStxOp {
}
/// parse a StackStxOp
-/// `pox_sunset_ht` is the height at which PoX *disables*
pub fn parse_from_tx(
block_height: u64,
block_hash: &BurnchainHeaderHash,

View File

@@ -1718,50 +1718,54 @@ fn test_stx_transfer_btc_ops() {
let stacks_tip =
SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap();
let mut chainstate = get_chainstate(path);
-let (sender_balance, burn_height) = chainstate.with_read_only_clarity_tx(
-&sort_db.index_conn(),
-&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
-|conn| {
-conn.with_clarity_db_readonly(|db| {
-(
-db.get_account_stx_balance(&stacker.clone().into()),
-db.get_current_block_height(),
-)
-})
-},
-);
+let (sender_balance, burn_height) = chainstate
+.with_read_only_clarity_tx(
+&sort_db.index_conn(),
+&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
+|conn| {
+conn.with_clarity_db_readonly(|db| {
+(
+db.get_account_stx_balance(&stacker.clone().into()),
+db.get_current_block_height(),
+)
+})
+},
+)
+.unwrap();
-let (recipient_balance, burn_height) = chainstate.with_read_only_clarity_tx(
-&sort_db.index_conn(),
-&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
-|conn| {
-conn.with_clarity_db_readonly(|db| {
-(
-db.get_account_stx_balance(&recipient.clone().into()),
-db.get_current_block_height(),
-)
-})
-},
-);
+let (recipient_balance, burn_height) = chainstate
+.with_read_only_clarity_tx(
+&sort_db.index_conn(),
+&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
+|conn| {
+conn.with_clarity_db_readonly(|db| {
+(
+db.get_account_stx_balance(&recipient.clone().into()),
+db.get_current_block_height(),
+)
+})
+},
+)
+.unwrap();
if ix > 2 {
assert_eq!(
-sender_balance.get_available_balance_at_block(burn_height as u64),
+sender_balance.get_available_balance_at_burn_block(burn_height as u64),
(balance as u128) - transfer_amt,
"Transfer should have decremented balance"
);
assert_eq!(
-recipient_balance.get_available_balance_at_block(burn_height as u64),
+recipient_balance.get_available_balance_at_burn_block(burn_height as u64),
transfer_amt,
"Recipient should have incremented balance"
);
} else {
assert_eq!(
-sender_balance.get_available_balance_at_block(burn_height as u64),
+sender_balance.get_available_balance_at_burn_block(burn_height as u64),
balance as u128,
);
assert_eq!(
-recipient_balance.get_available_balance_at_block(burn_height as u64),
+recipient_balance.get_available_balance_at_burn_block(burn_height as u64),
0,
);
}
@@ -1814,17 +1818,19 @@ fn test_stx_transfer_btc_ops() {
let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap();
let mut chainstate = get_chainstate(path);
assert_eq!(
-chainstate.with_read_only_clarity_tx(
-&sort_db.index_conn(),
-&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
-|conn| conn
-.with_readonly_clarity_env(
-PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(),
-LimitedCostTracker::new_free(),
-|env| env.eval_raw("block-height")
-)
-.unwrap()
-),
+chainstate
+.with_read_only_clarity_tx(
+&sort_db.index_conn(),
+&StacksBlockId::new(&stacks_tip.0, &stacks_tip.1),
+|conn| conn
+.with_readonly_clarity_env(
+PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(),
+LimitedCostTracker::new_free(),
+|env| env.eval_raw("block-height")
+)
+.unwrap()
+)
+.unwrap(),
Value::UInt(50)
);

View File

@@ -3793,44 +3793,43 @@ impl StacksChainState {
clarity_tx: &mut ClarityTx,
operations: Vec<TransferStxOp>,
) -> Vec<StacksTransactionReceipt> {
-let mut all_receipts = vec![];
-// NOTE: do _not_ return from this function without replacing this original tracker
-let original_tracker = clarity_tx.set_cost_tracker(LimitedCostTracker::new_free());
-for transfer_stx_op in operations.into_iter() {
-let TransferStxOp {
-sender,
-recipient,
-transfered_ustx,
-txid,
-burn_header_hash,
-..
-} = transfer_stx_op;
-let result = clarity_tx.connection().as_transaction(|tx| {
-tx.run_stx_transfer(&sender.into(), &recipient.into(), transfered_ustx)
+let (all_receipts, _) =
+clarity_tx.with_temporary_cost_tracker(LimitedCostTracker::new_free(), |clarity_tx| {
+operations
+.into_iter()
+.filter_map(|transfer_stx_op| {
+let TransferStxOp {
+sender,
+recipient,
+transfered_ustx,
+txid,
+burn_header_hash,
+..
+} = transfer_stx_op;
+let result = clarity_tx.connection().as_transaction(|tx| {
+tx.run_stx_transfer(&sender.into(), &recipient.into(), transfered_ustx)
+});
+match result {
+Ok((value, _, events)) => Some(StacksTransactionReceipt {
+transaction: TransactionOrigin::Burn(txid),
+events,
+result: value,
+post_condition_aborted: false,
+stx_burned: 0,
+contract_analysis: None,
+execution_cost: ExecutionCost::zero(),
+}),
+Err(e) => {
+info!("TransferStx burn op processing error.";
+"error" => ?e,
+"txid" => %txid,
+"burn_block" => %burn_header_hash);
+None
+}
+}
+})
+.collect()
+});
-match result {
-Ok((value, _, events)) => {
-let receipt = StacksTransactionReceipt {
-transaction: TransactionOrigin::Burn(txid),
-events,
-result: value,
-post_condition_aborted: false,
-stx_burned: 0,
-contract_analysis: None,
-execution_cost: ExecutionCost::zero(),
-};
-all_receipts.push(receipt);
-}
-Err(e) => {
-info!("TransferStx burn op processing error.";
-"error" => ?e,
-"txid" => %txid,
-"burn_block" => %burn_header_hash);
-}
-};
-}
-clarity_tx.set_cost_tracker(original_tracker);
all_receipts
}

View File

@@ -302,12 +302,28 @@ impl<'a> ClarityTx<'a> {
self.block.cost_so_far()
}
-/// set the clarity tx's cost tracker
-/// returns the replaced cost tracker
-pub fn set_cost_tracker(&mut self, new_tracker: LimitedCostTracker) -> LimitedCostTracker {
+/// Set the ClarityTx's cost tracker.
+/// Returns the replaced cost tracker.
+fn set_cost_tracker(&mut self, new_tracker: LimitedCostTracker) -> LimitedCostTracker {
self.block.set_cost_tracker(new_tracker)
}
+/// Run `todo` in this ClarityTx with `new_tracker`.
+/// Returns the result of `todo` and the `new_tracker`
+pub fn with_temporary_cost_tracker<F, R>(
+&mut self,
+new_tracker: LimitedCostTracker,
+todo: F,
+) -> (R, LimitedCostTracker)
+where
+F: FnOnce(&mut ClarityTx) -> R,
+{
+let original_tracker = self.set_cost_tracker(new_tracker);
+let result = todo(self);
+let new_tracker = self.set_cost_tracker(original_tracker);
+(result, new_tracker)
+}
#[cfg(test)]
pub fn commit_block(self) -> () {
self.block.commit_block();
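A simplified, standalone rendition of the swap/run/restore pattern this helper encapsulates, using placeholder `Tx` and `CostTracker` types rather than the real Clarity ones; the `process_transfer_ops` hunk above shows the actual call site. Like the real helper, the sketch restores the original tracker only on normal return.

```rust
// Placeholder types standing in for ClarityTx / LimitedCostTracker.
struct CostTracker {
    free: bool,
}

struct Tx {
    tracker: CostTracker,
}

impl Tx {
    /// Swap in a new tracker and hand back the one it replaced.
    fn set_cost_tracker(&mut self, new_tracker: CostTracker) -> CostTracker {
        std::mem::replace(&mut self.tracker, new_tracker)
    }

    /// Run `todo` with `new_tracker` installed, then restore the original
    /// tracker and return both the closure's result and the used tracker.
    fn with_temporary_cost_tracker<F, R>(&mut self, new_tracker: CostTracker, todo: F) -> (R, CostTracker)
    where
        F: FnOnce(&mut Tx) -> R,
    {
        let original = self.set_cost_tracker(new_tracker);
        let result = todo(self);
        let used = self.set_cost_tracker(original);
        (result, used)
    }
}

fn main() {
    let mut tx = Tx { tracker: CostTracker { free: false } };
    let (ran_free, _used_tracker) =
        tx.with_temporary_cost_tracker(CostTracker { free: true }, |tx| {
            // Work in here is metered by the temporary (free) tracker.
            tx.tracker.free
        });
    assert!(ran_free);
    assert!(!tx.tracker.free); // original tracker restored afterwards
    println!("original tracker restored");
}
```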

View File

@@ -1074,10 +1074,6 @@ impl InitializedNeonNode {
debug!("Using key {:?}", &key.vrf_public_key);
// sleep a little before building the anchor block, to give any broadcasted
// microblocks time to propagate.
-info!(
-"Sleeping {} before issuing tenure",
-self.sleep_before_tenure
-);
thread::sleep(std::time::Duration::from_millis(self.sleep_before_tenure));
self.relay_channel
.send(RelayerDirective::RunTenure(key.clone(), burnchain_tip))