Branch off Jude's work on adding storage and namespacing.

This commit is contained in:
Jude Nelson
2015-07-26 06:34:20 -04:00
parent f731b49e74
commit 31512f0bc6
21 changed files with 1300 additions and 383 deletions

2
.gitignore vendored
View File

@@ -14,8 +14,6 @@ var
sdist
develop-eggs
.installed.cfg
lib
lib64
__pycache__
# Installer logs

View File

@@ -172,11 +172,70 @@ def run_cli():
# ------------------------------------
subparser = subparsers.add_parser(
'storedata',
help='<data> | data value to store in DHT')
'namespace_define',
help='<namespace_id> <lifetime> <base_name_cost> <cost_decay_rate> <privatekey> | define a namespace, in preparation for importing names.')
subparser.add_argument(
'namespace_id', type=str,
help='the human-readable namespace identifier')
subparser.add_argument(
'lifetime', type=int,
help='the number of blocks for which a name will be valid (any value less than zero means "forever")')
subparser.add_argument(
'base_name_cost', type=int,
help='the cost (in satoshis) for a 1-character name in this namespace')
subparser.add_argument(
'cost_decay_rate', type=float,
help='the rate at which the value of a name decays, based on its length: if L is the length, R is the rate, and B is the base name cost, then the cost per name shall be ceil(B / (R^(L-1)))')
subparser.add_argument(
'privatekey', type=str,
help='the privatekey of the owner Bitcoin address')
# ------------------------------------
subparser = subparsers.add_parser(
'namespace_begin',
help='<namespace_id> <privatekey> | begin the namespace, completing its definition and opening it for registration.')
subparser.add_argument(
'namespace_id', type=str,
help='the human-readable namespace identifier')
subparser.add_argument(
'privatekey', type=str,
help='the privatekey of the owner Bitcoin address')
# ------------------------------------
subparser = subparsers.add_parser(
'putdata',
help='<data> | store unsigned data into the DHT')
subparser.add_argument(
'data', type=str,
help='the data to store in DHT')
# ------------------------------------
subparser = subparsers.add_parser(
'signdata',
help='<name> <data> <privatekey> | data value to sign in the blockchain')
subparser.add_argument(
'name', type=str,
help='the name that owns this data')
subparser.add_argument(
'data', type=str,
help='the data to sign')
subparser.add_argument(
'privatekey', type=str,
help='the private key associated with the name')
# ------------------------------------
subparser = subparsers.add_parser(
'putsigned',
help='<name> <data> <privatekey> | data value to sign in the blockchain')
subparser.add_argument(
'name', type=str,
help='the name that owns this data')
subparser.add_argument(
'data', type=str,
help='the data to sign')
subparser.add_argument(
'privatekey', type=str,
help='the private key associated with the name')
# ------------------------------------
subparser = subparsers.add_parser(
@@ -186,6 +245,30 @@ def run_cli():
'hash', type=str,
help='the hash of the data, used as lookup key for DHT')
# ------------------------------------
subparser = subparsers.add_parser(
'verifydata',
help='<name> <hash> | verify that a datum was signed by a user')
subparser.add_argument(
'name', type=str,
help='the name of the user that signed the data')
subparser.add_argument(
'hash', type=str,
help='the hash of the data')
# ------------------------------------
subparser = subparsers.add_parser(
'getverified',
help='<name> <hash> | get the data from DHT for given hash, and verify that it was signed by a user')
subparser.add_argument(
'name', type=str,
help='the name of the user that signed the data')
subparser.add_argument(
'hash', type=str,
help='the hash of the data, used as lookup key for DHT')
# ------------------------------------
subparser = subparsers.add_parser(
'lookup',
help='<name> | get the record for a given name')
@@ -217,33 +300,71 @@ def run_cli():
elif args.action == 'update':
logger.debug('Updating %s', args.name)
client = proxy.callRemote('update', args.name, args.data,
args.privatekey)
client = proxy.callRemote('update', args.name, args.data, args.privatekey)
elif args.action == 'transfer':
logger.debug('Transfering %s', args.name)
client = proxy.callRemote('transfer', args.name, args.address,
args.privatekey)
client = proxy.callRemote('transfer', args.name, args.address, args.privatekey)
elif args.action == 'renew':
logger.debug('Renewing %s', args.name)
client = proxy.callRemote('renew', args.name, args.privatekey)
elif args.action == 'storedata':
reply = {}
elif args.action == 'namespace_define':
logger.debug('Defining namespace %s' % args.namespace_id)
client = proxy.callRemote('namespace_define', args.namespace_id, args.lifetime, args.base_name_cost, args.cost_decay_rate, args.privatekey )
elif args.action == 'namespace_begin':
logger.debug('Starting namespace %s' % args.namespace_id)
client = proxy.callRemote('namespace_begin', args.namespace_id, args.privatekey )
elif args.action == 'putdata':
value = args.data
key = coinkit.hex_hash160(value)
logger.debug('Storing %s', value)
client = proxy.callRemote('set', key, value)
client = proxy.callRemote('put', key, value)
elif args.action == 'signdata':
name = args.name
value = args.data
key = coinkit.hex_hash160(value)
logger.debug("Signing hash '%s' by '%s'", key, name)
client = proxy.callRemote('signdata', name, key, value, args.privatekey)
elif args.action == 'putsigned':
name = args.name
value = args.data
key = coinkit.hex_hash160(value)
logger.debug("Storing and signing hash '%s' by '%s'", key, name)
client = proxy.callRemote('putsigned', name, key, value, args.privatekey )
elif args.action == 'verifydata':
name = args.name
key = args.hash
logger.debug("Verifying that hash '%s' was signed by '%s'", key, name )
client = proxy.callRemote('verifydata', name, key )
elif args.action == 'getdata':
logger.debug('Getting %s', args.hash)
client = proxy.callRemote('get', args.hash)
client.addCallback(getFormat)
elif args.action == 'getverified':
logger.debug("Getting %s and verifying that '%s' put it", args.hash, args.name )
client = proxy.callRemote('getverified', args.name, args.hash )
elif args.action == 'lookup':
logger.debug('Looking up %s', args.name)
client = proxy.callRemote('lookup', args.name)

View File

@@ -5,7 +5,7 @@ from lib import config
from opennamed import bitcoind
# DEPRECATED
def refresh_index(first_block, last_block):
"""
"""

View File

@@ -14,13 +14,13 @@ def is_b40(s):
def b40_to_bin(s):
if not is_b40(s):
raise ValueError('s must only contain characters in the b40 char set')
raise ValueError('%s must only contain characters in the b40 char set' % s)
return unhexlify(charset_to_hex(s, B40_CHARS))
def bin_to_b40(s):
if not isinstance(s, str):
raise ValueError('s must be a string')
raise ValueError('%s must be a string' % s)
return hex_to_charset(hexlify(s), B40_CHARS)

View File

@@ -2,20 +2,27 @@
def get_nulldata(tx):
    """
    Extract the nulldata payload from a bitcoind transaction dict.

    tx: a transaction as returned by bitcoind's JSON-RPC (expected to
    carry a 'vout' list of outputs, each with a 'scriptPubKey' dict).

    Returns the payload (the second asm token) of the first output whose
    script type is 'nulldata' and whose asm has exactly two parts, or
    None when tx has no 'vout' field or no such output exists.
    """
    if 'vout' not in tx:
        return None

    for vout in tx['vout']:
        # skip malformed outputs that carry no script
        if 'scriptPubKey' not in vout:
            continue

        spk = vout['scriptPubKey']
        asm_parts = str(spk.get('asm')).split(' ')

        # a nulldata script's asm is "<OP_RETURN-like opcode> <payload>";
        # only accept the exact two-token form
        if str(spk.get('type')) == 'nulldata' and len(asm_parts) == 2:
            return asm_parts[1]

    return None

View File

@@ -26,13 +26,11 @@ import ssl
import threading
import time
import socket
from multiprocessing import Pool
import config
import cache
from utilitybelt import is_valid_int
from . import get_nameops_in_block, get_nameops_in_blocks, build_nameset, NameDb
from ConfigParser import SafeConfigParser
create_ssl_authproxy = False
do_wrap_socket = False
@@ -56,14 +54,7 @@ log.addHandler(console)
from bitcoinrpc.authproxy import AuthServiceProxy
bitcoin_opts = {
"bitcoind_user": config.BITCOIND_USER,
"bitcoind_passwd": config.BITCOIND_PASSWD,
"bitcoind_server": config.BITCOIND_SERVER,
"bitcoind_port": config.BITCOIND_PORT,
"bitcoind_use_https": config.BITCOIND_USE_HTTPS
}
bitcoin_opts = {}
class BitcoindConnection( httplib.HTTPSConnection ):
"""
@@ -90,12 +81,21 @@ def create_bitcoind_connection(
rpc_password=None,
server=None,
port=None,
use_https=None ):
use_https=None,
config_file=None ):
""" creates an auth service proxy object, to connect to bitcoind
"""
global bitcoin_opts, do_wrap_socket, create_ssl_authproxy
if len(bitcoin_opts) == 0:
if config_file is None:
config_file = get_config_file( config.BLOCKSTORED_WORKING_DIR, config.BLOCKSTORED_CONFIG_FILE )
log.debug("Loading default bitcoind options from %s" % config_file)
bitcoin_opts = config.default_bitcoind_opts( config_file )
if rpc_username is None:
rpc_username = bitcoin_opts.get( "bitcoind_user" )
@@ -158,9 +158,6 @@ def get_config_file( working_dir, config_file ):
return os.path.join(working_dir, config_file )
from ConfigParser import SafeConfigParser
def prompt_user_for_bitcoind_details( working_dir, config_file ):
"""
"""
@@ -206,61 +203,20 @@ def prompt_user_for_bitcoind_details( working_dir, config_file ):
fout = open(config_file, 'w')
parser.write(fout)
return create_bitcoind_connection()
def refresh_index( bitcoind, first_block, last_block, working_dir, initial_index=False):
"""
"""
import workpool
working_dir = get_working_dir( working_dir )
namespace_file = os.path.join( working_dir, config.BLOCKSTORED_NAMESPACE_FILE)
snapshots_file = os.path.join( working_dir, config.BLOCKSTORED_SNAPSHOTS_FILE)
lastblock_file = os.path.join( working_dir, config.BLOCKSTORED_LASTBLOCK_FILE)
start = datetime.datetime.now()
num_workers = config.MULTIPROCESS_NUM_WORKERS
nameop_sequence = []
# feed workers bitcoind this way
workpool.multiprocess_bitcoind_factory( create_bitcoind_connection )
workpool = Pool( processes=num_workers )
# get *all* the block nameops!
nameop_sequence = get_nameops_in_blocks( workpool, range(first_block, last_block+1) )
workpool.close()
workpool.join()
nameop_sequence.sort()
time_taken = "%s seconds" % (datetime.datetime.now() - start).seconds
log.info(time_taken)
db = get_namedb()
merkle_snapshot = build_nameset(db, nameop_sequence)
db.save_names(namespace_file)
db.save_snapshots(snapshots_file)
merkle_snapshot = "merkle snapshot: %s\n" % merkle_snapshot
log.info(merkle_snapshot)
log.info(db.name_records)
fout = open(lastblock_file, 'w') # to overwrite
fout.write(str(last_block))
fout.close()
def get_index_range( bitcoind, working_dir, start_block=0):
"""
"""
from config import FIRST_BLOCK_MAINNET
from config import FIRST_BLOCK_MAINNET, FIRST_BLOCK_TESTNET, TESTNET
if start_block == 0:
start_block = FIRST_BLOCK_MAINNET
if TESTNET:
start_block = FIRST_BLOCK_TESTNET
else:
start_block = FIRST_BLOCK_MAINNET
try:
current_block = int(bitcoind.getblockcount())
@@ -327,24 +283,16 @@ def init_bitcoind( working_dir, config_file ):
return create_bitcoind_connection()
def parse_bitcoind_args( return_parser=False ):
def parse_bitcoind_args( return_parser=False, parser=None ):
"""
Get bitcoind command-line arguments.
Optionally return the parser as well.
"""
global bitcoin_opts
opts = {}
bitcoin_opts = {
"bitcoind_user": config.BITCOIND_USER,
"bitcoind_passwd": config.BITCOIND_PASSWD,
"bitcoind_server": config.BITCOIND_SERVER,
"bitcoind_port": config.BITCOIND_PORT,
"bitcoind_use_https": config.BITCOIND_USE_HTTPS
}
parser = argparse.ArgumentParser(
description='Blockstore Core Daemon version {}'.format(config.VERSION))
if parser is None:
parser = argparse.ArgumentParser( description='Blockstore Core Daemon version {}'.format(config.VERSION))
parser.add_argument(
'--bitcoind-server',
@@ -363,24 +311,26 @@ def parse_bitcoind_args( return_parser=False ):
help='use HTTPS to connect to bitcoind')
args, _ = parser.parse_known_args()
# propagate options
for (argname, config_name) in zip( ["bitcoind_server", "bitcoind_port", "bitcoind_user", "bitcoind_passwd"], \
["BITCOIND_SERVER", "BITCOIND_PORT", "BITCOIND_USER", "BITCOIND_PASSWD"] ):
for (argname, config_name) in zip( ["bitcoind_server", "bitcoind_port", "bitcoind_user", "bitcoind_passwd", "bitcoind_use_https"], \
["BITCOIND_SERVER", "BITCOIND_PORT", "BITCOIND_USER", "BITCOIND_PASSWD", "BITCOIND_USE_HTTPS"] ):
if hasattr( args, argname ) and getattr( args, argname ) is not None:
bitcoin_opts[ argname ] = getattr( args, argname )
opts[ argname ] = getattr( args, argname )
setattr( config, config_name, getattr( args, argname ) )
if hasattr( args, "bitcoind_use_https" ):
if args.bitcoind_use_https:
config.BITCOIND_USE_HTTPS = True
bitcoin_opts[ "bitcoind_use_https" ] = True
if return_parser:
return bitcoin_opts, parser
return opts, parser
else:
return bitcoin_opts
return opts
def setup( _bitcoin_opts ):
"""
Set up the daemon--give it information it will need to carry out the above methods.
"""
global bitcoin_opts
bitcoin_opts = _bitcoin_opts

View File

@@ -8,8 +8,10 @@
import os
from ConfigParser import SafeConfigParser
import schemas
DEBUG = True
TESTNET = False
TESTNET = True
TESTSET = True
""" constants
@@ -41,15 +43,29 @@ BLOCKSTORED_TAC_FILE = 'blockstored.tac'
BLOCKSTORED_WORKING_DIR = '.blockstore'
BLOCKSTORED_NAMESPACE_FILE = 'namespace.txt'
BLOCKSTORED_SNAPSHOTS_FILE = 'snapshots.txt'
BLOCKSTORED_STORAGEDB_FILE = 'storagedb.txt'
BLOCKSTORED_LASTBLOCK_FILE = 'lastblock.txt'
BLOCKSTORED_CONFIG_FILE = 'blockstore.ini'
DEFAULT_BLOCKMIRRORD_PORT = 6266 # NOTE(review): 6263 spells 'NAME' on a phone keypad, but the default here is 6266 — confirm the intended port
BLOCKMIRRORD_PID_FILE = 'blockmirrord.pid'
BLOCKMIRRORD_LOG_FILE = 'blockmirrord.log'
BLOCKMIRRORD_WORKING_DIR = '.blockmirror'
BLOCKMIRRORD_CONFIG_FILE = 'blockmirror.ini'
try:
BLOCKSTORED_SERVER = os.environ['BLOCKSTORED_SERVER']
BLOCKSTORED_PORT = os.environ['BLOCKSTORED_PORT']
BLOCKMIRRORD_SERVER = os.environ['BLOCKMIRRORD_SERVER']
BLOCKMIRRORD_PORT = os.environ['BLOCKMIRRORD_PORT']
except KeyError:
BLOCKSTORED_SERVER = 'localhost'
BLOCKSTORED_PORT = DEFAULT_BLOCKSTORED_PORT
BLOCKMIRRORD_SERVER = 'localhost'
BLOCKMIRRORD_PORT = DEFAULT_BLOCKMIRRORD_PORT
""" DHT configs
"""
@@ -63,37 +79,108 @@ DEFAULT_DHT_SERVERS = [('dht.openname.org', DHT_SERVER_PORT),
STORAGE_TTL = 3 * SECONDS_PER_YEAR
from os.path import expanduser
home = expanduser("~")
working_dir = os.path.join(home, BLOCKSTORED_WORKING_DIR)
config_file = os.path.join(working_dir, BLOCKSTORED_CONFIG_FILE)
parser = SafeConfigParser()
parser.read(config_file)
DEFAULT_BITCOIND_SERVER = 'btcd.onename.com'
if parser.has_section('bitcoind'):
BITCOIND_SERVER = None
BITCOIND_PORT = None
BITCOIND_USER = None
BITCOIND_PASSWD = None
BITCOIND_USE_HTTPS = None
BITCOIND_SERVER = parser.get('bitcoind', 'server')
BITCOIND_PORT = parser.get('bitcoind', 'port')
BITCOIND_USER = parser.get('bitcoind', 'user')
BITCOIND_PASSWD = parser.get('bitcoind', 'passwd')
use_https = parser.get('bitcoind', 'use_https')
""" Caching
"""
if use_https.lower() == "yes" or use_https.lower() == "y":
BITCOIND_USE_HTTPS = True
else:
BITCOIND_USE_HTTPS = False
# cache for raw transactions: map txid to tx
CACHE_ENABLE = False
CACHE_BUFLEN = 10000
CACHE_ROOT = os.path.expanduser("~/.blockstore/cache")
CACHE_TX_DIR = os.path.join( CACHE_ROOT, "tx_data" )
CACHE_BLOCK_HASH_DIR = os.path.join( CACHE_ROOT, "block_hashes" )
CACHE_BLOCK_DATA_DIR = os.path.join( CACHE_ROOT, "block_data" )
CACHE_BLOCK_ID_DIR = os.path.join( CACHE_ROOT, "blocks" )
""" Multiprocessing
"""
MULTIPROCESS_NUM_WORKERS = 1
MULTIPROCESS_WORKER_BATCH = 64
MULTIPROCESS_RPC_RETRY = 3
def default_bitcoind_opts( config_file=None ):
"""
Set bitcoind options globally.
Call this before trying to talk to bitcoind.
"""
global BITCOIND_SERVER, BITCOIND_PORT, BITCOIND_USER, BITCOIND_PASSWD, BITCOIND_USE_HTTPS, TESTNET
global CACHE_ENABLE
global MULTIPROCESS_NUM_WORKERS, MULTIPROCESS_WORKER_BATCH
loaded = False
if config_file is not None:
parser = SafeConfigParser()
parser.read(config_file)
if parser.has_section('bitcoind'):
BITCOIND_SERVER = parser.get('bitcoind', 'server')
BITCOIND_PORT = parser.get('bitcoind', 'port')
BITCOIND_USER = parser.get('bitcoind', 'user')
BITCOIND_PASSWD = parser.get('bitcoind', 'passwd')
if parser.has_option('bitcoind', 'use_https'):
use_https = parser.get('bitcoind', 'use_https')
else:
use_https = 'no'
if use_https.lower() == "yes" or use_https.lower() == "y":
BITCOIND_USE_HTTPS = True
else:
BITCOIND_USE_HTTPS = False
loaded = True
if not loaded:
if TESTNET:
BITCOIND_SERVER = "localhost"
BITCOIND_PORT = 18332
BITCOIND_USER = 'openname'
BITCOIND_PASSWD = 'opennamesystem'
BITCOIND_USE_HTTPS = False
else:
BITCOIND_SERVER = DEFAULT_BITCOIND_SERVER
BITCOIND_PORT = '8332'
BITCOIND_USER = 'openname'
BITCOIND_PASSWD = 'opennamesystem'
BITCOIND_USE_HTTPS = True
default_bitcoin_opts = {
"bitcoind_user": BITCOIND_USER,
"bitcoind_passwd": BITCOIND_PASSWD,
"bitcoind_server": BITCOIND_SERVER,
"bitcoind_port": BITCOIND_PORT,
"bitcoind_use_https": BITCOIND_USE_HTTPS
}
# configure caching and multiprocessing based on whether bitcoind is local or remote
if CACHE_ENABLE is None:
if BITCOIND_SERVER == "localhost" or BITCOIND_SERVER == "127.0.0.1" or BITCOIND_SERVER == "::1":
CACHE_ENABLE = False
MULTIPROCESS_NUM_WORKERS = 1
MULTIPROCESS_WORKER_BATCH = 64
else:
CACHE_ENABLE = True
MULTIPROCESS_NUM_WORKERS = 8
MULTIPROCESS_WORKER_BATCH = 8
return default_bitcoin_opts
else:
BITCOIND_SERVER = DEFAULT_BITCOIND_SERVER
BITCOIND_PORT = '8332'
BITCOIND_USER = 'openname'
BITCOIND_PASSWD = 'opennamesystem'
BITCOIND_USE_HTTPS = True
""" block indexing configs
"""
@@ -102,7 +189,8 @@ REINDEX_FREQUENCY = 10 # in seconds
FIRST_BLOCK_MAINNET = 343883
FIRST_BLOCK_MAINNET_TESTSET = FIRST_BLOCK_MAINNET
FIRST_BLOCK_TESTNET = 343883
# FIRST_BLOCK_TESTNET = 343883
FIRST_BLOCK_TESTNET = 508800
FIRST_BLOCK_TESTNET_TESTSET = FIRST_BLOCK_TESTNET
if TESTNET:
@@ -116,21 +204,11 @@ else:
else:
START_BLOCK = FIRST_BLOCK_MAINNET
""" api configs
"""
if parser.has_section('chain_com'):
CHAIN_COM_API_ID = parser.get('chain_com', 'api_key_id')
CHAIN_COM_API_SECRET = parser.get('chain_com', 'api_key_secret')
try:
BLOCKCHAIN_INFO_API_KEY = os.environ['BLOCKCHAIN_INFO_API_KEY']
except KeyError:
pass
""" magic bytes configs
"""
NAME_SCHEME = "id://"
MAGIC_BYTES_TESTSET = 'X\x88'
MAGIC_BYTES_MAINSET = 'X\x08'
@@ -149,25 +227,46 @@ NAME_UPDATE = 'c'
NAME_TRANSFER = 'd'
NAME_RENEWAL = 'e'
DATA_PUT = 'f'
DATA_REMOVE = 'g'
NAMESPACE_DEFINE = 'h'
NAMESPACE_BEGIN = 'i'
NAMESPACE_LIFE_INFINITE = 0xffffffff
# Other
LENGTHS = {
'magic_bytes': 2,
'opcode': 1,
'name_hash': 20,
'preorder_name_hash': 20,
'consensus_hash': 16,
'namelen': 1,
'name_min': 1,
'name_max': 16,
'unencoded_name': 24,
'name_max': 34,
'unencoded_name': 34,
'name_hash': 16,
'update_hash': 20,
'data_hash': 20,
'blockchain_id_name': 40,
'blockchain_id_scheme': len(NAME_SCHEME),
'blockchain_id_namespace_life': 4,
'blockchain_id_namespace_cost': 8,
'blockchain_id_namespace_price_decay': 4,
'blockchain_id_namespace_id_len': 1,
'blockchain_id_namespace_id': 19
}
MIN_OP_LENGTHS = {
'preorder': LENGTHS['name_hash'],
'preorder': LENGTHS['preorder_name_hash'],
'registration': LENGTHS['namelen'] + LENGTHS['name_min'],
'update': (
LENGTHS['namelen'] + LENGTHS['name_min'] + LENGTHS['update_hash']),
'transfer': LENGTHS['namelen'] + LENGTHS['name_min']
'update': LENGTHS['name_hash'] + LENGTHS['update_hash'],
'transfer': LENGTHS['namelen'] + LENGTHS['name_min'],
'data_put': LENGTHS['name_hash'] + LENGTHS['data_hash'],
'data_remove': LENGTHS['name_hash'] + LENGTHS['data_hash'],
'namespace_define': LENGTHS['blockchain_id_namespace_id'] + LENGTHS['blockchain_id_namespace_id_len'],
'namespace_begin': LENGTHS['blockchain_id_namespace_life'] + LENGTHS['blockchain_id_namespace_cost'] + \
LENGTHS['blockchain_id_namespace_price_decay'] + LENGTHS['blockchain_id_namespace_id_len'] + LENGTHS['blockchain_id_namespace_id']
}
OP_RETURN_MAX_SIZE = 40
@@ -189,25 +288,76 @@ PRICE_DROP_PER_LETTER = 10
PRICE_DROP_FOR_NON_ALPHABETIC = 10
ALPHABETIC_PRICE_FLOOR = 10**4
# default namespace record (i.e. for names with no namespace ID)
NAMESPACE_DEFAULT = {
'opcode': 'NAMESPACE_DEFINE',
'lifetime': EXPIRATION_PERIOD,
'cost': PRICE_FOR_1LETTER_NAMES,
'price_decay': float(PRICE_DROP_PER_LETTER),
'namespace_id': None
}
""" consensus hash configs
"""
BLOCKS_CONSENSUS_HASH_IS_VALID = 4*AVERAGE_BLOCKS_PER_HOUR
""" Caching
""" Validation
"""
# cache for raw transactions: map txid to tx
CACHE_ENABLE = True
CACHE_BUFLEN = 10000
CACHE_ROOT = os.path.expanduser("~/.blockstore/cache")
CACHE_TX_DIR = os.path.join( CACHE_ROOT, "tx_data" )
CACHE_BLOCK_HASH_DIR = os.path.join( CACHE_ROOT, "block_hashes" )
CACHE_BLOCK_DATA_DIR = os.path.join( CACHE_ROOT, "block_data" )
CACHE_BLOCK_ID_DIR = os.path.join( CACHE_ROOT, "blocks" )
PASSCARD_SCHEMA_V2 = {
""" Multiprocessing
"""
MULTIPROCESS_NUM_WORKERS = 8
MULTIPROCESS_WORKER_BATCH = 8
MULTIPROCESS_RPC_RETRY = 3
"name": {
"formatted": schemas.STRING
},
"bio": schemas.STRING,
"location": {
"formatted": schemas.STRING
},
"website": schemas.URL,
"bitcoin": {
"address": schemas.BITCOIN_ADDRESS
},
"avatar": {
"url": schemas.URL,
},
"cover": {
"url": schemas.URL,
},
"pgp": {
"url": schemas.URL,
"fingerprint": schemas.PGP_FINGERPRINT,
},
"email": schemas.EMAIL,
"twitter": {
"username": schemas.STRING,
"proof": {
"url": schemas.URL
}
},
"facebook": {
"username": schemas.STRING,
"proof": {
"url": schemas.URL
}
},
"github": {
"username": schemas.STRING,
"proof": {
"url": schemas.URL
}
},
"v": schemas.STRING
}

View File

@@ -1,7 +1,7 @@
import re
import math
from .config import DEFAULT_OP_RETURN_FEE, PRICE_FOR_1LETTER_NAMES, \
PRICE_DROP_PER_LETTER, PRICE_DROP_FOR_NON_ALPHABETIC, ALPHABETIC_PRICE_FLOOR
from .config import DEFAULT_OP_RETURN_FEE, SATOSHIS_PER_BTC
def is_alphabetic(s):
@@ -19,25 +19,22 @@ def has_underscores_or_dashes(s):
def calculate_basic_name_tx_fee():
return DEFAULT_OP_RETURN_FEE
def calculate_name_price(name):
# establish the base price
price = PRICE_FOR_1LETTER_NAMES
def calculate_name_price(name, namespace_base_price, namespace_decay):
# establish the base price (in satoshis)
price = float(namespace_base_price)
# adjust the price by a factor X for every character beyond the first
price /= PRICE_DROP_PER_LETTER**(len(name)-1)
if has_numerics(name) or has_underscores_or_dashes(name):
# for names with numerics or special chars, reduce the price further
price /= PRICE_DROP_FOR_NON_ALPHABETIC
else:
# for alphabetic names, enforce a price floor
if price < ALPHABETIC_PRICE_FLOOR:
price = ALPHABETIC_PRICE_FLOOR
price = ceil( price / (namespace_decay**(len(name)-1)) )
# price cannot be lower than 1 satoshi
if price < 1:
price = 1
return price
def is_mining_fee_sufficient(name, mining_fee):
def is_mining_fee_sufficient(name, mining_fee, namespace_base_price, namespace_decay):
name_price = 0
# name_price = calculate_name_price(name)
# name_price = calculate_name_price(name, namespace_base_price, namespace_decay)
return (mining_fee >= name_price)

View File

@@ -12,8 +12,21 @@ def hash_name(name, script_pubkey):
name_and_pubkey = bin_name + unhexlify(script_pubkey)
return hex_hash160(name_and_pubkey)
def calculate_consensus_hash128(consensus_hash):
    """
    Derive a 128-bit consensus hash: hash160 the input and keep the
    first 16 bytes, hex-encoded.
    """
    digest = bin_hash160(consensus_hash, True)
    return hexlify(digest[:16])
def hash256_trunc128( data ):
    """
    Return the hex encoding of the first 128 bits (16 bytes) of
    sha256(data).
    """
    digest = bin_sha256( data )
    return hexlify( digest[:16] )
def get_owner_hash( owner_name, owner_script_pubkey ):
    """
    Compute the 128-bit owner hash binding a piece of data to its owner,
    from the owner's username concatenated with their script_pubkey.
    """
    payload = owner_name + owner_script_pubkey
    return hash256_trunc128( payload )
from coinkit import bin_double_sha256, hex_to_bin_reversed, bin_to_hex_reversed

View File

@@ -4,153 +4,264 @@ from binascii import hexlify, unhexlify
from .check import *
from .commit import commit_registration, commit_update, commit_transfer, \
commit_renewal
from .log import log_preorder, log_registration, log_update, log_transfer
commit_renewal, commit_namespace, commit_putdata, commit_deletedata
from .log import log_preorder, log_registration, log_update, log_transfer, log_namespace_define, log_namespace_begin
from ..fees import is_mining_fee_sufficient
from ..parsing import parse_nameop
from ..parsing import parse_blockstore_op
from ..config import *
from ..hashing import bin_double_sha256, calculate_consensus_hash128
from coinkit import MerkleTree
from ..blockchain import get_nulldata_txs_in_block, get_nulldata_txs_in_blocks
def process_pending_nameops_in_block(db, current_block_number):
""" process logged registrations, updates, and transfers
def process_pending_ops_in_block(db, current_block_number):
"""
Process logged blockstore operations for this block.
For name operations, if there are duplicates for a given name, then they are all ignored
(i.e. clients will need to try again). This does not apply to storage operations--a name
can put multiple storage operations per block.
"""
# move nameops in pending imports to their respective lists of pending operations,
# so we can go on to process them as pending registrations, transfers, etc.
for namespace_id, nameops in db.pending_imports.items():
commit_namespace( db, namespace_id, nameops )
# commit the pending registrations
for name, nameops in db.pending_registrations.items():
if len(nameops) == 1:
commit_registration(db, nameops[0], current_block_number)
# commit the pending updates
for name, nameops in db.pending_updates.items():
if len(nameops) == 1:
commit_update(db, nameops[0])
# commit the pending transfers
for name, nameops in db.pending_transfers.items():
if len(nameops) == 1:
commit_transfer(db, nameops[0])
# commit the pending renewals
for name, nameops in db.pending_renewals.items():
if len(nameops) == 1:
commit_renewal(db, nameops[0], current_block_number)
# commit all pending data-signature writes for each name
for name, storageops in db.pending_data_puts.items():
for storageop in storageops:
commit_putdata( db, storageop )
# commit all pending data-deletions for each name
for name, storageops in db.pending_data_deletes.items():
for storageop in storageops:
commit_deletedata( db, storageop )
# delete all the pending operations
db.pending_registrations = defaultdict(list)
db.pending_updates = defaultdict(list)
db.pending_transfers = defaultdict(list)
db.pending_renewals = defaultdict(list)
db.pending_data_puts = defaultdict(list)
db.pending_data_deletes = defaultdict(list)
db.pending_imports = defaultdict(list)
def clean_out_expired_names(db, current_block_number):
""" clean out expired names
"""
Clear out expired names, as well as all signed data committed by them.
"""
expiring_block_number = current_block_number - EXPIRATION_PERIOD
names_expiring = db.block_expirations[expiring_block_number]
for name, _ in names_expiring.items():
del db.name_records[name]
del db.signed_data[name]
def log_nameop(db, nameop, block_number):
""" record nameop
def log_blockstore_op(db, blockstore_op, block_number):
"""
opcode = eval(nameop['opcode'])
record blockstore operations
"""
opcode = eval(blockstore_op['opcode'])
if opcode == NAME_PREORDER:
log_preorder(db, nameop, block_number)
log_preorder(db, blockstore_op, block_number)
elif opcode == NAME_REGISTRATION:
log_registration(db, nameop)
log_registration(db, blockstore_op, block_number)
elif opcode == NAME_UPDATE:
log_update(db, nameop)
log_update(db, blockstore_op, block_number)
elif opcode == NAME_TRANSFER:
log_transfer(db, nameop)
log_transfer(db, blockstore_op, block_number)
elif opcode == NAMESPACE_DEFINE:
log_namespace_define(db, blockstore_op, block_number)
elif opcode == NAMESPACE_BEGIN:
log_namespace_begin(db, blockstore_op, block_number)
elif opcode == DATA_PUT:
log_data_put( db, blockstore_op, block_number )
elif opcode == DATA_DELETE:
log_data_delete( db, blockstore_op, block_number )
def name_record_to_string(name, name_record):
    """
    Serialize a name and its record to a UTF-8 byte string covering the
    name, its owner script, and its latest value hash (empty string when
    the value hash is absent or None).  Used as the per-name input to
    the consensus Merkle snapshot.
    """
    vh = name_record.get('value_hash', '')
    if vh is None:
        # a record may carry an explicit None before its first update
        vh = ''
    return (name + name_record['owner'] + vh).encode('utf8')
def calculate_merkle_snapshot(db):
"""
Calculate the current Merkle snapshot of the set of blockstore operations.
The Merkle tree is constructed by joining the lists of [hash(name.ns_id.script_pubkey.value_hash), hash1, hash2, ...],
ordered by name.nsid. The sequence of hash1, hash2, ... are sorted alphanumerically.
The intuition behind generating a Merkle tree is that it represents the global state
of all name and storage operations that have occurred at this point in the database.
This is useful for detecting forks in the underlying blockchain--if peers' snapshots diverge,
then there is a fork going on. However, it is critical that the Merkle tree covers *all* blockstore
operations; otherwise, it is possible for the sequence of storage operations to diverge from the
sequence of name operations undetected (leading to inconsistencies, like data randomly disappearing or
getting transferred to another user).
"""
names = sorted(db.name_records)
signed_data = sorted(db.signed_data)
hashes = []
for name in names:
name_string = name_record_to_string(name, db.name_records[name])
name_string_hash = hexlify(bin_double_sha256(name_string))
hashes.append(name_string_hash)
# data this name owns
data_hashes = sorted( db.signed_data.get( name, [] ) )
hashes += data_hashes
if len(hashes) == 0:
hashes.append(hexlify(bin_double_sha256("")))
merkle_tree = MerkleTree(hashes)
merkle_root = merkle_tree.root()
consensus_hash128 = calculate_consensus_hash128(merkle_root)
return consensus_hash128
def record_consensus_hash(db, consensus_hash, block_number):
    """
    Store the consensus hash computed at a given block height.

    The db.consensus_hashes mapping is keyed by the stringified block
    number (matching how other code in this module looks it up).
    """
    key = str(block_number)
    db.consensus_hashes[key] = consensus_hash
def build_nameset(db, nameop_sequence):
def build_nameset(db, blockstore_op_sequence):
"""
Process the sequence of blockstore operations to derive the
current set of all such operations.
blockstore_op_sequence must be a list of (block number, blockstore operation dict)
"""
# set the current consensus hash
first_block_number = nameop_sequence[0][0]
first_block_number = blockstore_op_sequence[0][0]
db.consensus_hashes[str(first_block_number)] = calculate_merkle_snapshot(db)
for block_number, nameops in nameop_sequence:
# log the pending nameops
for nameop in nameops:
for block_number, blockstore_ops in blockstore_op_sequence:
# accumulate all blockstore operations in this block
for blockstore_op in blockstore_ops:
try:
log_nameop(db, nameop, block_number)
log_blockstore_op(db, blockstore_op, block_number)
except Exception as e:
traceback.print_exc()
# process and tentatively commit the pending nameops
process_pending_nameops_in_block(db, block_number)
# clean out the expired names
# process and tentatively commit the pending operations
process_pending_ops_in_block(db, block_number)
# clean out the expired names and their associated data
clean_out_expired_names(db, block_number)
# calculate the merkle snapshot consensus hash
consensus_hash128 = calculate_merkle_snapshot(db)
# record the merkle consensus hash
record_consensus_hash(db, consensus_hash128, block_number)
# set the current consensus hash
# set the current consensus hash for the set of names
db.consensus_hashes['current'] = consensus_hash128
# return the current consensus hash
return consensus_hash128
from ..blockchain import get_nulldata_txs_in_block, get_nulldata_txs_in_blocks
def nulldata_txs_to_nameops(txs):
nameops = []
def nulldata_txs_to_blockstore_ops(txs):
"""
Given a list of transactions, extract the nulldata from the transaction's script
and construct a blockstore operation. Importantly, obtain the fee and list of senders.
Return a list of blockstore operations, where each blockstore operation is a dict which optionally has:
* "sender": the hex string of the primary sender's script_pubkey
* "fee": the total amount paid
"""
ops = []
for tx in txs:
blockstore_op = None
try:
nameop = parse_nameop( tx['nulldata'], tx['vout'], senders=tx['senders'], fee=tx['fee'])
blockstore_op = parse_blockstore_op( tx['nulldata'], tx['vout'], senders=tx['senders'], fee=tx['fee'] )
except Exception, e:
traceback.print_exc()
pass
else:
if nameop:
nameops.append(nameop)
return nameops
if blockstore_op is not None:
ops.append(blockstore_op)
return ops
def get_nameops_in_block(bitcoind, block_number ):
current_nulldata_txs = get_nulldata_txs_in_block(bitcoind, block_number )
nameops = nulldata_txs_to_nameops(current_nulldata_txs)
return nameops
def get_nameops_in_blocks( workpool, blocks ):
def get_blockstore_ops_in_blocks( workpool, blocks ):
"""
Get the full list of blockstore operations for a set of blocks (where 'blocks' is a list of integer block IDs)
Return the list of blockstore operations, extracted from transaction nulldata.
"""
current_nulldata_txs = get_nulldata_txs_in_blocks( workpool, blocks )
all_nameops = []
all_blockstore_ops = []
for (block_number, txs) in current_nulldata_txs:
nameops = nulldata_txs_to_nameops(txs)
all_nameops += [(block_number, nameops)]
blockstore_ops = nulldata_txs_to_blockstore_ops(txs)
all_blockstore_ops += [(block_number, blockstore_ops)]
return all_nameops
return all_blockstore_ops
"""
def get_nameops_in_block( bitcoind, block_number ):
current_nulldata_txs = get_nulldata_txs_in_block( bitcoind, block_number )
nameops = nulldata_txs_to_nameops(current_nulldata_txs)
return nameops
"""
"""
# DEPRECATED
def get_nameops_in_block_range(bitcoind, first_block=0, last_block=None):
nameop_sequence = []
@@ -163,3 +274,4 @@ def get_nameops_in_block_range(bitcoind, first_block=0, last_block=None):
nameop_sequence.append((block_number, block_nameops))
return nameop_sequence
"""

View File

@@ -1,4 +1,4 @@
from ..hashing import hash_name
from ..hashing import hash_name, hash256_trunc128
from ..config import BLOCKS_CONSENSUS_HASH_IS_VALID
@@ -12,6 +12,46 @@ def name_not_registered(db, name):
return (not name_registered(db, name))
def namespace_registered( db, namespace ):
"""
Has a namespace been declared?
"""
if namespace in db.namespaces.keys():
return True
else:
return False
def namespace_importing( db, namespace ):
"""
Is a namespace in the process of being defined?
"""
try:
namespace_id_hash = hash_name(namespace_id, sender_script_pubkey)
except ValueError:
return False
if namespace_id_hash in db.imports.keys():
return True
else:
return False
def has_defined_namespace( db, namespace_id, sender_script_pubkey ):
"""
Has the given user (identified by the sender_script_pubkey) defined this namespace?
"""
try:
namespace_id_hash = hash_name(namespace_id, sender_script_pubkey)
except ValueError:
return False
if namespace_id_hash in db.imports.keys():
if sender_script_pubkey == db.imports[namespace_id_hash]['sender']:
return True
return False
def no_pending_higher_priority_registration(db, name, mining_fee):
if name in db.pending_registrations:
del db.pending_registrations[name]
@@ -57,3 +97,32 @@ def is_consensus_hash_valid(db, consensus_hash, current_block_number):
if str(consensus_hash) == str(db.consensus_hashes[str(block_number)]):
return True
return False
def is_storageop_from_registered_name( db, storageop ):
"""
Determine if a storage operation came from a valid registered name.
"""
name_hash = storageop['name_hash']
data_hash = storageop['data_hash']
name = get_name_from_hash128( name_hash, db )
if name is None:
# name does not exist
return False
# name must be registered
if not name_registered( db, name ):
return
# storageop must have a sender
if 'sender' not in storageop:
return
# storageop's sender must be the same as the name owner
name_owner = db.name_records[name]['owner']
if name_owner != storageop['sender']:
return False

View File

@@ -1,5 +1,5 @@
from ..hashing import hash_name
from ..hashing import hash_name, hash256_trunc128
from .namedb import get_name_from_hash128
def remove_preorder(db, name, script_pubkey):
try:
@@ -11,23 +11,32 @@ def remove_preorder(db, name, script_pubkey):
return True
def commit_preorder(db, nameop):
db.preorders[nameop['name_hash']] = nameop
def commit_registration(db, nameop, current_block_number):
"""
Construct a name registration record, and update:
* name_records
* block_expirations
* index_hash_name
"""
name = nameop['name']
name_hash128 = hash256_trunc128( name )
remove_preorder(db, name, nameop['sender'])
db.name_records[name] = {
'value_hash': None,
'owner': str(nameop['sender']),
'owner': str(nameop['sender']), # i.e. the hex string of the script_pubkey
'first_registered': current_block_number,
'last_renewed': current_block_number
}
db.block_expirations[current_block_number][name] = True
db.index_hash_name[ name_hash128 ] = name
def commit_renewal(db, nameop, current_block_number):
"""
Commit a name renewal, and update:
* block_expirations
* name_records
"""
name = nameop['name']
# grab the block the name was last renewed to find the old expiration timer
block_last_renewed = db.name_records[name]['last_renewed']
@@ -40,8 +49,64 @@ def commit_renewal(db, nameop, current_block_number):
def commit_update(db, nameop):
db.name_records[nameop['name']]['value_hash'] = nameop['update']
"""
Commit an update to a name's data, and update:
* name_records (value_hash)
"""
name = get_name_from_hash128( nameop['name_hash'], db )
db.name_records[name]['value_hash'] = nameop['update_hash']
def commit_transfer(db, nameop):
"""
Commit a transfer: change the name's owner in name_records to the nameop's 'recipient' script_pubkey
"""
db.name_records[nameop['name']]['owner'] = str(nameop['recipient'])
def commit_namespace( db, nameop, block_number ):
"""
Commit a namespace and its imports.
nameop is a NAMESPACE_BEGIN nameop
"""
namespace_id = nameop['namespace_id']
namespace_define_nameop = db.pending_imports[ namespace_id ][0]
op_sequence = db.pending_imports[ namespace_id ][1:]
namespace_id_hash = namespace_define_nameop['namespace_id_hash']
# no longer importing
del db.imports[ namespace_id_hash ]
del db.pending_imports[ namespace_id ]
# merge each operation to pending
for op in op_sequence:
log_blockstore_op( db, op, op['block_number'] )
def commit_putdata( db, storageop ):
"""
Store signed data hash, owned by the principal that put the storage op.
"""
name_hash = storageop['name_hash']
data_hash = storageop['data_hash']
name = get_name_from_hash128( name_hash )
put_signed_data( name, data_hash, db )
def commit_deletedata( db, storageop):
"""
Delete a signed data hash.
"""
name_hash = storageop['name_hash']
data_hash = storageop['data_hash']
name = get_name_from_hash128( name_hash )
remove_signed_data( name, data_hash, db )

View File

@@ -1,43 +1,173 @@
from .check import name_not_registered, has_preordered_name, \
is_name_owner, is_preorder_hash_unique, name_registered, \
is_consensus_hash_valid
is_consensus_hash_valid, is_storageop_from_registered_name, \
namespace_importing
from ..fees import is_mining_fee_sufficient
from .commit import commit_preorder
from .namedb import get_name_from_hash128, put_signed_data, get_namespace_from_name
from ..hashing import hash256_trunc128, hash_name
def log_registration(db, nameop):
def log_import( db, nameop, block_number ):
"""
Log an op as part of a namespace import
"""
name = nameop['name']
namespace_id = get_namespace_from_name( name )
nameop['block_number'] = block_number
db.imports[ namespace_id ].append( nameop )
def log_registration(db, nameop, block_number):
"""
Log a name registration.
"""
name = nameop['name']
# check if this registration is a valid one
if (name_not_registered(db, name)
and has_preordered_name(db, name, nameop['sender'])
and is_mining_fee_sufficient(name, nameop['fee'])):
# we're good - log the registration!
db.pending_registrations[name].append(nameop)
# check if this registration is actually a valid renewal
if (name_registered(db, name)
and is_name_owner(db, name, nameop['sender'])
and is_mining_fee_sufficient(name, nameop['fee'])):
# we're good - log the renewal!
db.pending_renewals[name].append(nameop)
namespace_id = get_namespace_from_name( name )
# part of an import?
if namespace_importing( db, namespace_id ):
# yup--remember which block, to avoid conflicts
log_import( db, nameop, block_number )
else:
namespace = get_namespace( db, namespace_id )
# check if this registration is a valid one
if (name_not_registered(db, name) and has_preordered_name(db, name, nameop['sender']) and is_mining_fee_sufficient(name, nameop['fee'])):
# we're good - log the registration!
db.pending_registrations[name].append(nameop)
# check if this registration is actually a valid renewal
if (name_registered(db, name) and is_name_owner(db, name, nameop['sender']) and is_mining_fee_sufficient(name, nameop['fee'])):
# we're good - log the renewal!
db.pending_renewals[name].append(nameop)
def log_update(db, nameop):
def log_update(db, nameop, block_number):
"""
Log an update to a name's associated data.
Use the nameop's 128-bit name hash to find the name itself.
"""
name_hash128 = nameop['name_hash']
name = get_name_from_hash128( name_hash128, db )
if name is None:
# nothing to do
return
namespace_id = get_namespace_from_name( name )
# part of an import?
if namespace_importing( db, namespace_id ):
# yup--remember which block, to avoid conflicts
log_import( db, nameop, block_number )
else:
if is_name_owner(db, name, nameop['sender']):
# we're good - log it!
db.pending_updates[name].append(nameop)
def log_transfer(db, nameop, block_number):
"""
Log a transfer for this name to the nameop's 'sender' script_pubkey
"""
name = nameop['name']
if is_name_owner(db, name, nameop['sender']):
# we're good - log it!
db.pending_updates[name].append(nameop)
def log_transfer(db, nameop):
name = nameop['name']
if is_name_owner(db, name, nameop['sender']):
# we're good - log it!
db.pending_transfers[name].append(nameop)
namespace_id = get_namespace_from_name( name )
# part of an import?
if namespace_importing( db, namespace_id ):
# yup--remember which block, to avoid conflicts
log_import( db, nameop, block_number )
else:
if is_name_owner(db, name, nameop['sender']):
# we're good - log it!
db.pending_transfers[name].append(nameop)
def log_preorder(db, nameop, block_number):
"""
Log a preorder of a name at a particular block number.
NOTE: these can't be incorporated into namespace-imports,
since we have no way of knowning which namespace the
nameop belongs to.
"""
preorder_name_hash = nameop['preorder_name_hash']
consensus_hash = nameop['consensus_hash']
if (is_preorder_hash_unique(db, nameop['name_hash'])
and is_consensus_hash_valid(db, consensus_hash, block_number)):
if (is_preorder_hash_unique(db, preorder_name_hash) and is_consensus_hash_valid(db, consensus_hash, block_number)):
# we're good - log it!
commit_preorder(db, nameop)
db.preorders[ preorder_name_hash ] = nameop
def log_namespace_define(db, nameop, block_number):
"""
Log a "namespace define" operation to the name database.
It is only valid if it is the first such operation
for this namespace.
"""
namespace_id_hash = nameop['namespace_id_hash']
if not namespace_registered( db, namespace_id_hash ) and not namespace_importing( db, namespace_id_hash ):
# can begin the import
db.imports[ namespace_id_hash ] = defaultdict(list)
db.imports[ namespace_id_hash ].append( nameop )
def log_namespace_begin(db, nameop, block_number):
"""
Log a "namespace begin" operation to the name database.
All pending operations will be incorporated into the same consensus hash.
"""
namespace_id = nameop['namespace_id']
if not namespace_registered( db, namespace_id ) and namespace_importing( db, namespace_id ) and has_defined_namespace( db, namespace_id, nameop['sender'] ):
# can merge on next commit. this namespace is no longer importing.
db.pending_imports[ namespace_id ] = db.imports[ namespace_id_hash ]
del db.imports[ namespace_id_hash ]
def log_putdata( db, storageop, block_number ):
"""
Log that someone stored data.
Data can only be written by users with registered names.
"""
data_hash = storageop['data_hash']
if is_storageop_from_registered_name( storageop ):
name_hash = storageop['name_hash']
name = get_name_from_hash128( name_hash )
if name is not None:
db.pending_data_puts[name].append( data_hash )
def log_deletedata( db, storageop, block_number ):
"""
Log that someone deleted data.
Data can only be deleted by the user that put it.
"""
data_hash = storageop['data_hash']
if is_storageop_from_registered_name( storageop ):
name_hash = storageop['name_hash']
name = get_name_from_hash128( name_hash )
if name is not None and data_hash in db.signed_data[name]:
# user owns this data
db.pending_data_deletes[name].append( data_hash )

View File

@@ -2,30 +2,66 @@ import json
import traceback
from collections import defaultdict
from ..config import NAMESPACE_DEFAULT
class NameDb():
def __init__(self, names_filename, snapshots_filename):
self.name_records = {}
self.preorders = {}
self.name_records = {} # map name.ns_id to dict of
# { "owner": hex string of script_pubkey,
# "first_registered": block when registered,
# "last_renewed": block when last renewed,
# "value_hash": hex string of hash of last update }
self.index_hash_name = {} # map 128-bit hash (as a hex string) to name.ns_id, to look up updates and data signatures
self.preorders = {} # map preorder name.ns_id+script_pubkey hash (as a hex string) to its first "preorder" nameop
self.imports = {} # map an in-progress namespace import (as the hex string of ns_id+script_pubkey hash) to a list of nameops. The first element is the namespace_define nameop
self.namespaces = {} # map namespace ID to first instance of NAMESPACE_BEGIN op
self.signed_data = {} # map name to set of hashes of data
self.pending_registrations = defaultdict(list)
self.pending_updates = defaultdict(list)
self.pending_transfers = defaultdict(list)
self.pending_renewals = defaultdict(list)
self.pending_data_puts = defaultdict(list)
self.pending_data_deletes = defaultdict(list)
self.pending_imports = defaultdict(list) # map namespace_id to list of [namespace_define] + [nameops], but such that each nameop has a 'block_number'
self.block_expirations = defaultdict(dict)
self.consensus_hashes = defaultdict(dict)
# default namespace
self.namespaces[""] = NAMESPACE_DEFAULT
self.namespaces[None] = NAMESPACE_DEFAULT
if names_filename:
try:
with open(names_filename, 'r') as f:
db_dict = json.loads(f.read())
if 'registrations' in db_dict:
self.name_records = db_dict['registrations']
if 'namespaces' in db_dict:
self.namespaces = db_dict['namespaces']
if 'imports' in db_dict:
self.imports = db_dict['imports']
if 'preorders' in db_dict:
self.preorders = db_dict['preorders']
if 'index_hash_name' in db_dict:
self.index_hash_name = db_dict['index_hash_name']
if 'signed_data' in db_dict:
self.signed_data = db_dict['signed_data']
# convert to sets
for (name, hash_list) in self.signed_data.items():
self.signed_data[name] = set(hash_list)
except Exception as e:
pass
@@ -37,13 +73,24 @@ class NameDb():
self.consensus_hashes = db_dict['snapshots']
except Exception as e:
pass
def save_names(self, filename):
# serialize signed data to lists
serialized_signed_data = {}
for (name, hash_set) in self.signed_data.items():
serialized_signed_data[name] = list(hash_set)
try:
with open(filename, 'w') as f:
db_dict = {
'registrations': self.name_records,
'preorders': self.preorders
'index_hash_name': self.index_hash_name,
'preorders': self.preorders,
'namespaces': self.namespaces,
'imports': self.imports,
'signed_data': serialized_signed_data
}
f.write(json.dumps(db_dict))
except Exception as e:
@@ -71,6 +118,87 @@ def get_value_hash_for_name(name, db):
return None
def get_name_from_hash128( hash128, db ):
"""
Find the name from its 128-bit hash.
"""
if hash128 in db.index_hash_name.keys():
return db.index_hash_name[ hash128 ]
else:
return None
def get_storage_owner_name( data_hash, db ):
"""
Get the name of the user that wrote
a piece of data.
Return the name if successful
Return None if not
"""
name_hash = data_hash.get( 'name_hash', None )
if name_hash is None:
return None
name = get_name_from_hash128( name_hash, db )
if name is None:
return None
return name
def get_namespace_from_name( name ):
"""
Get a name's namespace, if it has one.
It's the sequence of characters after the last "." in the name.
"""
if "." not in name:
# invalid
return None
return name.split(".")[-1]
def put_signed_data( owner_name, data_hash, db ):
"""
Remember that a particular principal (identified by name)
owns a piece of data.
NOTE: this doesn't verify that the name is valid; the caller must do so.
"""
if db.signed_data.has_key( owner_name ):
db.signed_data[owner_name].update( set([data_hash]) )
else:
db.signed_data[owner_name] = set([data_hash])
def verify_signed_data( owner_name, data_hash, db ):
"""
Confirm that a given user wrote a particular piece of data.
Return True if so; False if not
"""
if not db.signed_data.has_key( owner_name ):
# user has written nothing
return False
return data_hash in db.signed_data[ owner_name ]
def delete_signed_data( owner_name, data_hash, db ):
"""
Remove signed data written by a particular principal (identified by name)
"""
if db.signed_data.has_key( owner_name ):
if data_hash in db.signed_data[owner_name]:
db.signed_data[owner_name].remove( data_hash )
def lookup_name(name, db):
value_hash = get_value_hash_for_name(name, db)

View File

@@ -2,6 +2,10 @@ import preorder
import register
import transfer
import update
import putdata
import rmdata
import namespacedefine
import namespacebegin
from .preorder import build as build_preorder, \
broadcast as preorder_name, parse as parse_preorder
@@ -12,3 +16,13 @@ from .transfer import build as build_transfer, \
make_outputs as make_transfer_ouptuts
from .update import build as build_update, \
broadcast as update_name, parse as parse_update
from .putdata import build as build_putdata, \
broadcast as putdata_storage, parse as parse_putdata
from .rmdata import build as build_rmdata, \
broadcast as rmdata_storage, parse as parse_rmdata
from .namespacedefine import build as build_namespacedefine, \
broadcast as namespace_define, parse as parse_namespacedefine
from .namespacebegin import build as build_namespacebegin, \
broadcast as namespace_begin, parse as parse_namespacebegin

View File

@@ -1,48 +1,72 @@
from coinkit import embed_data_in_blockchain, BlockchainInfoClient, \
bin_hash160, BitcoinPrivateKey, script_to_hex
bin_hash160, BitcoinPrivateKey
from utilitybelt import is_hex
from binascii import hexlify, unhexlify
from ..b40 import b40_to_hex
from ..b40 import b40_to_hex, is_b40
from ..config import *
from ..scripts import name_script_to_hex, add_magic_bytes
from ..scripts import blockstore_script_to_hex, add_magic_bytes, get_script_pubkey
from ..hashing import hash_name, calculate_consensus_hash128
def build(name, script_pubkey, consensus_hash, testset=False):
""" Takes in an ascii string as a name.
"""
Takes a name, including the namespace ID (but not the id:// scheme), a script_publickey to prove ownership
of the subsequent NAME_REGISTER operation, and the current consensus hash for this block (to prove that the
caller is not on a shorter fork).
Returns a NAME_PREORDER script.
Record format:
0 2 3 23 39
|-----|--|------------------------|--------------|
magic op hash(name.ns_id,pubkey) consensus hash
"""
if not is_b40( name ):
raise Exception("Name '%s' is not base-40" % name)
# name itself cannot exceed LENGTHS['blockchain_id_name']
if len(NAME_SCHEME) + len(name) > LENGTHS['blockchain_id_name']:
raise Exception("Name '%s' is too long; exceeds %s bytes" % (name, LENGTHS['blockchain_id_name'] - len(NAME_SCHEME)))
name_hash = hash_name(name, script_pubkey)
script = 'NAME_PREORDER %s %s' % (name_hash, consensus_hash)
hex_script = name_script_to_hex(script)
hex_script = blockstore_script_to_hex(script)
packaged_script = add_magic_bytes(hex_script, testset=testset)
return packaged_script
def broadcast(name, consensus_hash, private_key,
blockchain_client=BlockchainInfoClient(), testset=False):
""" Builds and broadcasts a preorder transaction.
def broadcast(name, consensus_hash, private_key, blockchain_client=BlockchainInfoClient(), testset=False):
"""
hash160 = BitcoinPrivateKey(private_key).public_key().hash160()
script_pubkey = script_to_hex(
'OP_DUP OP_HASH160 %s OP_EQUALVERIFY OP_CHECKSIG' % hash160)
nulldata = build(
name, script_pubkey, consensus_hash, testset=testset)
response = embed_data_in_blockchain(
nulldata, private_key, blockchain_client, format='hex')
Builds and broadcasts a preorder transaction.
"""
script_pubkey = get_script_pubkey( private_key )
nulldata = build( name, script_pubkey, consensus_hash, testset=testset)
response = embed_data_in_blockchain( nulldata, private_key, blockchain_client, format='hex')
# response = {'success': True }
response.update(
{'data': nulldata, 'consensus_hash': consensus_hash})
response.update( {'data': nulldata, 'consensus_hash': consensus_hash})
return response
def parse(bin_payload):
name_hash = bin_payload[0:LENGTHS['name_hash']]
consensus_hash = bin_payload[LENGTHS['name_hash']:]
"""
Parse a name preorder.
NOTE: bin_payload *excludes* the leading 3 bytes (magic + op) returned by build.
"""
name_hash = hexlify( bin_payload[0:LENGTHS['preorder_name_hash']] )
consensus_hash = hexlify( bin_payload[LENGTHS['preorder_name_hash']:] )
return {
'opcode': 'NAME_PREORDER',
'name_hash': hexlify(name_hash),
'preorder_name_hash': hexlify(name_hash),
'consensus_hash': hexlify(consensus_hash)
}

View File

@@ -2,38 +2,61 @@ from coinkit import embed_data_in_blockchain, BlockchainInfoClient
from utilitybelt import is_hex
from binascii import hexlify, unhexlify
from ..b40 import b40_to_hex, bin_to_b40
from ..b40 import b40_to_hex, bin_to_b40, is_b40
from ..config import *
from ..scripts import name_script_to_hex, add_magic_bytes
from ..scripts import blockstore_script_to_hex, add_magic_bytes
def build(name, testset=False):
""" Takes in the name that was preordered.
"""
hex_name = b40_to_hex(name)
name_len = len(hex_name)/2
readable_script = 'NAME_REGISTRATION %i %s' % (name_len, hex_name)
hex_script = name_script_to_hex(readable_script)
packaged_script = add_magic_bytes(hex_script, testset=testset)
return packaged_script
Takes in the name that was preordered, including the namespace ID (but not the id:// scheme)
Returns a hex string representing up to LENGTHS['blockchain_id_name'] bytes.
Record format:
0 5 39
|-----|-----------------------------|
id:// name.ns_id (34 bytes)
"""
if not is_b40( name ):
raise Exception("Name '%s' is not base-40" % name)
fqn = NAME_SCHEME + str(name)
fqn_hex = hexlify( fqn )
if len(fqn_hex) > LENGTHS['blockchain_id_name'] * 2:
# too long
raise Exception("Name '%s' too long (exceeds %d bytes)" % (fqn, LENGTHS['blockchain_id_name']))
return fqn_hex
def broadcast(name, private_key,
blockchain_client=BlockchainInfoClient(), testset=False):
def broadcast(name, private_key, blockchain_client=BlockchainInfoClient(), testset=False):
nulldata = build(name, testset=testset)
# response = {'success': True }
response = embed_data_in_blockchain(
nulldata, private_key, blockchain_client, format='hex')
response = embed_data_in_blockchain( nulldata, private_key, blockchain_client, format='hex')
response.update({'data': nulldata})
return response
def parse(bin_payload):
name_len = ord(bin_payload[0:1])
name = bin_payload[1:1+name_len]
"""
Interpret a block's nulldata back into a name. The first three bytes (2 magic + 1 opcode)
will not be present in bin_payload.
"""
fqn = unhexlify( bin_payload )
scheme = fqn[0:(len(NAME_SCHEME) - 3)] # excludes 'id://'
if scheme != NAME_SCHEME[3:]:
raise Exception("Invalid bin payload: does not start with '%s'" % NAME_SCHEME)
return {
'opcode': 'NAME_REGISTRATION',
'name': bin_to_b40(name)
'opcode': 'NAME_REGISTRATION',
'name': fqn[(len(NAME_SCHEME) - 3):] # skip the '//', since that's what bin_payload will always start with
}

View File

@@ -7,32 +7,45 @@ from binascii import hexlify, unhexlify
from ..b40 import b40_to_hex, bin_to_b40
from ..config import *
from ..scripts import name_script_to_hex, add_magic_bytes
from ..scripts import blockstore_script_to_hex, add_magic_bytes
from ..fees import calculate_basic_name_tx_fee
def build(name, testset=False):
""" Takes in a name to transfer.
"""
hex_name = b40_to_hex(name)
name_len = len(hex_name)/2
readable_script = 'NAME_TRANSFER %i %s' % (name_len, hex_name)
hex_script = name_script_to_hex(readable_script)
Takes in a name to transfer. Name must include the namespace ID, but not the scheme.
Record format:
0 2 3 4 39
|-----|--|--|----------------------|
magic op len name.ns_id (up to 34 bytes)
"""
if name.startswith(NAME_SCHEME):
raise Exception("Invalid name %s: must not start with %s" % (name, NAME_SCHEME))
# without the scheme, name must be 34 bytes
if len(name) > LENGTHS['blockchain_id_name'] - LENGTHS['blockchain_id_scheme']:
raise Exception("Name '%s' is too long; expected %s bytes" % (name, LENGTHS['blockchain_id_name'] - LENGTHS['blockchain_id_scheme']))
name_hex = hexlify(name)
readable_script = 'NAME_TRANSFER %i %s' % (len(name_hex), name_hex)
hex_script = blockstore_script_to_hex(readable_script)
packaged_script = add_magic_bytes(hex_script, testset=testset)
return packaged_script
def make_outputs(
data, inputs, new_name_owner_address, change_address, format='bin',
fee=None, op_return_amount=DEFAULT_OP_RETURN_VALUE,
name_owner_amount=DEFAULT_DUST_SIZE):
def make_outputs( data, inputs, new_name_owner_address, change_address, format='bin', fee=None, op_return_amount=DEFAULT_OP_RETURN_VALUE, name_owner_amount=DEFAULT_DUST_SIZE):
""" Builds the outputs for a name transfer operation.
"""
if not fee:
if fee is None:
fee = calculate_basic_name_tx_fee()
total_to_send = op_return_amount + name_owner_amount
return [
# main output
{"script_hex": make_op_return_script(data, format=format),
@@ -46,18 +59,15 @@ def make_outputs(
]
def broadcast(name, destination_address, private_key,
blockchain_client=BlockchainInfoClient(), testset=False):
def broadcast(name, destination_address, private_key, blockchain_client=BlockchainInfoClient(), testset=False):
nulldata = build(name, testset=testset)
# get inputs and from address
private_key_obj, from_address, inputs = analyze_private_key(
private_key, blockchain_client)
private_key_obj, from_address, inputs = analyze_private_key(private_key, blockchain_client)
# build custom outputs here
outputs = make_outputs(
nulldata, inputs, destination_address, from_address, format='hex')
outputs = make_outputs(nulldata, inputs, destination_address, from_address, format='hex')
# serialize, sign, and broadcast the tx
response = serialize_sign_and_broadcast(inputs, outputs, private_key_obj,
blockchain_client)
response = serialize_sign_and_broadcast(inputs, outputs, private_key_obj, blockchain_client)
# response = {'success': True }
response.update({'data': nulldata})
# return the response
@@ -65,10 +75,18 @@ def broadcast(name, destination_address, private_key,
def parse(bin_payload):
"""
# NOTE: first three bytes were stripped
bin_payload format:
0 1 len+1 <= 37
|---|-----------------|
len name (hex)
"""
name_len = ord(bin_payload[0:1])
name = bin_payload[1:1+name_len]
name = unhexlify( bin_payload[1:1+name_len] )
return {
'opcode': 'NAME_TRANSFER',
'name': bin_to_b40(name),
'name': name,
'recipient': None
}

View File

@@ -4,44 +4,65 @@ from binascii import hexlify, unhexlify
from ..b40 import b40_to_hex, bin_to_b40
from ..config import *
from ..scripts import name_script_to_hex, add_magic_bytes
from ..scripts import blockstore_script_to_hex, add_magic_bytes
from ..hashing import hash256_trunc128
def build(name, data_hash=None, data=None, testset=False):
""" Takes in the name to update the data for and the data update itself.
"""
hex_name = b40_to_hex(name)
name_len = len(hex_name)/2
Takes in the name to update the data for and the data update itself.
Name must include the namespace ID, but not the scheme.
Record format:
0 2 3 19 39
|-----|--|-------------------|-----------------------|
magic op hash128(name.ns_id) hash160(data)
"""
if name.startswith(NAME_SCHEME):
raise Exception("Invalid name %s: must not start with %s" % (name, NAME_SCHEME))
hex_name = hash256_trunc128(name)
if not data_hash:
if not data:
raise ValueError('A data hash or data string is required.')
data_hash = hex_hash160(data)
elif not (is_hex(data_hash) and len(data_hash) == 40):
raise ValueError('Data hash must be a 20 byte hex string.')
readable_script = 'NAME_UPDATE %i %s %s' % (name_len, hex_name, data_hash)
hex_script = name_script_to_hex(readable_script)
readable_script = 'NAME_UPDATE %s %s' % (hex_name, data_hash)
hex_script = blockstore_script_to_hex(readable_script)
packaged_script = add_magic_bytes(hex_script, testset=testset)
return packaged_script
def broadcast(name, data, private_key,
blockchain_client=BlockchainInfoClient(), testset=False):
def broadcast(name, data, private_key, blockchain_client=BlockchainInfoClient(), testset=False):
"""
Write a name update into the blockchain.
"""
nulldata = build(name, data_hash=hex_hash160(data), testset=testset)
response = embed_data_in_blockchain(
nulldata, private_key, blockchain_client, format='hex')
response = embed_data_in_blockchain(nulldata, private_key, blockchain_client, format='hex')
response.update({'data': nulldata})
return response
def parse(bin_payload):
name_len = ord(bin_payload[0:1])
name = bin_payload[1:1+name_len]
update = bin_payload[1+name_len:1+name_len+LENGTHS['update_hash']]
"""
Parse a payload to get back the name and update hash.
NOTE: bin_payload excludes the leading three bytes.
"""
name_hash_bin = bin_payload[:LENGTHS['name_hash']]
update_hash_bin = bin_payload[LENGTHS['name_hash']:]
name_hash = hexlify( name_hash_bin )
update_hash = hexlify( update_hash_bin )
return {
'opcode': 'NAME_UPDATE',
'name': bin_to_b40(name),
'update': hexlify(update)
'name_hash': hexlify( name_hash ),
'update_hash': hexlify(update)
}

View File

@@ -4,7 +4,7 @@ from utilitybelt import is_hex, hex_to_charset, charset_to_hex
from .config import *
from .b40 import bin_to_b40
from .operations import parse_preorder, parse_registration, parse_update, \
parse_transfer
parse_transfer, parse_putdata, parse_rmdata, parse_namespacedefine, parse_namespacebegin
def get_recipient_from_nameop_outputs(outputs):
@@ -19,11 +19,26 @@ def get_recipient_from_nameop_outputs(outputs):
return None
def parse_nameop_data(data):
def parse_blockstore_op_data(data):
"""
Parse a string of binary data (nulldata from a blockchain transaction) into a blockstore operation.
data format (once unhex'ed):
0 2 3 40
|-----------|-----|-----------------------------------|
magic bytes op payload
For registered names, bytes 0-3 are 'i', 'd', ':', since
name registrations occur as id://name.ns_id verbatum.
For all other operations, "magic bytes" and "op" are our own
special values.
"""
if not is_hex(data):
raise ValueError('Data must be hex')
# if not len(data) <= OP_RETURN_MAX_SIZE*2:
# raise ValueError('Payload too large')
if not len(data) % 2 == 0:
# raise ValueError('Data must have an even number of bytes')
return None
@@ -33,42 +48,84 @@ def parse_nameop_data(data):
except:
raise Exception('Invalid data supplied: %s' % data)
magic_bytes, opcode, payload = bin_data[0:2], bin_data[2:3], bin_data[3:]
# not a registered name, but a full-on operation?
if bin_data[0:3] != NAME_SCHEME[0:3]:
magic_bytes, opcode, payload = bin_data[0:2], bin_data[2:3], bin_data[3:]
if not magic_bytes == MAGIC_BYTES:
# Magic bytes don't match - not an openname operation.
return None
if not magic_bytes == MAGIC_BYTES:
# Magic bytes don't match - not an openname operation.
return None
if opcode == NAME_PREORDER and len(payload) >= MIN_OP_LENGTHS['preorder']:
nameop = parse_preorder(payload)
elif (opcode == NAME_REGISTRATION
and len(payload) >= MIN_OP_LENGTHS['registration']):
nameop = parse_registration(payload)
elif opcode == NAME_UPDATE and len(payload) >= MIN_OP_LENGTHS['update']:
nameop = parse_update(payload)
elif (opcode == NAME_TRANSFER
and len(payload) >= MIN_OP_LENGTHS['transfer']):
nameop = parse_transfer(payload)
else:
nameop = None
# this is a name registration
opcode = NAME_REGISTRATION
return nameop
op = None
if opcode == NAME_PREORDER and len(payload) >= MIN_OP_LENGTHS['preorder']:
op = parse_preorder(payload)
elif (opcode == NAME_REGISTRATION and len(payload) >= MIN_OP_LENGTHS['registration']):
op = parse_registration(payload)
elif opcode == NAME_UPDATE and len(payload) >= MIN_OP_LENGTHS['update']:
op = parse_update(payload)
elif (opcode == NAME_TRANSFER and len(payload) >= MIN_OP_LENGTHS['transfer']):
op = parse_transfer(payload)
elif opcode == NAMESPACE_DEFINE and len(payload) >= MIN_OP_LENGTHS['namespace_define']:
op = parse_namespacedefine( payload )
elif opcode == NAMESPACE_BEGIN and len(payload) >= MIN_OP_LENGTHS['namespace_begin']:
op = parse_namespacebegin( payload )
elif opcode == DATA_PUT and len(payload) >= MIN_OP_LENGTHS['data_put']:
op = parse_putdata( payload )
elif opcode == DATA_REMOVE and len(payload) >= MIN_OP_LENGTHS['data_remove']:
op = parse_rmdata( payload )
return op
def analyze_nameop_outputs(nameop, outputs):
def analyze_op_outputs(nameop, outputs):
    """
    Perform opcode-specific analysis on blockstore operations,
    e.g. inserting new data into the operation as a post-processing step.
    Name transfers: fill in 'recipient' with the hex string of the script_pubkey of the recipient principal.

    nameop: parsed operation dict; must contain an 'opcode' key whose value
        is the *name* of an opcode constant (e.g. 'NAME_TRANSFER').
    outputs: list of the transaction's outputs, scanned for the recipient.

    Returns the (possibly mutated) nameop dict.
    """
    # NOTE(review): eval() resolves the opcode-name string to the module-level
    # opcode constant.  This assumes nameop['opcode'] is always a trusted
    # constant name set by our own parsers — a dict/globals() lookup would be
    # safer; confirm before changing.
    if eval(nameop['opcode']) == NAME_TRANSFER:
        # Only transfers carry a recipient: pull it out of the tx outputs.
        recipient = get_recipient_from_nameop_outputs(outputs)
        nameop.update({'recipient': recipient})
    return nameop
def parse_blockstore_op(data, outputs, senders=None, fee=None):
    """
    Parse a blockstore operation from a transaction's nulldata (data) and a list of outputs, as well as
    optionally the list of transaction's senders and the total fee paid.

    Return a parsed operation (dict), or None if the nulldata does not encode
    a blockstore operation.  The returned dict will also optionally have:
    * "sender": the first (primary) sender's script_pubkey, if there are any senders
    * "fee": the total fee paid for this record.
    """
    op = parse_blockstore_op_data(data)
    if op:
        # fill in opcode-specific fields (e.g. 'recipient' on transfers)
        op = analyze_op_outputs(op, outputs)

        # record the primary (first) sender's script_pubkey, if known
        if senders and len(senders) > 0 and 'script_pubkey' in senders[0]:
            primary_sender = str(senders[0]['script_pubkey'])
            op['sender'] = primary_sender

        # NOTE(review): a fee of 0 is falsy and will not be recorded;
        # confirm whether that is intended before changing to `is not None`.
        if fee:
            op['fee'] = fee

    return op

View File

@@ -1,6 +1,7 @@
from utilitybelt import is_hex, is_valid_int
from binascii import hexlify, unhexlify
from coinkit import BitcoinPrivateKey, script_to_hex
from .config import *
@@ -12,13 +13,13 @@ def add_magic_bytes(hex_script, testset=False):
return hexlify(magic_bytes) + hex_script
def name_script_to_hex(script):
""" Parse the readable version of a name script, return the hex version.
def blockstore_script_to_hex(script):
""" Parse the readable version of a script, return the hex version.
"""
hex_script = ''
parts = script.split(' ')
for part in parts:
if part[0:5] == 'NAME_':
if part.startswith("NAME_") or part.startswith("DATA_") or part.startswith("NAMESPACE_"):
try:
hex_script += '%0.2x' % ord(eval(part))
except:
@@ -28,8 +29,27 @@ def name_script_to_hex(script):
elif is_valid_int(part):
hex_script += '%0.2x' % int(part)
else:
raise ValueError(
'Invalid script, contains invalid characters: %s' % script)
raise ValueError('Invalid script (at %s), contains invalid characters: %s' % (part, script))
if len(hex_script) % 2 != 0:
raise ValueError('Invalid script: must have an even number of chars.')
return hex_script
def name_script_to_hex(script):
    """ Parse the readable version of a name script, return the hex version.

    Thin compatibility wrapper: blockstore_script_to_hex() already
    recognizes NAME_ (as well as DATA_ and NAMESPACE_) opcodes.
    """
    # BUG FIX: blockstore_script_to_hex() takes a single argument (the
    # script text); passing "NAME_" as an extra positional argument raised
    # a TypeError on every call.
    return blockstore_script_to_hex(script)
def data_script_to_hex(script):
    """ Parse the readable version of a data script, return the hex version.

    Thin compatibility wrapper: blockstore_script_to_hex() already
    recognizes DATA_ (as well as NAME_ and NAMESPACE_) opcodes.
    """
    # BUG FIX: blockstore_script_to_hex() takes a single argument (the
    # script text); passing "DATA_" as an extra positional argument raised
    # a TypeError on every call.
    return blockstore_script_to_hex(script)
def get_script_pubkey( private_key ):
    """
    Build the hex-encoded pay-to-pubkeyhash (P2PKH) scriptPubKey
    for the address derived from the given private key.
    """
    # hash160 of the corresponding public key is the P2PKH payload
    pubkey_hash160 = BitcoinPrivateKey(private_key).public_key().hash160()
    return script_to_hex( 'OP_DUP OP_HASH160 %s OP_EQUALVERIFY OP_CHECKSIG' % pubkey_hash160)