Merge branch 'develop' into feature/blockstack_client_refactor

This commit is contained in:
Jude Nelson
2018-06-27 17:36:34 -04:00
4 changed files with 208 additions and 8 deletions

View File

@@ -32,9 +32,18 @@ from .utils import get_json, config_log, pretty_print
from api.config import SEARCH_BLOCKCHAIN_DATA_FILE, SEARCH_PROFILE_DATA_FILE
from .db import namespace, profile_data
from .db import search_profiles
from .db import people_cache, twitter_cache, username_cache
# Open handles to the "next" (staging) search databases.  The index is built
# here offline, then promoted to the live 'search_db'/'search_cache' names by
# swap_next_current_db().
client = get_mongo_client()
search_db = client['search_db_next']
search_cache = client['search_cache_next']

# collections in the staging search DB
namespace = search_db.namespace
profile_data = search_db.profile_data
search_profiles = search_db.profiles

# collections in the staging search cache
people_cache = search_cache.people_cache
twitter_cache = search_cache.twitter_cache
username_cache = search_cache.username_cache
""" create the basic index
"""
@@ -131,11 +140,26 @@ def flush_db():
client = get_mongo_client()
# delete any old cache/index
client.drop_database('search_db')
client.drop_database('search_cache')
client.drop_database('search_db_next')
client.drop_database('search_cache_next')
log.debug("Flushed DB")
def swap_next_current_db():
    """
    Promote the freshly-built '*_next' staging databases to be the live
    search databases, keeping the previous live data as '*_prior' backups.

    The statement order matters:
      1. drop any stale '*_prior' backups,
      2. back up the current live DBs to '*_prior',
      3. drop the live DBs,
      4. copy the '*_next' staging DBs into the live names.

    NOTE(review): the 'copydb' admin command was deprecated in MongoDB 4.0
    and removed in 4.2 -- confirm the deployed server version supports it.
    """
    client = get_mongo_client()

    # step 1: remove stale backups so copydb has a clean target
    client.drop_database('search_db_prior')
    client.drop_database('search_cache_prior')

    # step 2: back up the current live DBs
    client.admin.command('copydb', fromdb='search_db', todb='search_db_prior')
    client.admin.command('copydb', fromdb='search_cache', todb='search_cache_prior')

    # step 3: drop the live DBs so the staging copies can take their names
    client.drop_database('search_db')
    client.drop_database('search_cache')

    # step 4: promote the staging DBs to live
    client.admin.command('copydb', fromdb='search_db_next', todb='search_db')
    client.admin.command('copydb', fromdb='search_cache_next', todb='search_cache')
def optimize_db():
@@ -281,6 +305,7 @@ if __name__ == "__main__":
fetch_profile_data_from_file()
fetch_namespace_from_file()
create_search_index()
swap_next_current_db()
else:
print "Usage error"

View File

@@ -2719,6 +2719,174 @@ def get_name_and_history(name, include_expired=False, include_grace=True, hostpo
return {'status': True, 'record': rec, 'lastblock': hist['lastblock'], 'indexing': hist['indexing']}
def get_name_history_page(name, page, hostport=None, proxy=None):
    """
    Get one page of the name's history from a Blockstack node.

    name: the name whose history to fetch
    page: the page number to fetch
    hostport: 'host:port' of the node to contact (used when proxy is None)
    proxy: an already-connected node RPC proxy

    Returns {'status': True, 'history': ..., 'indexing': ..., 'lastblock': ...} on success
    Returns {'error': ...} on error
    """
    assert hostport or proxy, 'Need hostport or proxy'
    if proxy is None:
        proxy = connect_hostport(hostport)

    # schema for the history payload: entries grouped by block height,
    # keyed by stringified block number
    hist_schema = {
        'type': 'object',
        'patternProperties': {
            '^[0-9]+$': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': OP_HISTORY_SCHEMA['properties'],
                    'required': [
                        'op',
                        'opcode',
                        'txid',
                        'vtxindex',
                    ],
                },
            },
        },
    }

    hist_resp_schema = {
        'type': 'object',
        'properties': {
            'history': hist_schema,
        },
        'required': [ 'history' ],
    }

    resp_schema = json_response_schema(hist_resp_schema)
    resp = {}
    lastblock = None
    indexing = None   # bugfix: was a dead `indexin = None` typo

    try:
        _resp = proxy.get_name_history_page(name, page)
        resp = json_validate(resp_schema, _resp)
        if json_is_error(resp):
            return resp

        lastblock = _resp['lastblock']
        indexing = _resp['indexing']

    except ValidationError as e:
        # node reply did not match the expected schema
        resp = json_traceback(resp.get('error'))
        return resp

    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)

        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.'}
        return resp

    return {'status': True, 'history': resp['history'], 'lastblock': lastblock, 'indexing': indexing}
def name_history_merge(h1, h2):
    """
    Given two name histories (dicts mapping str(block_height) -> list of
    history entries, each entry a dict with at least a 'vtxindex' field),
    merge them into one history.

    For blocks present in both histories the entry lists are combined,
    sorted by 'vtxindex', and de-duplicated on 'vtxindex' (first entry at a
    given vtxindex wins).  Blocks present in only one history are copied
    through unchanged.

    Returns the merged history dict.
    """
    ret = {}

    # fixes: use sets for O(1) membership (the original did `b in overlap`
    # against a list inside a loop), and a stable key-based sort instead of
    # a cmp callback that never returned 0 for equal vtxindex values
    # (Python-2-only API with implementation-defined ordering of ties).
    blocks_1 = set(int(b) for b in h1.keys())
    blocks_2 = set(int(b) for b in h2.keys())

    # blocks present in both histories: combine, sort, de-duplicate
    overlap = blocks_1 & blocks_2
    for b in overlap:
        combined = sorted(h1[str(b)] + h2[str(b)], key=lambda v: v['vtxindex'])

        uniq = []
        last_vtxindex = None
        for entry in combined:
            if entry['vtxindex'] != last_vtxindex:
                uniq.append(entry)
                last_vtxindex = entry['vtxindex']

        ret[str(b)] = uniq

    # blocks present in only one history: copy through
    for b in (blocks_1 | blocks_2) - overlap:
        if b in blocks_1:
            ret[str(b)] = h1[str(b)]
        else:
            ret[str(b)] = h2[str(b)]

    return ret
def get_name_history(name, hostport=None, proxy=None, history_page=None):
    """
    Get the full history of a name, fetched page by page.

    If history_page is given, only that single page is fetched and returned.
    Otherwise, pages are fetched and merged until an empty page is seen.

    Returns {'status': True, 'history': ...} on success, where history is
    grouped by block.
    Returns {'error': ...} on error
    """
    assert hostport or proxy, 'Need hostport or proxy'
    if proxy is None:
        proxy = connect_hostport(hostport)

    # single-page request: hand back that page directly
    if history_page is not None:
        page_resp = get_name_history_page(name, history_page, proxy=proxy)
        if 'error' in page_resp:
            return page_resp

        return {'status': True, 'history': page_resp['history'], 'indexing': page_resp['indexing'], 'lastblock': page_resp['lastblock']}

    merged_hist = {}
    indexing = None
    lastblock = None

    # page through the whole history (capped at 10000 pages -- this is
    # obviously too big)
    for page_num in range(0, 10000):
        page_resp = get_name_history_page(name, page_num, proxy=proxy)
        if 'error' in page_resp:
            return page_resp

        indexing = page_resp['indexing']
        lastblock = page_resp['lastblock']

        if len(page_resp['history']) == 0:
            # caught up
            break

        merged_hist = name_history_merge(merged_hist, page_resp['history'])

    return {'status': True, 'history': merged_hist, 'indexing': indexing, 'lastblock': lastblock}
def get_name_and_history(name, include_expired=False, include_grace=True, hostport=None, proxy=None, history_page=None):
    """
    Get the current name record along with its history
    (this is a replacement for proxy.get_name_blockchain_record())

    Return {'status': True, 'record': ...} on success, where .record.history
    is defined as {block_height: [{history}, {history}, ...], ...}
    Return {'error': ...} on error
    """
    assert hostport or proxy, 'Need hostport or proxy'
    if proxy is None:
        proxy = connect_hostport(hostport)

    # fetch the history first; bail on failure
    history_info = get_name_history(name, proxy=proxy, history_page=history_page)
    if 'error' in history_info:
        return history_info

    # fetch just the current name record (no history attached)
    name_rec = get_name_record(name, include_history=False, include_expired=include_expired, include_grace=include_grace, proxy=proxy)
    if 'error' in name_rec:
        return name_rec

    # graft the history onto the record
    name_rec['history'] = history_info['history']
    return {'status': True, 'record': name_rec, 'lastblock': history_info['lastblock'], 'indexing': history_info['indexing']}
def get_name_at(name, block_id, include_expired=False, hostport=None, proxy=None):
"""
Get the name as it was at a particular height.

View File

@@ -888,7 +888,13 @@ class BlockstackAPIEndpointHandler(SimpleHTTPRequestHandler):
return {'error': 'Not found', 'http_status': 404}
if 'zonefile' in domain_rec:
return {'status': True, 'zonefile': domain_rec['zonefile']}
try:
zf_txt = base64.b64decode(domain_rec['zonefile'])
return {'status': True, 'zonefile': zf_txt}
except:
log.error("Failed to parse zonefile returned by blockstackd: contents return: {}"
.format(domain_rec['zonefile']))
return {'error': 'Failed to parse zonefile', 'http_status': 502}
last_zonefile_hash = None
page = 0
@@ -1011,7 +1017,7 @@ class BlockstackAPIEndpointHandler(SimpleHTTPRequestHandler):
else:
return self._reply_json(
{'error': res['error']}, status_code=res['http_status'])
domain_zf_txt = res['zonefile']
domain_zf_json = zonefile.decode_name_zonefile(domain_name, domain_zf_txt, allow_legacy=False)
matching_uris = [ x['target'] for x in domain_zf_json['uri'] if x['name'] == '_resolver' ]
@@ -1106,6 +1112,7 @@ class BlockstackAPIEndpointHandler(SimpleHTTPRequestHandler):
return
blockstackd_url = get_blockstackd_url(self.server.config_path)
res = blockstackd_client.get_name_record(name, include_history=True,
hostport=blockstackd_url, history_page=page)
if json_is_error(res):

View File

@@ -24,4 +24,4 @@
__version_major__ = '0'
__version_minor__ = '19'
__version_patch__ = '0'
__version__ = '{}.{}.{}.0'.format(__version_major__, __version_minor__, __version_patch__)
__version__ = '{}.{}.{}.7'.format(__version_major__, __version_minor__, __version_patch__)