# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import sys, struct, time, os
from twisted.python import usage


class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename
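
# dump_share() reads the first 32 bytes of the share file and compares them
# against storage.MutableShareFile.MAGIC to decide whether this is a mutable
# slot or an immutable share, then dispatches to the matching dump routine.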
def dump_share(config, out=sys.stdout, err=sys.stderr):
    from allmydata import uri, storage
    from allmydata.util import base32

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % config['filename']

    f = open(config['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        return dump_mutable_share(config, out, err)
    # otherwise assume it's immutable
    f = storage.ShareFile(config['filename'])
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = storage.ReadBucketProxy(None)
    offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
    seek = offsets['uri_extension']
    length = struct.unpack(">L", f.read_share_data(seek, 4))[0]
    seek += 4
    UEB_data = f.read_share_data(seek, length)

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = config['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)

    sizes = {}
    sizes['data'] = bp._data_size
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    # display lease information too
    leases = list(f.iter_leases())
    if leases:
        for i,lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

    print >>out
    return 0
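
# format_expiration_time() renders a lease expiration timestamp as seconds
# remaining, with an approximate days/hours annotation. For example (purely
# illustrative values), a lease expiring three days from now would render as
# "259200s (3 days)", and one two hours out as "7200s (2 hours)".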
def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
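
# dump_mutable_share() prints the container-level fields of a mutable slot
# (write enabler, lease bookkeeping, container size and data length), then
# hands off to dump_SDMF_share() when the slot's first data byte marks it as
# an SDMF share.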
def dump_mutable_share(config, out, err):
    from allmydata import storage
    from allmydata.util import base32, idlib
    m = storage.MutableShareFile(config['filename'])
    f = open(config['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, " ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, " expires in %s" % when
            print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m.DATA_OFFSET, data_length, config, out, err)

    return 0
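
# dump_SDMF_share() reads the first 2000 bytes of the share data as a guess
# at how much is needed to unpack the SDMF header; if unpack_share() raises
# NeedMoreDataError, it re-reads with the size the exception reports and
# tries again.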
def dump_SDMF_share(offset, length, config, out, err):
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    f = open(config['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(config['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces

    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = config['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap

    print >>out

class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"
    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]
    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t

def dump_cap(config, out=sys.stdout, err=sys.stderr):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    cap = config.cap
    nodeid = None
    if config['nodeid']:
        nodeid = b32decode(config['nodeid'].upper())
    secret = None
    if config['client-secret']:
        secret = base32.a2b(config['client-secret'])
    elif config['client-dir']:
        secretfile = os.path.join(config['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out, err)
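
# _dump_secrets() walks the lease-secret derivation chain implemented in
# allmydata.util.hashutil: the client's base secret yields a client renewal
# secret, which combined with the storage index yields a file renewal secret,
# which combined with a storage server's nodeid yields that server's lease
# renewal secret. The cancel secrets follow the same pattern.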
def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)
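
# dump_uri_instance() dispatches on the concrete URI class: file caps print
# their keys, hashes, and storage index, while directory caps print a header
# and then recurse into the embedded filenode URI with show_header=False.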
def dump_uri_instance(u, nodeid, secret, out, err, show_header=True):
    from allmydata import storage, uri
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    else:
        print >>out, "unknown cap type"

class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t

def find_shares(config, out=sys.stdout, err=sys.stderr):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

     /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
     /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
     /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata import storage

    sharedir = storage.storage_index_to_dir(storage.si_a2b(config.si_s))
    for d in config.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)

    return 0

class CatalogSharesOptions(usage.Options):
    """
    """
    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for this purpose.
"""
        return t
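
# describe_share() emits one summary line for a single share file: it reads
# the 32-byte prefix to distinguish the mutable and immutable container
# formats, extracts the encoding parameters and the earliest lease
# expiration, and prints a "SDMF ...", "CHK ...", or "UNKNOWN ..." line as
# documented in the catalog-shares usage text above.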
def describe_share(abs_sharefile, si_s, shnum_s, now, out, err):
    from allmydata import uri, storage
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))
        expiration_time = min( [lease[1].expiration_time
                                for lease in leases] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        sf = storage.ShareFile(abs_sharefile)
        # use a ReadBucketProxy to parse the bucket and find the uri extension
        bp = storage.ReadBucketProxy(None)
        offsets = bp._parse_offsets(sf.read_share_data(0, 0x24))
        seek = offsets['uri_extension']
        length = struct.unpack(">L", sf.read_share_data(seek, 4))[0]
        seek += 4
        UEB_data = sf.read_share_data(seek, length)
        expiration_time = min( [lease.expiration_time
                                for lease in sf.iter_leases()] )
        expiration = max(0, expiration_time - now)

        unpacked = uri.unpack_extension_readable(UEB_data)
        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()
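
# catalog_shares() walks each node directory's storage/shares/ tree, which is
# laid out as <abbreviated-SI>/<storage-index>/<share-number>, skips the
# "incoming" directory, and calls describe_share() on every share file found.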
def catalog_shares(config, out=sys.stdout, err=sys.stderr):
    now = time.time()
    for d in config.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in abbrevs:
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                for si_s in os.listdir(abbrevdir):
                    si_dir = os.path.join(abbrevdir, si_s)
                    for shnum_s in os.listdir(si_dir):
                        abs_sharefile = os.path.join(si_dir, shnum_s)
                        abs_sharefile = os.path.abspath(abs_sharefile)
                        assert os.path.isfile(abs_sharefile)
                        describe_share(abs_sharefile, si_s, shnum_s, now,
                                       out, err)
    return 0

class ReplOptions(usage.Options):
    pass

def repl(options, out=sys.stdout, err=sys.stderr):
    import code
    return code.interact()
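
# DebugCommand groups the subcommands above under "tahoe debug": each entry
# in its subCommands list names an Options class, and subDispatch maps the
# same names to the functions that implement them, which do_debug() looks up
# once the subcommand has been parsed. The module-level subCommands and
# dispatch tables at the bottom of this file are presumably what the
# top-level 'tahoe' command runner scans to register 'debug' itself.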
class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """
Subcommands:
    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

subDispatch = {
    "dump-share": dump_share,
    "dump-cap": dump_cap,
    "find-shares": find_shares,
    "catalog-shares": catalog_shares,
    "repl": repl,
    }


def do_debug(options):
    so = options.subOptions
    f = subDispatch[options.subCommand]
    return f(so, options.stdout, options.stderr)


subCommands = [
    ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],
    ]

dispatch = {
    "debug": do_debug,
    }