Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git, synced 2025-01-31 08:25:35 +00:00
hush pyflakes-0.4.0 warnings: remove trivial unused variables. For #900.
This commit is contained in:
parent 874a979a8e
commit 731d15e56f
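Pyflakes 0.4.0 added a check for local names that are bound but never read, and every hunk below applies the same fix: drop the dead binding while keeping any side effect of the right-hand side. A minimal sketch of the warning and the fix (hypothetical names, not code from this commit):

    # before: pyflakes 0.4.0 reports
    #   "local variable 'n' is assigned to but never used"
    def first_field(line):
        n = len(line)        # bound here, never read
        return line.split(":", 1)[0]

    # after: the dead binding is simply deleted
    def first_field(line):
        return line.split(":", 1)[0]

Where the right-hand side matters for its side effects (f.trap(), log.msg(), client.Client()), the call is kept and only the assignment target is dropped, as in the hunks below.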
@@ -413,7 +413,6 @@ class DirectoryNode:
     def set_children(self, entries, overwrite=True):
         # this takes URIs
         a = Adder(self, overwrite=overwrite)
-        node_entries = []
         for (name, e) in entries.iteritems():
             assert isinstance(name, unicode)
             if len(e) == 2:
@@ -82,9 +82,9 @@ class ValidatedThingObtainer:
         self._log_id = log_id

     def _bad(self, f, validatedthingproxy):
-        failtype = f.trap(RemoteException, DeadReferenceError,
-                          IntegrityCheckReject, layout.LayoutInvalid,
-                          layout.ShareVersionIncompatible)
+        f.trap(RemoteException, DeadReferenceError,
+               IntegrityCheckReject, layout.LayoutInvalid,
+               layout.ShareVersionIncompatible)
         level = log.WEIRD
         if f.check(DeadReferenceError):
             level = log.UNUSUAL
@@ -598,9 +598,9 @@ class BlockDownloader(log.PrefixingLogMixin):
         self.parent.hold_block(self.blocknum, data)

     def _got_block_error(self, f):
-        failtype = f.trap(RemoteException, DeadReferenceError,
-                          IntegrityCheckReject,
-                          layout.LayoutInvalid, layout.ShareVersionIncompatible)
+        f.trap(RemoteException, DeadReferenceError,
+               IntegrityCheckReject, layout.LayoutInvalid,
+               layout.ShareVersionIncompatible)
         if f.check(RemoteException, DeadReferenceError):
             level = log.UNUSUAL
         else:
@@ -777,7 +777,6 @@ class CHKUploader:
         for shnum in self._encoder.get_shares_placed():
             peer_tracker = self._peer_trackers[shnum]
             peerid = peer_tracker.peerid
-            peerid_s = idlib.shortnodeid_b2a(peerid)
             r.sharemap.add(shnum, peerid)
             r.servermap.add(peerid, shnum)
         r.pushed_shares = len(self._encoder.get_shares_placed())
@@ -121,7 +121,6 @@ class ResponseCache:
         # we have a fragment that contains the whole request

         index = (verinfo, shnum)
-        end = offset+length
         for entry in self.cache.get(index, set()):
             (e_start, e_data, e_timestamp) = entry
             if self._inside(offset, length, e_start, len(e_data)):
@@ -166,7 +166,7 @@ def pack_offsets(verification_key_length, signature_length,
     o3 = offsets['block_hash_tree'] = o2 + share_hash_chain_length
     o4 = offsets['share_data'] = o3 + block_hash_tree_length
     o5 = offsets['enc_privkey'] = o4 + share_data_length
-    o6 = offsets['EOF'] = o5 + encprivkey_length
+    offsets['EOF'] = o5 + encprivkey_length

     return struct.pack(">LLLLQQ",
                        offsets['signature'],
@@ -374,7 +374,6 @@ class Publish:
                           str(self._first_write_error),
                           self._first_write_error)

-        new_assignments = []
         # we then index this peerlist with an integer, because we may have to
         # wrap. We update the goal as we go.
         i = 0
@@ -465,7 +464,7 @@ class Publish:
             # build the block hash tree. SDMF has only one leaf.
             leaves = [hashutil.block_hash(share_data)]
             t = hashtree.HashTree(leaves)
-            block_hash_trees[shnum] = block_hash_tree = list(t)
+            block_hash_trees[shnum] = list(t)
             share_hash_leaves[shnum] = t[0]
         for leaf in share_hash_leaves:
             assert leaf is not None
@@ -500,7 +500,6 @@ class ServermapUpdater:
         self._status.set_status("Sending %d initial queries" % len(peerlist))
         self._queries_outstanding = set()
         self._sharemap = DictOfSets() # shnum -> [(peerid, seqnum, R)..]
-        dl = []
         for (peerid, ss) in peerlist.items():
             self._queries_outstanding.add(peerid)
             self._do_query(ss, peerid, self._storage_index, self._read_size)
@@ -57,7 +57,6 @@ class ProvisioningTool(rend.Page):
         i_select = T.select(name=name)
         for (count, description) in options:
             count = astype(count)
-            selected = False
             if ((current_value is not None and count == current_value) or
                 (current_value is None and count == default)):
                 o = T.option(value=str(count), selected="true")[description]
@@ -340,7 +339,6 @@ class ProvisioningTool(rend.Page):
         add_output("Users",
                    T.div["Average file size: ", number(file_size)])
         total_files = num_users * files_per_user / sharing_ratio
-        user_file_check_interval = file_check_interval / files_per_user

         add_output("Grid",
                    T.div["Total number of files in grid: ",
@@ -711,6 +709,7 @@ class ProvisioningTool(rend.Page):
                 from allmydata import reliability
                 # we import this just to test to see if the page is available
                 _hush_pyflakes = reliability
+                del _hush_pyflakes
                 f = [T.div[T.a(href="../reliability")["Reliability Math"]], f]
             except ImportError:
                 pass
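The `_hush_pyflakes = reliability` assignment above is the project's older idiom for probing an optional import (the reliability page needs NumPy) while silencing pyflakes' "imported but unused" warning; under pyflakes 0.4.0 the assignment itself becomes an unused binding, so the commit follows it with `del`. Roughly the resulting shape (a sketch, not the exact file):

    try:
        from allmydata import reliability   # probe: requires NumPy
        _hush_pyflakes = reliability        # quiet "imported but unused"
        del _hush_pyflakes                  # quiet "assigned but never used" (pyflakes 0.4.0)
    except ImportError:
        pass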
@@ -79,7 +79,6 @@ class ReliabilityModel:
        #print "DIFF:", (old_post_repair - decay * repair)

        START = array([0]*N + [1])
-        ALIVE = array([0]*k + [1]*(1+N-k))
        DEAD = array([1]*k + [0]*(1+N-k))
        REPAIRp = array([0]*k + [1]*(R-k) + [0]*(1+N-R))
        REPAIR_newshares = array([0]*k +
@@ -87,7 +86,6 @@ class ReliabilityModel:
                                 [0]*(1+N-R))
        assert REPAIR_newshares.shape[0] == N+1
        #print "START", START
-        #print "ALIVE", ALIVE
        #print "REPAIRp", REPAIRp
        #print "REPAIR_newshares", REPAIR_newshares

@@ -553,10 +553,7 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
-        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
-        extra_lease_offset = m._read_extra_lease_offset(f)
-        container_size = extra_lease_offset - m.DATA_OFFSET
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)
@@ -27,7 +27,6 @@ g.setServiceParent(application)
 """

 def create_stats_gatherer(config):
-    out = config.stdout
     err = config.stderr
     basedir = config['basedir']
     if not basedir:
@@ -72,14 +72,11 @@ class BackerUpper:
     def run(self):
         options = self.options
         nodeurl = options['node-url']
-        from_dir = options.from_dir
-        to_dir = options.to_dir
         self.verbosity = 1
         if options['quiet']:
             self.verbosity = 0
         if options['verbose']:
             self.verbosity = 2
-        stdin = options.stdin
         stdout = options.stdout
         stderr = options.stderr

@@ -101,7 +98,6 @@ class BackerUpper:
             to_url += "/"

         archives_url = to_url + "Archives/"
-        latest_url = to_url + "Latest"

         # first step: make sure the target directory exists, as well as the
         # Archives/ subdirectory.
@@ -112,11 +108,6 @@ class BackerUpper:
                 print >>stderr, "Unable to create target directory: %s %s %s" % \
                       (resp.status, resp.reason, resp.read())
                 return 1
-            archives_dir = {}
-        else:
-            jdata = simplejson.load(resp)
-            (otype, attrs) = jdata
-            archives_dir = attrs["children"]

         # second step: process the tree
         new_backup_dircap = self.process(options.from_dir)
@@ -530,7 +530,6 @@ class Copier:
         url = self.nodeurl + "uri/%s" % urllib.quote(rootcap)
         if path:
             url += "/" + escape_path(path)
-        last_slash = path.rfind("/")

         resp = do_http("GET", url + "?t=json")
         if resp.status == 404:
@@ -93,7 +93,6 @@ class StatsGrabber(SlowOperationRunner):

     def write_results(self, data):
         stdout = self.options.stdout
-        stderr = self.options.stderr
         keys = ("count-immutable-files",
                 "count-mutable-files",
                 "count-literal-files",
@@ -5,17 +5,11 @@ from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path

 def rm(options):
     """
-    @param verbosity: 0, 1, or 2, meaning quiet, verbose, or very verbose
-
     @return: a Deferred which eventually fires with the exit code
     """
     nodeurl = options['node-url']
     aliases = options.aliases
     where = options.where
-    if options['quiet']:
-        verbosity = 0
-    else:
-        verbosity = 2
     stdout = options.stdout
     stderr = options.stderr

@@ -258,7 +258,6 @@ class ShareCrawler(service.MultiService):
         self.current_sleep_time = None
         self.next_wake_time = None
         try:
-            s = self.last_complete_prefix_index
             self.start_current_prefix(start_slice)
             finished_cycle = True
         except TimeSliceExceeded:
@@ -76,7 +76,7 @@ class StorageServer(service.MultiService, Referenceable):
         self._clean_incomplete()
         fileutil.make_dirs(self.incomingdir)
         self._active_writers = weakref.WeakKeyDictionary()
-        lp = log.msg("StorageServer created", facility="tahoe.storage")
+        log.msg("StorageServer created", facility="tahoe.storage")

         if reserved_space:
             if self.get_available_space() is None:
@@ -479,7 +479,7 @@ class StorageServer(service.MultiService, Referenceable):
        start = time.time()
        self.count("writev")
        si_s = si_b2a(storage_index)
-        lp = log.msg("storage: slot_writev %s" % si_s)
+        log.msg("storage: slot_writev %s" % si_s)
        si_dir = storage_index_to_dir(storage_index)
        (write_enabler, renew_secret, cancel_secret) = secrets
        # shares exist if there is a file for them
@@ -95,7 +95,6 @@ class SpeedTest:

     def do_test(self):
         print "doing test"
-        rr = self.client_rref
         d = defer.succeed(None)
         d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one
         d.addCallback(self.measure_rtt)
@@ -1335,18 +1335,11 @@ def _corrupt_crypttext_hash_tree(data, debug=False):
     return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug)

 def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False):
-    """Scramble the file data -- the field containing the crypttext hash tree
-    will have the 7th bit of the 9th byte flipped.
+    """Scramble the file data -- the byte at offset 0x221 will have its 7th
+    (b1) bit flipped.
     """
     sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
     assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
-    if sharevernum == 1:
-        crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
-        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
-    else:
-        crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
-        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
-
     if debug:
         log.msg("original data: %r" % (data,))
     return data[:0x0c+0x221] + chr(ord(data[0x0c+0x221])^0x02) + data[0x0c+0x2210+1:]
@@ -32,7 +32,7 @@ class Basic(unittest.TestCase):
         basedir = "test_client.Basic.test_loadable"
         os.mkdir(basedir)
         open(os.path.join(basedir, "introducer.furl"), "w").write("")
-        c = client.Client(basedir)
+        client.Client(basedir)

     def test_loadable_old_config_bits(self):
         basedir = "test_client.Basic.test_loadable_old_config_bits"
@@ -190,7 +190,7 @@ class Run(unittest.TestCase, testutil.StallMixin):
         dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
         open(os.path.join(basedir, "introducer.furl"), "w").write(dummy)
         open(os.path.join(basedir, "suicide_prevention_hotline"), "w")
-        c = client.Client(basedir)
+        client.Client(basedir)

     def test_reloadable(self):
         basedir = "test_client.Run.test_reloadable"
@@ -341,7 +341,8 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
         ss = StorageServer(self.basedir, serverid)
         ss.setServiceParent(self.s)

-        sis = [self.write(i, ss, serverid) for i in range(10)]
+        for i in range(10):
+            self.write(i, ss, serverid)

         statefile = os.path.join(self.basedir, "statefile")
         c = ConsumingCrawler(ss, statefile)
@@ -385,7 +386,8 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
         ss = StorageServer(self.basedir, serverid)
         ss.setServiceParent(self.s)

-        sis = [self.write(i, ss, serverid) for i in range(10)]
+        for i in range(10):
+            self.write(i, ss, serverid)

         statefile = os.path.join(self.basedir, "statefile")
         c = ShareCrawler(ss, statefile)
@@ -412,7 +414,8 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
         ss = StorageServer(self.basedir, serverid)
         ss.setServiceParent(self.s)

-        sis = [self.write(i, ss, serverid) for i in range(30)]
+        for i in range(30):
+            self.write(i, ss, serverid)

         statefile = os.path.join(self.basedir, "statefile")
         c = OneShotCrawler(ss, statefile)
@@ -38,7 +38,6 @@ class MutableChecker(GridTestMixin, unittest.TestCase, ErrorMixin):
         def _created(node):
             self.node = node
             self.fileurl = "uri/" + urllib.quote(node.get_uri())
-            si = self.node.get_storage_index()
         d.addCallback(_created)
         # now make sure the webapi verifier sees no problems
         d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true",
@@ -395,13 +394,14 @@ class DeepCheckWebGood(DeepCheckBase, unittest.TestCase):
         self.check_stats_good(stats)

     def do_web_stream_check(self, ignored):
+        # TODO
         return
         d = self.web(self.root, t="stream-deep-check")
         def _check(res):
             units = list(self.parse_streamed_json(res))
-            files = [u for u in units if u["type"] in ("file", "directory")]
+            #files = [u for u in units if u["type"] in ("file", "directory")]
             assert units[-1]["type"] == "stats"
-            stats = units[-1]["stats"]
+            #stats = units[-1]["stats"]
             # ...
         d.addCallback(_check)
         return d
@@ -294,7 +294,6 @@ class Dirnode(GridTestMixin, unittest.TestCase,
         return d

     def _mark_file_bad(self, rootnode):
-        si = rootnode.get_storage_index()
         self.delete_shares_numbered(rootnode.get_uri(), [0])
         return rootnode

@@ -976,25 +975,25 @@ class Packing(unittest.TestCase):
         self.failUnlessIn("lit", packed)

         kids = self._make_kids(nm, ["imm", "lit", "write"])
-        e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
-                                  dirnode.pack_children,
-                                  fn, kids, deep_immutable=True)
+        self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+                              dirnode.pack_children,
+                              fn, kids, deep_immutable=True)

         # read-only is not enough: all children must be immutable
         kids = self._make_kids(nm, ["imm", "lit", "read"])
-        e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
-                                  dirnode.pack_children,
-                                  fn, kids, deep_immutable=True)
+        self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+                              dirnode.pack_children,
+                              fn, kids, deep_immutable=True)

         kids = self._make_kids(nm, ["imm", "lit", "dirwrite"])
-        e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
-                                  dirnode.pack_children,
-                                  fn, kids, deep_immutable=True)
+        self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+                              dirnode.pack_children,
+                              fn, kids, deep_immutable=True)

         kids = self._make_kids(nm, ["imm", "lit", "dirread"])
-        e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
-                                  dirnode.pack_children,
-                                  fn, kids, deep_immutable=True)
+        self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+                              dirnode.pack_children,
+                              fn, kids, deep_immutable=True)

 class FakeMutableFile:
     implements(IMutableFileNode)
@@ -457,7 +457,6 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         def _ready(res):
             k,happy,n = e.get_param("share_counts")
             assert n == NUM_SHARES # else we'll be completely confused
-            all_peers = []
             for shnum in range(NUM_SHARES):
                 mode = bucket_modes.get(shnum, "good")
                 peer = FakeBucketReaderWriterProxy(mode)
@@ -30,7 +30,6 @@ class Node(unittest.TestCase):
                            needed_shares=3,
                            total_shares=10,
                            size=1000)
-        c = FakeClient()
         cf = cachedir.CacheFile("none")
         fn1 = ImmutableFileNode(u, None, None, None, None, cf)
         fn2 = ImmutableFileNode(u, None, None, None, None, cf)
@@ -55,7 +54,6 @@ class Node(unittest.TestCase):
     def test_literal_filenode(self):
         DATA = "I am a short file."
         u = uri.LiteralFileURI(data=DATA)
-        c = None
         fn1 = LiteralFileNode(u)
         fn2 = LiteralFileNode(u)
         self.failUnlessEqual(fn1, fn2)
@@ -91,7 +89,6 @@ class Node(unittest.TestCase):
     def test_mutable_filenode(self):
-        client = FakeClient()
         wk = "\x00"*16
         fp = "\x00"*32
         rk = hashutil.ssk_readkey_hash(wk)
         si = hashutil.ssk_storage_index_hash(rk)

@@ -81,7 +81,7 @@ class Incomplete(unittest.TestCase):
         self.failUnlessEqual(ht.needed_hashes(5, True), set([12, 11, 6, 1]))

     def test_depth_of(self):
-        ht = hashtree.IncompleteHashTree(8)
+        hashtree.IncompleteHashTree(8)
         self.failUnlessEqual(hashtree.depth_of(0), 0)
         for i in [1,2]:
             self.failUnlessEqual(hashtree.depth_of(i), 1, "i=%d"%i)
@@ -78,7 +78,7 @@ class Test(common.ShareManglingMixin, unittest.TestCase):
             random.shuffle(shnums)
             for i in shnums[:7]:
                 self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
-            before_download_reads = self._count_reads()
+            #before_download_reads = self._count_reads()
         d.addCallback(_then_corrupt_7)
         d.addCallback(self._download_and_check_plaintext)
         return d
@@ -129,7 +129,7 @@ class SystemTest(SystemTestMixin, unittest.TestCase):
            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
                                 "version", "oldest")
-            received_announcements[c] = ra = {}
+            received_announcements[c] = {}
            def got(serverid, ann_d, announcements):
                announcements[serverid] = ann_d
            c.subscribe_to("storage", got, received_announcements[c])
@@ -6,7 +6,6 @@ from twisted.internet import defer, reactor
 from allmydata import uri, client
 from allmydata.nodemaker import NodeMaker
 from allmydata.util import base32
-from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
      ssk_pubkey_fingerprint_hash
 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
@@ -71,7 +70,6 @@ class FakeStorage:
         self._pending_timer = None
         pending = self._pending
         self._pending = {}
-        extra = []
         for peerid in self._sequence:
             if peerid in pending:
                 d, shares = pending.pop(peerid)
@@ -602,7 +600,6 @@ class PublishMixin:
         # publish a file and create shares, which can then be manipulated
         # later.
         self.CONTENTS = "New contents go here" * 1000
-        num_peers = 20
         self._storage = FakeStorage()
         self._nodemaker = make_nodemaker(self._storage)
         self._storage_broker = self._nodemaker.storage_broker
@@ -620,7 +617,6 @@ class PublishMixin:
                          "Contents 3a",
                          "Contents 3b"]
         self._copied_shares = {}
-        num_peers = 20
         self._storage = FakeStorage()
         self._nodemaker = make_nodemaker(self._storage)
         d = self._nodemaker.create_mutable_file(self.CONTENTS[0]) # seqnum=1
@@ -757,7 +753,6 @@ class Servermap(unittest.TestCase, PublishMixin):
     def test_mark_bad(self):
         d = defer.succeed(None)
         ms = self.make_servermap
-        us = self.update_servermap

         d.addCallback(lambda res: ms(mode=MODE_READ))
         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
@@ -1435,7 +1430,6 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
         self.failUnlessEqual(len(smap.recoverable_versions()), 1)
         self.failIf(smap.unrecoverable_versions())
         # now, which should have won?
-        roothash_s4a = self.get_roothash_for(3)
         expected_contents = self.CONTENTS[3]
         new_versionid = smap.best_recoverable_version()
         self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
@@ -1586,7 +1580,6 @@ class MultipleEncodings(unittest.TestCase):
         sb = self._storage_broker

         for peerid in sorted(sb.get_all_serverids()):
-            peerid_s = shortnodeid_b2a(peerid)
             for shnum in self._shares1.get(peerid, {}):
                 if shnum < len(places):
                     which = places[shnum]
@@ -1596,7 +1589,6 @@ class MultipleEncodings(unittest.TestCase):
                 in_1 = shnum in self._shares1[peerid]
                 in_2 = shnum in self._shares2.get(peerid, {})
                 in_3 = shnum in self._shares3.get(peerid, {})
-                #print peerid_s, shnum, which, in_1, in_2, in_3
                 if which == 1:
                     if in_1:
                         peers[shnum] = self._shares1[peerid][shnum]
@@ -253,7 +253,7 @@ class Server(unittest.TestCase):
         return ss

     def test_create(self):
-        ss = self.create("test_create")
+        self.create("test_create")

     def allocate(self, ss, storage_index, sharenums, size, canary=None):
         renew_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
@@ -326,7 +326,6 @@ class Server(unittest.TestCase):

         self.failUnlessEqual(ss.remote_get_buckets("allocate"), {})

-        canary = FakeCanary()
         already,writers = self.allocate(ss, "allocate", [0,1,2], 75)
         self.failUnlessEqual(already, set())
         self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
@@ -387,7 +386,7 @@ class Server(unittest.TestCase):
         f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1
         f.close()

-        b = ss.remote_get_buckets("allocate")
+        ss.remote_get_buckets("allocate")

         e = self.failUnlessRaises(UnknownImmutableContainerVersionError,
                                   ss.remote_get_buckets, "si1")
@@ -654,7 +653,6 @@ class Server(unittest.TestCase):
         ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
         ss.setServiceParent(self.sparent)

-        canary = FakeCanary()
         already,writers = self.allocate(ss, "vid", [0,1,2], 75)
         self.failUnlessEqual(already, set())
         self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
@@ -733,7 +731,7 @@ class MutableServer(unittest.TestCase):
         return ss

     def test_create(self):
-        ss = self.create("test_create")
+        self.create("test_create")

     def write_enabler(self, we_tag):
         return hashutil.tagged_hash("we_blah", we_tag)
@@ -465,7 +465,6 @@ class SystemTest(SystemTestMixin, unittest.TestCase):
            def _done(res):
                log.msg("DONE: %s" % (res,))
                self._mutable_node_1 = res
-                uri = res.get_uri()
            d1.addCallback(_done)
            return d1
        d.addCallback(_create_mutable)
@@ -1364,13 +1363,11 @@ class SystemTest(SystemTestMixin, unittest.TestCase):
        # network calls)

        private_uri = self._private_node.get_uri()
-        some_uri = self._root_directory_uri
        client0_basedir = self.getdir("client0")

        nodeargs = [
            "--node-directory", client0_basedir,
            ]
        TESTDATA = "I will not write the same thing over and over.\n" * 100

        d = defer.succeed(None)
@@ -380,7 +380,6 @@ class Dirnode(unittest.TestCase):
         u0 = uri.LiteralFileURI("data")
         u1 = uri.LiteralDirectoryURI(u0)
         self.failUnless(str(u1))
-        u1s = u1.to_string()
         self.failUnlessEqual(u1.to_string(), "URI:DIR2-LIT:mrqxiyi")
         self.failUnless(u1.is_readonly())
         self.failIf(u1.is_mutable())
@@ -176,7 +176,7 @@ class Statistics(unittest.TestCase):
         try:
             func(*args, **kwargs)
             self.fail(msg)
-        except AssertionError, e:
+        except AssertionError:
             pass

     def failUnlessListEqual(self, a, b, msg = None):
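The hunk above fixes a related warning: Python 2's `except ExceptionType, name:` form binds the exception object even when the handler never inspects it, and pyflakes 0.4.0 counts that as an unused assignment. Dropping the name is the fix. A runnable sketch (`check` is a hypothetical helper):

    def check():
        assert False, "always fires"

    # before: 'e' is bound but never used -- pyflakes 0.4.0 warns
    try:
        check()
    except AssertionError, e:
        pass

    # after: no binding, no warning
    try:
        check()
    except AssertionError:
        pass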
@@ -320,9 +320,6 @@ class Asserts(unittest.TestCase):
         self.fail("assert was not caught")

     def should_not_assert(self, func, *args, **kwargs):
-        if "re" in kwargs:
-            regexp = kwargs["re"]
-            del kwargs["re"]
         try:
             func(*args, **kwargs)
         except AssertionError, e:
@@ -438,6 +438,7 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, unittest.TestCase):
         try:
             from allmydata import reliability
             _hush_pyflakes = reliability
+            del _hush_pyflakes
         except:
             raise unittest.SkipTest("reliability tool requires NumPy")

|
||||
def _check_json(res):
|
||||
data = simplejson.loads(res)
|
||||
self.failUnless(isinstance(data, dict))
|
||||
active = data["active"]
|
||||
#active = data["active"]
|
||||
# TODO: test more. We need a way to fake an active operation
|
||||
# here.
|
||||
d.addCallback(_check_json)
|
||||
|
@@ -529,7 +529,7 @@ def from_string(s):

 def is_uri(s):
     try:
-        uri = from_string(s)
+        from_string(s)
         return True
     except (TypeError, AssertionError):
         return False
@@ -247,7 +247,6 @@ def a2b_l(cs, lengthinbits):
     octets = []
     pos = 2048
     num = qs[0] * pos
-    readybits = 5
     i = 1
     while len(octets) < numoctets:
         while pos > 256:
@@ -33,7 +33,6 @@ def next_power_of_k(n, k):
         x = 0
     else:
         x = int(math.log(n, k) + 0.5)
-    r = k**x
     if k**x < n:
         return k**(x+1)
     else:
@@ -177,7 +177,6 @@ class ResultsBase:

     def _render_si_link(self, ctx, storage_index):
         si_s = base32.b2a(storage_index)
-        root = get_root(ctx)
         req = inevow.IRequest(ctx)
         ophandle = req.prepath[-1]
         target = "%s/operations/%s/%s" % (get_root(ctx), ophandle, si_s)
@@ -60,7 +60,6 @@ class DirectoryNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
         self.name = name

     def childFactory(self, ctx, name):
-        req = IRequest(ctx)
         name = name.decode("utf-8")
         if not name:
             raise EmptyPathnameComponentError()
@@ -355,7 +355,6 @@ class FileDownloader(rend.Page):
         if encoding:
             req.setHeader("content-encoding", encoding)

-        save_to_filename = None
         if boolean_of_arg(get_arg(req, "save", "False")):
             # tell the browser to save the file rather display it we don't
             # try to encode the filename, instead we echo back the exact same
@@ -53,7 +53,6 @@ class MoreInfo(rend.Page):

     def render_size(self, ctx, data):
         node = self.original
-        si = node.get_storage_index()
         d = node.get_current_size()
         def _no_size(size):
             if size is None:
@@ -1,5 +1,5 @@

-from nevow import rend, inevow, tags as T
+from nevow import rend, tags as T
 reliability = None # might not be usable
 try:
     from allmydata import reliability # requires NumPy
@@ -58,7 +58,6 @@ class ReliabilityTool(rend.Page):
         return "%d" % s

     def get_parameters(self, ctx):
-        req = inevow.IRequest(ctx)
         parameters = {}
         for (name,default,argtype,description) in self.DEFAULT_PARAMETERS:
             v = get_arg(ctx, name, default)
@@ -184,7 +184,6 @@ class StorageStatus(rend.Page):
         ecr = ec["space-recovered"]

         p = T.ul()
-        pieces = []
         def add(*pieces):
             p[T.li[pieces]]

@@ -243,7 +242,6 @@ class StorageStatus(rend.Page):
             ]

         p = T.ul()
-        pieces = []
         def add(*pieces):
             p[T.li[pieces]]

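These last two hunks remove a subtler dead binding: the outer `pieces = []` list is never read, because the nested `def add(*pieces)` introduces its own `pieces` parameter that shadows it. A self-contained sketch of the same shadowing pattern (hypothetical names):

    def render_list():
        items = []            # dead: shadowed below, never read
        def add(*items):      # this 'items' is the vararg parameter
            return list(items)
        return add("a", "b")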