2007-08-12 23:33:51 +00:00
from base64 import b32encode
2008-09-25 17:34:53 +00:00
import os , sys , time , re , simplejson , urllib
2007-06-26 22:55:00 +00:00
from cStringIO import StringIO
2008-10-06 22:50:37 +00:00
from zope . interface import implements
2006-12-03 01:30:18 +00:00
from twisted . trial import unittest
2008-04-23 00:14:26 +00:00
from twisted . internet import defer
2007-10-12 02:20:41 +00:00
from twisted . internet import threads # CLI tests use deferToThread
offloaded: fix failure in unit test on windows
in trying to test my fix for the failure of the offloaded unit test on windows
(by closing the reader before unlinking the encoding file - which, perhaps
disturbingly doesn't actually make a difference in my windows environment)
I was unable to, because the unit test failed every time with a connection-lost
error.
after much more time than I'd like to admit it took, I eventually managed to
track that down to a part of the unit test which is supposed to be dropping
a connection. it looks like the exceptions that get thrown on unix, or at
least all the specific environments brian tested in, for that dropped
connection are different from what is thrown on my box (which is running py2.4
and twisted 2.4.0, for reference) adding ConnectionLost to the list of
expected exceptions makes the test pass.
though curiously still my test logs a NotEnoughWritersError error, and I'm not
currently able to fathom why that exception isn't leading to any overall
failure of the unit test itself.
for general interest, a large part of the time spent trying to track this down
was lost to the state of logging. I added a whole bunch of logging to try
and track down where the tests were failing, but then spent a bunch of time
searching in vain for that log output. as far as I can tell at this point
the unit tests are themselves logging to foolscap's log module, but that isn't
being directed anywhere, so all the test's logging is being black holed.
2008-01-18 03:57:29 +00:00
from twisted . internet . error import ConnectionDone , ConnectionLost
2008-10-06 22:50:37 +00:00
from twisted . internet . interfaces import IConsumer , IPushProducer
2008-03-12 00:36:25 +00:00
import allmydata
2008-07-25 22:33:49 +00:00
from allmydata import uri , storage , offloaded
2008-07-16 20:14:39 +00:00
from allmydata . immutable import download , upload , filenode
2008-09-25 17:34:53 +00:00
from allmydata . util import idlib , mathutil
2008-07-07 21:11:02 +00:00
from allmydata . util import log , base32
2008-08-01 22:05:14 +00:00
from allmydata . scripts import runner
2008-09-09 23:34:49 +00:00
from allmydata . interfaces import IDirectoryNode , IFileNode , IFileURI , \
ICheckerResults , ICheckAndRepairResults , IDeepCheckResults , \
2008-10-27 20:34:49 +00:00
IDeepCheckAndRepairResults , NoSuchChildError , NotEnoughSharesError
2008-10-22 08:38:18 +00:00
from allmydata . monitor import Monitor , OperationCancelledError
2008-04-11 21:31:16 +00:00
from allmydata . mutable . common import NotMutableError
from allmydata . mutable import layout as mutable_layout
2008-07-25 22:33:49 +00:00
from foolscap import DeadReferenceError
2007-04-19 01:29:10 +00:00
from twisted . python . failure import Failure
2006-12-07 19:48:06 +00:00
from twisted . web . client import getPage
2007-07-08 03:06:44 +00:00
from twisted . web . error import Error
2006-12-03 01:30:18 +00:00
2008-11-07 05:35:47 +00:00
from allmydata . test . common import SystemTestMixin , ErrorMixin , \
2008-10-28 20:41:04 +00:00
MemoryConsumer , download_to_data
2007-04-07 03:34:32 +00:00
2007-07-12 20:22:36 +00:00
LARGE_DATA = """
This is some data to publish to the virtual drive , which needs to be large
enough to not fit inside a LIT uri .
"""
2008-02-08 00:27:30 +00:00
class CountingDataUploadable(upload.Data):
    """An upload.Data that counts how many bytes have been read from it, and
    can fire a Deferred once a byte threshold is crossed. The tests use this
    to simulate an upload that gets interrupted partway through."""
    # running total of bytes handed out by read()
    bytes_read = 0
    # byte threshold at which to fire interrupt_after_d; None disables it
    interrupt_after = None
    # Deferred fired (once) with self when bytes_read exceeds interrupt_after
    interrupt_after_d = None

    def read(self, length):
        # count before delegating, so the trigger fires as soon as the
        # threshold is crossed
        self.bytes_read += length
        if self.interrupt_after is not None:
            if self.bytes_read > self.interrupt_after:
                # disarm first so the callback fires exactly once
                self.interrupt_after = None
                self.interrupt_after_d.callback(self)
        return upload.Data.read(self, length)
2008-10-06 22:50:37 +00:00
class GrabEverythingConsumer:
    """A trivial IConsumer that accumulates every byte written to it in
    self.contents, for later comparison against the expected plaintext."""
    implements(IConsumer)

    def __init__(self):
        self.contents = ""

    def registerProducer(self, producer, streaming):
        # only push (streaming) producers are supported here
        assert streaming
        assert IPushProducer.providedBy(producer)

    def write(self, data):
        self.contents = self.contents + data

    def unregisterProducer(self):
        pass
2008-02-08 00:27:30 +00:00
2008-07-25 22:33:49 +00:00
class SystemTest ( SystemTestMixin , unittest . TestCase ) :
2006-12-03 02:37:31 +00:00
2006-12-03 03:32:08 +00:00
    def test_connections(self):
        """Bring up a grid plus one extra node and verify that every client
        ends up knowing about all numclients+1 peers, both in the raw peer
        list and in the permuted "storage" peer list."""
        self.basedir = "system/SystemTest/test_connections"
        d = self.set_up_nodes()
        # cleared up-front so _shutdown_extra_node is safe even if
        # add_extra_node fails
        self.extra_node = None
        d.addCallback(lambda res: self.add_extra_node(self.numclients))
        def _check(extra_node):
            self.extra_node = extra_node
            for c in self.clients:
                all_peerids = list(c.get_all_peerids())
                self.failUnlessEqual(len(all_peerids), self.numclients+1)
                permuted_peers = list(c.get_permuted_peers("storage", "a"))
                self.failUnlessEqual(len(permuted_peers), self.numclients+1)
        d.addCallback(_check)
        def _shutdown_extra_node(res):
            # addBoth: stop the extra node on success or failure, passing
            # the original result/failure through
            if self.extra_node:
                return self.extra_node.stopService()
            return res
        d.addBoth(_shutdown_extra_node)
        return d
    test_connections.timeout = 300
    # test_connections is subsumed by test_upload_and_download, and takes
    # quite a while to run on a slow machine (because of all the TLS
    # connections that must be established). If we ever rework the introducer
    # code to such an extent that we're not sure if it works anymore, we can
    # reinstate this test until it does.
    del test_connections
2006-12-03 03:32:08 +00:00
2008-01-30 19:24:50 +00:00
def test_upload_and_download_random_key ( self ) :
2008-03-04 06:55:58 +00:00
self . basedir = " system/SystemTest/test_upload_and_download_random_key "
2008-03-24 16:46:06 +00:00
return self . _test_upload_and_download ( convergence = None )
2008-01-30 19:24:50 +00:00
test_upload_and_download_random_key . timeout = 4800
2008-03-24 16:46:06 +00:00
def test_upload_and_download_convergent ( self ) :
self . basedir = " system/SystemTest/test_upload_and_download_convergent "
return self . _test_upload_and_download ( convergence = " some convergence string " )
test_upload_and_download_convergent . timeout = 4800
2008-01-30 19:24:50 +00:00
2008-03-24 16:46:06 +00:00
    def _test_upload_and_download(self, convergence):
        """Shared body for the upload/download system tests.

        'convergence' is either a convergence secret string (convergent
        encryption: duplicate uploads can be short-circuited) or None
        (random per-upload keys). The Deferred chain exercises, in order:
        peer connectivity, a multi-segment upload, a duplicate upload,
        download to data / filename / filehandle / IConsumer, partial and
        oversized reads, a corrupted-key read, a nonexistent-URI download,
        helper-assisted upload, duplicate-upload short-circuiting (convergent
        only), interrupted-and-resumed helper upload, and finally the remote
        stats interface.
        """
        # we use 4000 bytes of data, which will result in about 400k written
        # to disk among all our simulated nodes
        DATA = "Some data to upload\n" * 200
        d = self.set_up_nodes()
        def _check_connections(res):
            # every client should see the full grid (no extra node yet)
            for c in self.clients:
                all_peerids = list(c.get_all_peerids())
                self.failUnlessEqual(len(all_peerids), self.numclients)
                permuted_peers = list(c.get_permuted_peers("storage", "a"))
                self.failUnlessEqual(len(permuted_peers), self.numclients)
        d.addCallback(_check_connections)

        def _do_upload(res):
            log.msg("UPLOADING")
            u = self.clients[0].getServiceNamed("uploader")
            self.uploader = u
            # we crank the max segsize down to 1024b for the duration of this
            # test, so we can exercise multiple segments. It is important
            # that this is not a multiple of the segment size, so that the
            # tail segment is not the same length as the others. This actually
            # gets rounded up to 1025 to be a multiple of the number of
            # required shares (since we use 25 out of 100 FEC).
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            d1 = u.upload(up)
            return d1
        d.addCallback(_do_upload)
        def _upload_done(results):
            # stash the URI and a downloader (on a *different* client) for
            # all of the download variants below
            uri = results.uri
            log.msg("upload finished: uri is %s" % (uri,))
            self.uri = uri
            dl = self.clients[1].getServiceNamed("downloader")
            self.downloader = dl
        d.addCallback(_upload_done)

        def _upload_again(res):
            # Upload again. If using convergent encryption then this ought to be
            # short-circuited, however with the way we currently generate URIs
            # (i.e. because they include the roothash), we have to do all of the
            # encoding work, and only get to save on the upload part.
            log.msg("UPLOADING AGAIN")
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            # NOTE(review): d1 is not returned, so the chain does not wait
            # for this second upload to complete -- confirm this is intended
            d1 = self.uploader.upload(up)
        d.addCallback(_upload_again)

        def _download_to_data(res):
            log.msg("DOWNLOADING")
            return self.downloader.download_to_data(self.uri)
        d.addCallback(_download_to_data)
        def _download_to_data_done(data):
            log.msg("download finished")
            self.failUnlessEqual(data, DATA)
        d.addCallback(_download_to_data_done)

        # variant: download directly into a named file
        target_filename = os.path.join(self.basedir, "download.target")
        def _download_to_filename(res):
            return self.downloader.download_to_filename(self.uri,
                                                        target_filename)
        d.addCallback(_download_to_filename)
        def _download_to_filename_done(res):
            newdata = open(target_filename, "rb").read()
            self.failUnlessEqual(newdata, DATA)
        d.addCallback(_download_to_filename_done)

        # variant: download into an already-open filehandle
        target_filename2 = os.path.join(self.basedir, "download.target2")
        def _download_to_filehandle(res):
            fh = open(target_filename2, "wb")
            return self.downloader.download_to_filehandle(self.uri, fh)
        d.addCallback(_download_to_filehandle)
        def _download_to_filehandle_done(fh):
            fh.close()
            newdata = open(target_filename2, "rb").read()
            self.failUnlessEqual(newdata, DATA)
        d.addCallback(_download_to_filehandle_done)

        # variant: download through the IConsumer interface
        consumer = GrabEverythingConsumer()
        ct = download.ConsumerAdapter(consumer)
        d.addCallback(lambda res:
                      self.downloader.download(self.uri, ct))
        def _download_to_consumer_done(ign):
            self.failUnlessEqual(consumer.contents, DATA)
        d.addCallback(_download_to_consumer_done)

        def _test_read(res):
            # exercise the filenode read() API: full read, a small slice,
            # an open-ended tail, and a deliberately oversized request
            n = self.clients[1].create_node_from_uri(self.uri)
            d = download_to_data(n)
            def _read_done(data):
                self.failUnlessEqual(data, DATA)
            d.addCallback(_read_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=1, size=4))
            def _read_portion_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[1:1+4])
            d.addCallback(_read_portion_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=2, size=None))
            def _read_tail_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[2:])
            d.addCallback(_read_tail_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), size=len(DATA)+1000))
            def _read_too_much(mc):
                # reading past the end just returns everything available
                self.failUnlessEqual("".join(mc.chunks), DATA)
            d.addCallback(_read_too_much)

            return d
        d.addCallback(_test_read)

        def _test_bad_read(res):
            # corrupt the read key: the ciphertext can still be fetched but
            # integrity checking must reject it
            bad_u = uri.from_string_filenode(self.uri)
            bad_u.key = self.flip_bit(bad_u.key)
            bad_n = self.clients[1].create_node_from_uri(bad_u.to_string())
            # this should cause an error during download
            d = self.shouldFail2(NotEnoughSharesError, "'download bad node'",
                                 None,
                                 bad_n.read, MemoryConsumer(), offset=2)
            return d
        d.addCallback(_test_bad_read)

        def _download_nonexistent_uri(res):
            baduri = self.mangle_uri(self.uri)
            log.msg("about to download non-existent URI", level=log.UNUSUAL,
                    facility="tahoe.tests")
            d1 = self.downloader.download_to_data(baduri)
            def _baduri_should_fail(res):
                log.msg("finished downloading non-existend URI",
                        level=log.UNUSUAL, facility="tahoe.tests")
                # addBoth hands us either the result or the Failure; we
                # require a Failure of the right type
                self.failUnless(isinstance(res, Failure))
                self.failUnless(res.check(NotEnoughSharesError),
                                "expected NotEnoughSharesError, got %s" % res)
                # TODO: files that have zero peers should get a special kind
                # of NotEnoughSharesError, which can be used to suggest that
                # the URI might be wrong or that they've never uploaded the
                # file in the first place.
            d1.addBoth(_baduri_should_fail)
            return d1
        d.addCallback(_download_nonexistent_uri)

        # add a new node, which doesn't accept shares, and only uses the
        # helper for upload.
        d.addCallback(lambda res: self.add_extra_node(self.numclients,
                                                      self.helper_furl,
                                                      add_to_sparent=True))
        def _added(extra_node):
            self.extra_node = extra_node
        d.addCallback(_added)

        HELPER_DATA = "Data that needs help to upload" * 1000
        def _upload_with_helper(res):
            # round-trip a file through the helper-only node
            u = upload.Data(HELPER_DATA, convergence=convergence)
            d = self.extra_node.upload(u)
            def _uploaded(results):
                uri = results.uri
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
            d.addCallback(_check)
            return d
        d.addCallback(_upload_with_helper)

        def _upload_duplicate_with_helper(res):
            # with convergence, uploading the same data again should be
            # short-circuited before any ciphertext is pushed
            u = upload.Data(HELPER_DATA, convergence=convergence)
            u.debug_stash_RemoteEncryptedUploadable = True
            d = self.extra_node.upload(u)
            def _uploaded(results):
                uri = results.uri
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
                self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"),
                            "uploadable started uploading, should have been avoided")
            d.addCallback(_check)
            return d
        # short-circuiting only applies to convergent encryption
        if convergence is not None:
            d.addCallback(_upload_duplicate_with_helper)

        def _upload_resumable(res):
            DATA = "Data that needs help to upload and gets interrupted" * 1000
            u1 = CountingDataUploadable(DATA, convergence=convergence)
            u2 = CountingDataUploadable(DATA, convergence=convergence)

            # we interrupt the connection after about 5kB by shutting down
            # the helper, then restarting it.
            u1.interrupt_after = 5000
            u1.interrupt_after_d = defer.Deferred()
            u1.interrupt_after_d.addCallback(lambda res:
                                             self.bounce_client(0))

            # sneak into the helper and reduce its chunk size, so that our
            # debug_interrupt will sever the connection on about the fifth
            # chunk fetched. This makes sure that we've started to write the
            # new shares before we abandon them, which exercises the
            # abort/delete-partial-share code. TODO: find a cleaner way to do
            # this. I know that this will affect later uses of the helper in
            # this same test run, but I'm not currently worried about it.
            offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000

            d = self.extra_node.upload(u1)

            def _should_not_finish(res):
                self.fail("interrupted upload should have failed, not finished"
                          " with result %s" % (res,))
            def _interrupted(f):
                # the exact exception varies by platform/twisted version
                f.trap(ConnectionLost, ConnectionDone, DeadReferenceError)

                # make sure we actually interrupted it before finishing the
                # file
                self.failUnless(u1.bytes_read < len(DATA),
                                "read %d out of %d total" % (u1.bytes_read,
                                                             len(DATA)))

                log.msg("waiting for reconnect", level=log.NOISY,
                        facility="tahoe.test.test_system")
                # now, we need to give the nodes a chance to notice that this
                # connection has gone away. When this happens, the storage
                # servers will be told to abort their uploads, removing the
                # partial shares. Unfortunately this involves TCP messages
                # going through the loopback interface, and we can't easily
                # predict how long that will take. If it were all local, we
                # could use fireEventually() to stall. Since we don't have
                # the right introduction hooks, the best we can do is use a
                # fixed delay. TODO: this is fragile.
                u1.interrupt_after_d.addCallback(self.stall, 2.0)
                return u1.interrupt_after_d
            d.addCallbacks(_should_not_finish, _interrupted)

            def _disconnected(res):
                # check to make sure the storage servers aren't still hanging
                # on to the partial share: their incoming/ directories should
                # now be empty.
                log.msg("disconnected", level=log.NOISY,
                        facility="tahoe.test.test_system")
                for i in range(self.numclients):
                    incdir = os.path.join(self.getdir("client%d" % i),
                                          "storage", "shares", "incoming")
                    self.failIf(os.path.exists(incdir) and os.listdir(incdir))
            d.addCallback(_disconnected)

            # then we need to give the reconnector a chance to
            # reestablish the connection to the helper.
            d.addCallback(lambda res:
                          log.msg("wait_for_connections", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.wait_for_connections())
            d.addCallback(lambda res:
                          log.msg("uploading again", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.extra_node.upload(u2))

            def _uploaded(results):
                uri = results.uri
                log.msg("Second upload complete", level=log.NOISY,
                        facility="tahoe.test.test_system")
                # this is really bytes received rather than sent, but it's
                # convenient and basically measures the same thing
                bytes_sent = results.ciphertext_fetched
                # We currently don't support resumption of upload if the data is
                # encrypted with a random key. (Because that would require us
                # to store the key locally and re-use it on the next upload of
                # this file, which isn't a bad thing to do, but we currently
                # don't do it.)
                if convergence is not None:
                    # Make sure we did not have to read the whole file the
                    # second time around.
                    self.failUnless(bytes_sent < len(DATA),
                                    "resumption didn't save us any work:"
                                    " read %d bytes out of %d total" %
                                    (bytes_sent, len(DATA)))
                else:
                    # Make sure we did have to read the whole file the second
                    # time around -- because the one that we partially uploaded
                    # earlier was encrypted with a different random key.
                    self.failIf(bytes_sent < len(DATA),
                                "resumption saved us some work even though we were using random keys:"
                                " read %d bytes out of %d total" %
                                (bytes_sent, len(DATA)))
                return self.downloader.download_to_data(uri)
            d.addCallback(_uploaded)

            def _check(newdata):
                self.failUnlessEqual(newdata, DATA)
                # If using convergent encryption, then also check that the
                # helper has removed the temp file from its directories.
                if convergence is not None:
                    basedir = os.path.join(self.getdir("client0"), "helper")
                    files = os.listdir(os.path.join(basedir, "CHK_encoding"))
                    self.failUnlessEqual(files, [])
                    files = os.listdir(os.path.join(basedir, "CHK_incoming"))
                    self.failUnlessEqual(files, [])
            d.addCallback(_check)
            return d
        d.addCallback(_upload_resumable)

        def _grab_stats(ignored):
            # the StatsProvider doesn't normally publish a FURL:
            # instead it passes a live reference to the StatsGatherer
            # (if and when it connects). To exercise the remote stats
            # interface, we manually publish client0's StatsProvider
            # and use client1 to query it.
            sp = self.clients[0].stats_provider
            sp_furl = self.clients[0].tub.registerReference(sp)
            d = self.clients[1].tub.getReference(sp_furl)
            d.addCallback(lambda sp_rref: sp_rref.callRemote("get_stats"))
            def _got_stats(stats):
                #print "STATS"
                #from pprint import pprint
                #pprint(stats)
                s = stats["stats"]
                self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1)
                c = stats["counters"]
                self.failUnlessEqual(c["storage_server.allocate"], 2)
            d.addCallback(_got_stats)
            return d
        d.addCallback(_grab_stats)

        return d
2006-12-03 07:56:27 +00:00
2007-11-14 06:08:15 +00:00
def _find_shares ( self , basedir ) :
shares = [ ]
for ( dirpath , dirnames , filenames ) in os . walk ( basedir ) :
if " storage " not in dirpath :
continue
if not filenames :
continue
pieces = dirpath . split ( os . sep )
2008-01-31 23:26:28 +00:00
if pieces [ - 4 ] == " storage " and pieces [ - 3 ] == " shares " :
# we're sitting in .../storage/shares/$START/$SINDEX , and there
2007-11-14 06:08:15 +00:00
# are sharefiles here
2008-01-31 23:26:28 +00:00
assert pieces [ - 5 ] . startswith ( " client " )
client_num = int ( pieces [ - 5 ] [ - 1 ] )
2007-11-14 06:08:15 +00:00
storage_index_s = pieces [ - 1 ]
2008-02-13 03:48:37 +00:00
storage_index = storage . si_a2b ( storage_index_s )
2007-11-14 06:08:15 +00:00
for sharename in filenames :
shnum = int ( sharename )
filename = os . path . join ( dirpath , sharename )
data = ( client_num , storage_index , filename , shnum )
shares . append ( data )
if not shares :
self . fail ( " unable to find any share files in %s " % basedir )
return shares
    def _corrupt_mutable_share(self, filename, which):
        """Corrupt one field of the mutable share file at 'filename'.

        'which' selects the field to mangle: "seqnum", "R" (root hash),
        "IV", "segsize", "pubkey", "signature", "share_hash_chain",
        "block_hash_tree", "share_data", or "encprivkey". The share is
        read, unpacked, altered, re-packed, and written back in place so
        later downloads see the corruption."""
        msf = storage.MutableShareFile(filename)
        datav = msf.readv([ (0, 1000000) ])
        final_share = datav[0]
        assert len(final_share) < 1000000 # ought to be truncated
        pieces = mutable_layout.unpack_share(final_share)
        (seqnum, root_hash, IV, k, N, segsize, datalen,
         verification_key, signature, share_hash_chain, block_hash_tree,
         share_data, enc_privkey) = pieces

        # integer fields are bumped; byte-string fields get one bit flipped
        if which == "seqnum":
            seqnum = seqnum + 15
        elif which == "R":
            root_hash = self.flip_bit(root_hash)
        elif which == "IV":
            IV = self.flip_bit(IV)
        elif which == "segsize":
            segsize = segsize + 15
        elif which == "pubkey":
            verification_key = self.flip_bit(verification_key)
        elif which == "signature":
            signature = self.flip_bit(signature)
        elif which == "share_hash_chain":
            # corrupt an arbitrary entry of the hash chain
            nodenum = share_hash_chain.keys()[0]
            share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum])
        elif which == "block_hash_tree":
            block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1])
        elif which == "share_data":
            share_data = self.flip_bit(share_data)
        elif which == "encprivkey":
            enc_privkey = self.flip_bit(enc_privkey)

        # re-pack with the (possibly modified) fields and overwrite in place
        prefix = mutable_layout.pack_prefix(seqnum, root_hash, IV, k, N,
                                            segsize, datalen)
        final_share = mutable_layout.pack_share(prefix,
                                                verification_key,
                                                signature,
                                                share_hash_chain,
                                                block_hash_tree,
                                                share_data,
                                                enc_privkey)
        msf.writev([(0, final_share)], None)
2007-11-07 01:57:11 +00:00
2008-07-25 22:13:00 +00:00
2007-11-07 01:57:11 +00:00
def test_mutable ( self ) :
self . basedir = " system/SystemTest/test_mutable "
2007-11-07 21:19:01 +00:00
DATA = " initial contents go here. " # 25 bytes % 3 != 0
2007-11-08 04:01:39 +00:00
NEWDATA = " new contents yay "
NEWERDATA = " this is getting old "
2008-07-25 22:29:31 +00:00
d = self . set_up_nodes ( use_key_generator = True )
2007-11-15 21:55:00 +00:00
2007-11-07 01:57:11 +00:00
def _create_mutable ( res ) :
c = self . clients [ 0 ]
2007-11-08 11:07:33 +00:00
log . msg ( " starting create_mutable_file " )
2008-01-14 21:55:59 +00:00
d1 = c . create_mutable_file ( DATA )
2007-11-07 01:57:11 +00:00
def _done ( res ) :
log . msg ( " DONE: %s " % ( res , ) )
2007-11-07 21:19:01 +00:00
self . _mutable_node_1 = res
uri = res . get_uri ( )
2007-11-08 04:01:39 +00:00
d1 . addCallback ( _done )
2007-11-07 01:57:11 +00:00
return d1
d . addCallback ( _create_mutable )
2007-11-07 02:10:49 +00:00
def _test_debug ( res ) :
# find a share. It is important to run this while there is only
# one slot in the grid.
2007-11-14 06:08:15 +00:00
shares = self . _find_shares ( self . basedir )
( client_num , storage_index , filename , shnum ) = shares [ 0 ]
2007-11-07 02:10:49 +00:00
log . msg ( " test_system.SystemTest.test_mutable._test_debug using %s "
% filename )
log . msg ( " for clients[ %d ] " % client_num )
out , err = StringIO ( ) , StringIO ( )
2008-08-12 21:46:56 +00:00
rc = runner . runner ( [ " debug " , " dump-share " , " --offsets " ,
2007-11-07 02:10:49 +00:00
filename ] ,
stdout = out , stderr = err )
output = out . getvalue ( )
self . failUnlessEqual ( rc , 0 )
2007-11-07 02:50:33 +00:00
try :
self . failUnless ( " Mutable slot found: \n " in output )
self . failUnless ( " share_type: SDMF \n " in output )
peerid = idlib . nodeid_b2a ( self . clients [ client_num ] . nodeid )
self . failUnless ( " WE for nodeid: %s \n " % peerid in output )
self . failUnless ( " num_extra_leases: 0 \n " in output )
2007-11-08 00:51:35 +00:00
# the pubkey size can vary by a byte, so the container might
# be a bit larger on some runs.
m = re . search ( r ' ^ container_size: ( \ d+)$ ' , output , re . M )
self . failUnless ( m )
container_size = int ( m . group ( 1 ) )
2007-12-03 21:52:42 +00:00
self . failUnless ( 2037 < = container_size < = 2049 , container_size )
2007-11-08 00:51:35 +00:00
m = re . search ( r ' ^ data_length: ( \ d+)$ ' , output , re . M )
self . failUnless ( m )
data_length = int ( m . group ( 1 ) )
2007-12-03 21:52:42 +00:00
self . failUnless ( 2037 < = data_length < = 2049 , data_length )
2007-11-07 02:50:33 +00:00
self . failUnless ( " secrets are for nodeid: %s \n " % peerid
in output )
self . failUnless ( " SDMF contents: \n " in output )
self . failUnless ( " seqnum: 1 \n " in output )
self . failUnless ( " required_shares: 3 \n " in output )
self . failUnless ( " total_shares: 10 \n " in output )
2007-12-03 21:52:42 +00:00
self . failUnless ( " segsize: 27 \n " in output , ( output , filename ) )
2007-11-07 02:50:33 +00:00
self . failUnless ( " datalen: 25 \n " in output )
# the exact share_hash_chain nodes depends upon the sharenum,
# and is more of a hassle to compute than I want to deal with
# now
self . failUnless ( " share_hash_chain: " in output )
self . failUnless ( " block_hash_tree: 1 nodes \n " in output )
2008-07-07 21:11:02 +00:00
expected = ( " verify-cap: URI:SSK-Verifier: %s : " %
base32 . b2a ( storage_index ) )
self . failUnless ( expected in output )
2007-11-07 02:50:33 +00:00
except unittest . FailTest :
print
print " dump-share output was: "
print output
raise
2007-11-07 02:10:49 +00:00
d . addCallback ( _test_debug )
2007-11-07 21:19:01 +00:00
# test retrieval
# first, let's see if we can use the existing node to retrieve the
# contents. This allows it to use the cached pubkey and maybe the
# latest-known sharemap.
2008-04-18 00:51:38 +00:00
d . addCallback ( lambda res : self . _mutable_node_1 . download_best_version ( ) )
2007-11-07 21:19:01 +00:00
def _check_download_1 ( res ) :
self . failUnlessEqual ( res , DATA )
# now we see if we can retrieve the data from a new node,
# constructed using the URI of the original one. We do this test
# on the same client that uploaded the data.
uri = self . _mutable_node_1 . get_uri ( )
2007-11-08 11:07:33 +00:00
log . msg ( " starting retrieve1 " )
2007-11-09 09:54:51 +00:00
newnode = self . clients [ 0 ] . create_node_from_uri ( uri )
2008-05-09 01:02:55 +00:00
newnode_2 = self . clients [ 0 ] . create_node_from_uri ( uri )
self . failUnlessIdentical ( newnode , newnode_2 )
2008-04-18 00:51:38 +00:00
return newnode . download_best_version ( )
2007-11-07 21:19:01 +00:00
d . addCallback ( _check_download_1 )
def _check_download_2 ( res ) :
self . failUnlessEqual ( res , DATA )
# same thing, but with a different client
uri = self . _mutable_node_1 . get_uri ( )
2007-11-09 09:54:51 +00:00
newnode = self . clients [ 1 ] . create_node_from_uri ( uri )
2007-11-08 11:07:33 +00:00
log . msg ( " starting retrieve2 " )
2008-04-18 00:51:38 +00:00
d1 = newnode . download_best_version ( )
2007-11-08 04:01:39 +00:00
d1 . addCallback ( lambda res : ( res , newnode ) )
return d1
2007-11-07 21:19:01 +00:00
d . addCallback ( _check_download_2 )
2007-11-08 04:01:39 +00:00
def _check_download_3 ( ( res , newnode ) ) :
2007-11-07 21:19:01 +00:00
self . failUnlessEqual ( res , DATA )
2007-11-08 04:01:39 +00:00
# replace the data
2007-11-08 11:07:33 +00:00
log . msg ( " starting replace1 " )
2008-04-18 00:51:38 +00:00
d1 = newnode . overwrite ( NEWDATA )
d1 . addCallback ( lambda res : newnode . download_best_version ( ) )
2007-11-08 04:01:39 +00:00
return d1
2007-11-07 21:19:01 +00:00
d . addCallback ( _check_download_3 )
2007-11-08 04:01:39 +00:00
def _check_download_4 ( res ) :
self . failUnlessEqual ( res , NEWDATA )
# now create an even newer node and replace the data on it. This
# new node has never been used for download before.
uri = self . _mutable_node_1 . get_uri ( )
2007-11-09 09:54:51 +00:00
newnode1 = self . clients [ 2 ] . create_node_from_uri ( uri )
newnode2 = self . clients [ 3 ] . create_node_from_uri ( uri )
2007-11-14 06:08:15 +00:00
self . _newnode3 = self . clients [ 3 ] . create_node_from_uri ( uri )
2007-11-08 11:07:33 +00:00
log . msg ( " starting replace2 " )
2008-03-13 01:00:43 +00:00
d1 = newnode1 . overwrite ( NEWERDATA )
2008-04-18 00:51:38 +00:00
d1 . addCallback ( lambda res : newnode2 . download_best_version ( ) )
2007-11-08 04:01:39 +00:00
return d1
2007-11-08 11:07:33 +00:00
d . addCallback ( _check_download_4 )
2007-11-08 04:01:39 +00:00
def _check_download_5 ( res ) :
2007-11-08 11:07:33 +00:00
log . msg ( " finished replace2 " )
2007-11-08 04:01:39 +00:00
self . failUnlessEqual ( res , NEWERDATA )
2007-12-03 22:21:14 +00:00
d . addCallback ( _check_download_5 )
def _corrupt_shares ( res ) :
# run around and flip bits in all but k of the shares, to test
# the hash checks
shares = self . _find_shares ( self . basedir )
## sort by share number
#shares.sort( lambda a,b: cmp(a[3], b[3]) )
where = dict ( [ ( shnum , filename )
for ( client_num , storage_index , filename , shnum )
in shares ] )
assert len ( where ) == 10 # this test is designed for 3-of-10
for shnum , filename in where . items ( ) :
# shares 7,8,9 are left alone. read will check
# (share_hash_chain, block_hash_tree, share_data). New
# seqnum+R pairs will trigger a check of (seqnum, R, IV,
# segsize, signature).
if shnum == 0 :
# read: this will trigger "pubkey doesn't match
# fingerprint".
self . _corrupt_mutable_share ( filename , " pubkey " )
self . _corrupt_mutable_share ( filename , " encprivkey " )
elif shnum == 1 :
# triggers "signature is invalid"
self . _corrupt_mutable_share ( filename , " seqnum " )
elif shnum == 2 :
# triggers "signature is invalid"
self . _corrupt_mutable_share ( filename , " R " )
elif shnum == 3 :
# triggers "signature is invalid"
self . _corrupt_mutable_share ( filename , " segsize " )
elif shnum == 4 :
self . _corrupt_mutable_share ( filename , " share_hash_chain " )
elif shnum == 5 :
self . _corrupt_mutable_share ( filename , " block_hash_tree " )
elif shnum == 6 :
self . _corrupt_mutable_share ( filename , " share_data " )
# other things to correct: IV, signature
# 7,8,9 are left alone
# note that initial_query_count=5 means that we'll hit the
# first 5 servers in effectively random order (based upon
# response time), so we won't necessarily ever get a "pubkey
# doesn't match fingerprint" error (if we hit shnum>=1 before
# shnum=0, we pull the pubkey from there). To get repeatable
# specific failures, we need to set initial_query_count=1,
# but of course that will change the sequencing behavior of
# the retrieval process. TODO: find a reasonable way to make
# this a parameter, probably when we expand this test to test
# for one failure mode at a time.
# when we retrieve this, we should get three signature
# failures (where we've mangled seqnum, R, and segsize). The
# pubkey mangling
d . addCallback ( _corrupt_shares )
2008-04-18 00:51:38 +00:00
d . addCallback ( lambda res : self . _newnode3 . download_best_version ( ) )
2007-12-03 22:21:14 +00:00
d . addCallback ( _check_download_5 )
def _check_empty_file ( res ) :
2007-11-14 06:08:15 +00:00
# make sure we can create empty files, this usually screws up the
# segsize math
2008-01-14 21:55:59 +00:00
d1 = self . clients [ 2 ] . create_mutable_file ( " " )
2008-04-18 00:51:38 +00:00
d1 . addCallback ( lambda newnode : newnode . download_best_version ( ) )
2007-11-08 11:31:00 +00:00
d1 . addCallback ( lambda res : self . failUnlessEqual ( " " , res ) )
return d1
2007-11-14 06:08:15 +00:00
d . addCallback ( _check_empty_file )
2007-11-08 04:01:39 +00:00
2008-01-14 21:55:59 +00:00
d . addCallback ( lambda res : self . clients [ 0 ] . create_empty_dirnode ( ) )
2007-11-08 11:31:00 +00:00
def _created_dirnode ( dnode ) :
2007-12-03 21:52:42 +00:00
log . msg ( " _created_dirnode( %s ) " % ( dnode , ) )
2007-11-08 11:31:00 +00:00
d1 = dnode . list ( )
d1 . addCallback ( lambda children : self . failUnlessEqual ( children , { } ) )
2008-02-14 22:45:56 +00:00
d1 . addCallback ( lambda res : dnode . has_child ( u " edgar " ) )
2007-11-08 11:31:00 +00:00
d1 . addCallback ( lambda answer : self . failUnlessEqual ( answer , False ) )
2008-02-14 22:45:56 +00:00
d1 . addCallback ( lambda res : dnode . set_node ( u " see recursive " , dnode ) )
d1 . addCallback ( lambda res : dnode . has_child ( u " see recursive " ) )
2007-11-08 11:31:00 +00:00
d1 . addCallback ( lambda answer : self . failUnlessEqual ( answer , True ) )
2008-10-22 00:03:07 +00:00
d1 . addCallback ( lambda res : dnode . build_manifest ( ) . when_done ( ) )
2008-11-19 22:03:47 +00:00
d1 . addCallback ( lambda res :
self . failUnlessEqual ( len ( res [ " manifest " ] ) , 1 ) )
2007-11-08 11:31:00 +00:00
return d1
d . addCallback ( _created_dirnode )
2008-04-03 22:57:07 +00:00
def wait_for_c3_kg_conn ( ) :
return self . clients [ 3 ] . _key_generator is not None
d . addCallback ( lambda junk : self . poll ( wait_for_c3_kg_conn ) )
def check_kg_poolsize ( junk , size_delta ) :
self . failUnlessEqual ( len ( self . key_generator_svc . key_generator . keypool ) ,
self . key_generator_svc . key_generator . pool_size + size_delta )
d . addCallback ( check_kg_poolsize , 0 )
d . addCallback ( lambda junk : self . clients [ 3 ] . create_mutable_file ( ' hello, world ' ) )
d . addCallback ( check_kg_poolsize , - 1 )
d . addCallback ( lambda junk : self . clients [ 3 ] . create_empty_dirnode ( ) )
d . addCallback ( check_kg_poolsize , - 2 )
# use_helper induces use of clients[3], which is the using-key_gen client
d . addCallback ( lambda junk : self . POST ( " uri " , use_helper = True , t = " mkdir " , name = ' george ' ) )
d . addCallback ( check_kg_poolsize , - 3 )
2007-11-07 01:57:11 +00:00
return d
2007-11-29 21:47:35 +00:00
# The default 120 second timeout went off when running it under valgrind
# on my old Windows laptop, so I'm bumping up the timeout.
test_mutable . timeout = 240
2007-11-07 01:57:11 +00:00
2007-04-19 01:29:10 +00:00
def flip_bit(self, good):
    # Return `good` with the lowest bit of its final byte toggled: a
    # minimally-corrupted copy that is guaranteed to differ from the input.
    flipped_last = chr(ord(good[-1]) ^ 0x01)
    return good[:-1] + flipped_last
def mangle_uri(self, gooduri):
    """Return a URI string that refers to a nonexistent file.

    Flipping one bit of the key changes the storage index, so we will be
    asking the grid about the wrong file and no server will have shares.
    """
    good = IFileURI(gooduri)
    mangled = uri.CHKFileURI(key=self.flip_bit(good.key),
                             uri_extension_hash=good.uri_extension_hash,
                             needed_shares=good.needed_shares,
                             total_shares=good.total_shares,
                             size=good.size)
    return mangled.to_string()
2007-04-19 01:29:10 +00:00
2007-06-08 22:59:16 +00:00
    # TODO: add a test which mangles the uri_extension_hash instead, and
    # should fail due to not being able to get a valid uri_extension block.
    # Also a test which sneakily mangles the uri_extension block to change
    # some of the validation data, so it will fail in the post-download phase
    # when the file's crypttext integrity check fails. Do the same thing for
    # the key, which should cause the download to fail the post-download
    # plaintext_hash check.
2007-04-26 00:53:10 +00:00
2006-12-04 05:42:19 +00:00
def test_vdrive(self):
    """End-to-end walk of the virtual drive: publish a directory tree,
    bounce a client, re-read everything, then exercise the web, control,
    CLI, and checker interfaces against it."""
    self.basedir = "system/SystemTest/test_vdrive"
    self.data = LARGE_DATA
    d = self.set_up_nodes(use_stats_gatherer=True)
    d.addCallback(self._test_introweb)
    d.addCallback(self.log, "starting publish")
    d.addCallback(self._do_publish1)
    d.addCallback(self._test_runner)
    d.addCallback(self._do_publish2)
    # at this point, we have the following filesystem (where "R" denotes
    # self._root_directory_uri):
    # R
    # R/subdir1
    # R/subdir1/mydata567
    # R/subdir1/subdir2/
    # R/subdir1/subdir2/mydata992

    d.addCallback(lambda res: self.bounce_client(0))
    d.addCallback(self.log, "bounced client0")
    d.addCallback(self._check_publish1)
    d.addCallback(self.log, "did _check_publish1")
    d.addCallback(self._check_publish2)
    d.addCallback(self.log, "did _check_publish2")
    d.addCallback(self._do_publish_private)
    d.addCallback(self.log, "did _do_publish_private")
    # now we also have (where "P" denotes a new dir):
    #  P/personal/sekrit data
    #  P/s2-rw -> /subdir1/subdir2/
    #  P/s2-ro -> /subdir1/subdir2/ (read-only)
    d.addCallback(self._check_publish_private)
    d.addCallback(self.log, "did _check_publish_private")
    d.addCallback(self._test_web)
    d.addCallback(self._test_control)
    d.addCallback(self._test_cli)
    # P now has four top-level children:
    # P/personal/sekrit data
    # P/s2-ro/
    # P/s2-rw/
    # P/test_put/  (empty)
    d.addCallback(self._test_checker)
    return d
test_vdrive.timeout = 1100
2006-12-04 05:42:19 +00:00
2008-03-12 00:36:25 +00:00
def _test_introweb ( self , res ) :
d = getPage ( self . introweb_url , method = " GET " , followRedirect = True )
def _check ( res ) :
try :
self . failUnless ( " allmydata: %s " % str ( allmydata . __version__ )
in res )
2008-03-25 19:56:12 +00:00
self . failUnless ( " Announcement Summary: storage: 5, stub_client: 5 " in res )
self . failUnless ( " Subscription Summary: storage: 5 " in res )
2008-03-12 00:36:25 +00:00
except unittest . FailTest :
print
print " GET %s output was: " % self . introweb_url
print res
raise
d . addCallback ( _check )
2008-03-25 19:56:12 +00:00
d . addCallback ( lambda res :
getPage ( self . introweb_url + " ?t=json " ,
method = " GET " , followRedirect = True ) )
def _check_json ( res ) :
data = simplejson . loads ( res )
try :
self . failUnlessEqual ( data [ " subscription_summary " ] ,
{ " storage " : 5 } )
self . failUnlessEqual ( data [ " announcement_summary " ] ,
{ " storage " : 5 , " stub_client " : 5 } )
2008-11-18 22:30:15 +00:00
self . failUnlessEqual ( data [ " announcement_distinct_hosts " ] ,
{ " storage " : 1 , " stub_client " : 1 } )
2008-03-25 19:56:12 +00:00
except unittest . FailTest :
print
print " GET %s ?t=json output was: " % self . introweb_url
print res
raise
d . addCallback ( _check_json )
2008-03-12 00:36:25 +00:00
return d
2007-09-25 01:12:37 +00:00
def _do_publish1(self, res):
    """Create the root directory and R/subdir1/mydata567 via client 0.

    Stashes self._root_directory_uri, self._subdir1_node, and self.uri
    (the uploaded file's URI) for later stages.
    """
    ut = upload.Data(self.data, convergence=None)
    c0 = self.clients[0]
    d = c0.create_empty_dirnode()
    def _made_root(new_dirnode):
        self._root_directory_uri = new_dirnode.get_uri()
        # re-create the node from its URI to exercise that path too
        return c0.create_node_from_uri(self._root_directory_uri)
    d.addCallback(_made_root)
    d.addCallback(lambda root: root.create_empty_directory(u"subdir1"))
    def _made_subdir1(subdir1_node):
        self._subdir1_node = subdir1_node
        d1 = subdir1_node.add_file(u"mydata567", ut)
        d1.addCallback(self.log, "publish finished")
        def _stash_uri(filenode):
            self.uri = filenode.get_uri()
        d1.addCallback(_stash_uri)
        return d1
    d.addCallback(_made_subdir1)
    return d
2007-09-25 01:12:37 +00:00
def _do_publish2(self, res):
    """Extend the tree with R/subdir1/subdir2/mydata992."""
    ut = upload.Data(self.data, convergence=None)
    d = self._subdir1_node.create_empty_directory(u"subdir2")
    d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
    return d
2008-09-08 03:03:36 +00:00
def log(self, res, *args, **kwargs):
    """Pass-through logging callback for Deferred chains: emit a log
    message and hand `res` on unchanged to the next callback."""
    log.msg(*args, **kwargs)
    return res
def _do_publish_private(self, res):
    """Build the private directory P with P/personal/sekrit data, plus
    P/s2-rw and P/s2-ro links to R/subdir1/subdir2. Fires with the new
    private dirnode."""
    self.smalldata = "sssh, very secret stuff"
    ut = upload.Data(self.smalldata, convergence=None)
    d = self.clients[0].create_empty_dirnode()
    d.addCallback(self.log, "GOT private directory")
    def _got_new_dir(privnode):
        rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
        d1 = privnode.create_empty_directory(u"personal")
        d1.addCallback(self.log, "made P/personal")
        d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
        d1.addCallback(self.log, "made P/personal/sekrit data")
        d1.addCallback(lambda res:
                       rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
        def _got_s2(s2node):
            # link subdir2 into P twice: once read-write, once read-only
            d2 = privnode.set_uri(u"s2-rw", s2node.get_uri())
            d2.addCallback(lambda node:
                           privnode.set_uri(u"s2-ro",
                                            s2node.get_readonly_uri()))
            return d2
        d1.addCallback(_got_s2)
        d1.addCallback(lambda res: privnode)
        return d1
    d.addCallback(_got_new_dir)
    return d
def _check_publish1(self, res):
    """Read back R/subdir1/mydata567 one child at a time (the iterative
    API) from a different client and verify its contents."""
    c1 = self.clients[1]
    d = defer.succeed(c1.create_node_from_uri(self._root_directory_uri))
    d.addCallback(self.log, "check_publish1 got /")
    d.addCallback(lambda root: root.get(u"subdir1"))
    d.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
    d.addCallback(lambda filenode: filenode.download_to_data())
    d.addCallback(self.log, "get finished")
    def _get_done(data):
        self.failUnlessEqual(data, self.data)
    d.addCallback(_get_done)
    return d
def _check_publish2(self, res):
    """Read back the published tree via the path-based API and verify
    the directory type, the file contents, and node-equality for two
    nodes built from the same URI."""
    rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri)
    d = rootnode.get_child_at_path(u"subdir1")
    d.addCallback(lambda dirnode:
                  self.failUnless(IDirectoryNode.providedBy(dirnode)))
    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    d.addCallback(lambda filenode: filenode.download_to_data())
    d.addCallback(lambda data: self.failUnlessEqual(data, self.data))

    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    def _got_filenode(filenode):
        fnode = self.clients[1].create_node_from_uri(filenode.get_uri())
        # was a bare 'assert', which is silently stripped when Python runs
        # with -O; use a real test assertion instead
        self.failUnlessEqual(fnode, filenode)
    d.addCallback(_got_filenode)
    return d
2008-01-04 00:02:05 +00:00
def _check_publish_private(self, resnode):
    """Verify the private directory P via the path-based API.

    Checks read access to P/personal/sekrit data, mutability of s2-rw,
    read-only enforcement on s2-ro (every mutating call must raise
    NotMutableError), some move_child_to shuffling, and finally the
    manifest and deep-stats results for the whole tree.
    """
    self._private_node = resnode

    d = self._private_node.get_child_at_path(u"personal")
    def _got_personal(personal):
        self._personal_node = personal
        return personal
    d.addCallback(_got_personal)

    d.addCallback(lambda dirnode:
                  self.failUnless(IDirectoryNode.providedBy(dirnode),
                                  dirnode))
    def get_path(path):
        return self._private_node.get_child_at_path(path)

    d.addCallback(lambda res: get_path(u"personal/sekrit data"))
    d.addCallback(lambda filenode: filenode.download_to_data())
    d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
    d.addCallback(lambda res: get_path(u"s2-rw"))
    d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
    d.addCallback(lambda res: get_path(u"s2-ro"))
    def _got_s2ro(dirnode):
        self.failUnless(dirnode.is_mutable(), dirnode)
        self.failUnless(dirnode.is_readonly(), dirnode)
        d1 = defer.succeed(None)
        d1.addCallback(lambda res: dirnode.list())
        d1.addCallback(self.log, "dirnode.list")

        # every mutating operation on the read-only node must fail
        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "mkdir(nope)", None,
                                        dirnode.create_empty_directory,
                                        u"nope"))

        d1.addCallback(self.log, "doing add_file(ro)")
        ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "add_file(nope)",
                                        None, dirnode.add_file, u"hope", ut))

        d1.addCallback(self.log, "doing get(ro)")
        d1.addCallback(lambda res: dirnode.get(u"mydata992"))
        d1.addCallback(lambda filenode:
                       self.failUnless(IFileNode.providedBy(filenode)))
        d1.addCallback(self.log, "doing delete(ro)")
        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "delete(nope)",
                                        None, dirnode.delete, u"mydata992"))

        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "set_uri(nope)",
                                        None, dirnode.set_uri,
                                        u"hopeless", self.uri))

        d1.addCallback(lambda res:
                       self.shouldFail2(NoSuchChildError, "get(missing)",
                                        "missing", dirnode.get, u"missing"))

        personal = self._personal_node
        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "mv from readonly",
                                        None, dirnode.move_child_to,
                                        u"mydata992", personal, u"nope"))

        d1.addCallback(self.log, "doing move_child_to(ro)2")
        d1.addCallback(lambda res:
                       self.shouldFail2(NotMutableError, "mv to readonly",
                                        None, personal.move_child_to,
                                        u"sekrit data", dirnode, u"nope"))

        d1.addCallback(self.log, "finished with _got_s2ro")
        return d1
    d.addCallback(_got_s2ro)
    def _got_home(dummy):
        home = self._private_node
        personal = self._personal_node
        d1 = defer.succeed(None)
        # shuffle 'sekrit data' around: personal -> home, rename in place,
        # then back into personal
        d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit")
        d1.addCallback(lambda res:
                       personal.move_child_to(u"sekrit data", home,
                                              u"sekrit"))

        d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'")
        d1.addCallback(lambda res:
                       home.move_child_to(u"sekrit", home, u"sekrit data"))

        d1.addCallback(self.log, "mv 'P/sekret data' P/personal/")
        d1.addCallback(lambda res:
                       home.move_child_to(u"sekrit data", personal))

        d1.addCallback(lambda res: home.build_manifest().when_done())
        d1.addCallback(self.log, "manifest")
        # five items:
        # P/
        # P/personal/
        # P/personal/sekrit data
        # P/s2-rw  (same as P/s2-ro)
        # P/s2-rw/mydata992 (same as P/s2-rw/mydata992)
        d1.addCallback(lambda res:
                       self.failUnlessEqual(len(res["manifest"]), 5))
        d1.addCallback(lambda res: home.start_deep_stats().when_done())
        def _check_stats(stats):
            expected = {"count-immutable-files": 1,
                        "count-mutable-files": 0,
                        "count-literal-files": 1,
                        "count-files": 2,
                        "count-directories": 3,
                        "size-immutable-files": 112,
                        "size-literal-files": 23,
                        #"size-directories": 616, # varies
                        #"largest-directory": 616,
                        "largest-directory-children": 3,
                        "largest-immutable-file": 112,
                        }
            for k, v in expected.iteritems():
                self.failUnlessEqual(stats[k], v,
                                     "stats[%s] was %s, not %s" %
                                     (k, stats[k], v))
            # directory sizes vary with encoding details, so only check
            # loose lower bounds for them
            self.failUnless(stats["size-directories"] > 1300,
                            stats["size-directories"])
            self.failUnless(stats["largest-directory"] > 800,
                            stats["largest-directory"])
            self.failUnlessEqual(stats["size-files-histogram"],
                                 [(11, 31, 1), (101, 316, 1)])
        d1.addCallback(_check_stats)
        return d1
    d.addCallback(_got_home)
    return d
2007-06-28 22:05:33 +00:00
def shouldFail(self, res, expected_failure, which, substring=None):
    """addBoth-style check that a Deferred failed the expected way.

    `res` is whatever the Deferred fired with: a Failure wrapping
    `expected_failure` (optionally containing `substring` in its string
    form) passes; anything else — including success — fails the test.
    """
    if not isinstance(res, Failure):
        # the operation succeeded, but it was supposed to fail
        self.fail("%s was supposed to raise %s, not get '%s'" %
                  (which, expected_failure, res))
        return
    res.trap(expected_failure)
    if substring:
        message = ("substring '%s' not in '%s'"
                   % (substring, str(res)))
        self.failUnless(substring in str(res), message)
2007-12-03 21:52:42 +00:00
def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs):
    """Invoke `callable(*args, **kwargs)` and require that it fails with
    `expected_failure` (optionally with `substring` in the Failure text).
    Returns a Deferred that errbacks if the call unexpectedly succeeds or
    fails with the wrong exception."""
    assert substring is None or isinstance(substring, str)
    d = defer.maybeDeferred(callable, *args, **kwargs)
    def _examine(res):
        if not isinstance(res, Failure):
            self.fail("%s was supposed to raise %s, not get '%s'" %
                      (which, expected_failure, res))
            return
        res.trap(expected_failure)
        if substring:
            message = ("substring '%s' not in '%s'"
                       % (substring, str(res)))
            self.failUnless(substring in str(res), message)
    d.addBoth(_examine)
    return d
2007-08-16 23:49:40 +00:00
def PUT(self, urlpath, data):
    # HTTP PUT of `data` to the webapi at `urlpath`; returns a Deferred
    # that fires with the response body.
    return getPage(self.webish_url + urlpath, method="PUT", postdata=data)
def GET(self, urlpath, followRedirect=False):
    # HTTP GET from the webapi at `urlpath`; returns a Deferred that
    # fires with the response body.
    return getPage(self.webish_url + urlpath, method="GET",
                   followRedirect=followRedirect)
2008-02-15 11:02:50 +00:00
def POST(self, urlpath, followRedirect=False, use_helper=False, **fields):
    """HTTP POST of `fields` as multipart/form-data to the webapi.

    A tuple-valued field is treated as (filename, contents) and sent as a
    file upload. With use_helper=True the request goes to the
    helper-using node's webapi instead. Returns a Deferred firing with
    the response body.
    """
    if use_helper:
        base = self.helper_webish_url
    else:
        base = self.webish_url
    url = base + urlpath
    sepbase = "boogabooga"
    sep = "--" + sepbase
    # assemble the multipart body line by line; the _charset part tells
    # the server how the field values are encoded
    form = [sep,
            'Content-Disposition: form-data; name="_charset"',
            '',
            'UTF-8',
            sep]
    for name, value in fields.iteritems():
        if isinstance(value, tuple):
            filename, value = value
            form.append('Content-Disposition: form-data; name="%s"; '
                        'filename="%s"' % (name, filename.encode("utf-8")))
        else:
            form.append('Content-Disposition: form-data; name="%s"' % name)
        form.append('')
        form.append(str(value))
        form.append(sep)
    form[-1] += "--"  # the final boundary carries a trailing "--"
    body = "\r\n".join(form) + "\r\n"
    headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase,
               }
    return getPage(url, method="POST", postdata=body,
                   headers=headers, followRedirect=followRedirect)
2006-12-07 19:48:06 +00:00
def _test_web ( self , res ) :
base = self . webish_url
2007-12-20 00:54:40 +00:00
public = " uri/ " + self . _root_directory_uri
2006-12-07 19:48:06 +00:00
d = getPage ( base )
def _got_welcome ( page ) :
2008-02-06 00:32:27 +00:00
expected = " Connected Storage Servers: <span> %d </span> " % ( self . numclients )
2006-12-07 19:48:06 +00:00
self . failUnless ( expected in page ,
2008-02-06 00:32:27 +00:00
" I didn ' t see the right ' connected storage servers ' "
" message in: %s " % page
2006-12-07 19:48:06 +00:00
)
2007-08-12 17:29:38 +00:00
expected = " My nodeid: <span> %s </span> " % ( b32encode ( self . clients [ 0 ] . nodeid ) . lower ( ) , )
2007-03-29 21:31:55 +00:00
self . failUnless ( expected in page ,
" I didn ' t see the right ' My nodeid ' message "
" in: %s " % page )
2008-04-14 23:28:11 +00:00
self . failUnless ( " Helper: 0 active uploads " in page )
2006-12-07 19:48:06 +00:00
d . addCallback ( _got_welcome )
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " done with _got_welcome " )
2008-02-15 11:02:50 +00:00
# get the welcome page from the node that uses the helper too
d . addCallback ( lambda res : getPage ( self . helper_webish_url ) )
def _got_welcome_helper ( page ) :
self . failUnless ( " Connected to helper?: <span>yes</span> " in page ,
page )
2008-04-14 23:28:11 +00:00
self . failUnless ( " Not running helper " in page )
2008-02-15 11:02:50 +00:00
d . addCallback ( _got_welcome_helper )
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : getPage ( base + public ) )
d . addCallback ( lambda res : getPage ( base + public + " /subdir1 " ) )
2006-12-07 19:48:06 +00:00
def _got_subdir1 ( page ) :
# there ought to be an href for our file
2007-05-24 00:25:49 +00:00
self . failUnless ( ( " <td> %d </td> " % len ( self . data ) ) in page )
2006-12-07 21:48:37 +00:00
self . failUnless ( " >mydata567</a> " in page )
2006-12-07 19:48:06 +00:00
d . addCallback ( _got_subdir1 )
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " done with _got_subdir1 " )
2007-06-15 08:38:55 +00:00
d . addCallback ( lambda res :
2007-12-03 21:52:42 +00:00
getPage ( base + public + " /subdir1/mydata567 " ) )
2006-12-07 21:48:37 +00:00
def _got_data ( page ) :
self . failUnlessEqual ( page , self . data )
d . addCallback ( _got_data )
2007-04-24 08:41:54 +00:00
# download from a URI embedded in a URL
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " _get_from_uri " )
2007-04-24 08:41:54 +00:00
def _get_from_uri ( res ) :
2007-08-16 23:49:40 +00:00
return getPage ( base + " uri/ %s ?filename= %s "
2007-04-24 08:41:54 +00:00
% ( self . uri , " mydata567 " ) )
d . addCallback ( _get_from_uri )
def _got_from_uri ( page ) :
self . failUnlessEqual ( page , self . data )
d . addCallback ( _got_from_uri )
# download from a URI embedded in a URL, second form
2007-08-16 23:49:40 +00:00
d . addCallback ( self . log , " _get_from_uri2 " )
2007-04-24 08:41:54 +00:00
def _get_from_uri2 ( res ) :
2007-08-16 23:49:40 +00:00
return getPage ( base + " uri?uri= %s " % ( self . uri , ) )
2007-04-24 08:41:54 +00:00
d . addCallback ( _get_from_uri2 )
2007-12-03 21:52:42 +00:00
d . addCallback ( _got_from_uri )
2007-04-24 08:41:54 +00:00
2007-07-03 20:18:14 +00:00
# download from a bogus URI, make sure we get a reasonable error
2008-02-05 20:05:13 +00:00
d . addCallback ( self . log , " _get_from_bogus_uri " , level = log . UNUSUAL )
2007-07-03 20:18:14 +00:00
def _get_from_bogus_uri ( res ) :
2007-08-16 23:49:40 +00:00
d1 = getPage ( base + " uri/ %s ?filename= %s "
% ( self . mangle_uri ( self . uri ) , " mydata567 " ) )
d1 . addBoth ( self . shouldFail , Error , " downloading bogus URI " ,
2007-08-17 00:05:26 +00:00
" 410 " )
2007-08-16 23:49:40 +00:00
return d1
2007-07-03 20:18:14 +00:00
d . addCallback ( _get_from_bogus_uri )
2008-02-05 20:05:13 +00:00
d . addCallback ( self . log , " _got_from_bogus_uri " , level = log . UNUSUAL )
2007-08-16 23:49:40 +00:00
# upload a file with PUT
d . addCallback ( self . log , " about to try PUT " )
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . PUT ( public + " /subdir3/new.txt " ,
2007-08-16 23:49:40 +00:00
" new.txt contents " ) )
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . GET ( public + " /subdir3/new.txt " ) )
2007-08-16 23:49:40 +00:00
d . addCallback ( self . failUnlessEqual , " new.txt contents " )
2007-09-19 08:43:44 +00:00
# and again with something large enough to use multiple segments,
# and hopefully trigger pauseProducing too
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . PUT ( public + " /subdir3/big.txt " ,
2007-09-19 08:43:44 +00:00
" big " * 500000 ) ) # 1.5MB
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . GET ( public + " /subdir3/big.txt " ) )
2007-09-19 08:43:44 +00:00
d . addCallback ( lambda res : self . failUnlessEqual ( len ( res ) , 1500000 ) )
2007-07-03 20:47:37 +00:00
2007-08-17 00:03:50 +00:00
# can we replace files in place?
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . PUT ( public + " /subdir3/new.txt " ,
2007-08-17 00:03:50 +00:00
" NEWER contents " ) )
2007-12-03 21:52:42 +00:00
d . addCallback ( lambda res : self . GET ( public + " /subdir3/new.txt " ) )
2007-08-17 00:03:50 +00:00
d . addCallback ( self . failUnlessEqual , " NEWER contents " )
2008-02-15 11:02:50 +00:00
# test unlinked POST
d . addCallback ( lambda res : self . POST ( " uri " , t = " upload " ,
file = ( " new.txt " , " data " * 10000 ) ) )
# and again using the helper, which exercises different upload-status
# display code
d . addCallback ( lambda res : self . POST ( " uri " , use_helper = True , t = " upload " ,
file = ( " foo.txt " , " data2 " * 10000 ) ) )
2008-02-13 20:57:39 +00:00
# check that the status page exists
2008-03-01 05:19:03 +00:00
d . addCallback ( lambda res : self . GET ( " status " , followRedirect = True ) )
2008-03-04 04:37:17 +00:00
def _got_status ( res ) :
# find an interesting upload and download to look at. LIT files
# are not interesting.
2008-04-17 20:02:22 +00:00
for ds in self . clients [ 0 ] . list_all_download_statuses ( ) :
if ds . get_size ( ) > 200 :
self . _down_status = ds . get_counter ( )
for us in self . clients [ 0 ] . list_all_upload_statuses ( ) :
if us . get_size ( ) > 200 :
self . _up_status = us . get_counter ( )
rs = self . clients [ 0 ] . list_all_retrieve_statuses ( ) [ 0 ]
2008-04-17 02:05:41 +00:00
self . _retrieve_status = rs . get_counter ( )
2008-04-17 20:02:22 +00:00
ps = self . clients [ 0 ] . list_all_publish_statuses ( ) [ 0 ]
2008-04-17 02:05:41 +00:00
self . _publish_status = ps . get_counter ( )
2008-04-17 20:02:22 +00:00
us = self . clients [ 0 ] . list_all_mapupdate_statuses ( ) [ 0 ]
self . _update_status = us . get_counter ( )
2008-03-04 04:37:17 +00:00
# and that there are some upload- and download- status pages
return self . GET ( " status/up- %d " % self . _up_status )
d . addCallback ( _got_status )
def _got_up ( res ) :
return self . GET ( " status/down- %d " % self . _down_status )
d . addCallback ( _got_up )
2008-03-04 08:24:35 +00:00
def _got_down ( res ) :
2008-04-17 20:02:22 +00:00
return self . GET ( " status/mapupdate- %d " % self . _update_status )
2008-04-17 02:05:41 +00:00
d . addCallback ( _got_down )
2008-04-17 20:02:22 +00:00
def _got_update ( res ) :
return self . GET ( " status/publish- %d " % self . _publish_status )
d . addCallback ( _got_update )
2008-03-04 08:24:35 +00:00
def _got_publish ( res ) :
return self . GET ( " status/retrieve- %d " % self . _retrieve_status )
2008-04-17 02:05:41 +00:00
d . addCallback ( _got_publish )
2008-04-15 22:57:28 +00:00
# check that the helper status page exists
d . addCallback ( lambda res :
self . GET ( " helper_status " , followRedirect = True ) )
def _got_helper_status ( res ) :
self . failUnless ( " Bytes Fetched: " in res )
# touch a couple of files in the helper's working directory to
# exercise more code paths
workdir = os . path . join ( self . getdir ( " client0 " ) , " helper " )
incfile = os . path . join ( workdir , " CHK_incoming " , " spurious " )
f = open ( incfile , " wb " )
f . write ( " small file " )
f . close ( )
then = time . time ( ) - 86400 * 3
now = time . time ( )
os . utime ( incfile , ( now , then ) )
encfile = os . path . join ( workdir , " CHK_encoding " , " spurious " )
f = open ( encfile , " wb " )
f . write ( " less small file " )
f . close ( )
os . utime ( encfile , ( now , then ) )
d . addCallback ( _got_helper_status )
# and that the json form exists
d . addCallback ( lambda res :
self . GET ( " helper_status?t=json " , followRedirect = True ) )
def _got_helper_status_json ( res ) :
data = simplejson . loads ( res )
self . failUnlessEqual ( data [ " chk_upload_helper.upload_need_upload " ] ,
1 )
self . failUnlessEqual ( data [ " chk_upload_helper.incoming_count " ] , 1 )
self . failUnlessEqual ( data [ " chk_upload_helper.incoming_size " ] , 10 )
self . failUnlessEqual ( data [ " chk_upload_helper.incoming_size_old " ] ,
10 )
self . failUnlessEqual ( data [ " chk_upload_helper.encoding_count " ] , 1 )
self . failUnlessEqual ( data [ " chk_upload_helper.encoding_size " ] , 15 )
self . failUnlessEqual ( data [ " chk_upload_helper.encoding_size_old " ] ,
15 )
d . addCallback ( _got_helper_status_json )
# and check that client[3] (which uses a helper but does not run one
# itself) doesn't explode when you ask for its status
d . addCallback ( lambda res : getPage ( self . helper_webish_url + " status/ " ) )
def _got_non_helper_status ( res ) :
self . failUnless ( " Upload and Download Status " in res )
d . addCallback ( _got_non_helper_status )
# or for helper status with t=json
d . addCallback ( lambda res :
getPage ( self . helper_webish_url + " helper_status?t=json " ) )
def _got_non_helper_status_json ( res ) :
data = simplejson . loads ( res )
self . failUnlessEqual ( data , { } )
d . addCallback ( _got_non_helper_status_json )
# see if the statistics page exists
d . addCallback ( lambda res : self . GET ( " statistics " ) )
def _got_stats ( res ) :
self . failUnless ( " Node Statistics " in res )
2008-07-16 00:23:25 +00:00
self . failUnless ( " ' downloader.files_downloaded ' : 5, " in res , res )
2008-04-15 22:57:28 +00:00
d . addCallback ( _got_stats )
d . addCallback ( lambda res : self . GET ( " statistics?t=json " ) )
def _got_stats_json ( res ) :
data = simplejson . loads ( res )
self . failUnlessEqual ( data [ " counters " ] [ " uploader.files_uploaded " ] , 5 )
self . failUnlessEqual ( data [ " stats " ] [ " chk_upload_helper.upload_need_upload " ] , 1 )
d . addCallback ( _got_stats_json )
2007-08-22 21:54:34 +00:00
2007-07-03 20:47:37 +00:00
# TODO: mangle the second segment of a file, to test errors that
# occur after we've already sent some good data, which uses a
# different error path.
2007-07-03 20:18:14 +00:00
2007-07-08 03:06:44 +00:00
# TODO: download a URI with a form
2007-04-24 08:41:54 +00:00
# TODO: create a directory by using a form
# TODO: upload by using a form on the directory page
2007-12-03 21:52:42 +00:00
# url = base + "somedir/subdir1/freeform_post!!upload"
2007-04-24 08:41:54 +00:00
# TODO: delete a file by using a button on the directory page
2006-12-07 19:48:06 +00:00
return d
2007-06-26 22:55:00 +00:00
    def _test_runner(self, res):
        """Exercise the 'tahoe debug' diagnostic CLI tools in runner.py.

        Walks the test grid's storage directories to locate one immutable
        (CHK) share file, then runs 'debug dump-share', 'debug find-shares',
        and 'debug catalog-shares' against it, asserting on their output.
        Assumes the grid holds exactly one 3-of-10 upload of self.data.
        """
        # exercise some of the diagnostic tools in runner.py

        # find a share: walk every node's basedir looking for a sharefile
        for (dirpath, dirnames, filenames) in os.walk(self.basedir):
            if "storage" not in dirpath:
                continue
            if not filenames:
                continue
            pieces = dirpath.split(os.sep)
            if pieces[-4] == "storage" and pieces[-3] == "shares":
                # we're sitting in .../storage/shares/$START/$SINDEX , and there
                # are sharefiles here
                filename = os.path.join(dirpath, filenames[0])
                # peek at the magic to see if it is a chk share
                # (immutable share files begin with a version-1 marker)
                magic = open(filename, "rb").read(4)
                if magic == '\x00\x00\x00\x01':
                    break
        else:
            # for/else: only reached if the walk never hit 'break'
            self.fail("unable to find any uri_extension files in %s"
                      % self.basedir)
        log.msg("test_system.SystemTest._test_runner using %s" % filename)

        # dump the share and check the process exits cleanly
        out, err = StringIO(), StringIO()
        rc = runner.runner(["debug", "dump-share", "--offsets",
                            filename],
                           stdout=out, stderr=err)
        output = out.getvalue()
        self.failUnlessEqual(rc, 0)

        # we only upload a single file, so we can assert some things about
        # its size and shares.
        self.failUnless(("share filename: %s" % filename) in output)
        self.failUnless("size: %d\n" % len(self.data) in output)
        self.failUnless("num_segments: 1\n" in output)
        # segment_size is always a multiple of needed_shares
        self.failUnless("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3) in output)
        self.failUnless("total_shares: 10\n" in output)
        # keys which are supposed to be present in the dump output
        for key in ("size", "num_segments", "segment_size",
                    "needed_shares", "total_shares",
                    "codec_name", "codec_params", "tail_codec_params",
                    #"plaintext_hash", "plaintext_root_hash",
                    "crypttext_hash", "crypttext_root_hash",
                    "share_root_hash", "UEB_hash"):
            self.failUnless("%s: " % key in output, key)
        self.failUnless("verify-cap: URI:CHK-Verifier:" in output)

        # now use its storage index to find the other shares using the
        # 'find-shares' tool
        sharedir, shnum = os.path.split(filename)
        storagedir, storage_index_s = os.path.split(sharedir)
        out, err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["debug", "find-shares", storage_index_s] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        sharefiles = [sfn.strip() for sfn in out.readlines()]
        # 3-of-10 encoding: expect all ten shares to be found
        self.failUnlessEqual(len(sharefiles), 10)

        # also exercise the 'catalog-shares' tool
        out, err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["debug", "catalog-shares"] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        descriptions = [sfn.strip() for sfn in out.readlines()]
        # NOTE(review): 30 appears to assume three files' worth of shares
        # (3 x 10) across the grid -- confirm against the uploads done earlier
        self.failUnlessEqual(len(descriptions), 30)
        matching = [line
                    for line in descriptions
                    if line.startswith("CHK %s " % storage_index_s)]
        self.failUnlessEqual(len(matching), 10)
2007-09-26 19:06:55 +00:00
def _test_control ( self , res ) :
# exercise the remote-control-the-client foolscap interfaces in
# allmydata.control (mostly used for performance tests)
c0 = self . clients [ 0 ]
2007-12-17 23:39:54 +00:00
control_furl_file = os . path . join ( c0 . basedir , " private " , " control.furl " )
2007-09-26 19:06:55 +00:00
control_furl = open ( control_furl_file , " r " ) . read ( ) . strip ( )
# it doesn't really matter which Tub we use to connect to the client,
2007-12-03 21:52:42 +00:00
# so let's just use our IntroducerNode's
d = self . introducer . tub . getReference ( control_furl )
2007-09-26 19:06:55 +00:00
d . addCallback ( self . _test_control2 , control_furl_file )
return d
def _test_control2 ( self , rref , filename ) :
2008-03-24 16:46:06 +00:00
d = rref . callRemote ( " upload_from_file_to_uri " , filename , convergence = None )
2007-09-26 19:06:55 +00:00
downfile = os . path . join ( self . basedir , " control.downfile " )
d . addCallback ( lambda uri :
rref . callRemote ( " download_from_uri_to_file " ,
uri , downfile ) )
def _check ( res ) :
self . failUnlessEqual ( res , downfile )
data = open ( downfile , " r " ) . read ( )
expected_data = open ( filename , " r " ) . read ( )
self . failUnlessEqual ( data , expected_data )
d . addCallback ( _check )
2007-12-14 10:08:16 +00:00
d . addCallback ( lambda res : rref . callRemote ( " speed_test " , 1 , 200 , False ) )
2007-09-26 19:06:55 +00:00
if sys . platform == " linux2 " :
d . addCallback ( lambda res : rref . callRemote ( " get_memory_usage " ) )
2007-09-26 19:21:15 +00:00
d . addCallback ( lambda res : rref . callRemote ( " measure_peer_response_time " ) )
2007-09-26 19:06:55 +00:00
return d
2007-10-12 02:20:41 +00:00
    def _test_cli(self, res):
        """Exercise the user-facing CLI (ls, put, get, mkdir, rm, mv, ln, cp,
        aliases) against client0's webapi node.

        Builds one long Deferred chain; each step's assertions depend on the
        files created by earlier steps, so the ordering is significant.
        """
        # run various CLI commands (in a thread, since they use blocking
        # network calls)
        private_uri = self._private_node.get_uri()
        # NOTE(review): some_uri, TESTDATA and _check_ls_root below appear to
        # be unused in this method -- confirm before removing
        some_uri = self._root_directory_uri
        client0_basedir = self.getdir("client0")

        nodeargs = [
            "--node-directory", client0_basedir,
            ]
        TESTDATA = "I will not write the same thing over and over.\n" * 100

        d = defer.succeed(None)

        # for compatibility with earlier versions, private/root_dir.cap is
        # supposed to be treated as an alias named "tahoe:". Start by making
        # sure that works, before we add other aliases.

        root_file = os.path.join(client0_basedir, "private", "root_dir.cap")
        f = open(root_file, "w")
        f.write(private_uri)
        f.close()

        # helper: run one CLI verb with the standard node args prepended
        def run(ignored, verb, *args, **kwargs):
            stdin = kwargs.get("stdin", "")
            newargs = [verb] + nodeargs + list(args)
            return self._run_cli(newargs, stdin=stdin)

        # helper: assert which child names do/don't appear in 'ls' output
        def _check_ls((out,err), expected_children, unexpected_children=[]):
            self.failUnlessEqual(err, "")
            for s in expected_children:
                self.failUnless(s in out, (s,out))
            for s in unexpected_children:
                self.failIf(s in out, (s,out))

        def _check_ls_root((out,err)):
            self.failUnless("personal" in out)
            self.failUnless("s2-ro" in out)
            self.failUnless("s2-rw" in out)
            self.failUnlessEqual(err, "")

        # this should reference private_uri
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["personal", "s2-ro", "s2-rw"])

        d.addCallback(run, "list-aliases")
        def _check_aliases_1((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(out, "tahoe: %s\n" % private_uri)
        d.addCallback(_check_aliases_1)

        # now that that's out of the way, remove root_dir.cap and work with
        # new files
        d.addCallback(lambda res: os.unlink(root_file))
        d.addCallback(run, "list-aliases")
        def _check_aliases_2((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(out, "")
        d.addCallback(_check_aliases_2)

        d.addCallback(run, "mkdir")
        def _got_dir((out,err)):
            # mkdir prints the new directory's writecap on stdout
            self.failUnless(uri.from_string_dirnode(out.strip()))
            return out.strip()
        d.addCallback(_got_dir)
        d.addCallback(lambda newcap: run(None, "add-alias", "tahoe", newcap))

        d.addCallback(run, "list-aliases")
        def _check_aliases_3((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnless("tahoe: " in out)
        d.addCallback(_check_aliases_3)

        def _check_empty_dir((out,err)):
            self.failUnlessEqual(out, "")
            self.failUnlessEqual(err, "")
        d.addCallback(run, "ls")
        d.addCallback(_check_empty_dir)

        def _check_missing_dir((out,err)):
            # TODO: check that rc==2
            self.failUnlessEqual(out, "")
            self.failUnlessEqual(err, "No such file or directory\n")
        d.addCallback(run, "ls", "bogus")
        d.addCallback(_check_missing_dir)

        # create ten local files with distinct contents to upload from
        files = []
        datas = []
        for i in range(10):
            fn = os.path.join(self.basedir, "file%d" % i)
            files.append(fn)
            data = "data to be uploaded: file%d\n" % i
            datas.append(data)
            open(fn,"wb").write(data)

        # helper: assert a 'get'-style command's stdout matches a known file
        # body (by index into datas) or a literal string
        def _check_stdout_against((out,err), filenum=None, data=None):
            self.failUnlessEqual(err, "")
            if filenum is not None:
                self.failUnlessEqual(out, datas[filenum])
            if data is not None:
                self.failUnlessEqual(out, data)

        # test all both forms of put: from a file, and from stdin
        #  tahoe put bar FOO
        d.addCallback(run, "put", files[0], "tahoe-file0")
        def _put_out((out,err)):
            self.failUnless("URI:LIT:" in out, out)
            self.failUnless("201 Created" in err, err)
            uri0 = out.strip()
            return run(None, "get", uri0)
        d.addCallback(_put_out)
        d.addCallback(lambda (out,err): self.failUnlessEqual(out, datas[0]))

        d.addCallback(run, "put", files[1], "subdir/tahoe-file1")
        #  tahoe put bar tahoe:FOO
        d.addCallback(run, "put", files[2], "tahoe:file2")
        d.addCallback(run, "put", "--mutable", files[3], "tahoe:file3")
        def _check_put_mutable((out,err)):
            # remember the mutable writecap for the --uri / --readonly-uri
            # 'ls' checks further down
            self._mutable_file3_uri = out.strip()
        d.addCallback(_check_put_mutable)
        d.addCallback(run, "get", "tahoe:file3")
        d.addCallback(_check_stdout_against, 3)

        #  tahoe put FOO
        STDIN_DATA = "This is the file to upload from stdin."
        d.addCallback(run, "put", "-", "tahoe-file-stdin", stdin=STDIN_DATA)
        #  tahoe put tahoe:FOO
        d.addCallback(run, "put", "-", "tahoe:from-stdin",
                      stdin="Other file from stdin.")

        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-file0", "file2", "file3", "subdir",
                                  "tahoe-file-stdin", "from-stdin"])
        d.addCallback(run, "ls", "subdir")
        d.addCallback(_check_ls, ["tahoe-file1"])

        # tahoe mkdir FOO
        d.addCallback(run, "mkdir", "subdir2")
        d.addCallback(run, "ls")
        # TODO: extract the URI, set an alias with it
        d.addCallback(_check_ls, ["subdir2"])

        # tahoe get: (to stdin and to a file)
        d.addCallback(run, "get", "tahoe-file0")
        d.addCallback(_check_stdout_against, 0)
        d.addCallback(run, "get", "tahoe:subdir/tahoe-file1")
        d.addCallback(_check_stdout_against, 1)
        outfile0 = os.path.join(self.basedir, "outfile0")
        d.addCallback(run, "get", "file2", outfile0)
        def _check_outfile0((out,err)):
            data = open(outfile0,"rb").read()
            self.failUnlessEqual(data, "data to be uploaded: file2\n")
        d.addCallback(_check_outfile0)
        # NOTE(review): outfile1 is assigned the "outfile0" path, so both
        # names alias the same on-disk file (the second get overwrites the
        # first). The checks still pass, but this looks like a copy-paste
        # slip -- confirm whether "outfile1" was intended.
        outfile1 = os.path.join(self.basedir, "outfile0")
        d.addCallback(run, "get", "tahoe:subdir/tahoe-file1", outfile1)
        def _check_outfile1((out,err)):
            data = open(outfile1,"rb").read()
            self.failUnlessEqual(data, "data to be uploaded: file1\n")
        d.addCallback(_check_outfile1)

        d.addCallback(run, "rm", "tahoe-file0")
        d.addCallback(run, "rm", "tahoe:file2")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, [], ["tahoe-file0", "file2"])

        d.addCallback(run, "ls", "-l")
        def _check_ls_l((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "tahoe-file-stdin" in l:
                    # immutable files show a read-only mode string
                    self.failUnless(l.startswith("-r-- "), l)
                    self.failUnless(" %d " % len(STDIN_DATA) in l)
                if "file3" in l:
                    self.failUnless(l.startswith("-rw- "), l) # mutable
        d.addCallback(_check_ls_l)

        d.addCallback(run, "ls", "--uri")
        def _check_ls_uri((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "file3" in l:
                    self.failUnless(self._mutable_file3_uri in l)
        d.addCallback(_check_ls_uri)

        d.addCallback(run, "ls", "--readonly-uri")
        def _check_ls_rouri((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "file3" in l:
                    # --readonly-uri shows the diminished read cap, not the
                    # writecap we remembered
                    rw_uri = self._mutable_file3_uri
                    u = uri.from_string_mutable_filenode(rw_uri)
                    ro_uri = u.get_readonly().to_string()
                    self.failUnless(ro_uri in l)
        d.addCallback(_check_ls_rouri)

        d.addCallback(run, "mv", "tahoe-file-stdin", "tahoe-moved")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-moved"], ["tahoe-file-stdin"])

        d.addCallback(run, "ln", "tahoe-moved", "newlink")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-moved", "newlink"])

        d.addCallback(run, "cp", "tahoe:file3", "tahoe:file3-copy")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy"])
        d.addCallback(run, "get", "tahoe:file3-copy")
        d.addCallback(_check_stdout_against, 3)

        # copy from disk into tahoe
        d.addCallback(run, "cp", files[4], "tahoe:file4")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file4")
        d.addCallback(_check_stdout_against, 4)

        # copy from tahoe into disk
        target_filename = os.path.join(self.basedir, "file-out")
        d.addCallback(run, "cp", "tahoe:file4", target_filename)
        def _check_cp_out((out,err)):
            self.failUnless(os.path.exists(target_filename))
            got = open(target_filename,"rb").read()
            self.failUnlessEqual(got, datas[4])
        d.addCallback(_check_cp_out)

        # copy from disk to disk (silly case)
        target2_filename = os.path.join(self.basedir, "file-out-copy")
        d.addCallback(run, "cp", target_filename, target2_filename)
        def _check_cp_out2((out,err)):
            self.failUnless(os.path.exists(target2_filename))
            got = open(target2_filename,"rb").read()
            self.failUnlessEqual(got, datas[4])
        d.addCallback(_check_cp_out2)

        # copy from tahoe into disk, overwriting an existing file
        d.addCallback(run, "cp", "tahoe:file3", target_filename)
        def _check_cp_out3((out,err)):
            self.failUnless(os.path.exists(target_filename))
            got = open(target_filename,"rb").read()
            self.failUnlessEqual(got, datas[3])
        d.addCallback(_check_cp_out3)

        # copy from disk into tahoe, overwriting an existing immutable file
        d.addCallback(run, "cp", files[5], "tahoe:file4")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file4")
        d.addCallback(_check_stdout_against, 5)

        # copy from disk into tahoe, overwriting an existing mutable file
        d.addCallback(run, "cp", files[5], "tahoe:file3")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file3")
        d.addCallback(_check_stdout_against, 5)

        # recursive copy: setup a local tree with a nested subdirectory
        dn = os.path.join(self.basedir, "dir1")
        os.makedirs(dn)
        open(os.path.join(dn, "rfile1"), "wb").write("rfile1")
        open(os.path.join(dn, "rfile2"), "wb").write("rfile2")
        open(os.path.join(dn, "rfile3"), "wb").write("rfile3")
        sdn2 = os.path.join(dn, "subdir2")
        os.makedirs(sdn2)
        open(os.path.join(sdn2, "rfile4"), "wb").write("rfile4")
        open(os.path.join(sdn2, "rfile5"), "wb").write("rfile5")

        # from disk into tahoe
        d.addCallback(run, "cp", "-r", dn, "tahoe:dir1")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["dir1"])
        d.addCallback(run, "ls", "dir1")
        d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
                      ["rfile4", "rfile5"])
        d.addCallback(run, "ls", "tahoe:dir1/subdir2")
        d.addCallback(_check_ls, ["rfile4", "rfile5"],
                      ["rfile1", "rfile2", "rfile3"])
        d.addCallback(run, "get", "dir1/subdir2/rfile4")
        d.addCallback(_check_stdout_against, data="rfile4")

        # and back out again
        dn_copy = os.path.join(self.basedir, "dir1-copy")
        d.addCallback(run, "cp", "--verbose", "-r", "tahoe:dir1", dn_copy)
        def _check_cp_r_out((out,err)):
            # compare every file in the copied tree against the original
            def _cmp(name):
                old = open(os.path.join(dn, name), "rb").read()
                newfn = os.path.join(dn_copy, name)
                self.failUnless(os.path.exists(newfn))
                new = open(newfn, "rb").read()
                self.failUnlessEqual(old, new)
            _cmp("rfile1")
            _cmp("rfile2")
            _cmp("rfile3")
            _cmp(os.path.join("subdir2", "rfile4"))
            _cmp(os.path.join("subdir2", "rfile5"))
        d.addCallback(_check_cp_r_out)

        # and copy it a second time, which ought to overwrite the same files
        d.addCallback(run, "cp", "-r", "tahoe:dir1", dn_copy)

        # and tahoe-to-tahoe
        d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["dir1", "dir1-copy"])
        d.addCallback(run, "ls", "dir1-copy")
        d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
                      ["rfile4", "rfile5"])
        d.addCallback(run, "ls", "tahoe:dir1-copy/subdir2")
        d.addCallback(_check_ls, ["rfile4", "rfile5"],
                      ["rfile1", "rfile2", "rfile3"])
        d.addCallback(run, "get", "dir1-copy/subdir2/rfile4")
        d.addCallback(_check_stdout_against, data="rfile4")

        # and copy it a second time, which ought to overwrite the same files
        d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")

        # tahoe_ls doesn't currently handle the error correctly: it tries to
        # JSON-parse a traceback.
##        def _ls_missing(res):
##            argv = ["ls"] + nodeargs + ["bogus"]
##            return self._run_cli(argv)
##        d.addCallback(_ls_missing)
##        def _check_ls_missing((out,err)):
##            print "OUT", out
##            print "ERR", err
##            self.failUnlessEqual(err, "")
##        d.addCallback(_check_ls_missing)

        return d
2008-08-01 22:05:14 +00:00
def _run_cli ( self , argv , stdin = " " ) :
2008-05-20 02:28:50 +00:00
#print "CLI:", argv
2007-10-12 02:20:41 +00:00
stdout , stderr = StringIO ( ) , StringIO ( )
d = threads . deferToThread ( runner . runner , argv , run_by_human = False ,
2008-08-01 22:05:14 +00:00
stdin = StringIO ( stdin ) ,
2007-10-12 02:20:41 +00:00
stdout = stdout , stderr = stderr )
def _done ( res ) :
return stdout . getvalue ( ) , stderr . getvalue ( )
d . addCallback ( _done )
return d
2007-10-15 23:16:39 +00:00
    def _test_checker(self, res):
        """Run the file checker (with and without verify=True) against a
        directory node, a CHK file node, and a LIT file node.

        Adds a non-literal-sized file first so the directory holds at least
        one real CHK upload to check.
        """
        # 200 repetitions makes the file too big to be stored as a LIT cap
        ut = upload.Data("too big to be literal" * 200, convergence=None)
        d = self._personal_node.add_file(u"big file", ut)

        # check the directory node itself, plain and then with verification
        d.addCallback(lambda res: self._personal_node.check(Monitor()))
        def _check_dirnode_results(r):
            self.failUnless(r.is_healthy())
        d.addCallback(_check_dirnode_results)
        d.addCallback(lambda res: self._personal_node.check(Monitor(), verify=True))
        d.addCallback(_check_dirnode_results)

        # check the CHK file we just added
        d.addCallback(lambda res: self._personal_node.get(u"big file"))
        def _got_chk_filenode(n):
            self.failUnless(isinstance(n, filenode.FileNode))
            d = n.check(Monitor())
            def _check_filenode_results(r):
                self.failUnless(r.is_healthy())
            d.addCallback(_check_filenode_results)
            d.addCallback(lambda res: n.check(Monitor(), verify=True))
            d.addCallback(_check_filenode_results)
            return d
        d.addCallback(_got_chk_filenode)

        # check a LIT file: its data lives in the cap itself, so there is
        # nothing on the grid to check and the result is None
        d.addCallback(lambda res: self._personal_node.get(u"sekrit data"))
        def _got_lit_filenode(n):
            self.failUnless(isinstance(n, filenode.LiteralFileNode))
            d = n.check(Monitor())
            def _check_lit_filenode_results(r):
                self.failUnlessEqual(r, None)
            d.addCallback(_check_lit_filenode_results)
            d.addCallback(lambda res: n.check(Monitor(), verify=True))
            d.addCallback(_check_lit_filenode_results)
            return d
        d.addCallback(_got_lit_filenode)

        return d
2008-07-25 22:13:00 +00:00
2008-11-07 05:35:47 +00:00
class MutableChecker ( SystemTestMixin , unittest . TestCase , ErrorMixin ) :
2008-08-13 00:05:01 +00:00
def _run_cli ( self , argv ) :
stdout , stderr = StringIO ( ) , StringIO ( )
2008-11-13 22:27:48 +00:00
# this can only do synchronous operations
assert argv [ 0 ] == " debug "
2008-08-13 00:05:01 +00:00
runner . runner ( argv , run_by_human = False , stdout = stdout , stderr = stderr )
return stdout . getvalue ( )
def test_good ( self ) :
self . basedir = self . mktemp ( )
d = self . set_up_nodes ( )
CONTENTS = " a little bit of data "
d . addCallback ( lambda res : self . clients [ 0 ] . create_mutable_file ( CONTENTS ) )
def _created ( node ) :
self . node = node
si = self . node . get_storage_index ( )
d . addCallback ( _created )
# now make sure the webapi verifier sees no problems
def _do_check ( res ) :
url = ( self . webish_url +
" uri/ %s " % urllib . quote ( self . node . get_uri ( ) ) +
" ?t=check&verify=true " )
return getPage ( url , method = " POST " )
d . addCallback ( _do_check )
def _got_results ( out ) :
2008-11-07 05:35:47 +00:00
self . failUnless ( " <span>Healthy : Healthy</span> " in out , out )
2008-09-07 19:44:56 +00:00
self . failUnless ( " Recoverable Versions: 10*seq1- " in out , out )
2008-08-13 00:05:01 +00:00
self . failIf ( " Not Healthy! " in out , out )
self . failIf ( " Unhealthy " in out , out )
self . failIf ( " Corrupt Shares " in out , out )
d . addCallback ( _got_results )
2008-10-22 00:03:07 +00:00
d . addErrback ( self . explain_web_error )
2008-08-13 00:05:01 +00:00
return d
    def test_corrupt(self):
        """Corrupt one share of a mutable file with the debug CLI, confirm
        the webapi verifier reports it, then confirm the webapi repairer
        restores full health."""
        self.basedir = self.mktemp()
        d = self.set_up_nodes()
        CONTENTS = "a little bit of data"
        d.addCallback(lambda res: self.clients[0].create_mutable_file(CONTENTS))
        def _created(node):
            self.node = node
            si = self.node.get_storage_index()
            # locate this file's shares on client1 via the debug CLI
            out = self._run_cli(["debug", "find-shares", base32.b2a(si),
                                 self.clients[1].basedir])
            files = out.split("\n")
            # corrupt one of them, using the CLI debug command
            f = files[0]
            shnum = os.path.basename(f)
            nodeid = self.clients[1].nodeid
            nodeid_prefix = idlib.shortnodeid_b2a(nodeid)
            # remember which share we mangled, e.g. "abcd-sh0"
            self.corrupt_shareid = "%s-sh%s" % (nodeid_prefix, shnum)
            # return value of corrupt-share is not checked
            out = self._run_cli(["debug", "corrupt-share", files[0]])
        d.addCallback(_created)
        # now make sure the webapi verifier notices it
        def _do_check(res):
            url = (self.webish_url +
                   "uri/%s" % urllib.quote(self.node.get_uri()) +
                   "?t=check&verify=true")
            return getPage(url, method="POST")
        d.addCallback(_do_check)
        def _got_results(out):
            # one of the ten shares is corrupt, so only nine remain valid
            self.failUnless("Not Healthy!" in out, out)
            self.failUnless("Unhealthy: best version has only 9 shares (encoding is 3-of-10)" in out, out)
            self.failUnless("Corrupt Shares:" in out, out)
        d.addCallback(_got_results)

        # now make sure the webapi repairer can fix it
        def _do_repair(res):
            url = (self.webish_url +
                   "uri/%s" % urllib.quote(self.node.get_uri()) +
                   "?t=check&verify=true&repair=true")
            return getPage(url, method="POST")
        d.addCallback(_do_repair)
        def _got_repair_results(out):
            self.failUnless("<div>Repair successful</div>" in out, out)
        d.addCallback(_got_repair_results)
        # re-run the verifier to confirm the repair restored all ten shares
        d.addCallback(_do_check)
        def _got_postrepair_results(out):
            self.failIf("Not Healthy!" in out, out)
            self.failUnless("Recoverable Versions: 10*seq" in out, out)
        d.addCallback(_got_postrepair_results)
        d.addErrback(self.explain_web_error)
        return d
def test_delete_share ( self ) :
self . basedir = self . mktemp ( )
d = self . set_up_nodes ( )
CONTENTS = " a little bit of data "
d . addCallback ( lambda res : self . clients [ 0 ] . create_mutable_file ( CONTENTS ) )
def _created ( node ) :
self . node = node
si = self . node . get_storage_index ( )
out = self . _run_cli ( [ " debug " , " find-shares " , base32 . b2a ( si ) ,
self . clients [ 1 ] . basedir ] )
files = out . split ( " \n " )
# corrupt one of them, using the CLI debug command
f = files [ 0 ]
shnum = os . path . basename ( f )
nodeid = self . clients [ 1 ] . nodeid
nodeid_prefix = idlib . shortnodeid_b2a ( nodeid )
self . corrupt_shareid = " %s -sh %s " % ( nodeid_prefix , shnum )
os . unlink ( files [ 0 ] )
d . addCallback ( _created )
# now make sure the webapi checker notices it
def _do_check ( res ) :
url = ( self . webish_url +
" uri/ %s " % urllib . quote ( self . node . get_uri ( ) ) +
" ?t=check&verify=false " )
return getPage ( url , method = " POST " )
d . addCallback ( _do_check )
def _got_results ( out ) :
self . failUnless ( " Not Healthy! " in out , out )
2008-09-07 19:44:56 +00:00
self . failUnless ( " Unhealthy: best version has only 9 shares (encoding is 3-of-10) " in out , out )
2008-08-26 23:34:54 +00:00
self . failIf ( " Corrupt Shares " in out , out )
2008-08-13 00:05:01 +00:00
d . addCallback ( _got_results )
2008-08-26 23:34:54 +00:00
# now make sure the webapi repairer can fix it
def _do_repair ( res ) :
url = ( self . webish_url +
" uri/ %s " % urllib . quote ( self . node . get_uri ( ) ) +
" ?t=check&verify=false&repair=true " )
return getPage ( url , method = " POST " )
d . addCallback ( _do_repair )
def _got_repair_results ( out ) :
2008-09-07 19:44:56 +00:00
self . failUnless ( " Repair successful " in out )
2008-08-26 23:34:54 +00:00
d . addCallback ( _got_repair_results )
d . addCallback ( _do_check )
def _got_postrepair_results ( out ) :
self . failIf ( " Not Healthy! " in out , out )
self . failUnless ( " Recoverable Versions: 10*seq " in out )
d . addCallback ( _got_postrepair_results )
2008-10-22 00:03:07 +00:00
d . addErrback ( self . explain_web_error )
2008-08-26 23:34:54 +00:00
2008-08-13 00:05:01 +00:00
return d
2008-11-07 05:35:47 +00:00
class DeepCheckBase(SystemTestMixin, ErrorMixin):
    """Shared webapi helpers for the deep-check tests: fetch check pages,
    decode JSON responses, and poll slow (ophandle-based) operations until
    they finish."""

    def web_json(self, n, **kwargs):
        """POST a webapi request for node n (forcing output=json) and fire
        with the decoded JSON data."""
        kwargs["output"] = "json"
        d = self.web(n, "POST", **kwargs)
        d.addCallback(self.decode_json)
        return d

    def decode_json(self, (s, url)):
        # note: python2 tuple-argument unpacking; receives the (data, url)
        # pair that self.web() fires with. 'url' is only used to make the
        # failure message more helpful.
        try:
            data = simplejson.loads(s)
        except ValueError:
            self.fail("%s: not JSON: '%s'" % (url, s))
        return data

    def web(self, n, method="GET", **kwargs):
        # returns (data, url)
        url = (self.webish_url + "uri/%s" % urllib.quote(n.get_uri())
               + "?" + "&".join(["%s=%s" % (k, v) for (k, v) in kwargs.items()]))
        d = getPage(url, method=method)
        d.addCallback(lambda data: (data, url))
        return d

    def wait_for_operation(self, ignored, ophandle):
        """Poll operations/<ophandle> once per second until the JSON status
        says finished=true, then fire with the status data."""
        url = self.webish_url + "operations/" + ophandle
        url += "?t=status&output=JSON"
        d = getPage(url)
        def _got(res):
            try:
                data = simplejson.loads(res)
            except ValueError:
                self.fail("%s: not JSON: '%s'" % (url, res))
            if not data["finished"]:
                # not done yet: stall a bit, then poll again (chained
                # deferred recursion)
                d = self.stall(delay=1.0)
                d.addCallback(self.wait_for_operation, ophandle)
                return d
            return data
        d.addCallback(_got)
        return d

    def get_operation_results(self, ignored, ophandle, output=None):
        """Fetch the final results of a finished operation; decode JSON if
        output="json" was requested, otherwise return the raw page."""
        url = self.webish_url + "operations/" + ophandle
        url += "?t=status"
        if output:
            url += "&output=" + output
        d = getPage(url)
        def _got(res):
            if output and output.lower() == "json":
                try:
                    return simplejson.loads(res)
                except ValueError:
                    self.fail("%s: not JSON: '%s'" % (url, res))
            return res
        d.addCallback(_got)
        return d

    def slow_web(self, n, output=None, **kwargs):
        """Start a slow webapi operation on node n with a random ophandle,
        wait for it to finish, and fire with its results."""
        # use ophandle=
        handle = base32.b2a(os.urandom(4))
        d = self.web(n, "POST", ophandle=handle, **kwargs)
        d.addCallback(self.wait_for_operation, handle)
        d.addCallback(self.get_operation_results, handle, output=output)
        return d
class DeepCheckWebGood ( DeepCheckBase , unittest . TestCase ) :
2008-09-09 23:34:49 +00:00
# construct a small directory tree (with one dir, one immutable file, one
2008-09-18 05:00:41 +00:00
# mutable file, one LIT file, and a loop), and then check/examine it in
# various ways.
2008-09-09 23:34:49 +00:00
    def set_up_tree(self, ignored):
        # 2.9s
        # Build this small tree of test objects (and remember each node and
        # its URI on self for the later checks):
        #  root            (dirnode)
        #   mutable        (mutable file)
        #   large          (13000-byte immutable file)
        #   small          (LIT file)
        #   small2         (LIT file)
        #   loop -> root   (directory cycle, to exercise traversal)
        c0 = self.clients[0]
        d = c0.create_empty_dirnode()
        def _created_root(n):
            self.root = n
            self.root_uri = n.get_uri()
        d.addCallback(_created_root)
        d.addCallback(lambda ign: c0.create_mutable_file("mutable file contents"))
        d.addCallback(lambda n: self.root.set_node(u"mutable", n))
        # NOTE(review): this relies on set_node()'s deferred firing with the
        # child node, so _created_mutable records the file node -- confirm
        # against the dirnode interface
        def _created_mutable(n):
            self.mutable = n
            self.mutable_uri = n.get_uri()
        d.addCallback(_created_mutable)

        large = upload.Data("Lots of data\n" * 1000, None)
        d.addCallback(lambda ign: self.root.add_file(u"large", large))
        def _created_large(n):
            self.large = n
            self.large_uri = n.get_uri()
        d.addCallback(_created_large)

        small = upload.Data("Small enough for a LIT", None)
        d.addCallback(lambda ign: self.root.add_file(u"small", small))
        def _created_small(n):
            self.small = n
            self.small_uri = n.get_uri()
        d.addCallback(_created_small)

        small2 = upload.Data("Small enough for a LIT too", None)
        d.addCallback(lambda ign: self.root.add_file(u"small2", small2))
        def _created_small2(n):
            self.small2 = n
            self.small2_uri = n.get_uri()
        d.addCallback(_created_small2)

        d.addCallback(lambda ign: self.root.set_node(u"loop", self.root))
        return d
2008-09-10 01:08:27 +00:00
    def check_is_healthy(self, cr, n, where, incomplete=False):
        """Assert that checker results 'cr' for node 'n' describe a fully
        healthy file: 10-of-10 shares present, nothing corrupt, exactly one
        recoverable version. 'where' tags the failure messages. With
        incomplete=True (used by the callers for verify=True on the large
        immutable file) the server-level fields are skipped."""
        self.failUnless(ICheckerResults.providedBy(cr), where)
        self.failUnless(cr.is_healthy(), where)
        self.failUnlessEqual(cr.get_storage_index(), n.get_storage_index(),
                             where)
        self.failUnlessEqual(cr.get_storage_index_string(),
                             base32.b2a(n.get_storage_index()), where)
        # with fewer than 10 servers, some server must hold multiple shares,
        # so rebalancing would help
        needs_rebalancing = bool(len(self.clients) < 10)
        if not incomplete:
            self.failUnlessEqual(cr.needs_rebalancing(), needs_rebalancing, where)
        d = cr.get_data()
        self.failUnlessEqual(d["count-shares-good"], 10, where)
        self.failUnlessEqual(d["count-shares-needed"], 3, where)
        self.failUnlessEqual(d["count-shares-expected"], 10, where)
        if not incomplete:
            self.failUnlessEqual(d["count-good-share-hosts"], len(self.clients), where)
        self.failUnlessEqual(d["count-corrupt-shares"], 0, where)
        self.failUnlessEqual(d["list-corrupt-shares"], [], where)
        if not incomplete:
            self.failUnlessEqual(sorted(d["servers-responding"]),
                                 sorted([c.nodeid for c in self.clients]),
                                 where)
        self.failUnless("sharemap" in d, where)
        # every client should appear somewhere in the sharemap
        all_serverids = set()
        for (shareid, serverids) in d["sharemap"].items():
            all_serverids.update(serverids)
        self.failUnlessEqual(sorted(all_serverids),
                             sorted([c.nodeid for c in self.clients]),
                             where)
        self.failUnlessEqual(d["count-wrong-shares"], 0, where)
        self.failUnlessEqual(d["count-recoverable-versions"], 1, where)
        self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where)
2008-09-10 00:15:46 +00:00
2008-09-09 23:34:49 +00:00
2008-09-10 01:08:27 +00:00
    def check_and_repair_is_healthy(self, cr, n, where, incomplete=False):
        """Assert that check-and-repair results for an already-healthy node
        show health both before and after, and that no repair was needed."""
        self.failUnless(ICheckAndRepairResults.providedBy(cr), where)
        self.failUnless(cr.get_pre_repair_results().is_healthy(), where)
        self.check_is_healthy(cr.get_pre_repair_results(), n, where, incomplete)
        self.failUnless(cr.get_post_repair_results().is_healthy(), where)
        self.check_is_healthy(cr.get_post_repair_results(), n, where, incomplete)
        # the file was healthy to begin with, so no repair should be attempted
        self.failIf(cr.get_repair_attempted(), where)
def deep_check_is_healthy ( self , cr , num_healthy , where ) :
self . failUnless ( IDeepCheckResults . providedBy ( cr ) )
self . failUnlessEqual ( cr . get_counters ( ) [ " count-objects-healthy " ] ,
num_healthy , where )
def deep_check_and_repair_is_healthy ( self , cr , num_healthy , where ) :
self . failUnless ( IDeepCheckAndRepairResults . providedBy ( cr ) , where )
c = cr . get_counters ( )
self . failUnlessEqual ( c [ " count-objects-healthy-pre-repair " ] ,
num_healthy , where )
self . failUnlessEqual ( c [ " count-objects-healthy-post-repair " ] ,
num_healthy , where )
self . failUnlessEqual ( c [ " count-repairs-attempted " ] , 0 , where )
def test_good ( self ) :
self . basedir = self . mktemp ( )
d = self . set_up_nodes ( )
d . addCallback ( self . set_up_tree )
2008-09-10 05:56:34 +00:00
d . addCallback ( self . do_stats )
2008-11-07 05:35:47 +00:00
d . addCallback ( self . do_test_check_good )
d . addCallback ( self . do_test_web_good )
2008-11-13 03:17:25 +00:00
d . addCallback ( self . do_test_cli_good )
2008-10-22 00:03:07 +00:00
d . addErrback ( self . explain_web_error )
2008-11-07 05:35:47 +00:00
d . addErrback ( self . explain_error )
2008-09-10 00:15:46 +00:00
return d
2008-09-09 23:34:49 +00:00
2008-09-10 05:56:34 +00:00
def do_stats ( self , ignored ) :
d = defer . succeed ( None )
2008-10-22 00:03:07 +00:00
d . addCallback ( lambda ign : self . root . start_deep_stats ( ) . when_done ( ) )
2008-11-07 05:35:47 +00:00
d . addCallback ( self . check_stats_good )
2008-09-10 05:56:34 +00:00
return d
2008-11-07 05:35:47 +00:00
    def check_stats_good(self, s):
        """Verify the deep-stats dict 's' against the known tree layout:
        1 directory and 4 files (1 immutable, 2 LIT, 1 mutable)."""
        self.failUnlessEqual(s["count-directories"], 1)
        self.failUnlessEqual(s["count-files"], 4)
        self.failUnlessEqual(s["count-immutable-files"], 1)
        self.failUnlessEqual(s["count-literal-files"], 2)
        self.failUnlessEqual(s["count-mutable-files"], 1)
        # don't check directories: their size will vary
        # s["largest-directory"]
        # s["size-directories"]
        self.failUnlessEqual(s["largest-directory-children"], 5)
        self.failUnlessEqual(s["largest-immutable-file"], 13000)
        # to re-use this function for both the local
        # dirnode.start_deep_stats() and the webapi t=start-deep-stats, we
        # coerce the result into a list of tuples. dirnode.start_deep_stats()
        # returns a list of tuples, but JSON only knows about lists, so
        # t=start-deep-stats returns a list of lists.
        histogram = [tuple(stuff) for stuff in s["size-files-histogram"]]
        # the two LIT files (22 and 26 bytes) land in the 11-31 bucket; the
        # 13000-byte immutable file lands in the 10001-31622 bucket
        self.failUnlessEqual(histogram, [(11, 31, 2),
                                         (10001, 31622, 1),
                                         ])
        self.failUnlessEqual(s["size-immutable-files"], 13000)
        # 22 + 26 bytes of LIT data
        self.failUnlessEqual(s["size-literal-files"], 48)
2008-09-10 05:56:34 +00:00
2008-11-07 05:35:47 +00:00
    def do_test_check_good(self, ignored):
        """Exercise check(), check_and_repair(), and the deep-check variants
        directly on the node objects, expecting everything to be healthy.
        LIT files (small, small2) have no shares, so their check() fires
        with None. Finally, start a deep-check and cancel it, expecting
        OperationCancelledError."""
        d = defer.succeed(None)
        # check the individual items
        d.addCallback(lambda ign: self.root.check(Monitor()))
        d.addCallback(self.check_is_healthy, self.root, "root")
        d.addCallback(lambda ign: self.mutable.check(Monitor()))
        d.addCallback(self.check_is_healthy, self.mutable, "mutable")
        d.addCallback(lambda ign: self.large.check(Monitor()))
        d.addCallback(self.check_is_healthy, self.large, "large")
        d.addCallback(lambda ign: self.small.check(Monitor()))
        d.addCallback(self.failUnlessEqual, None, "small")
        d.addCallback(lambda ign: self.small2.check(Monitor()))
        d.addCallback(self.failUnlessEqual, None, "small2")

        # and again with verify=True
        d.addCallback(lambda ign: self.root.check(Monitor(), verify=True))
        d.addCallback(self.check_is_healthy, self.root, "root")
        d.addCallback(lambda ign: self.mutable.check(Monitor(), verify=True))
        d.addCallback(self.check_is_healthy, self.mutable, "mutable")
        d.addCallback(lambda ign: self.large.check(Monitor(), verify=True))
        # verified immutable results omit some server-level fields, hence
        # incomplete=True
        d.addCallback(self.check_is_healthy, self.large, "large",
                      incomplete=True)
        d.addCallback(lambda ign: self.small.check(Monitor(), verify=True))
        d.addCallback(self.failUnlessEqual, None, "small")
        d.addCallback(lambda ign: self.small2.check(Monitor(), verify=True))
        d.addCallback(self.failUnlessEqual, None, "small2")

        # and check_and_repair(), which should be a nop
        d.addCallback(lambda ign: self.root.check_and_repair(Monitor()))
        d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
        d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor()))
        d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
        d.addCallback(lambda ign: self.large.check_and_repair(Monitor()))
        d.addCallback(self.check_and_repair_is_healthy, self.large, "large")
        d.addCallback(lambda ign: self.small.check_and_repair(Monitor()))
        d.addCallback(self.failUnlessEqual, None, "small")
        d.addCallback(lambda ign: self.small2.check_and_repair(Monitor()))
        d.addCallback(self.failUnlessEqual, None, "small2")

        # check_and_repair(verify=True)
        d.addCallback(lambda ign: self.root.check_and_repair(Monitor(), verify=True))
        d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
        d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor(), verify=True))
        d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
        d.addCallback(lambda ign: self.large.check_and_repair(Monitor(), verify=True))
        d.addCallback(self.check_and_repair_is_healthy, self.large, "large",
                      incomplete=True)
        d.addCallback(lambda ign: self.small.check_and_repair(Monitor(), verify=True))
        d.addCallback(self.failUnlessEqual, None, "small")
        d.addCallback(lambda ign: self.small2.check_and_repair(Monitor(), verify=True))
        d.addCallback(self.failUnlessEqual, None, "small2")

        # now deep-check the root, with various verify= and repair= options.
        # only 3 objects (root, mutable, large) are checkable, so each run
        # should report 3 healthy objects.
        d.addCallback(lambda ign:
                      self.root.start_deep_check().when_done())
        d.addCallback(self.deep_check_is_healthy, 3, "root")
        d.addCallback(lambda ign:
                      self.root.start_deep_check(verify=True).when_done())
        d.addCallback(self.deep_check_is_healthy, 3, "root")
        d.addCallback(lambda ign:
                      self.root.start_deep_check_and_repair().when_done())
        d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root")
        d.addCallback(lambda ign:
                      self.root.start_deep_check_and_repair(verify=True).when_done())
        d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root")

        # and finally, start a deep-check, but then cancel it.
        d.addCallback(lambda ign: self.root.start_deep_check())
        def _checking(monitor):
            monitor.cancel()
            d = monitor.when_done()
            # this should fire as soon as the next dirnode.list finishes.
            # TODO: add a counter to measure how many list() calls are made,
            # assert that no more than one gets to run before the cancel()
            # takes effect.
            def _finished_normally(res):
                self.fail("this was supposed to fail, not finish normally")
            def _cancelled(f):
                f.trap(OperationCancelledError)
            d.addCallbacks(_finished_normally, _cancelled)
            return d
        d.addCallback(_checking)

        return d
2008-09-10 02:45:17 +00:00
    def json_check_is_healthy(self, data, n, where, incomplete=False):
        """Like check_is_healthy(), but for the JSON-encoded webapi check
        results (node IDs are base32 strings here rather than binary)."""
        self.failUnlessEqual(data["storage-index"],
                             base32.b2a(n.get_storage_index()), where)
        self.failUnless("summary" in data, (where, data))
        self.failUnlessEqual(data["summary"].lower(), "healthy",
                             "%s: '%s'" % (where, data["summary"]))
        r = data["results"]
        self.failUnlessEqual(r["healthy"], True, where)
        # with fewer than 10 servers, some server holds multiple shares
        needs_rebalancing = bool(len(self.clients) < 10)
        if not incomplete:
            self.failUnlessEqual(r["needs-rebalancing"], needs_rebalancing, where)
        self.failUnlessEqual(r["count-shares-good"], 10, where)
        self.failUnlessEqual(r["count-shares-needed"], 3, where)
        self.failUnlessEqual(r["count-shares-expected"], 10, where)
        if not incomplete:
            self.failUnlessEqual(r["count-good-share-hosts"], len(self.clients), where)
        self.failUnlessEqual(r["count-corrupt-shares"], 0, where)
        self.failUnlessEqual(r["list-corrupt-shares"], [], where)
        if not incomplete:
            self.failUnlessEqual(sorted(r["servers-responding"]),
                                 sorted([idlib.nodeid_b2a(c.nodeid)
                                         for c in self.clients]), where)
        self.failUnless("sharemap" in r, where)
        # every client should appear somewhere in the sharemap
        all_serverids = set()
        for (shareid, serverids_s) in r["sharemap"].items():
            all_serverids.update(serverids_s)
        self.failUnlessEqual(sorted(all_serverids),
                             sorted([idlib.nodeid_b2a(c.nodeid)
                                     for c in self.clients]), where)
        self.failUnlessEqual(r["count-wrong-shares"], 0, where)
        self.failUnlessEqual(r["count-recoverable-versions"], 1, where)
        self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where)
def json_check_and_repair_is_healthy ( self , data , n , where , incomplete = False ) :
self . failUnlessEqual ( data [ " storage-index " ] ,
base32 . b2a ( n . get_storage_index ( ) ) , where )
self . failUnlessEqual ( data [ " repair-attempted " ] , False , where )
self . json_check_is_healthy ( data [ " pre-repair-results " ] ,
n , where , incomplete )
self . json_check_is_healthy ( data [ " post-repair-results " ] ,
n , where , incomplete )
2008-09-10 06:14:16 +00:00
def json_full_deepcheck_is_healthy ( self , data , n , where ) :
self . failUnlessEqual ( data [ " root-storage-index " ] ,
base32 . b2a ( n . get_storage_index ( ) ) , where )
self . failUnlessEqual ( data [ " count-objects-checked " ] , 3 , where )
self . failUnlessEqual ( data [ " count-objects-healthy " ] , 3 , where )
self . failUnlessEqual ( data [ " count-objects-unhealthy " ] , 0 , where )
self . failUnlessEqual ( data [ " count-corrupt-shares " ] , 0 , where )
self . failUnlessEqual ( data [ " list-corrupt-shares " ] , [ ] , where )
self . failUnlessEqual ( data [ " list-unhealthy-files " ] , [ ] , where )
2008-11-07 05:35:47 +00:00
self . json_check_stats_good ( data [ " stats " ] , where )
2008-09-10 06:14:16 +00:00
def json_full_deepcheck_and_repair_is_healthy ( self , data , n , where ) :
self . failUnlessEqual ( data [ " root-storage-index " ] ,
base32 . b2a ( n . get_storage_index ( ) ) , where )
self . failUnlessEqual ( data [ " count-objects-checked " ] , 3 , where )
self . failUnlessEqual ( data [ " count-objects-healthy-pre-repair " ] , 3 , where )
self . failUnlessEqual ( data [ " count-objects-unhealthy-pre-repair " ] , 0 , where )
self . failUnlessEqual ( data [ " count-corrupt-shares-pre-repair " ] , 0 , where )
self . failUnlessEqual ( data [ " count-objects-healthy-post-repair " ] , 3 , where )
self . failUnlessEqual ( data [ " count-objects-unhealthy-post-repair " ] , 0 , where )
self . failUnlessEqual ( data [ " count-corrupt-shares-post-repair " ] , 0 , where )
self . failUnlessEqual ( data [ " list-corrupt-shares " ] , [ ] , where )
self . failUnlessEqual ( data [ " list-remaining-corrupt-shares " ] , [ ] , where )
self . failUnlessEqual ( data [ " list-unhealthy-files " ] , [ ] , where )
self . failUnlessEqual ( data [ " count-repairs-attempted " ] , 0 , where )
self . failUnlessEqual ( data [ " count-repairs-successful " ] , 0 , where )
self . failUnlessEqual ( data [ " count-repairs-unsuccessful " ] , 0 , where )
2008-09-10 02:45:17 +00:00
def json_check_lit ( self , data , n , where ) :
self . failUnlessEqual ( data [ " storage-index " ] , " " , where )
self . failUnlessEqual ( data [ " results " ] [ " healthy " ] , True , where )
2008-11-07 05:35:47 +00:00
def json_check_stats_good ( self , data , where ) :
self . check_stats_good ( data )
2008-09-10 06:54:57 +00:00
2008-11-07 05:35:47 +00:00
    def do_test_web_good(self, ignored):
        """Repeat the good-path checks through the webapi: deep-stats,
        t=check with every combination of verify/repair (LIT files get the
        lightweight json_check_lit treatment), the slow ophandle-based
        t=start-deep-check variants, and finally t=info."""
        d = defer.succeed(None)

        # stats
        d.addCallback(lambda ign:
                      self.slow_web(self.root,
                                    t="start-deep-stats", output="json"))
        d.addCallback(self.json_check_stats_good, "deep-stats")

        # check, no verify
        d.addCallback(lambda ign: self.web_json(self.root, t="check"))
        d.addCallback(self.json_check_is_healthy, self.root, "root")
        d.addCallback(lambda ign: self.web_json(self.mutable, t="check"))
        d.addCallback(self.json_check_is_healthy, self.mutable, "mutable")
        d.addCallback(lambda ign: self.web_json(self.large, t="check"))
        d.addCallback(self.json_check_is_healthy, self.large, "large")
        d.addCallback(lambda ign: self.web_json(self.small, t="check"))
        d.addCallback(self.json_check_lit, self.small, "small")
        d.addCallback(lambda ign: self.web_json(self.small2, t="check"))
        d.addCallback(self.json_check_lit, self.small2, "small2")

        # check and verify
        d.addCallback(lambda ign:
                      self.web_json(self.root, t="check", verify="true"))
        d.addCallback(self.json_check_is_healthy, self.root, "root+v")
        d.addCallback(lambda ign:
                      self.web_json(self.mutable, t="check", verify="true"))
        d.addCallback(self.json_check_is_healthy, self.mutable, "mutable+v")
        d.addCallback(lambda ign:
                      self.web_json(self.large, t="check", verify="true"))
        # verified immutable results omit some server-level fields
        d.addCallback(self.json_check_is_healthy, self.large, "large+v",
                      incomplete=True)
        d.addCallback(lambda ign:
                      self.web_json(self.small, t="check", verify="true"))
        d.addCallback(self.json_check_lit, self.small, "small+v")
        d.addCallback(lambda ign:
                      self.web_json(self.small2, t="check", verify="true"))
        d.addCallback(self.json_check_lit, self.small2, "small2+v")

        # check and repair, no verify
        d.addCallback(lambda ign:
                      self.web_json(self.root, t="check", repair="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+r")
        d.addCallback(lambda ign:
                      self.web_json(self.mutable, t="check", repair="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+r")
        d.addCallback(lambda ign:
                      self.web_json(self.large, t="check", repair="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+r")
        d.addCallback(lambda ign:
                      self.web_json(self.small, t="check", repair="true"))
        d.addCallback(self.json_check_lit, self.small, "small+r")
        d.addCallback(lambda ign:
                      self.web_json(self.small2, t="check", repair="true"))
        d.addCallback(self.json_check_lit, self.small2, "small2+r")

        # check+verify+repair
        d.addCallback(lambda ign:
                      self.web_json(self.root, t="check", repair="true", verify="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+vr")
        d.addCallback(lambda ign:
                      self.web_json(self.mutable, t="check", repair="true", verify="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+vr")
        d.addCallback(lambda ign:
                      self.web_json(self.large, t="check", repair="true", verify="true"))
        d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+vr", incomplete=True)
        d.addCallback(lambda ign:
                      self.web_json(self.small, t="check", repair="true", verify="true"))
        d.addCallback(self.json_check_lit, self.small, "small+vr")
        d.addCallback(lambda ign:
                      self.web_json(self.small2, t="check", repair="true", verify="true"))
        d.addCallback(self.json_check_lit, self.small2, "small2+vr")

        # now run a deep-check, with various verify= and repair= flags
        d.addCallback(lambda ign:
                      self.slow_web(self.root, t="start-deep-check", output="json"))
        d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+d")
        d.addCallback(lambda ign:
                      self.slow_web(self.root, t="start-deep-check", verify="true",
                                    output="json"))
        d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+dv")
        d.addCallback(lambda ign:
                      self.slow_web(self.root, t="start-deep-check", repair="true",
                                    output="json"))
        d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dr")
        d.addCallback(lambda ign:
                      self.slow_web(self.root, t="start-deep-check", verify="true", repair="true", output="json"))
        d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dvr")

        # now look at t=info
        d.addCallback(lambda ign: self.web(self.root, t="info"))
        # TODO: examine the output
        d.addCallback(lambda ign: self.web(self.mutable, t="info"))
        d.addCallback(lambda ign: self.web(self.large, t="info"))
        d.addCallback(lambda ign: self.web(self.small, t="info"))
        d.addCallback(lambda ign: self.web(self.small2, t="info"))

        return d
2008-11-07 05:35:47 +00:00
2008-11-13 03:17:25 +00:00
def _run_cli ( self , argv , stdin = " " ) :
#print "CLI:", argv
stdout , stderr = StringIO ( ) , StringIO ( )
d = threads . deferToThread ( runner . runner , argv , run_by_human = False ,
stdin = StringIO ( stdin ) ,
stdout = stdout , stderr = stderr )
def _done ( res ) :
return stdout . getvalue ( ) , stderr . getvalue ( )
d . addCallback ( _done )
return d
def do_test_cli_good ( self , ignored ) :
2008-11-13 22:27:48 +00:00
basedir = self . getdir ( " client0 " )
d = self . _run_cli ( [ " manifest " ,
" --node-directory " , basedir ,
self . root_uri ] )
2008-11-13 03:17:25 +00:00
def _check ( ( out , err ) ) :
2008-11-19 23:00:27 +00:00
self . failUnlessEqual ( err , " " )
2008-11-13 03:17:25 +00:00
lines = [ l for l in out . split ( " \n " ) if l ]
2008-11-15 05:50:49 +00:00
self . failUnlessEqual ( len ( lines ) , 5 )
2008-11-13 03:17:25 +00:00
caps = { }
for l in lines :
try :
cap , path = l . split ( None , 1 )
except ValueError :
cap = l . strip ( )
path = " "
caps [ cap ] = path
self . failUnless ( self . root . get_uri ( ) in caps )
self . failUnlessEqual ( caps [ self . root . get_uri ( ) ] , " " )
self . failUnlessEqual ( caps [ self . mutable . get_uri ( ) ] , " mutable " )
self . failUnlessEqual ( caps [ self . large . get_uri ( ) ] , " large " )
self . failUnlessEqual ( caps [ self . small . get_uri ( ) ] , " small " )
2008-11-15 05:50:49 +00:00
self . failUnlessEqual ( caps [ self . small2 . get_uri ( ) ] , " small2 " )
2008-11-13 03:17:25 +00:00
d . addCallback ( _check )
d . addCallback ( lambda res :
2008-11-13 22:27:48 +00:00
self . _run_cli ( [ " manifest " ,
" --node-directory " , basedir ,
2008-11-13 03:17:25 +00:00
" --storage-index " , self . root_uri ] ) )
def _check2 ( ( out , err ) ) :
2008-11-24 21:40:46 +00:00
self . failUnlessEqual ( err , " " )
2008-11-13 03:17:25 +00:00
lines = [ l for l in out . split ( " \n " ) if l ]
self . failUnlessEqual ( len ( lines ) , 3 )
self . failUnless ( base32 . b2a ( self . root . get_storage_index ( ) ) in lines )
self . failUnless ( base32 . b2a ( self . mutable . get_storage_index ( ) ) in lines )
self . failUnless ( base32 . b2a ( self . large . get_storage_index ( ) ) in lines )
d . addCallback ( _check2 )
2008-11-14 02:43:50 +00:00
2008-11-24 21:40:46 +00:00
d . addCallback ( lambda res :
self . _run_cli ( [ " manifest " ,
" --node-directory " , basedir ,
" --raw " , self . root_uri ] ) )
def _check2r ( ( out , err ) ) :
self . failUnlessEqual ( err , " " )
data = simplejson . loads ( out )
sis = data [ " storage-index " ]
self . failUnlessEqual ( len ( sis ) , 3 )
self . failUnless ( base32 . b2a ( self . root . get_storage_index ( ) ) in sis )
self . failUnless ( base32 . b2a ( self . mutable . get_storage_index ( ) ) in sis )
self . failUnless ( base32 . b2a ( self . large . get_storage_index ( ) ) in sis )
self . failUnlessEqual ( data [ " stats " ] [ " count-files " ] , 4 )
self . failUnlessEqual ( data [ " origin " ] ,
base32 . b2a ( self . root . get_storage_index ( ) ) )
verifycaps = data [ " verifycaps " ]
self . failUnlessEqual ( len ( verifycaps ) , 3 )
self . failUnless ( self . root . get_verifier ( ) . to_string ( ) in verifycaps )
self . failUnless ( self . mutable . get_verifier ( ) . to_string ( ) in verifycaps )
self . failUnless ( self . large . get_verifier ( ) . to_string ( ) in verifycaps )
d . addCallback ( _check2r )
2008-11-14 02:43:50 +00:00
d . addCallback ( lambda res :
self . _run_cli ( [ " stats " ,
" --node-directory " , basedir ,
self . root_uri ] ) )
def _check3 ( ( out , err ) ) :
lines = [ l . strip ( ) for l in out . split ( " \n " ) if l ]
self . failUnless ( " count-immutable-files: 1 " in lines )
self . failUnless ( " count-mutable-files: 1 " in lines )
2008-11-15 05:50:49 +00:00
self . failUnless ( " count-literal-files: 2 " in lines )
self . failUnless ( " count-files: 4 " in lines )
2008-11-14 02:43:50 +00:00
self . failUnless ( " count-directories: 1 " in lines )
2008-11-19 03:32:59 +00:00
self . failUnless ( " size-immutable-files: 13000 (13.00 kB, 12.70 kiB) " in lines , lines )
2008-11-15 05:50:49 +00:00
self . failUnless ( " size-literal-files: 48 " in lines )
2008-11-19 03:32:59 +00:00
self . failUnless ( " 11-31 : 2 (31 B, 31 B) " . strip ( ) in lines )
self . failUnless ( " 10001-31622 : 1 (31.62 kB, 30.88 kiB) " . strip ( ) in lines )
2008-11-14 02:43:50 +00:00
d . addCallback ( _check3 )
2008-11-18 05:11:14 +00:00
d . addCallback ( lambda res :
self . _run_cli ( [ " stats " ,
" --node-directory " , basedir ,
2008-11-19 01:36:08 +00:00
" --raw " ,
2008-11-18 05:11:14 +00:00
self . root_uri ] ) )
def _check4 ( ( out , err ) ) :
data = simplejson . loads ( out )
self . failUnlessEqual ( data [ " count-immutable-files " ] , 1 )
self . failUnlessEqual ( data [ " count-immutable-files " ] , 1 )
self . failUnlessEqual ( data [ " count-mutable-files " ] , 1 )
self . failUnlessEqual ( data [ " count-literal-files " ] , 2 )
self . failUnlessEqual ( data [ " count-files " ] , 4 )
self . failUnlessEqual ( data [ " count-directories " ] , 1 )
self . failUnlessEqual ( data [ " size-immutable-files " ] , 13000 )
self . failUnlessEqual ( data [ " size-literal-files " ] , 48 )
self . failUnless ( [ 11 , 31 , 2 ] in data [ " size-files-histogram " ] )
self . failUnless ( [ 10001 , 31622 , 1 ] in data [ " size-files-histogram " ] )
d . addCallback ( _check4 )
2008-11-13 03:17:25 +00:00
return d
2008-11-07 05:35:47 +00:00
class DeepCheckWebBad ( DeepCheckBase , unittest . TestCase ) :
def test_bad ( self ) :
self . basedir = self . mktemp ( )
d = self . set_up_nodes ( )
d . addCallback ( self . set_up_damaged_tree )
d . addCallback ( self . do_test_check_bad )
d . addCallback ( self . do_test_deepcheck_bad )
d . addCallback ( self . do_test_web_bad )
d . addErrback ( self . explain_web_error )
d . addErrback ( self . explain_error )
return d
def set_up_damaged_tree ( self , ignored ) :
# 6.4s
# root
# mutable-good
# mutable-missing-shares
# mutable-corrupt-shares
# mutable-unrecoverable
# large-good
# large-missing-shares
# large-corrupt-shares
# large-unrecoverable
self . nodes = { }
c0 = self . clients [ 0 ]
d = c0 . create_empty_dirnode ( )
def _created_root ( n ) :
self . root = n
self . root_uri = n . get_uri ( )
d . addCallback ( _created_root )
d . addCallback ( self . create_mangled , " mutable-good " )
d . addCallback ( self . create_mangled , " mutable-missing-shares " )
d . addCallback ( self . create_mangled , " mutable-corrupt-shares " )
d . addCallback ( self . create_mangled , " mutable-unrecoverable " )
d . addCallback ( self . create_mangled , " large-good " )
d . addCallback ( self . create_mangled , " large-missing-shares " )
d . addCallback ( self . create_mangled , " large-corrupt-shares " )
d . addCallback ( self . create_mangled , " large-unrecoverable " )
return d
def create_mangled ( self , ignored , name ) :
nodetype , mangletype = name . split ( " - " , 1 )
if nodetype == " mutable " :
d = self . clients [ 0 ] . create_mutable_file ( " mutable file contents " )
d . addCallback ( lambda n : self . root . set_node ( unicode ( name ) , n ) )
elif nodetype == " large " :
large = upload . Data ( " Lots of data \n " * 1000 + name + " \n " , None )
d = self . root . add_file ( unicode ( name ) , large )
elif nodetype == " small " :
small = upload . Data ( " Small enough for a LIT " , None )
d = self . root . add_file ( unicode ( name ) , small )
def _stash_node ( node ) :
self . nodes [ name ] = node
return node
d . addCallback ( _stash_node )
if mangletype == " good " :
pass
elif mangletype == " missing-shares " :
d . addCallback ( self . _delete_some_shares )
elif mangletype == " corrupt-shares " :
d . addCallback ( self . _corrupt_some_shares )
else :
assert mangletype == " unrecoverable "
d . addCallback ( self . _delete_most_shares )
return d
def _run_cli ( self , argv ) :
stdout , stderr = StringIO ( ) , StringIO ( )
2008-11-13 22:27:48 +00:00
# this can only do synchronous operations
assert argv [ 0 ] == " debug "
2008-11-07 05:35:47 +00:00
runner . runner ( argv , run_by_human = False , stdout = stdout , stderr = stderr )
return stdout . getvalue ( )
def _find_shares ( self , node ) :
si = node . get_storage_index ( )
out = self . _run_cli ( [ " debug " , " find-shares " , base32 . b2a ( si ) ] +
[ c . basedir for c in self . clients ] )
files = out . split ( " \n " )
return [ f for f in files if f ]
def _delete_some_shares ( self , node ) :
shares = self . _find_shares ( node )
os . unlink ( shares [ 0 ] )
os . unlink ( shares [ 1 ] )
def _corrupt_some_shares ( self , node ) :
shares = self . _find_shares ( node )
self . _run_cli ( [ " debug " , " corrupt-share " , shares [ 0 ] ] )
self . _run_cli ( [ " debug " , " corrupt-share " , shares [ 1 ] ] )
def _delete_most_shares ( self , node ) :
shares = self . _find_shares ( node )
for share in shares [ 1 : ] :
os . unlink ( share )
def check_is_healthy ( self , cr , where ) :
self . failUnless ( ICheckerResults . providedBy ( cr ) , where )
self . failUnless ( cr . is_healthy ( ) , where )
self . failUnless ( cr . is_recoverable ( ) , where )
d = cr . get_data ( )
self . failUnlessEqual ( d [ " count-recoverable-versions " ] , 1 , where )
self . failUnlessEqual ( d [ " count-unrecoverable-versions " ] , 0 , where )
return cr
def check_is_missing_shares ( self , cr , where ) :
self . failUnless ( ICheckerResults . providedBy ( cr ) , where )
self . failIf ( cr . is_healthy ( ) , where )
self . failUnless ( cr . is_recoverable ( ) , where )
d = cr . get_data ( )
self . failUnlessEqual ( d [ " count-recoverable-versions " ] , 1 , where )
self . failUnlessEqual ( d [ " count-unrecoverable-versions " ] , 0 , where )
return cr
def check_has_corrupt_shares ( self , cr , where ) :
# by "corrupt-shares" we mean the file is still recoverable
self . failUnless ( ICheckerResults . providedBy ( cr ) , where )
d = cr . get_data ( )
self . failIf ( cr . is_healthy ( ) , where )
self . failUnless ( cr . is_recoverable ( ) , where )
d = cr . get_data ( )
self . failUnless ( d [ " count-shares-good " ] < 10 , where )
self . failUnless ( d [ " count-corrupt-shares " ] , where )
self . failUnless ( d [ " list-corrupt-shares " ] , where )
return cr
def check_is_unrecoverable ( self , cr , where ) :
self . failUnless ( ICheckerResults . providedBy ( cr ) , where )
d = cr . get_data ( )
self . failIf ( cr . is_healthy ( ) , where )
self . failIf ( cr . is_recoverable ( ) , where )
self . failUnless ( d [ " count-shares-good " ] < d [ " count-shares-needed " ] ,
where )
self . failUnlessEqual ( d [ " count-recoverable-versions " ] , 0 , where )
self . failUnlessEqual ( d [ " count-unrecoverable-versions " ] , 1 , where )
return cr
def do_test_check_bad ( self , ignored ) :
d = defer . succeed ( None )
# check the individual items, without verification. This will not
# detect corrupt shares.
def _check ( which , checker ) :
d = self . nodes [ which ] . check ( Monitor ( ) )
d . addCallback ( checker , which + " --check " )
return d
d . addCallback ( lambda ign : _check ( " mutable-good " , self . check_is_healthy ) )
d . addCallback ( lambda ign : _check ( " mutable-missing-shares " ,
self . check_is_missing_shares ) )
d . addCallback ( lambda ign : _check ( " mutable-corrupt-shares " ,
self . check_is_healthy ) )
d . addCallback ( lambda ign : _check ( " mutable-unrecoverable " ,
self . check_is_unrecoverable ) )
d . addCallback ( lambda ign : _check ( " large-good " , self . check_is_healthy ) )
d . addCallback ( lambda ign : _check ( " large-missing-shares " ,
self . check_is_missing_shares ) )
d . addCallback ( lambda ign : _check ( " large-corrupt-shares " ,
self . check_is_healthy ) )
d . addCallback ( lambda ign : _check ( " large-unrecoverable " ,
self . check_is_unrecoverable ) )
# and again with verify=True, which *does* detect corrupt shares.
def _checkv ( which , checker ) :
d = self . nodes [ which ] . check ( Monitor ( ) , verify = True )
d . addCallback ( checker , which + " --check-and-verify " )
return d
d . addCallback ( lambda ign : _checkv ( " mutable-good " , self . check_is_healthy ) )
d . addCallback ( lambda ign : _checkv ( " mutable-missing-shares " ,
self . check_is_missing_shares ) )
d . addCallback ( lambda ign : _checkv ( " mutable-corrupt-shares " ,
self . check_has_corrupt_shares ) )
d . addCallback ( lambda ign : _checkv ( " mutable-unrecoverable " ,
self . check_is_unrecoverable ) )
d . addCallback ( lambda ign : _checkv ( " large-good " , self . check_is_healthy ) )
# disabled pending immutable verifier
#d.addCallback(lambda ign: _checkv("large-missing-shares",
# self.check_is_missing_shares))
#d.addCallback(lambda ign: _checkv("large-corrupt-shares",
# self.check_has_corrupt_shares))
d . addCallback ( lambda ign : _checkv ( " large-unrecoverable " ,
self . check_is_unrecoverable ) )
return d
def do_test_deepcheck_bad ( self , ignored ) :
d = defer . succeed ( None )
# now deep-check the root, with various verify= and repair= options
d . addCallback ( lambda ign :
self . root . start_deep_check ( ) . when_done ( ) )
def _check1 ( cr ) :
self . failUnless ( IDeepCheckResults . providedBy ( cr ) )
c = cr . get_counters ( )
self . failUnlessEqual ( c [ " count-objects-checked " ] , 9 )
self . failUnlessEqual ( c [ " count-objects-healthy " ] , 5 )
self . failUnlessEqual ( c [ " count-objects-unhealthy " ] , 4 )
self . failUnlessEqual ( c [ " count-objects-unrecoverable " ] , 2 )
d . addCallback ( _check1 )
d . addCallback ( lambda ign :
self . root . start_deep_check ( verify = True ) . when_done ( ) )
def _check2 ( cr ) :
self . failUnless ( IDeepCheckResults . providedBy ( cr ) )
c = cr . get_counters ( )
self . failUnlessEqual ( c [ " count-objects-checked " ] , 9 )
# until we have a real immutable verifier, these counts will be
# off
#self.failUnlessEqual(c["count-objects-healthy"], 3)
#self.failUnlessEqual(c["count-objects-unhealthy"], 6)
self . failUnlessEqual ( c [ " count-objects-healthy " ] , 5 ) # todo
self . failUnlessEqual ( c [ " count-objects-unhealthy " ] , 4 )
self . failUnlessEqual ( c [ " count-objects-unrecoverable " ] , 2 )
d . addCallback ( _check2 )
return d
def json_is_healthy ( self , data , where ) :
r = data [ " results " ]
self . failUnless ( r [ " healthy " ] , where )
self . failUnless ( r [ " recoverable " ] , where )
self . failUnlessEqual ( r [ " count-recoverable-versions " ] , 1 , where )
self . failUnlessEqual ( r [ " count-unrecoverable-versions " ] , 0 , where )
def json_is_missing_shares ( self , data , where ) :
r = data [ " results " ]
self . failIf ( r [ " healthy " ] , where )
self . failUnless ( r [ " recoverable " ] , where )
self . failUnlessEqual ( r [ " count-recoverable-versions " ] , 1 , where )
self . failUnlessEqual ( r [ " count-unrecoverable-versions " ] , 0 , where )
def json_has_corrupt_shares ( self , data , where ) :
# by "corrupt-shares" we mean the file is still recoverable
r = data [ " results " ]
self . failIf ( r [ " healthy " ] , where )
self . failUnless ( r [ " recoverable " ] , where )
self . failUnless ( r [ " count-shares-good " ] < 10 , where )
self . failUnless ( r [ " count-corrupt-shares " ] , where )
self . failUnless ( r [ " list-corrupt-shares " ] , where )
def json_is_unrecoverable ( self , data , where ) :
r = data [ " results " ]
self . failIf ( r [ " healthy " ] , where )
self . failIf ( r [ " recoverable " ] , where )
self . failUnless ( r [ " count-shares-good " ] < r [ " count-shares-needed " ] ,
where )
self . failUnlessEqual ( r [ " count-recoverable-versions " ] , 0 , where )
self . failUnlessEqual ( r [ " count-unrecoverable-versions " ] , 1 , where )
def do_test_web_bad ( self , ignored ) :
d = defer . succeed ( None )
# check, no verify
def _check ( which , checker ) :
d = self . web_json ( self . nodes [ which ] , t = " check " )
d . addCallback ( checker , which + " --webcheck " )
return d
d . addCallback ( lambda ign : _check ( " mutable-good " ,
self . json_is_healthy ) )
d . addCallback ( lambda ign : _check ( " mutable-missing-shares " ,
self . json_is_missing_shares ) )
d . addCallback ( lambda ign : _check ( " mutable-corrupt-shares " ,
self . json_is_healthy ) )
d . addCallback ( lambda ign : _check ( " mutable-unrecoverable " ,
self . json_is_unrecoverable ) )
d . addCallback ( lambda ign : _check ( " large-good " ,
self . json_is_healthy ) )
d . addCallback ( lambda ign : _check ( " large-missing-shares " ,
self . json_is_missing_shares ) )
d . addCallback ( lambda ign : _check ( " large-corrupt-shares " ,
self . json_is_healthy ) )
d . addCallback ( lambda ign : _check ( " large-unrecoverable " ,
self . json_is_unrecoverable ) )
# check and verify
def _checkv ( which , checker ) :
d = self . web_json ( self . nodes [ which ] , t = " check " , verify = " true " )
d . addCallback ( checker , which + " --webcheck-and-verify " )
return d
d . addCallback ( lambda ign : _checkv ( " mutable-good " ,
self . json_is_healthy ) )
d . addCallback ( lambda ign : _checkv ( " mutable-missing-shares " ,
self . json_is_missing_shares ) )
d . addCallback ( lambda ign : _checkv ( " mutable-corrupt-shares " ,
self . json_has_corrupt_shares ) )
d . addCallback ( lambda ign : _checkv ( " mutable-unrecoverable " ,
self . json_is_unrecoverable ) )
d . addCallback ( lambda ign : _checkv ( " large-good " ,
self . json_is_healthy ) )
# disabled pending immutable verifier
#d.addCallback(lambda ign: _checkv("large-missing-shares",
# self.json_is_missing_shares))
#d.addCallback(lambda ign: _checkv("large-corrupt-shares",
# self.json_has_corrupt_shares))
d . addCallback ( lambda ign : _checkv ( " large-unrecoverable " ,
self . json_is_unrecoverable ) )
return d