2008-11-13 12:19:51 +00:00
# coding=utf-8
2007-10-11 07:30:36 +00:00
2008-08-02 02:10:41 +00:00
import os . path
2007-10-11 07:30:36 +00:00
from twisted . trial import unittest
2008-01-14 20:43:25 +00:00
from cStringIO import StringIO
2008-01-14 21:12:27 +00:00
import urllib
2009-02-06 05:07:01 +00:00
import re
2009-02-18 00:15:11 +00:00
import simplejson
2007-10-11 07:30:36 +00:00
2009-02-18 00:15:11 +00:00
from allmydata . util import fileutil , hashutil , base32
2007-10-11 07:30:36 +00:00
from allmydata import uri
2009-02-18 00:15:11 +00:00
from allmydata . immutable import upload
2007-10-11 07:30:36 +00:00
1970-01-05 11:10:55 +00:00
# Test that the scripts can be imported -- although the actual tests of their functionality are
# done by invoking them in a subprocess.
1970-01-05 11:00:58 +00:00
from allmydata . scripts import tahoe_ls , tahoe_get , tahoe_put , tahoe_rm , tahoe_cp
_hush_pyflakes = [ tahoe_ls , tahoe_get , tahoe_put , tahoe_rm , tahoe_cp ]
2007-10-11 07:30:36 +00:00
2009-02-25 00:56:20 +00:00
from allmydata . scripts import common
from allmydata . scripts . common import DEFAULT_ALIAS , get_aliases , get_alias , \
DefaultAliasMarker
1970-01-05 11:10:55 +00:00
2009-02-06 05:07:01 +00:00
from allmydata . scripts import cli , debug , runner , backupdb
2009-02-11 03:37:09 +00:00
from allmydata . test . common_util import StallMixin
2009-02-17 00:20:05 +00:00
from allmydata . test . no_network import GridTestMixin
2008-08-01 22:10:09 +00:00
from twisted . internet import threads # CLI tests use deferToThread
2009-02-16 04:04:51 +00:00
from twisted . python import usage
2007-10-11 07:30:36 +00:00
2009-06-09 21:05:09 +00:00
# Trial-level per-test timeout (seconds); generous because the deep-check
# based tests below are very slow on weak hardware.
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
2010-02-06 01:38:55 +00:00
2007-10-11 07:30:36 +00:00
class CLI(unittest.TestCase):
    """Unit tests for CLI argument processing and the 'tahoe debug' helpers.

    This test case only looks at argument-processing and simple stuff;
    the actual CLI commands are exercised elsewhere against a live grid.
    """

    def test_options(self):
        """ListOptions should read node.url / root_dir.cap from the node
        directory, and let --node-url / --dir-cap override them."""
        fileutil.rm_dir("cli/test_options")
        fileutil.make_dirs("cli/test_options")
        fileutil.make_dirs("cli/test_options/private")
        fileutil.write("cli/test_options/node.url", "http://localhost:8080/\n")
        filenode_uri = uri.WriteableSSKFileURI(writekey="\x00"*16,
                                               fingerprint="\x00"*32)
        private_uri = uri.DirectoryURI(filenode_uri).to_string()
        fileutil.write("cli/test_options/private/root_dir.cap",
                       private_uri + "\n")

        # defaults come from the node directory
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # --node-url overrides node.url
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--node-url", "http://example.org:8111/"])
        self.failUnlessEqual(o['node-url'], "http://example.org:8111/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # --dir-cap overrides root_dir.cap
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", "root"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], "root")
        self.failUnlessEqual(o.where, "")

        o = cli.ListOptions()
        other_filenode_uri = uri.WriteableSSKFileURI(writekey="\x11"*16,
                                                     fingerprint="\x11"*32)
        other_uri = uri.DirectoryURI(other_filenode_uri).to_string()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "")

        # a trailing positional argument becomes the remote path
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri, "subdir"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "subdir")

        # node-url validation: reject non-URLs, normalize missing trailing /
        o = cli.ListOptions()
        self.failUnlessRaises(usage.UsageError,
                              o.parseOptions,
                              ["--node-directory", "cli/test_options",
                               "--node-url", "NOT-A-URL"])
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--node-url", "http://localhost:8080"])
        self.failUnlessEqual(o["node-url"], "http://localhost:8080/")

    def _dump_cap(self, *args):
        """Run 'tahoe debug dump-cap' with the given args and return its
        stdout; fails the test if anything was written to stderr."""
        config = debug.DumpCapOptions()
        config.stdout, config.stderr = StringIO(), StringIO()
        config.parseOptions(args)
        debug.dump_cap(config)
        self.failIf(config.stderr.getvalue())
        output = config.stdout.getvalue()
        return output

    def test_dump_cap_chk(self):
        """dump-cap output for CHK file caps (read, verify, URL-embedded)."""
        key = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
        uri_extension_hash = hashutil.uri_extension_hash("stuff")
        needed_shares = 25
        total_shares = 100
        size = 1234
        u = uri.CHKFileURI(key=key,
                           uri_extension_hash=uri_extension_hash,
                           needed_shares=needed_shares,
                           total_shares=total_shares,
                           size=size)
        output = self._dump_cap(u.to_string())
        self.failUnless("CHK File:" in output, output)
        self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

        # with a client secret, derived renewal secrets are shown too
        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
                                u.to_string())
        self.failUnless("client renewal secret: znxmki5zdibb5qlt46xbdvk2t55j7hibejq3i5ijyurkr6m6jkhq" in output, output)

        # the verify cap must not reveal the key
        output = self._dump_cap(u.get_verify_cap().to_string())
        self.failIf("key: " in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

        # caps embedded in a webapi URL are extracted and dumped normally
        prefixed_u = "http://127.0.0.1/uri/%s" % urllib.quote(u.to_string())
        output = self._dump_cap(prefixed_u)
        self.failUnless("CHK File:" in output, output)
        self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

    def test_dump_cap_lit(self):
        """dump-cap on a literal cap shows the embedded data."""
        u = uri.LiteralFileURI("this is some data")
        output = self._dump_cap(u.to_string())
        self.failUnless("Literal File URI:" in output, output)
        self.failUnless("data: this is some data" in output, output)

    def test_dump_cap_ssk(self):
        """dump-cap output for mutable (SSK) write/read/verify caps."""
        writekey = "\x01"*16
        fingerprint = "\xfe"*32
        u = uri.WriteableSSKFileURI(writekey, fingerprint)
        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Writeable URI:" in output, output)
        self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
                                u.to_string())
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        # the client secret can also be read from <client-dir>/private/secret
        fileutil.make_dirs("cli/test_dump_cap/private")
        fileutil.write("cli/test_dump_cap/private/secret", "5s33nk3qpvnj2fw3z4mnm2y6fa\n")
        output = self._dump_cap("--client-dir", "cli/test_dump_cap",
                                u.to_string())
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        # a bogus client-dir yields no renewal secret, but must not fail
        output = self._dump_cap("--client-dir", "cli/test_dump_cap_BOGUS",
                                u.to_string())
        self.failIf("file renewal secret:" in output, output)

        # nodeid alone gives the write-enabler but no renewal secret
        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                u.to_string())
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failIf("file renewal secret:" in output, output)

        # nodeid + client secret gives everything
        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
                                u.to_string())
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)
        self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output)

        u = u.get_readonly()
        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Read-only URI:" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        u = u.get_verify_cap()
        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Verifier URI:" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

    def test_dump_cap_directory(self):
        """dump-cap output for directory write/read/verify caps."""
        writekey = "\x01"*16
        fingerprint = "\xfe"*32
        u1 = uri.WriteableSSKFileURI(writekey, fingerprint)
        u = uri.DirectoryURI(u1)
        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Writeable URI:" in output, output)
        self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output,
                        output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output,
                        output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
                                u.to_string())
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                u.to_string())
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failIf("file renewal secret:" in output, output)

        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
                                u.to_string())
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)
        self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output)

        u = u.get_readonly()
        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Read-only URI:" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        u = u.get_verify_cap()
        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Verifier URI:" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

    def _catalog_shares(self, *basedirs):
        """Run 'tahoe debug catalog-shares' over the given base directories
        and return (stdout, stderr)."""
        o = debug.CatalogSharesOptions()
        o.stdout, o.stderr = StringIO(), StringIO()
        args = list(basedirs)
        o.parseOptions(args)
        debug.catalog_shares(o)
        out = o.stdout.getvalue()
        err = o.stderr.getvalue()
        return out, err

    def test_catalog_shares_error(self):
        """catalog-shares must survive bogus share files: it reports an
        error on stderr but keeps going."""
        nodedir1 = "cli/test_catalog_shares/node1"
        sharedir = os.path.join(nodedir1, "storage", "shares", "mq",
                                "mqfblse6m5a6dh45isu2cg7oji")
        fileutil.make_dirs(sharedir)
        fileutil.write("cli/test_catalog_shares/node1/storage/shares/mq/not-a-dir", "")
        # write a bogus share that looks a little bit like CHK
        fileutil.write(os.path.join(sharedir, "8"),
                       "\x00\x00\x00\x01" + "\xff" * 200) # this triggers an assert
        nodedir2 = "cli/test_catalog_shares/node2"
        fileutil.make_dirs(nodedir2)
        fileutil.write("cli/test_catalog_shares/node1/storage/shares/not-a-dir", "")
        # now make sure that the 'catalog-shares' commands survives the error
        out, err = self._catalog_shares(nodedir1, nodedir2)
        self.failUnlessEqual(out, "", out)
        self.failUnless("Error processing " in err,
                        "didn't see 'error processing' in '%s'" % err)
        #self.failUnless(nodedir1 in err,
        #                "didn't see '%s' in '%s'" % (nodedir1, err))
        # windows mangles the path, and os.path.join isn't enough to make
        # up for it, so just look for individual strings
        self.failUnless("node1" in err,
                        "didn't see 'node1' in '%s'" % err)
        self.failUnless("mqfblse6m5a6dh45isu2cg7oji" in err,
                        "didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err)

    def test_alias(self):
        """get_alias() behavior, including the windows-style 'c:' drive
        letter ambiguity and unknown-alias errors."""
        aliases = {"tahoe": "TA",
                   "work": "WA",
                   "c": "CA"}
        def ga1(path):
            return get_alias(aliases, path, "tahoe")
        uses_lettercolon = common.platform_uses_lettercolon_drivename()
        self.failUnlessEqual(ga1("bare"), ("TA", "bare"))
        self.failUnlessEqual(ga1("baredir/file"), ("TA", "baredir/file"))
        self.failUnlessEqual(ga1("baredir/file:7"), ("TA", "baredir/file:7"))
        self.failUnlessEqual(ga1("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga1("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga1("tahoe:dir/file"), ("TA", "dir/file"))
        self.failUnlessEqual(ga1("work:"), ("WA", ""))
        self.failUnlessEqual(ga1("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga1("work:dir/file"), ("WA", "dir/file"))
        # default != None means we really expect a tahoe path, regardless of
        # whether we're on windows or not. This is what 'tahoe get' uses.
        self.failUnlessEqual(ga1("c:"), ("CA", ""))
        self.failUnlessEqual(ga1("c:file"), ("CA", "file"))
        self.failUnlessEqual(ga1("c:dir/file"), ("CA", "dir/file"))
        self.failUnlessEqual(ga1("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga1("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga1("URI:stuff:./dir/file"),
                             ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:dir/file")

        def ga2(path):
            return get_alias(aliases, path, None)
        self.failUnlessEqual(ga2("bare"), (DefaultAliasMarker, "bare"))
        self.failUnlessEqual(ga2("baredir/file"),
                             (DefaultAliasMarker, "baredir/file"))
        self.failUnlessEqual(ga2("baredir/file:7"),
                             (DefaultAliasMarker, "baredir/file:7"))
        self.failUnlessEqual(ga2("baredir/sub:1/file:7"),
                             (DefaultAliasMarker, "baredir/sub:1/file:7"))
        self.failUnlessEqual(ga2("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga2("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga2("tahoe:dir/file"), ("TA", "dir/file"))
        # on windows, we really want c:foo to indicate a local file.
        # default==None is what 'tahoe cp' uses.
        if uses_lettercolon:
            self.failUnlessEqual(ga2("c:"), (DefaultAliasMarker, "c:"))
            self.failUnlessEqual(ga2("c:file"), (DefaultAliasMarker, "c:file"))
            self.failUnlessEqual(ga2("c:dir/file"),
                                 (DefaultAliasMarker, "c:dir/file"))
        else:
            self.failUnlessEqual(ga2("c:"), ("CA", ""))
            self.failUnlessEqual(ga2("c:file"), ("CA", "file"))
            self.failUnlessEqual(ga2("c:dir/file"), ("CA", "dir/file"))
        self.failUnlessEqual(ga2("work:"), ("WA", ""))
        self.failUnlessEqual(ga2("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga2("work:dir/file"), ("WA", "dir/file"))
        self.failUnlessEqual(ga2("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga2("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga2("URI:stuff:./dir/file"), ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:dir/file")

        def ga3(path):
            # pretend we're on windows, so c: is a drive letter
            old = common.pretend_platform_uses_lettercolon
            try:
                common.pretend_platform_uses_lettercolon = True
                retval = get_alias(aliases, path, None)
            finally:
                common.pretend_platform_uses_lettercolon = old
            return retval
        self.failUnlessEqual(ga3("bare"), (DefaultAliasMarker, "bare"))
        self.failUnlessEqual(ga3("baredir/file"),
                             (DefaultAliasMarker, "baredir/file"))
        self.failUnlessEqual(ga3("baredir/file:7"),
                             (DefaultAliasMarker, "baredir/file:7"))
        self.failUnlessEqual(ga3("baredir/sub:1/file:7"),
                             (DefaultAliasMarker, "baredir/sub:1/file:7"))
        self.failUnlessEqual(ga3("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga3("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga3("tahoe:dir/file"), ("TA", "dir/file"))
        self.failUnlessEqual(ga3("c:"), (DefaultAliasMarker, "c:"))
        self.failUnlessEqual(ga3("c:file"), (DefaultAliasMarker, "c:file"))
        self.failUnlessEqual(ga3("c:dir/file"),
                             (DefaultAliasMarker, "c:dir/file"))
        self.failUnlessEqual(ga3("work:"), ("WA", ""))
        self.failUnlessEqual(ga3("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga3("work:dir/file"), ("WA", "dir/file"))
        self.failUnlessEqual(ga3("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga3("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga3("URI:stuff:./dir/file"), ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:dir/file")

        # calling get_alias with a path that doesn't include an alias and
        # default set to something that isn't in the aliases argument should
        # raise an UnknownAliasError.
        def ga4(path):
            return get_alias(aliases, path, "badddefault:")
        self.failUnlessRaises(common.UnknownAliasError, ga4, "afile")
        self.failUnlessRaises(common.UnknownAliasError, ga4, "a/dir/path/")

        def ga5(path):
            # same, but pretending to be on windows
            old = common.pretend_platform_uses_lettercolon
            try:
                common.pretend_platform_uses_lettercolon = True
                retval = get_alias(aliases, path, "baddefault:")
            finally:
                common.pretend_platform_uses_lettercolon = old
            return retval
        self.failUnlessRaises(common.UnknownAliasError, ga5, "C:\\Windows")
2008-10-29 22:10:10 +00:00
2009-02-16 22:08:33 +00:00
class Help(unittest.TestCase):
    """Smoke tests: each CLI subcommand's --help text mentions its synopsis."""

    def test_get(self):
        help = str(cli.GetOptions())
        self.failUnless("get REMOTE_FILE LOCAL_FILE" in help, help)
        self.failUnless("% tahoe get FOO |less" in help, help)

    def test_put(self):
        help = str(cli.PutOptions())
        self.failUnless("put LOCAL_FILE REMOTE_FILE" in help, help)
        self.failUnless("% cat FILE | tahoe put" in help, help)

    def test_rm(self):
        help = str(cli.RmOptions())
        self.failUnless("rm REMOTE_FILE" in help, help)

    def test_mv(self):
        help = str(cli.MvOptions())
        self.failUnless("mv FROM TO" in help, help)
        self.failUnless("Use 'tahoe mv' to move files" in help)

    def test_ln(self):
        help = str(cli.LnOptions())
        self.failUnless("ln FROM TO" in help, help)

    def test_backup(self):
        help = str(cli.BackupOptions())
        self.failUnless("backup FROM ALIAS:TO" in help, help)

    def test_webopen(self):
        help = str(cli.WebopenOptions())
        self.failUnless("webopen [ALIAS:PATH]" in help, help)

    def test_manifest(self):
        help = str(cli.ManifestOptions())
        self.failUnless("manifest [ALIAS:PATH]" in help, help)

    def test_stats(self):
        help = str(cli.StatsOptions())
        self.failUnless("stats [ALIAS:PATH]" in help, help)

    def test_check(self):
        help = str(cli.CheckOptions())
        self.failUnless("check [ALIAS:PATH]" in help, help)

    def test_deep_check(self):
        help = str(cli.DeepCheckOptions())
        self.failUnless("deep-check [ALIAS:PATH]" in help, help)

    def test_create_alias(self):
        help = str(cli.CreateAliasOptions())
        self.failUnless("create-alias ALIAS" in help, help)

    def test_add_aliases(self):
        help = str(cli.AddAliasOptions())
        self.failUnless("add-alias ALIAS DIRCAP" in help, help)
2008-08-02 02:29:38 +00:00
class CLITestMixin:
    """Mixin for grid-based tests: run a CLI verb against this test's
    client node in a thread, returning a Deferred."""

    def do_cli(self, verb, *args, **kwargs):
        """Run 'tahoe VERB [args..]' against self.get_clientdir().

        kwargs may include 'stdin' (a string fed to the command).
        Returns a Deferred that fires with (rc, stdout, stderr).
        """
        nodeargs = [
            "--node-directory", self.get_clientdir(),
            ]
        argv = [verb] + nodeargs + list(args)
        stdin = kwargs.get("stdin", "")
        stdout, stderr = StringIO(), StringIO()
        # CLI tests use deferToThread because runner.runner blocks
        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
                                  stdin=StringIO(stdin),
                                  stdout=stdout, stderr=stderr)
        def _done(rc):
            return rc, stdout.getvalue(), stderr.getvalue()
        d.addCallback(_done)
        return d
2009-02-17 00:20:05 +00:00
class CreateAlias ( GridTestMixin , CLITestMixin , unittest . TestCase ) :
2008-08-02 02:29:38 +00:00
2008-08-12 01:20:23 +00:00
def _test_webopen ( self , args , expected_url ) :
woo = cli . WebopenOptions ( )
2009-02-17 00:20:05 +00:00
all_args = [ " --node-directory " , self . get_clientdir ( ) ] + list ( args )
2008-08-12 01:20:23 +00:00
woo . parseOptions ( all_args )
urls = [ ]
rc = cli . webopen ( woo , urls . append )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( len ( urls ) , 1 )
self . failUnlessEqual ( urls [ 0 ] , expected_url )
2008-08-02 02:10:41 +00:00
def test_create ( self ) :
2009-03-07 10:04:28 +00:00
self . basedir = " cli/CreateAlias/create "
2009-02-17 00:20:05 +00:00
self . set_up_grid ( )
2010-01-14 21:02:46 +00:00
aliasfile = os . path . join ( self . get_clientdir ( ) , " private " , " aliases " )
2009-02-17 00:20:05 +00:00
d = self . do_cli ( " create-alias " , " tahoe " )
2008-12-03 03:08:28 +00:00
def _done ( ( rc , stdout , stderr ) ) :
2008-08-02 02:10:41 +00:00
self . failUnless ( " Alias ' tahoe ' created " in stdout )
self . failIf ( stderr )
2009-02-17 00:20:05 +00:00
aliases = get_aliases ( self . get_clientdir ( ) )
2008-08-02 02:10:41 +00:00
self . failUnless ( " tahoe " in aliases )
self . failUnless ( aliases [ " tahoe " ] . startswith ( " URI:DIR2: " ) )
d . addCallback ( _done )
2008-08-12 01:20:23 +00:00
d . addCallback ( lambda res : self . do_cli ( " create-alias " , " two " ) )
2008-12-03 03:08:28 +00:00
2008-08-12 01:20:23 +00:00
def _stash_urls ( res ) :
2009-02-17 00:20:05 +00:00
aliases = get_aliases ( self . get_clientdir ( ) )
node_url_file = os . path . join ( self . get_clientdir ( ) , " node.url " )
2010-02-06 01:38:55 +00:00
nodeurl = fileutil . read ( node_url_file ) . strip ( )
2009-07-01 20:05:48 +00:00
self . welcome_url = nodeurl
2008-08-12 01:20:23 +00:00
uribase = nodeurl + " uri/ "
CLI: rework webopen, and moreover its tests w.r.t. path handling
in the recent reconciliation of webopen patches, I wound up adjusting
webopen to 'pass through' the state of the trailing slash on the given
argument to the resultant url passed to the browser. this change
removes the requirement that arguments must be directories, and allows
webopen to be used with files. it also broke the tests that assumed
that webopen would always normalise the url to have a trailing slash.
in fixing the tests, I realised that, IMHO, there's something deeply
awry with the way tahoe handles paths; specifically in the combination
of '/' being the name of the root path within an alias, but a leading
slash on paths, e.g. 'alias:/path', is catagorically incorrect. i.e.
'tahoe:' == 'tahoe:/' == '/'
but 'tahoe:/foo' is an invalid path, and must be 'tahoe:foo'
I wound up making the internals of webopen simply spot a 'path' of
'/' and smash it to '', which 'fixes' webopen to match the behaviour
of tahoe's path handling elsewhere, but that special case sort of
points to the weirdness.
(fwiw, I personally found the fact that the leading / in a path was
disallowed to be weird - I'm just used to seeing paths qualified by
the leading / I guess - so in a debate about normalising path handling
I'd vote to include the /)
2008-09-24 16:45:23 +00:00
self . tahoe_url = uribase + urllib . quote ( aliases [ " tahoe " ] )
self . tahoe_subdir_url = self . tahoe_url + " /subdir "
self . two_url = uribase + urllib . quote ( aliases [ " two " ] )
2008-12-03 03:20:22 +00:00
self . two_uri = aliases [ " two " ]
2008-08-12 01:20:23 +00:00
d . addCallback ( _stash_urls )
2008-12-03 03:20:22 +00:00
d . addCallback ( lambda res : self . do_cli ( " create-alias " , " two " ) ) # dup
def _check_create_duplicate ( ( rc , stdout , stderr ) ) :
self . failIfEqual ( rc , 0 )
self . failUnless ( " Alias ' two ' already exists! " in stderr )
2009-02-17 00:20:05 +00:00
aliases = get_aliases ( self . get_clientdir ( ) )
2008-12-03 03:20:22 +00:00
self . failUnlessEqual ( aliases [ " two " ] , self . two_uri )
d . addCallback ( _check_create_duplicate )
d . addCallback ( lambda res : self . do_cli ( " add-alias " , " added " , self . two_uri ) )
def _check_add ( ( rc , stdout , stderr ) ) :
self . failUnlessEqual ( rc , 0 )
self . failUnless ( " Alias ' added ' added " in stdout )
d . addCallback ( _check_add )
# check add-alias with a duplicate
d . addCallback ( lambda res : self . do_cli ( " add-alias " , " two " , self . two_uri ) )
def _check_add_duplicate ( ( rc , stdout , stderr ) ) :
self . failIfEqual ( rc , 0 )
self . failUnless ( " Alias ' two ' already exists! " in stderr )
2009-02-17 00:20:05 +00:00
aliases = get_aliases ( self . get_clientdir ( ) )
2008-12-03 03:20:22 +00:00
self . failUnlessEqual ( aliases [ " two " ] , self . two_uri )
d . addCallback ( _check_add_duplicate )
CLI: rework webopen, and moreover its tests w.r.t. path handling
in the recent reconciliation of webopen patches, I wound up adjusting
webopen to 'pass through' the state of the trailing slash on the given
argument to the resultant url passed to the browser. this change
removes the requirement that arguments must be directories, and allows
webopen to be used with files. it also broke the tests that assumed
that webopen would always normalise the url to have a trailing slash.
in fixing the tests, I realised that, IMHO, there's something deeply
awry with the way tahoe handles paths; specifically in the combination
of '/' being the name of the root path within an alias, but a leading
slash on paths, e.g. 'alias:/path', is catagorically incorrect. i.e.
'tahoe:' == 'tahoe:/' == '/'
but 'tahoe:/foo' is an invalid path, and must be 'tahoe:foo'
I wound up making the internals of webopen simply spot a 'path' of
'/' and smash it to '', which 'fixes' webopen to match the behaviour
of tahoe's path handling elsewhere, but that special case sort of
points to the weirdness.
(fwiw, I personally found the fact that the leading / in a path was
disallowed to be weird - I'm just used to seeing paths qualified by
the leading / I guess - so in a debate about normalising path handling
I'd vote to include the /)
2008-09-24 16:45:23 +00:00
def _test_urls ( junk ) :
2009-07-01 20:05:48 +00:00
self . _test_webopen ( [ ] , self . welcome_url )
CLI: rework webopen, and moreover its tests w.r.t. path handling
in the recent reconciliation of webopen patches, I wound up adjusting
webopen to 'pass through' the state of the trailing slash on the given
argument to the resultant url passed to the browser. this change
removes the requirement that arguments must be directories, and allows
webopen to be used with files. it also broke the tests that assumed
that webopen would always normalise the url to have a trailing slash.
in fixing the tests, I realised that, IMHO, there's something deeply
awry with the way tahoe handles paths; specifically in the combination
of '/' being the name of the root path within an alias, but a leading
slash on paths, e.g. 'alias:/path', is catagorically incorrect. i.e.
'tahoe:' == 'tahoe:/' == '/'
but 'tahoe:/foo' is an invalid path, and must be 'tahoe:foo'
I wound up making the internals of webopen simply spot a 'path' of
'/' and smash it to '', which 'fixes' webopen to match the behaviour
of tahoe's path handling elsewhere, but that special case sort of
points to the weirdness.
(fwiw, I personally found the fact that the leading / in a path was
disallowed to be weird - I'm just used to seeing paths qualified by
the leading / I guess - so in a debate about normalising path handling
I'd vote to include the /)
2008-09-24 16:45:23 +00:00
self . _test_webopen ( [ " / " ] , self . tahoe_url )
self . _test_webopen ( [ " tahoe: " ] , self . tahoe_url )
self . _test_webopen ( [ " tahoe:/ " ] , self . tahoe_url )
self . _test_webopen ( [ " tahoe:subdir " ] , self . tahoe_subdir_url )
self . _test_webopen ( [ " tahoe:subdir/ " ] , self . tahoe_subdir_url + ' / ' )
self . _test_webopen ( [ " tahoe:subdir/file " ] , self . tahoe_subdir_url + ' /file ' )
2008-12-03 03:08:28 +00:00
# if "file" is indeed a file, then the url produced by webopen in
# this case is disallowed by the webui. but by design, webopen
# passes through the mistake from the user to the resultant
# webopened url
CLI: rework webopen, and moreover its tests w.r.t. path handling
in the recent reconciliation of webopen patches, I wound up adjusting
webopen to 'pass through' the state of the trailing slash on the given
argument to the resultant url passed to the browser. this change
removes the requirement that arguments must be directories, and allows
webopen to be used with files. it also broke the tests that assumed
that webopen would always normalise the url to have a trailing slash.
in fixing the tests, I realised that, IMHO, there's something deeply
awry with the way tahoe handles paths; specifically in the combination
of '/' being the name of the root path within an alias, but a leading
slash on paths, e.g. 'alias:/path', is catagorically incorrect. i.e.
'tahoe:' == 'tahoe:/' == '/'
but 'tahoe:/foo' is an invalid path, and must be 'tahoe:foo'
I wound up making the internals of webopen simply spot a 'path' of
'/' and smash it to '', which 'fixes' webopen to match the behaviour
of tahoe's path handling elsewhere, but that special case sort of
points to the weirdness.
(fwiw, I personally found the fact that the leading / in a path was
disallowed to be weird - I'm just used to seeing paths qualified by
the leading / I guess - so in a debate about normalising path handling
I'd vote to include the /)
2008-09-24 16:45:23 +00:00
self . _test_webopen ( [ " tahoe:subdir/file/ " ] , self . tahoe_subdir_url + ' /file/ ' )
self . _test_webopen ( [ " two: " ] , self . two_url )
d . addCallback ( _test_urls )
2008-08-12 01:20:23 +00:00
2010-01-14 21:02:46 +00:00
def _remove_trailing_newline_and_create_alias ( ign ) :
# ticket #741 is about a manually-edited alias file (which
# doesn't end in a newline) being corrupted by a subsequent
# "tahoe create-alias"
2010-02-06 01:38:55 +00:00
old = fileutil . read ( aliasfile )
fileutil . write ( aliasfile , old . rstrip ( ) )
2010-01-14 21:02:46 +00:00
return self . do_cli ( " create-alias " , " un-corrupted1 " )
d . addCallback ( _remove_trailing_newline_and_create_alias )
def _check_not_corrupted1 ( ( rc , stdout , stderr ) ) :
self . failUnless ( " Alias ' un-corrupted1 ' created " in stdout , stdout )
self . failIf ( stderr )
# the old behavior was to simply append the new record, causing a
# line that looked like "NAME1: CAP1NAME2: CAP2". This won't look
# like a valid dircap, so get_aliases() will raise an exception.
aliases = get_aliases ( self . get_clientdir ( ) )
self . failUnless ( " added " in aliases )
self . failUnless ( aliases [ " added " ] . startswith ( " URI:DIR2: " ) )
# to be safe, let's confirm that we don't see "NAME2:" in CAP1.
# No chance of a false-negative, because the hyphen in
# "un-corrupted1" is not a valid base32 character.
self . failIfIn ( " un-corrupted1: " , aliases [ " added " ] )
self . failUnless ( " un-corrupted1 " in aliases )
self . failUnless ( aliases [ " un-corrupted1 " ] . startswith ( " URI:DIR2: " ) )
d . addCallback ( _check_not_corrupted1 )
def _remove_trailing_newline_and_add_alias ( ign ) :
# same thing, but for "tahoe add-alias"
2010-02-06 01:38:55 +00:00
old = fileutil . read ( aliasfile )
fileutil . write ( aliasfile , old . rstrip ( ) )
2010-01-14 21:02:46 +00:00
return self . do_cli ( " add-alias " , " un-corrupted2 " , self . two_uri )
d . addCallback ( _remove_trailing_newline_and_add_alias )
def _check_not_corrupted ( ( rc , stdout , stderr ) ) :
self . failUnless ( " Alias ' un-corrupted2 ' added " in stdout , stdout )
self . failIf ( stderr )
aliases = get_aliases ( self . get_clientdir ( ) )
self . failUnless ( " un-corrupted1 " in aliases )
self . failUnless ( aliases [ " un-corrupted1 " ] . startswith ( " URI:DIR2: " ) )
self . failIfIn ( " un-corrupted2: " , aliases [ " un-corrupted1 " ] )
self . failUnless ( " un-corrupted2 " in aliases )
self . failUnless ( aliases [ " un-corrupted2 " ] . startswith ( " URI:DIR2: " ) )
d . addCallback ( _check_not_corrupted )
2008-08-02 02:10:41 +00:00
return d
2008-08-01 22:10:09 +00:00
2010-02-12 06:21:37 +00:00
class Ln(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Argument-validation tests for the 'tahoe ln' CLI command."""

    def _create_test_file(self):
        # Write a small local file and remember its path in self.datafile so
        # the tests can 'tahoe put' it later. fileutil.write replaces the
        # previous manual open/write/close, matching the style used by the
        # other test classes in this file and ensuring the handle is closed
        # even if the write raises.
        data = "puppies" * 1000
        path = os.path.join(self.basedir, "datafile")
        fileutil.write(path, data)
        self.datafile = path

    def test_ln_without_alias(self):
        # if invoked without an alias when the 'tahoe' alias doesn't
        # exist, 'tahoe ln' should output a useful error message and not
        # a stack trace
        self.basedir = "cli/Ln/ln_without_alias"
        self.set_up_grid()
        d = self.do_cli("ln", "from", "to")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d.addCallback(_check)
        # Make sure that validation extends to the "to" parameter
        d.addCallback(lambda ign: self.do_cli("create-alias", "havasu"))
        d.addCallback(lambda ign: self._create_test_file())
        d.addCallback(lambda ign: self.do_cli("put", self.datafile,
                                              "havasu:from"))
        d.addCallback(lambda ign: self.do_cli("ln", "havasu:from", "to"))
        d.addCallback(_check)
        return d

    def test_ln_with_nonexistent_alias(self):
        # If invoked with aliases that don't exist, 'tahoe ln' should
        # output a useful error message and not a stack trace.
        self.basedir = "cli/Ln/ln_with_nonexistent_alias"
        self.set_up_grid()
        d = self.do_cli("ln", "havasu:from", "havasu:to")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d.addCallback(_check)
        # Make sure that validation occurs on the "to" parameter if the
        # "from" parameter passes.
        d.addCallback(lambda ign: self.do_cli("create-alias", "havasu"))
        d.addCallback(lambda ign: self._create_test_file())
        d.addCallback(lambda ign: self.do_cli("put", self.datafile,
                                              "havasu:from"))
        d.addCallback(lambda ign: self.do_cli("ln", "havasu:from", "huron:to"))
        d.addCallback(_check)
        return d
2009-02-17 00:20:05 +00:00
class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Exercises 'tahoe put' and 'tahoe get' in their various calling forms:
    unlinked immutable uploads (stdin and local file), linked immutable
    uploads under an alias or raw dircap, and mutable-file create/replace."""

    def test_unlinked_immutable_stdin(self):
        # tahoe get `echo DATA | tahoe put`
        # tahoe get `echo DATA | tahoe put -`
        self.basedir = "cli/Put/unlinked_immutable_stdin"
        DATA = "data" * 100
        self.set_up_grid()
        d = self.do_cli("put", stdin=DATA)
        def _uploaded(res):
            (rc, stdout, stderr) = res
            # progress/status messages go to stderr; the cap goes to stdout
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr, stderr)
            self.readcap = stdout
            self.failUnless(self.readcap.startswith("URI:CHK:"))
        d.addCallback(_uploaded)
        d.addCallback(lambda res: self.do_cli("get", self.readcap))
        def _downloaded(res):
            (rc, stdout, stderr) = res
            self.failUnlessEqual(stderr, "")
            self.failUnlessEqual(stdout, DATA)
        d.addCallback(_downloaded)
        # an explicit "-" argument means the same thing as no argument
        d.addCallback(lambda res: self.do_cli("put", "-", stdin=DATA))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], self.readcap))
        return d

    def test_unlinked_immutable_from_file(self):
        # tahoe put file.txt
        # tahoe put ./file.txt
        # tahoe put /tmp/file.txt
        # tahoe put ~/file.txt
        self.basedir = "cli/Put/unlinked_immutable_from_file"
        self.set_up_grid()
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        abs_fn = os.path.abspath(rel_fn)
        # we make the file small enough to fit in a LIT file, for speed
        fileutil.write(rel_fn, "short file")
        d = self.do_cli("put", rel_fn)
        def _uploaded(res):
            (rc, stdout, stderr) = res
            self.failUnless(stdout.startswith("URI:LIT:"))
            self.readcap = stdout
        d.addCallback(_uploaded)
        # the same file given as an explicitly-relative path, then as an
        # absolute path, must produce the identical cap
        d.addCallback(lambda res: self.do_cli("put", "./" + rel_fn))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], self.readcap))
        d.addCallback(lambda res: self.do_cli("put", abs_fn))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], self.readcap))
        # we just have to assume that ~ is handled properly
        return d

    def test_immutable_from_file(self):
        # tahoe put file.txt uploaded.txt
        # tahoe - uploaded.txt
        # tahoe put file.txt subdir/uploaded.txt
        # tahoe put file.txt tahoe:uploaded.txt
        # tahoe put file.txt tahoe:subdir/uploaded.txt
        # tahoe put file.txt DIRCAP:./uploaded.txt
        # tahoe put file.txt DIRCAP:./subdir/uploaded.txt
        self.basedir = "cli/Put/immutable_from_file"
        self.set_up_grid()
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        # we make the file small enough to fit in a LIT file, for speed
        DATA = "short file"
        DATA2 = "short file two"
        fileutil.write(rel_fn, DATA)

        d = self.do_cli("create-alias", "tahoe")

        # a bare remote name lands in the default ("tahoe") alias
        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "uploaded.txt"))
        def _uploaded(res):
            (rc, stdout, stderr) = res
            readcap = stdout.strip()
            self.failUnless(readcap.startswith("URI:LIT:"))
            # first upload of this name: webapi reports creation
            self.failUnless("201 Created" in stderr, stderr)
            self.readcap = readcap
        d.addCallback(_uploaded)
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))

        # replacing the same name from stdin: webapi reports 200, not 201
        d.addCallback(lambda res:
                      self.do_cli("put", "-", "uploaded.txt", stdin=DATA2))
        def _replaced(res):
            (rc, stdout, stderr) = res
            readcap = stdout.strip()
            self.failUnless(readcap.startswith("URI:LIT:"))
            self.failUnless("200 OK" in stderr, stderr)
        d.addCallback(_replaced)

        # intermediate directories ("subdir/") are created as needed
        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "subdir/uploaded2.txt"))
        d.addCallback(lambda res: self.do_cli("get", "subdir/uploaded2.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "tahoe:uploaded3.txt"))
        d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded3.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "tahoe:subdir/uploaded4.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:subdir/uploaded4.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))

        # a raw dircap can be used in place of an alias
        def _get_dircap(res):
            self.dircap = get_aliases(self.get_clientdir())["tahoe"]
        d.addCallback(_get_dircap)
        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn,
                                  self.dircap + ":./uploaded5.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded5.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))
        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn,
                                  self.dircap + ":./subdir/uploaded6.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:subdir/uploaded6.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))
        return d

    def test_mutable_unlinked(self):
        # FILECAP = `echo DATA | tahoe put --mutable`
        # tahoe get FILECAP, compare against DATA
        # echo DATA2 | tahoe put - FILECAP
        # tahoe get FILECAP, compare against DATA2
        # tahoe put file.txt FILECAP
        self.basedir = "cli/Put/mutable_unlinked"
        self.set_up_grid()
        DATA = "data" * 100
        DATA2 = "two" * 100
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        DATA3 = "three" * 100
        fileutil.write(rel_fn, DATA3)

        d = self.do_cli("put", "--mutable", stdin=DATA)
        def _created(res):
            (rc, stdout, stderr) = res
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr)
            self.filecap = stdout
            self.failUnless(self.filecap.startswith("URI:SSK:"))
        d.addCallback(_created)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA))

        # writing to an existing mutable cap modifies it in place and
        # echoes the same cap back
        d.addCallback(lambda res: self.do_cli("put", "-", self.filecap,
                                              stdin=DATA2))
        def _replaced(res):
            (rc, stdout, stderr) = res
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr)
            self.failUnlessEqual(self.filecap, stdout)
        d.addCallback(_replaced)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA2))

        # same, but with the new contents coming from a local file
        d.addCallback(lambda res: self.do_cli("put", rel_fn, self.filecap))
        def _replaced2(res):
            (rc, stdout, stderr) = res
            self.failUnless("200 OK" in stderr)
            self.failUnlessEqual(self.filecap, stdout)
        d.addCallback(_replaced2)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA3))

        return d

    def test_mutable(self):
        # echo DATA1 | tahoe put --mutable - uploaded.txt
        # echo DATA2 | tahoe put - uploaded.txt # should modify-in-place
        # tahoe get uploaded.txt, compare against DATA2
        self.basedir = "cli/Put/mutable"
        self.set_up_grid()
        DATA1 = "data" * 100
        fn1 = os.path.join(self.basedir, "DATA1")
        fileutil.write(fn1, DATA1)
        DATA2 = "two" * 100
        fn2 = os.path.join(self.basedir, "DATA2")
        fileutil.write(fn2, DATA2)

        d = self.do_cli("create-alias", "tahoe")
        d.addCallback(lambda res:
                      self.do_cli("put", "--mutable", fn1, "tahoe:uploaded.txt"))
        # the second put, without --mutable, should still replace the
        # mutable file's contents rather than making a new immutable one
        d.addCallback(lambda res:
                      self.do_cli("put", fn2, "tahoe:uploaded.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(res[1], DATA2))
        return d

    def test_put_with_nonexistent_alias(self):
        # when invoked with an alias that doesn't exist, 'tahoe put'
        # should output a useful error message, not a stack trace
        self.basedir = "cli/Put/put_with_nonexistent_alias"
        self.set_up_grid()
        d = self.do_cli("put", "somefile", "fake:afile")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d.addCallback(_check)
        return d
2009-03-07 12:08:15 +00:00
class List(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Tests for 'tahoe ls': normal listings, missing paths, unrecoverable
    children, long-format metadata, and alias validation."""

    def test_list(self):
        self.basedir = "cli/List/list"
        self.set_up_grid()
        c0 = self.g.clients[0]
        small = "small"
        d = c0.create_dirnode()
        def _stash_root_and_create_file(n):
            self.rootnode = n
            self.rooturi = n.get_uri()
            return n.add_file(u"good", upload.Data(small, convergence=""))
        d.addCallback(_stash_root_and_create_file)
        def _stash_goodcap(n):
            # remember the filecap so we can 'ls -l' it directly later
            self.goodcap = n.get_uri()
        d.addCallback(_stash_goodcap)
        # "1share": recoverable damage; "0share": unrecoverable
        d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"1share"))
        d.addCallback(lambda n:
                      self.delete_shares_numbered(n.get_uri(), range(1, 10)))
        d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"0share"))
        d.addCallback(lambda n:
                      self.delete_shares_numbered(n.get_uri(), range(0, 10)))
        d.addCallback(lambda ign:
                      self.do_cli("add-alias", "tahoe", self.rooturi))

        d.addCallback(lambda ign: self.do_cli("ls"))
        def _check1(res):
            (rc, out, err) = res
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out.splitlines(), ["0share", "1share", "good"])
        d.addCallback(_check1)

        d.addCallback(lambda ign: self.do_cli("ls", "missing"))
        def _check2(res):
            (rc, out, err) = res
            self.failIfEqual(rc, 0)
            self.failUnlessEqual(err.strip(), "No such file or directory")
            self.failUnlessEqual(out, "")
        d.addCallback(_check2)

        # both damaged directories should produce the same GET failure
        d.addCallback(lambda ign: self.do_cli("ls", "1share"))
        def _check3(res):
            (rc, out, err) = res
            self.failIfEqual(rc, 0)
            self.failUnlessIn("Error during GET: 410 Gone ", err)
            self.failUnlessIn("UnrecoverableFileError:", err)
            self.failUnlessIn("could not be retrieved, because there were "
                              "insufficient good shares.", err)
            self.failUnlessEqual(out, "")
        d.addCallback(_check3)
        d.addCallback(lambda ign: self.do_cli("ls", "0share"))
        d.addCallback(_check3)

        def _check4(res):
            (rc, out, err) = res
            # listing a file (as dir/filename) should have the edge metadata,
            # including the filename
            self.failUnlessEqual(rc, 0)
            self.failUnlessIn("good", out)
            self.failIfIn("-r-- %d -" % len(small), out,
                          "trailing hyphen means unknown date")
        d.addCallback(lambda ign: self.do_cli("ls", "-l", "good"))
        d.addCallback(_check4)

        def _check5(res):
            (rc, out, err) = res
            # listing a raw filecap should not explode, but it will have no
            # metadata, just the size
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual("-r-- %d -" % len(small), out.strip())
        d.addCallback(lambda ign: self.do_cli("ls", "-l", self.goodcap))
        d.addCallback(_check5)

        return d

    def test_list_without_alias(self):
        # doing just 'tahoe ls' without specifying an alias or first
        # doing 'tahoe create-alias tahoe' should fail gracefully.
        self.basedir = "cli/List/list_without_alias"
        self.set_up_grid()
        d = self.do_cli("ls")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d.addCallback(_check)
        return d

    def test_list_with_nonexistent_alias(self):
        # doing 'tahoe ls' while specifying an alias that doesn't already
        # exist should fail with an informative error message
        self.basedir = "cli/List/list_with_nonexistent_alias"
        self.set_up_grid()
        d = self.do_cli("ls", "nonexistent:")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
            self.failUnlessIn("nonexistent", err)
        d.addCallback(_check)
        return d
2009-07-20 03:46:09 +00:00
class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Tests for 'tahoe mv': renames, directory targets, nested moves,
    and alias validation."""

    def test_mv_behavior(self):
        self.basedir = "cli/Mv/mv_behavior"
        self.set_up_grid()
        fn1 = os.path.join(self.basedir, "file1")
        DATA1 = "Nuclear launch codes"
        fileutil.write(fn1, DATA1)
        fn2 = os.path.join(self.basedir, "file2")
        DATA2 = "UML diagrams"
        fileutil.write(fn2, DATA2)
        # copy both files to the grid
        d = self.do_cli("create-alias", "tahoe")
        d.addCallback(lambda res:
                      self.do_cli("cp", fn1, "tahoe:"))
        d.addCallback(lambda res:
                      self.do_cli("cp", fn2, "tahoe:"))

        # do mv file1 file3
        # (we should be able to rename files)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file1", "tahoe:file3"))
        d.addCallback(lambda res:
                      self.failUnlessIn("OK", res[1], "mv didn't rename a file"))

        # do mv file3 file2
        # (This should succeed without issue)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file3", "tahoe:file2"))
        # Out should contain "OK" to show that the transfer worked.
        d.addCallback(lambda res:
                      self.failUnlessIn("OK", res[1],
                                        "mv didn't output OK after mving"))

        # Next, make a remote directory.
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory"))

        # mv file2 directory
        # (should fail with a descriptive error message; the CLI mv
        # client should support this)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file2", "tahoe:directory"))
        d.addCallback(lambda res:
                      self.failUnlessIn(
                          "Error: You can't overwrite a directory with a file", res[2],
                          "mv shouldn't overwrite directories"))

        # mv file2 directory/
        # (should succeed by making file2 a child node of directory)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file2", "tahoe:directory/"))
        # We should see an "OK"...
        d.addCallback(lambda res:
                      self.failUnlessIn("OK", res[1],
                                        "mv didn't mv a file into a directory"))
        # ... and be able to GET the file
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory/file2", self.basedir + "new"))
        d.addCallback(lambda res:
                      self.failUnless(os.path.exists(self.basedir + "new"),
                                      "mv didn't write the destination file"))
        # ... and not find the file where it was before.
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:file2", "file2"))
        d.addCallback(lambda res:
                      self.failUnlessIn("404", res[2],
                                        "mv left the source file intact"))

        # Let's build:
        # directory/directory2/some_file
        # directory3
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory/directory2"))
        d.addCallback(lambda res:
                      self.do_cli("cp", fn2, "tahoe:directory/directory2/some_file"))
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory3"))

        # Let's now try to mv directory/directory2/some_file to
        # directory3/some_file
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:directory/directory2/some_file",
                                  "tahoe:directory3/"))
        # We should have just some_file in tahoe:directory3
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory3/some_file", "some_file"))
        d.addCallback(lambda res:
                      self.failUnless("404" not in res[2],
                                      "mv didn't handle nested directories correctly"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory3/directory", "directory"))
        d.addCallback(lambda res:
                      self.failUnlessIn("404", res[2],
                                        "mv moved the wrong thing"))
        return d

    def test_mv_without_alias(self):
        # doing 'tahoe mv' without explicitly specifying an alias or
        # creating the default 'tahoe' alias should fail with a useful
        # error message.
        self.basedir = "cli/Mv/mv_without_alias"
        self.set_up_grid()
        d = self.do_cli("mv", "afile", "anotherfile")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d.addCallback(_check)
        # check to see that the validation extends to the
        # target argument by making an alias that will work with the first
        # one.
        d.addCallback(lambda ign: self.do_cli("create-alias", "havasu"))
        def _create_a_test_file(ign):
            self.test_file_path = os.path.join(self.basedir, "afile")
            # fileutil.write replaces the previous manual open/write/close,
            # matching test_mv_behavior above and guaranteeing the handle
            # is closed even if the write raises.
            fileutil.write(self.test_file_path, "puppies" * 100)
        d.addCallback(_create_a_test_file)
        d.addCallback(lambda ign: self.do_cli("put", self.test_file_path,
                                              "havasu:afile"))
        d.addCallback(lambda ign: self.do_cli("mv", "havasu:afile",
                                              "anotherfile"))
        d.addCallback(_check)
        return d

    def test_mv_with_nonexistent_alias(self):
        # doing 'tahoe mv' with an alias that doesn't exist should fail
        # with an informative error message.
        self.basedir = "cli/Mv/mv_with_nonexistent_alias"
        self.set_up_grid()
        d = self.do_cli("mv", "fake:afile", "fake:anotherfile")
        def _check(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
            self.failUnlessIn("fake", err)
        d.addCallback(_check)
        # check to see that the validation extends to the
        # target argument by making an alias that will work with the first
        # one.
        d.addCallback(lambda ign: self.do_cli("create-alias", "havasu"))
        def _create_a_test_file(ign):
            self.test_file_path = os.path.join(self.basedir, "afile")
            # see the comment in test_mv_without_alias about fileutil.write
            fileutil.write(self.test_file_path, "puppies" * 100)
        d.addCallback(_create_a_test_file)
        d.addCallback(lambda ign: self.do_cli("put", self.test_file_path,
                                              "havasu:afile"))
        d.addCallback(lambda ign: self.do_cli("mv", "havasu:afile",
                                              "fake:anotherfile"))
        d.addCallback(_check)
        return d
2009-02-17 00:20:05 +00:00
class Cp ( GridTestMixin , CLITestMixin , unittest . TestCase ) :
2009-02-16 04:04:51 +00:00
def test_not_enough_args ( self ) :
o = cli . CpOptions ( )
self . failUnlessRaises ( usage . UsageError ,
o . parseOptions , [ " onearg " ] )
2008-11-13 12:19:51 +00:00
def test_unicode_filename ( self ) :
2009-03-07 10:04:28 +00:00
self . basedir = " cli/Cp/unicode_filename "
2009-02-17 00:20:05 +00:00
self . set_up_grid ( )
2008-11-13 12:19:51 +00:00
2008-11-14 14:41:37 +00:00
fn1 = os . path . join ( self . basedir , " Ärtonwall " )
DATA1 = " unicode file content "
2010-02-06 01:37:27 +00:00
fileutil . write ( fn1 , DATA1 )
2008-11-13 12:19:51 +00:00
2008-11-14 14:41:37 +00:00
fn2 = os . path . join ( self . basedir , " Metallica " )
DATA2 = " non-unicode file content "
2010-02-06 01:37:27 +00:00
fileutil . write ( fn2 , DATA2 )
2008-11-13 12:19:51 +00:00
# Bug #534
# Assure that uploading a file whose name contains unicode character doesn't
# prevent further uploads in the same directory
2009-02-17 00:20:05 +00:00
d = self . do_cli ( " create-alias " , " tahoe " )
2008-11-13 12:19:51 +00:00
d . addCallback ( lambda res : self . do_cli ( " cp " , fn1 , " tahoe: " ) )
d . addCallback ( lambda res : self . do_cli ( " cp " , fn2 , " tahoe: " ) )
2008-11-14 14:44:58 +00:00
d . addCallback ( lambda res : self . do_cli ( " get " , " tahoe:Ärtonwall " ) )
2008-12-03 03:08:28 +00:00
d . addCallback ( lambda ( rc , out , err ) : self . failUnlessEqual ( out , DATA1 ) )
2008-11-14 14:44:58 +00:00
d . addCallback ( lambda res : self . do_cli ( " get " , " tahoe:Metallica " ) )
2008-12-03 03:08:28 +00:00
d . addCallback ( lambda ( rc , out , err ) : self . failUnlessEqual ( out , DATA2 ) )
2008-11-14 14:44:58 +00:00
2008-11-13 12:19:51 +00:00
return d
2008-12-24 20:28:02 +00:00
test_unicode_filename . todo = " This behavior is not yet supported, although it does happen to work (for reasons that are ill-understood) on many platforms. See issue ticket #534. "
2009-01-08 06:51:14 +00:00
def test_dangling_symlink_vs_recursion ( self ) :
2009-01-15 01:10:10 +00:00
if not hasattr ( os , ' symlink ' ) :
raise unittest . SkipTest ( " There is no symlink on this platform. " )
2009-01-08 06:51:14 +00:00
# cp -r on a directory containing a dangling symlink shouldn't assert
2009-03-07 10:04:28 +00:00
self . basedir = " cli/Cp/dangling_symlink_vs_recursion "
2009-02-17 00:20:05 +00:00
self . set_up_grid ( )
2009-01-08 06:51:14 +00:00
dn = os . path . join ( self . basedir , " dir " )
os . mkdir ( dn )
fn = os . path . join ( dn , " Fakebandica " )
ln = os . path . join ( dn , " link " )
os . symlink ( fn , ln )
2009-02-17 00:20:05 +00:00
d = self . do_cli ( " create-alias " , " tahoe " )
2009-01-08 06:51:14 +00:00
d . addCallback ( lambda res : self . do_cli ( " cp " , " --recursive " ,
dn , " tahoe: " ) )
return d
2009-02-03 04:09:02 +00:00
2009-11-30 21:10:09 +00:00
def test_copy_using_filecap ( self ) :
self . basedir = " cli/Cp/test_copy_using_filecap "
self . set_up_grid ( )
outdir = os . path . join ( self . basedir , " outdir " )
os . mkdir ( outdir )
fn1 = os . path . join ( self . basedir , " Metallica " )
fn2 = os . path . join ( outdir , " Not Metallica " )
fn3 = os . path . join ( outdir , " test2 " )
DATA1 = " puppies " * 10000
2010-02-06 01:37:27 +00:00
fileutil . write ( fn1 , DATA1 )
d = self . do_cli ( " create-alias " , " tahoe " )
d . addCallback ( lambda ign : self . do_cli ( " put " , fn1 ) )
2009-11-30 21:10:09 +00:00
def _put_file ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 0 )
# keep track of the filecap
self . filecap = out . strip ( )
d . addCallback ( _put_file )
2010-02-06 01:38:55 +00:00
2009-11-30 21:10:09 +00:00
# Let's try copying this to the disk using the filecap
# cp FILECAP filename
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign : self . do_cli ( " cp " , self . filecap , fn2 ) )
2009-11-30 21:10:09 +00:00
def _copy_file ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 0 )
2010-02-06 01:38:55 +00:00
results = fileutil . read ( fn2 )
2009-11-30 21:10:09 +00:00
self . failUnlessEqual ( results , DATA1 )
2010-02-06 01:37:27 +00:00
d . addCallback ( _copy_file )
2009-11-30 21:10:09 +00:00
# Test with ./ (see #761)
# cp FILECAP localdir
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign : self . do_cli ( " cp " , self . filecap , outdir ) )
2009-11-30 21:10:09 +00:00
def _resp ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: you must specify a destination filename " ,
err )
d . addCallback ( _resp )
2010-02-06 01:38:55 +00:00
2009-11-30 21:10:09 +00:00
# Create a directory, linked at tahoe:test
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign : self . do_cli ( " mkdir " , " tahoe:test " ) )
2009-11-30 21:10:09 +00:00
def _get_dir ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 0 )
self . dircap = out . strip ( )
d . addCallback ( _get_dir )
2010-02-06 01:38:55 +00:00
2009-11-30 21:10:09 +00:00
# Upload a file to the directory
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign :
self . do_cli ( " put " , fn1 , " tahoe:test/test_file " ) )
2009-11-30 21:10:09 +00:00
d . addCallback ( lambda ( rc , out , err ) : self . failUnlessEqual ( rc , 0 ) )
2010-02-06 01:38:55 +00:00
2009-11-30 21:10:09 +00:00
# cp DIRCAP/filename localdir
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign :
2009-11-30 21:10:09 +00:00
self . do_cli ( " cp " , self . dircap + " /test_file " , outdir ) )
def _get_resp ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 0 )
2010-02-06 01:38:55 +00:00
results = fileutil . read ( os . path . join ( outdir , " test_file " ) )
2009-11-30 21:10:09 +00:00
self . failUnlessEqual ( results , DATA1 )
d . addCallback ( _get_resp )
2010-02-06 01:38:55 +00:00
2009-11-30 21:10:09 +00:00
# cp -r DIRCAP/filename filename2
2010-02-06 01:38:55 +00:00
d . addCallback ( lambda ign :
2009-11-30 21:10:09 +00:00
self . do_cli ( " cp " , self . dircap + " /test_file " , fn3 ) )
def _get_resp2 ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 0 )
2010-02-06 01:38:55 +00:00
results = fileutil . read ( fn3 )
2009-11-30 21:10:09 +00:00
self . failUnlessEqual ( results , DATA1 )
d . addCallback ( _get_resp2 )
return d
2010-02-12 06:21:37 +00:00
def test_cp_with_nonexistent_alias ( self ) :
# when invoked with an alias or aliases that don't exist, 'tahoe cp'
# should output a sensible error message rather than a stack trace.
self . basedir = " cli/Cp/cp_with_nonexistent_alias "
self . set_up_grid ( )
d = self . do_cli ( " cp " , " fake:file1 " , " fake:file2 " )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
d . addCallback ( _check )
# 'tahoe cp' actually processes the target argument first, so we
# need to check to make sure that validation extends to the
# source argument.
d . addCallback ( lambda ign : self . do_cli ( " create-alias " , " tahoe " ) )
d . addCallback ( lambda ign : self . do_cli ( " cp " , " fake:file1 " ,
" tahoe:file2 " ) )
d . addCallback ( _check )
return d
2009-02-17 00:20:05 +00:00
class Backup ( GridTestMixin , CLITestMixin , StallMixin , unittest . TestCase ) :
2009-02-03 04:09:02 +00:00
def writeto ( self , path , data ) :
2010-02-06 01:38:55 +00:00
full_path = os . path . join ( self . basedir , " home " , path )
fileutil . make_dirs ( os . path . dirname ( full_path ) )
fileutil . write ( full_path , data )
2009-02-03 04:09:02 +00:00
2009-02-06 05:07:01 +00:00
def count_output ( self , out ) :
2010-01-20 09:42:49 +00:00
mo = re . search ( r " ( \ d)+ files uploaded \ (( \ d+) reused \ ), "
" ( \ d)+ files skipped, "
" ( \ d+) directories created \ (( \ d+) reused \ ), "
" ( \ d+) directories skipped " , out )
2009-02-06 05:07:01 +00:00
return [ int ( s ) for s in mo . groups ( ) ]
def count_output2 ( self , out ) :
2009-11-26 23:42:57 +00:00
mo = re . search ( r " ( \ d)+ files checked, ( \ d+) directories checked " , out )
2009-02-06 05:07:01 +00:00
return [ int ( s ) for s in mo . groups ( ) ]
2009-02-03 04:09:02 +00:00
def test_backup ( self ) :
2009-03-07 10:04:28 +00:00
self . basedir = " cli/Backup/backup "
2009-02-17 00:20:05 +00:00
self . set_up_grid ( )
2009-02-03 04:09:02 +00:00
2009-02-06 05:07:01 +00:00
# is the backupdb available? If so, we test that a second backup does
# not create new directories.
hush = StringIO ( )
have_bdb = backupdb . get_backupdb ( os . path . join ( self . basedir , " dbtest " ) ,
hush )
2009-02-03 04:09:02 +00:00
# create a small local directory with a couple of files
source = os . path . join ( self . basedir , " home " )
fileutil . make_dirs ( os . path . join ( source , " empty " ) )
self . writeto ( " parent/subdir/foo.txt " , " foo " )
self . writeto ( " parent/subdir/bar.txt " , " bar \n " * 1000 )
self . writeto ( " parent/blah.txt " , " blah " )
2009-06-04 17:31:31 +00:00
def do_backup ( verbose = False ) :
2009-02-11 01:49:10 +00:00
cmd = [ " backup " ]
if verbose :
cmd . append ( " --verbose " )
cmd . append ( source )
cmd . append ( " tahoe:backups " )
return self . do_cli ( * cmd )
2009-02-17 00:20:05 +00:00
d = self . do_cli ( " create-alias " , " tahoe " )
2009-02-11 01:49:10 +00:00
if not have_bdb :
d . addCallback ( lambda res : self . do_cli ( " backup " , source , " tahoe:backups " ) )
def _should_complain ( ( rc , out , err ) ) :
self . failUnless ( " I was unable to import a python sqlite library " in err , err )
d . addCallback ( _should_complain )
2009-02-11 03:37:09 +00:00
d . addCallback ( self . stall , 1.1 ) # make sure the backups get distinct timestamps
2009-02-11 01:49:10 +00:00
d . addCallback ( lambda res : do_backup ( ) )
2009-02-03 04:09:02 +00:00
def _check0 ( ( rc , out , err ) ) :
2009-02-11 01:49:10 +00:00
self . failUnlessEqual ( err , " " )
2009-02-03 04:09:02 +00:00
self . failUnlessEqual ( rc , 0 )
2010-01-20 09:42:49 +00:00
fu , fr , fs , dc , dr , ds = self . count_output ( out )
2009-02-06 05:07:01 +00:00
# foo.txt, bar.txt, blah.txt
self . failUnlessEqual ( fu , 3 )
self . failUnlessEqual ( fr , 0 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( fs , 0 )
2009-02-06 05:07:01 +00:00
# empty, home, home/parent, home/parent/subdir
self . failUnlessEqual ( dc , 4 )
self . failUnlessEqual ( dr , 0 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( ds , 0 )
2009-02-03 04:09:02 +00:00
d . addCallback ( _check0 )
2009-02-06 05:07:01 +00:00
2009-11-18 19:28:13 +00:00
d . addCallback ( lambda res : self . do_cli ( " ls " , " --uri " , " tahoe:backups " ) )
2009-02-03 04:09:02 +00:00
def _check1 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
2009-11-18 19:28:13 +00:00
lines = out . split ( " \n " )
children = dict ( [ line . split ( ) for line in lines if line ] )
latest_uri = children [ " Latest " ]
self . failUnless ( latest_uri . startswith ( " URI:DIR2-CHK: " ) , latest_uri )
childnames = children . keys ( )
self . failUnlessEqual ( sorted ( childnames ) , [ " Archives " , " Latest " ] )
2009-02-03 04:09:02 +00:00
d . addCallback ( _check1 )
d . addCallback ( lambda res : self . do_cli ( " ls " , " tahoe:backups/Latest " ) )
def _check2 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( sorted ( out . split ( ) ) , [ " empty " , " parent " ] )
d . addCallback ( _check2 )
d . addCallback ( lambda res : self . do_cli ( " ls " , " tahoe:backups/Latest/empty " ) )
def _check2a ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( out . strip ( ) , " " )
d . addCallback ( _check2a )
d . addCallback ( lambda res : self . do_cli ( " get " , " tahoe:backups/Latest/parent/subdir/foo.txt " ) )
def _check3 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( out , " foo " )
d . addCallback ( _check3 )
d . addCallback ( lambda res : self . do_cli ( " ls " , " tahoe:backups/Archives " ) )
def _check4 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . old_archives = out . split ( )
self . failUnlessEqual ( len ( self . old_archives ) , 1 )
d . addCallback ( _check4 )
2009-02-11 03:37:09 +00:00
d . addCallback ( self . stall , 1.1 )
2009-02-11 01:49:10 +00:00
d . addCallback ( lambda res : do_backup ( ) )
2009-02-06 05:07:01 +00:00
def _check4a ( ( rc , out , err ) ) :
# second backup should reuse everything, if the backupdb is
# available
2009-02-11 01:49:10 +00:00
self . failUnlessEqual ( err , " " )
2009-02-06 05:07:01 +00:00
self . failUnlessEqual ( rc , 0 )
if have_bdb :
2010-01-20 09:42:49 +00:00
fu , fr , fs , dc , dr , ds = self . count_output ( out )
2009-02-06 05:07:01 +00:00
# foo.txt, bar.txt, blah.txt
self . failUnlessEqual ( fu , 0 )
self . failUnlessEqual ( fr , 3 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( fs , 0 )
2009-02-06 05:07:01 +00:00
# empty, home, home/parent, home/parent/subdir
self . failUnlessEqual ( dc , 0 )
self . failUnlessEqual ( dr , 4 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( ds , 0 )
2009-02-06 05:07:01 +00:00
d . addCallback ( _check4a )
if have_bdb :
# sneak into the backupdb, crank back the "last checked"
# timestamp to force a check on all files
def _reset_last_checked ( res ) :
2009-02-17 00:20:05 +00:00
dbfile = os . path . join ( self . get_clientdir ( ) ,
" private " , " backupdb.sqlite " )
2009-02-06 05:07:01 +00:00
self . failUnless ( os . path . exists ( dbfile ) , dbfile )
bdb = backupdb . get_backupdb ( dbfile )
bdb . cursor . execute ( " UPDATE last_upload SET last_checked=0 " )
2009-11-26 23:42:57 +00:00
bdb . cursor . execute ( " UPDATE directories SET last_checked=0 " )
2009-02-06 05:07:01 +00:00
bdb . connection . commit ( )
d . addCallback ( _reset_last_checked )
2009-02-11 03:37:09 +00:00
d . addCallback ( self . stall , 1.1 )
2009-02-11 01:49:10 +00:00
d . addCallback ( lambda res : do_backup ( verbose = True ) )
2009-02-06 05:07:01 +00:00
def _check4b ( ( rc , out , err ) ) :
# we should check all files, and re-use all of them. None of
2009-11-26 23:42:57 +00:00
# the directories should have been changed, so we should
# re-use all of them too.
2009-02-11 01:49:10 +00:00
self . failUnlessEqual ( err , " " )
2009-02-06 05:07:01 +00:00
self . failUnlessEqual ( rc , 0 )
2010-01-20 09:42:49 +00:00
fu , fr , fs , dc , dr , ds = self . count_output ( out )
2009-11-26 23:42:57 +00:00
fchecked , dchecked = self . count_output2 ( out )
2009-02-06 05:07:01 +00:00
self . failUnlessEqual ( fchecked , 3 )
self . failUnlessEqual ( fu , 0 )
self . failUnlessEqual ( fr , 3 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( fs , 0 )
2009-11-26 23:42:57 +00:00
self . failUnlessEqual ( dchecked , 4 )
2009-02-06 05:07:01 +00:00
self . failUnlessEqual ( dc , 0 )
self . failUnlessEqual ( dr , 4 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( ds , 0 )
2009-02-06 05:07:01 +00:00
d . addCallback ( _check4b )
2009-02-03 04:09:02 +00:00
d . addCallback ( lambda res : self . do_cli ( " ls " , " tahoe:backups/Archives " ) )
def _check5 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . new_archives = out . split ( )
2009-02-06 05:10:42 +00:00
expected_new = 2
if have_bdb :
expected_new + = 1
2009-02-11 03:37:09 +00:00
self . failUnlessEqual ( len ( self . new_archives ) , expected_new , out )
2009-02-06 05:07:01 +00:00
# the original backup should still be the oldest (i.e. sorts
# alphabetically towards the beginning)
2009-02-03 04:09:02 +00:00
self . failUnlessEqual ( sorted ( self . new_archives ) [ 0 ] ,
self . old_archives [ 0 ] )
d . addCallback ( _check5 )
2009-02-11 03:37:09 +00:00
d . addCallback ( self . stall , 1.1 )
2009-02-03 04:09:02 +00:00
def _modify ( res ) :
self . writeto ( " parent/subdir/foo.txt " , " FOOF! " )
# and turn a file into a directory
os . unlink ( os . path . join ( source , " parent/blah.txt " ) )
os . mkdir ( os . path . join ( source , " parent/blah.txt " ) )
self . writeto ( " parent/blah.txt/surprise file " , " surprise " )
self . writeto ( " parent/blah.txt/surprisedir/subfile " , " surprise " )
# turn a directory into a file
os . rmdir ( os . path . join ( source , " empty " ) )
self . writeto ( " empty " , " imagine nothing being here " )
2009-02-11 01:49:10 +00:00
return do_backup ( )
2009-02-03 04:09:02 +00:00
d . addCallback ( _modify )
2009-02-06 05:07:01 +00:00
def _check5a ( ( rc , out , err ) ) :
# second backup should reuse bar.txt (if backupdb is available),
# and upload the rest. None of the directories can be reused.
2009-02-11 01:49:10 +00:00
self . failUnlessEqual ( err , " " )
2009-02-06 05:07:01 +00:00
self . failUnlessEqual ( rc , 0 )
if have_bdb :
2010-01-20 09:42:49 +00:00
fu , fr , fs , dc , dr , ds = self . count_output ( out )
2009-02-06 05:07:01 +00:00
# new foo.txt, surprise file, subfile, empty
self . failUnlessEqual ( fu , 4 )
# old bar.txt
self . failUnlessEqual ( fr , 1 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( fs , 0 )
2009-02-06 05:07:01 +00:00
# home, parent, subdir, blah.txt, surprisedir
self . failUnlessEqual ( dc , 5 )
self . failUnlessEqual ( dr , 0 )
2010-01-20 09:42:49 +00:00
self . failUnlessEqual ( ds , 0 )
2009-02-06 05:07:01 +00:00
d . addCallback ( _check5a )
2009-02-03 04:09:02 +00:00
d . addCallback ( lambda res : self . do_cli ( " ls " , " tahoe:backups/Archives " ) )
def _check6 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . new_archives = out . split ( )
2009-02-06 05:10:42 +00:00
expected_new = 3
if have_bdb :
expected_new + = 1
self . failUnlessEqual ( len ( self . new_archives ) , expected_new )
2009-02-03 04:09:02 +00:00
self . failUnlessEqual ( sorted ( self . new_archives ) [ 0 ] ,
self . old_archives [ 0 ] )
d . addCallback ( _check6 )
d . addCallback ( lambda res : self . do_cli ( " get " , " tahoe:backups/Latest/parent/subdir/foo.txt " ) )
def _check7 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( out , " FOOF! " )
# the old snapshot should not be modified
return self . do_cli ( " get " , " tahoe:backups/Archives/ %s /parent/subdir/foo.txt " % self . old_archives [ 0 ] )
d . addCallback ( _check7 )
def _check8 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
self . failUnlessEqual ( out , " foo " )
d . addCallback ( _check8 )
return d
2009-02-06 09:17:53 +00:00
# on our old dapper buildslave, this test takes a long time (usually
# 130s), so we have to bump up the default 120s timeout. The create-alias
# and initial backup alone take 60s, probably because of the handful of
# dirnodes being created (RSA key generation). The backup between check4
# and check4a takes 6s, as does the backup before check4b.
2009-06-09 05:28:01 +00:00
test_backup . timeout = 3000
2009-02-18 00:15:11 +00:00
2009-02-22 17:51:06 +00:00
def test_exclude_options ( self ) :
root_listdir = ( ' lib.a ' , ' _darcs ' , ' subdir ' , ' nice_doc.lyx ' )
subdir_listdir = ( ' another_doc.lyx ' , ' run_snake_run.py ' , ' CVS ' , ' .svn ' , ' _darcs ' )
2009-03-07 10:04:28 +00:00
basedir = " cli/Backup/exclude_options "
fileutil . make_dirs ( basedir )
2009-02-23 01:35:02 +00:00
nodeurl_path = os . path . join ( basedir , ' node.url ' )
2010-02-06 01:38:55 +00:00
fileutil . write ( nodeurl_path , ' http://example.net:2357/ ' )
2009-02-22 17:51:06 +00:00
def _check_filtering ( filtered , all , included , excluded ) :
filtered = set ( filtered )
all = set ( all )
included = set ( included )
excluded = set ( excluded )
2009-02-22 23:42:14 +00:00
self . failUnlessEqual ( filtered , included )
self . failUnlessEqual ( all . difference ( filtered ) , excluded )
2009-02-22 17:51:06 +00:00
# test simple exclude
backup_options = cli . BackupOptions ( )
2009-02-23 01:35:02 +00:00
backup_options . parseOptions ( [ ' --exclude ' , ' *lyx ' , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 17:51:06 +00:00
filtered = list ( backup_options . filter_listdir ( root_listdir ) )
_check_filtering ( filtered , root_listdir , ( ' lib.a ' , ' _darcs ' , ' subdir ' ) ,
( ' nice_doc.lyx ' , ) )
# multiple exclude
backup_options = cli . BackupOptions ( )
2009-02-23 01:35:02 +00:00
backup_options . parseOptions ( [ ' --exclude ' , ' *lyx ' , ' --exclude ' , ' lib.? ' , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 17:51:06 +00:00
filtered = list ( backup_options . filter_listdir ( root_listdir ) )
_check_filtering ( filtered , root_listdir , ( ' _darcs ' , ' subdir ' ) ,
( ' nice_doc.lyx ' , ' lib.a ' ) )
# vcs metadata exclusion
backup_options = cli . BackupOptions ( )
2009-02-23 01:35:02 +00:00
backup_options . parseOptions ( [ ' --exclude-vcs ' , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 17:51:06 +00:00
filtered = list ( backup_options . filter_listdir ( subdir_listdir ) )
_check_filtering ( filtered , subdir_listdir , ( ' another_doc.lyx ' , ' run_snake_run.py ' , ) ,
( ' CVS ' , ' .svn ' , ' _darcs ' ) )
# read exclude patterns from file
exclusion_string = " _darcs \n *py \n .svn "
excl_filepath = os . path . join ( basedir , ' exclusion ' )
2010-02-06 01:38:55 +00:00
fileutil . write ( excl_filepath , exclusion_string )
2009-02-22 17:51:06 +00:00
backup_options = cli . BackupOptions ( )
2009-02-23 01:35:02 +00:00
backup_options . parseOptions ( [ ' --exclude-from ' , excl_filepath , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 17:51:06 +00:00
filtered = list ( backup_options . filter_listdir ( subdir_listdir ) )
_check_filtering ( filtered , subdir_listdir , ( ' another_doc.lyx ' , ' CVS ' ) ,
( ' .svn ' , ' _darcs ' , ' run_snake_run.py ' ) )
# text BackupConfigurationError
self . failUnlessRaises ( cli . BackupConfigurationError ,
backup_options . parseOptions ,
2009-02-23 01:35:02 +00:00
[ ' --exclude-from ' , excl_filepath + ' .no ' , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 17:51:06 +00:00
2009-02-22 23:43:56 +00:00
# test that an iterator works too
backup_options = cli . BackupOptions ( )
2009-02-23 01:35:02 +00:00
backup_options . parseOptions ( [ ' --exclude ' , ' *lyx ' , ' --node-directory ' ,
basedir , ' from ' , ' to ' ] )
2009-02-22 23:43:56 +00:00
filtered = list ( backup_options . filter_listdir ( iter ( root_listdir ) ) )
_check_filtering ( filtered , root_listdir , ( ' lib.a ' , ' _darcs ' , ' subdir ' ) ,
( ' nice_doc.lyx ' , ) )
2009-02-22 17:51:06 +00:00
2010-01-20 09:42:49 +00:00
def test_ignore_symlinks ( self ) :
if not hasattr ( os , ' symlink ' ) :
raise unittest . SkipTest ( " There is no symlink on this platform. " )
self . basedir = os . path . dirname ( self . mktemp ( ) )
self . set_up_grid ( )
source = os . path . join ( self . basedir , " home " )
self . writeto ( " foo.txt " , " foo " )
os . symlink ( os . path . join ( source , " foo.txt " ) , os . path . join ( source , " foo2.txt " ) )
d = self . do_cli ( " create-alias " , " tahoe " )
d . addCallback ( lambda res : self . do_cli ( " backup " , " --verbose " , source , " tahoe:test " ) )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 2 )
2010-01-27 22:35:17 +00:00
foo2 = os . path . join ( source , " foo2.txt " )
self . failUnlessEqual ( err , " WARNING: cannot backup symlink %s \n " % foo2 )
2010-01-20 09:42:49 +00:00
fu , fr , fs , dc , dr , ds = self . count_output ( out )
# foo.txt
self . failUnlessEqual ( fu , 1 )
self . failUnlessEqual ( fr , 0 )
# foo2.txt
self . failUnlessEqual ( fs , 1 )
# home
self . failUnlessEqual ( dc , 1 )
self . failUnlessEqual ( dr , 0 )
self . failUnlessEqual ( ds , 0 )
d . addCallback ( _check )
return d
def test_ignore_unreadable_file ( self ) :
self . basedir = os . path . dirname ( self . mktemp ( ) )
self . set_up_grid ( )
source = os . path . join ( self . basedir , " home " )
self . writeto ( " foo.txt " , " foo " )
os . chmod ( os . path . join ( source , " foo.txt " ) , 0000 )
d = self . do_cli ( " create-alias " , " tahoe " )
d . addCallback ( lambda res : self . do_cli ( " backup " , source , " tahoe:test " ) )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 2 )
self . failUnlessEqual ( err , " WARNING: permission denied on file %s \n " % os . path . join ( source , " foo.txt " ) )
fu , fr , fs , dc , dr , ds = self . count_output ( out )
self . failUnlessEqual ( fu , 0 )
self . failUnlessEqual ( fr , 0 )
# foo.txt
self . failUnlessEqual ( fs , 1 )
# home
self . failUnlessEqual ( dc , 1 )
self . failUnlessEqual ( dr , 0 )
self . failUnlessEqual ( ds , 0 )
d . addCallback ( _check )
# This is necessary for the temp files to be correctly removed
def _cleanup ( self ) :
os . chmod ( os . path . join ( source , " foo.txt " ) , 0644 )
d . addCallback ( _cleanup )
d . addErrback ( _cleanup )
return d
def test_ignore_unreadable_directory ( self ) :
self . basedir = os . path . dirname ( self . mktemp ( ) )
self . set_up_grid ( )
source = os . path . join ( self . basedir , " home " )
os . mkdir ( source )
os . mkdir ( os . path . join ( source , " test " ) )
os . chmod ( os . path . join ( source , " test " ) , 0000 )
d = self . do_cli ( " create-alias " , " tahoe " )
d . addCallback ( lambda res : self . do_cli ( " backup " , source , " tahoe:test " ) )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 2 )
self . failUnlessEqual ( err , " WARNING: permission denied on directory %s \n " % os . path . join ( source , " test " ) )
fu , fr , fs , dc , dr , ds = self . count_output ( out )
self . failUnlessEqual ( fu , 0 )
self . failUnlessEqual ( fr , 0 )
self . failUnlessEqual ( fs , 0 )
# home, test
self . failUnlessEqual ( dc , 2 )
self . failUnlessEqual ( dr , 0 )
# test
self . failUnlessEqual ( ds , 1 )
d . addCallback ( _check )
# This is necessary for the temp files to be correctly removed
def _cleanup ( self ) :
os . chmod ( os . path . join ( source , " test " ) , 0655 )
d . addCallback ( _cleanup )
d . addErrback ( _cleanup )
return d
2010-02-12 06:21:37 +00:00
def test_backup_without_alias ( self ) :
# 'tahoe backup' should output a sensible error message when invoked
# without an alias instead of a stack trace.
self . basedir = os . path . dirname ( self . mktemp ( ) )
self . set_up_grid ( )
source = os . path . join ( self . basedir , " file1 " )
d = self . do_cli ( ' backup ' , source , source )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
d . addCallback ( _check )
return d
def test_backup_with_nonexistent_alias ( self ) :
# 'tahoe backup' should output a sensible error message when invoked
# with a nonexistent alias.
self . basedir = os . path . dirname ( self . mktemp ( ) )
self . set_up_grid ( )
source = os . path . join ( self . basedir , " file1 " )
d = self . do_cli ( " backup " , source , " nonexistent: " + source )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
self . failUnlessIn ( " nonexistent " , err )
d . addCallback ( _check )
return d
2010-01-20 09:42:49 +00:00
2009-02-18 00:15:11 +00:00
class Check ( GridTestMixin , CLITestMixin , unittest . TestCase ) :
def test_check ( self ) :
self . basedir = " cli/Check/check "
self . set_up_grid ( )
c0 = self . g . clients [ 0 ]
DATA = " data " * 100
d = c0 . create_mutable_file ( DATA )
def _stash_uri ( n ) :
self . uri = n . get_uri ( )
d . addCallback ( _stash_uri )
d . addCallback ( lambda ign : self . do_cli ( " check " , self . uri ) )
def _check1 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " Summary: Healthy " in lines , out )
self . failUnless ( " good-shares: 10 (encoding is 3-of-10) " in lines , out )
d . addCallback ( _check1 )
d . addCallback ( lambda ign : self . do_cli ( " check " , " --raw " , self . uri ) )
def _check2 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
data = simplejson . loads ( out )
self . failUnlessEqual ( data [ " summary " ] , " Healthy " )
d . addCallback ( _check2 )
def _clobber_shares ( ignored ) :
# delete one, corrupt a second
shares = self . find_shares ( self . uri )
self . failUnlessEqual ( len ( shares ) , 10 )
os . unlink ( shares [ 0 ] [ 2 ] )
cso = debug . CorruptShareOptions ( )
cso . stdout = StringIO ( )
cso . parseOptions ( [ shares [ 1 ] [ 2 ] ] )
storage_index = uri . from_string ( self . uri ) . get_storage_index ( )
self . _corrupt_share_line = " server %s , SI %s , shnum %d " % \
( base32 . b2a ( shares [ 1 ] [ 1 ] ) ,
base32 . b2a ( storage_index ) ,
shares [ 1 ] [ 0 ] )
debug . corrupt_share ( cso )
d . addCallback ( _clobber_shares )
d . addCallback ( lambda ign : self . do_cli ( " check " , " --verify " , self . uri ) )
def _check3 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
summary = [ l for l in lines if l . startswith ( " Summary " ) ] [ 0 ]
self . failUnless ( " Summary: Unhealthy: 8 shares (enc 3-of-10) "
in summary , summary )
self . failUnless ( " good-shares: 8 (encoding is 3-of-10) " in lines , out )
self . failUnless ( " corrupt shares: " in lines , out )
self . failUnless ( self . _corrupt_share_line in lines , out )
d . addCallback ( _check3 )
d . addCallback ( lambda ign :
self . do_cli ( " check " , " --verify " , " --repair " , self . uri ) )
def _check4 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " Summary: not healthy " in lines , out )
self . failUnless ( " good-shares: 8 (encoding is 3-of-10) " in lines , out )
self . failUnless ( " corrupt shares: " in lines , out )
self . failUnless ( self . _corrupt_share_line in lines , out )
self . failUnless ( " repair successful " in lines , out )
d . addCallback ( _check4 )
d . addCallback ( lambda ign :
self . do_cli ( " check " , " --verify " , " --repair " , self . uri ) )
def _check5 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " Summary: healthy " in lines , out )
self . failUnless ( " good-shares: 10 (encoding is 3-of-10) " in lines , out )
self . failIf ( " corrupt shares: " in lines , out )
d . addCallback ( _check5 )
return d
def test_deep_check ( self ) :
self . basedir = " cli/Check/deep_check "
self . set_up_grid ( )
c0 = self . g . clients [ 0 ]
self . uris = { }
self . fileurls = { }
DATA = " data " * 100
2009-10-12 22:45:06 +00:00
d = c0 . create_dirnode ( )
2009-02-18 00:15:11 +00:00
def _stash_root_and_create_file ( n ) :
self . rootnode = n
self . rooturi = n . get_uri ( )
return n . add_file ( u " good " , upload . Data ( DATA , convergence = " " ) )
d . addCallback ( _stash_root_and_create_file )
def _stash_uri ( fn , which ) :
self . uris [ which ] = fn . get_uri ( )
2009-02-25 06:44:15 +00:00
return fn
2009-02-18 00:15:11 +00:00
d . addCallback ( _stash_uri , " good " )
d . addCallback ( lambda ign :
self . rootnode . add_file ( u " small " ,
upload . Data ( " literal " ,
convergence = " " ) ) )
d . addCallback ( _stash_uri , " small " )
d . addCallback ( lambda ign : c0 . create_mutable_file ( DATA + " 1 " ) )
d . addCallback ( lambda fn : self . rootnode . set_node ( u " mutable " , fn ) )
d . addCallback ( _stash_uri , " mutable " )
d . addCallback ( lambda ign : self . do_cli ( " deep-check " , self . rooturi ) )
def _check1 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " done: 4 objects checked, 4 healthy, 0 unhealthy "
in lines , out )
d . addCallback ( _check1 )
2009-02-25 06:44:15 +00:00
# root
# root/good
# root/small
# root/mutable
2009-02-18 00:15:11 +00:00
d . addCallback ( lambda ign : self . do_cli ( " deep-check " , " --verbose " ,
self . rooturi ) )
def _check2 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " <root>: Healthy " in lines , out )
self . failUnless ( " small: Healthy (LIT) " in lines , out )
self . failUnless ( " good: Healthy " in lines , out )
self . failUnless ( " mutable: Healthy " in lines , out )
self . failUnless ( " done: 4 objects checked, 4 healthy, 0 unhealthy "
in lines , out )
d . addCallback ( _check2 )
2009-07-15 07:51:09 +00:00
d . addCallback ( lambda ign : self . do_cli ( " stats " , self . rooturi ) )
def _check_stats ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnlessIn ( " count-immutable-files: 1 " , lines )
self . failUnlessIn ( " count-mutable-files: 1 " , lines )
self . failUnlessIn ( " count-literal-files: 1 " , lines )
self . failUnlessIn ( " count-directories: 1 " , lines )
self . failUnlessIn ( " size-immutable-files: 400 " , lines )
self . failUnlessIn ( " Size Histogram: " , lines )
self . failUnlessIn ( " 4-10 : 1 (10 B, 10 B) " , lines )
self . failUnlessIn ( " 317-1000 : 1 (1000 B, 1000 B) " , lines )
d . addCallback ( _check_stats )
2009-02-18 00:15:11 +00:00
def _clobber_shares ( ignored ) :
shares = self . find_shares ( self . uris [ " good " ] )
self . failUnlessEqual ( len ( shares ) , 10 )
os . unlink ( shares [ 0 ] [ 2 ] )
shares = self . find_shares ( self . uris [ " mutable " ] )
cso = debug . CorruptShareOptions ( )
cso . stdout = StringIO ( )
cso . parseOptions ( [ shares [ 1 ] [ 2 ] ] )
storage_index = uri . from_string ( self . uris [ " mutable " ] ) . get_storage_index ( )
self . _corrupt_share_line = " corrupt: server %s , SI %s , shnum %d " % \
( base32 . b2a ( shares [ 1 ] [ 1 ] ) ,
base32 . b2a ( storage_index ) ,
shares [ 1 ] [ 0 ] )
debug . corrupt_share ( cso )
d . addCallback ( _clobber_shares )
2009-02-25 06:44:15 +00:00
# root
# root/good [9 shares]
# root/small
# root/mutable [1 corrupt share]
2009-02-18 00:15:11 +00:00
d . addCallback ( lambda ign :
self . do_cli ( " deep-check " , " --verbose " , self . rooturi ) )
def _check3 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " <root>: Healthy " in lines , out )
self . failUnless ( " small: Healthy (LIT) " in lines , out )
self . failUnless ( " mutable: Healthy " in lines , out ) # needs verifier
self . failUnless ( " good: Not Healthy: 9 shares (enc 3-of-10) "
in lines , out )
self . failIf ( self . _corrupt_share_line in lines , out )
self . failUnless ( " done: 4 objects checked, 3 healthy, 1 unhealthy "
in lines , out )
d . addCallback ( _check3 )
d . addCallback ( lambda ign :
self . do_cli ( " deep-check " , " --verbose " , " --verify " ,
self . rooturi ) )
def _check4 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " <root>: Healthy " in lines , out )
self . failUnless ( " small: Healthy (LIT) " in lines , out )
mutable = [ l for l in lines if l . startswith ( " mutable " ) ] [ 0 ]
self . failUnless ( mutable . startswith ( " mutable: Unhealthy: 9 shares (enc 3-of-10) " ) ,
mutable )
self . failUnless ( self . _corrupt_share_line in lines , out )
self . failUnless ( " good: Not Healthy: 9 shares (enc 3-of-10) "
in lines , out )
self . failUnless ( " done: 4 objects checked, 2 healthy, 2 unhealthy "
in lines , out )
d . addCallback ( _check4 )
d . addCallback ( lambda ign :
self . do_cli ( " deep-check " , " --raw " ,
self . rooturi ) )
def _check5 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
units = [ simplejson . loads ( line ) for line in lines ]
# root, small, good, mutable, stats
self . failUnlessEqual ( len ( units ) , 4 + 1 )
d . addCallback ( _check5 )
d . addCallback ( lambda ign :
self . do_cli ( " deep-check " ,
" --verbose " , " --verify " , " --repair " ,
self . rooturi ) )
def _check6 ( ( rc , out , err ) ) :
self . failUnlessEqual ( err , " " )
self . failUnlessEqual ( rc , 0 )
lines = out . splitlines ( )
self . failUnless ( " <root>: healthy " in lines , out )
self . failUnless ( " small: healthy " in lines , out )
self . failUnless ( " mutable: not healthy " in lines , out )
self . failUnless ( self . _corrupt_share_line in lines , out )
self . failUnless ( " good: not healthy " in lines , out )
self . failUnless ( " done: 4 objects checked " in lines , out )
self . failUnless ( " pre-repair: 2 healthy, 2 unhealthy " in lines , out )
self . failUnless ( " 2 repairs attempted, 2 successful, 0 failed "
in lines , out )
self . failUnless ( " post-repair: 4 healthy, 0 unhealthy " in lines , out )
d . addCallback ( _check6 )
2009-02-25 06:44:15 +00:00
# now add a subdir, and a file below that, then make the subdir
# unrecoverable
2009-10-13 02:15:20 +00:00
d . addCallback ( lambda ign : self . rootnode . create_subdirectory ( u " subdir " ) )
2009-02-25 06:44:15 +00:00
d . addCallback ( _stash_uri , " subdir " )
d . addCallback ( lambda fn :
fn . add_file ( u " subfile " , upload . Data ( DATA + " 2 " , " " ) ) )
d . addCallback ( lambda ign :
self . delete_shares_numbered ( self . uris [ " subdir " ] ,
range ( 10 ) ) )
# root
# root/good
# root/small
# root/mutable
# root/subdir [unrecoverable: 0 shares]
# root/subfile
d . addCallback ( lambda ign : self . do_cli ( " manifest " , self . rooturi ) )
def _manifest_failed ( ( rc , out , err ) ) :
self . failIfEqual ( rc , 0 )
2009-02-25 08:46:21 +00:00
self . failUnlessIn ( " ERROR: UnrecoverableFileError " , err )
2009-02-25 06:44:15 +00:00
# the fatal directory should still show up, as the last line
self . failUnlessIn ( " subdir \n " , out )
d . addCallback ( _manifest_failed )
d . addCallback ( lambda ign : self . do_cli ( " deep-check " , self . rooturi ) )
def _deep_check_failed ( ( rc , out , err ) ) :
self . failIfEqual ( rc , 0 )
2009-02-25 08:46:21 +00:00
self . failUnlessIn ( " ERROR: UnrecoverableFileError " , err )
2009-02-25 06:44:15 +00:00
# we want to make sure that the error indication is the last
# thing that gets emitted
self . failIf ( " done: " in out , out )
d . addCallback ( _deep_check_failed )
# this test is disabled until the deep-repair response to an
# unrepairable directory is fixed. The failure-to-repair should not
# throw an exception, but the failure-to-traverse that follows
# should throw UnrecoverableFileError.
#d.addCallback(lambda ign:
# self.do_cli("deep-check", "--repair", self.rooturi))
#def _deep_check_repair_failed((rc, out, err)):
# self.failIfEqual(rc, 0)
# print err
2009-02-25 08:46:21 +00:00
# self.failUnlessIn("ERROR: UnrecoverableFileError", err)
2009-02-25 06:44:15 +00:00
# self.failIf("done:" in out, out)
#d.addCallback(_deep_check_repair_failed)
2009-02-18 00:15:11 +00:00
return d
2010-02-12 06:21:37 +00:00
def test_check_without_alias ( self ) :
# 'tahoe check' should output a sensible error message if it needs to
# find the default alias and can't
self . basedir = " cli/Check/check_without_alias "
self . set_up_grid ( )
d = self . do_cli ( " check " )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
d . addCallback ( _check )
d . addCallback ( lambda ign : self . do_cli ( " deep-check " ) )
d . addCallback ( _check )
return d
def test_check_with_nonexistent_alias ( self ) :
# 'tahoe check' should output a sensible error message if it needs to
# find an alias and can't.
self . basedir = " cli/Check/check_with_nonexistent_alias "
self . set_up_grid ( )
d = self . do_cli ( " check " , " nonexistent: " )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
self . failUnlessIn ( " nonexistent " , err )
d . addCallback ( _check )
return d
class Errors(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_get(self):
        # Upload an immutable file, then delete all but one of its shares
        # so it becomes unrecoverable. 'tahoe get' must then fail with a
        # "410 Gone"-style error, and when downloading to a local target
        # file, the failed download must not leave that file behind.
        self.basedir = "cli/Errors/get"
        self.set_up_grid()
        client = self.g.clients[0]
        self.fileurls = {}
        DATA = "data" * 100
        d = client.upload(upload.Data(DATA, convergence=""))

        def _break_file(ur):
            # keep only share #0; one share is not enough to reconstruct
            self.uri_1share = ur.uri
            self.delete_shares_numbered(ur.uri, range(1, 10))
        d.addCallback(_break_file)

        def _assert_gone(res):
            # shared assertions for both the stdout and to-file downloads
            (rc, out, err) = res
            self.failIfEqual(rc, 0)
            self.failUnless("410 Gone" in err, err)
            self.failUnlessIn("NotEnoughSharesError:", err)
            self.failUnlessIn("Failed to get enough shareholders: have 1, need 3", err)

        d.addCallback(lambda ign: self.do_cli("get", self.uri_1share))
        d.addCallback(_assert_gone)

        targetf = os.path.join(self.basedir, "output")
        d.addCallback(lambda ign: self.do_cli("get", self.uri_1share, targetf))
        def _assert_gone_and_no_file(res):
            _assert_gone(res)
            # the partial download must not leave an output file behind
            self.failIf(os.path.exists(targetf))
        d.addCallback(_assert_gone_and_no_file)

        return d
class Get(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_get_without_alias(self):
        # With no explicit alias and the default 'tahoe' alias not yet
        # created, 'tahoe get' should print a helpful error message.
        self.basedir = "cli/Get/get_without_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("get", "file")
        d.addCallback(_expect_error)
        return d

    def test_get_with_nonexistent_alias(self):
        # An explicit alias that does not exist should produce a useful
        # error message that names the missing alias.
        self.basedir = "cli/Get/get_with_nonexistent_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
            self.failUnlessIn("nonexistent", err)
        d = self.do_cli("get", "nonexistent:file")
        d.addCallback(_expect_error)
        return d
class Manifest(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_manifest_without_alias(self):
        # 'tahoe manifest' invoked before the default 'tahoe' alias exists
        # should exit nonzero with a readable error message.
        self.basedir = "cli/Manifest/manifest_without_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("manifest")
        d.addCallback(_expect_error)
        return d

    def test_manifest_with_nonexistent_alias(self):
        # 'tahoe manifest' with an alias that was never defined should
        # produce an error message naming the missing alias.
        self.basedir = "cli/Manifest/manifest_with_nonexistent_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
            self.failUnlessIn("nonexistent", err)
        d = self.do_cli("manifest", "nonexistent:")
        d.addCallback(_expect_error)
        return d
class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_mkdir_with_nonexistent_alias(self):
        # 'tahoe mkdir' given an alias that does not exist should emit a
        # sensible error message rather than a stack trace.
        self.basedir = "cli/Mkdir/mkdir_with_nonexistent_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("mkdir", "havasu:")
        d.addCallback(_expect_error)
        return d
class Rm ( GridTestMixin , CLITestMixin , unittest . TestCase ) :
def test_rm_without_alias ( self ) :
# 'tahoe rm' should behave sensibly when invoked without an explicit
# alias before the default 'tahoe' alias has been created.
self . basedir = " cli/Rm/rm_without_alias "
self . set_up_grid ( )
d = self . do_cli ( " rm " , " afile " )
def _check ( ( rc , out , err ) ) :
self . failUnlessIn ( " error: " , err )
self . failUnlessEqual ( rc , 1 )
d . addCallback ( _check )
return d
def test_rm_with_nonexistent_alias ( self ) :
# 'tahoe rm' should behave sensibly when invoked with an explicit
# alias that doesn't exist.
self . basedir = " cli/Rm/rm_with_nonexistent_alias "
self . set_up_grid ( )
d = self . do_cli ( " rm " , " nonexistent:afile " )
def _check ( ( rc , out , err ) ) :
self . failUnlessEqual ( rc , 1 )
self . failUnlessIn ( " error: " , err )
self . failUnlessIn ( " nonexistent " , err )
d . addCallback ( _check )
return d
class Stats(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_empty_directory(self):
        # 'tahoe stats' on a freshly-created empty directory should report
        # zero files of every kind, exactly one directory, and should omit
        # the size histogram entirely.
        self.basedir = "cli/Stats/empty_directory"
        self.set_up_grid()
        c0 = self.g.clients[0]
        self.fileurls = {}
        d = c0.create_dirnode()
        def _stash_root(n):
            self.rootnode = n
            self.rooturi = n.get_uri()
        d.addCallback(_stash_root)
        # make sure we can get stats on an empty directory too
        d.addCallback(lambda ign: self.do_cli("stats", self.rooturi))
        def _check_stats(res):
            (rc, out, err) = res
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnlessIn("count-immutable-files: 0", lines)
            self.failUnlessIn("count-mutable-files: 0", lines)
            self.failUnlessIn("count-literal-files: 0", lines)
            self.failUnlessIn("count-directories: 1", lines)
            self.failUnlessIn("size-immutable-files: 0", lines)
            self.failIfIn("Size Histogram:", lines)
        d.addCallback(_check_stats)
        return d

    def test_stats_without_alias(self):
        # With no explicit alias and no default 'tahoe' alias created yet,
        # 'tahoe stats' should print an informative error message, not a
        # stack trace.
        self.basedir = "cli/Stats/stats_without_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("stats")
        d.addCallback(_expect_error)
        return d

    def test_stats_with_nonexistent_alias(self):
        # 'tahoe stats' with an explicit alias that does not exist should
        # produce a useful error message.
        self.basedir = "cli/Stats/stats_with_nonexistent_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("stats", "havasu:")
        d.addCallback(_expect_error)
        return d
class Webopen(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_webopen_with_nonexistent_alias(self):
        # 'tahoe webopen' given an alias that does not exist should emit an
        # informative error message instead of a stack trace.
        self.basedir = "cli/Webopen/webopen_with_nonexistent_alias"
        self.set_up_grid()
        def _expect_error(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 1)
            self.failUnlessIn("error:", err)
        d = self.do_cli("webopen", "fake:")
        d.addCallback(_expect_error)
        return d