import os, sys

from twisted.trial import unittest
from twisted.internet import defer, task

from allmydata.interfaces import IDirectoryNode
from allmydata.util.assertutil import precondition
from allmydata.util import fake_inotify, fileutil
from allmydata.util.deferredutil import DeferredListShouldSucceed
from allmydata.util.encodingutil import get_filesystem_encoding, to_filepath
from allmydata.util.consumer import download_to_data
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
from allmydata.test.common import ShouldFailMixin
from .test_cli_magic_folder import MagicFolderCLITestMixin
from allmydata.frontends import magic_folder
from allmydata.frontends.magic_folder import MagicFolder, Downloader, WriteFileMixin
from allmydata import magicfolderdb, magicpath
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.immutable.upload import Data
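
# These tests use Twisted's trial runner.  In a typical Tahoe-LAFS checkout
# they can be run with something like:
#
#     trial allmydata.test.test_magic_folder
#
# (the exact module path and invocation may differ depending on how the tree
# is laid out and packaged).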


class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqualMixin, NonASCIIPathMixin):
    """
    These tests will be run both with a mock notifier, and (on platforms that support it)
    with the real INotify.
    """

    def setUp(self):
        GridTestMixin.setUp(self)
        temp = self.mktemp()
        self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding()))
        self.magicfolder = None
        self.patch(Downloader, 'REMOTE_SCAN_INTERVAL', 0)

    def _get_count(self, name, client=None):
        counters = (client or self.get_client()).stats_provider.get_stats()["counters"]
        return counters.get('magic_folder.%s' % (name,), 0)
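
    # For example, _get_count('uploader.objects_succeeded') reads the node's
    # 'magic_folder.uploader.objects_succeeded' stats counter, defaulting to 0
    # if it has not been incremented yet.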

    def _createdb(self):
        dbfile = abspath_expanduser_unicode(u"magicfolderdb.sqlite", base=self.basedir)
        mdb = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1))
        self.failUnless(mdb, "unable to create magicfolderdb from %r" % (dbfile,))
        self.failUnlessEqual(mdb.VERSION, 1)
        return mdb

    def _restart_client(self, ign):
        #print "_restart_client"
        d = self.restart_client()
        d.addCallback(self._wait_until_started)
        return d

    def _wait_until_started(self, ign):
        #print "_wait_until_started"
        self.magicfolder = self.get_client().getServiceNamed('magic-folder')
        return self.magicfolder.ready()

    def test_db_basic(self):
        fileutil.make_dirs(self.basedir)
        self._createdb()

    def test_db_persistence(self):
        """Test that a file upload creates an entry in the database."""

        fileutil.make_dirs(self.basedir)
        db = self._createdb()

        relpath1 = u"myFile1"
        pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
                                     exists=True, size=1, mtime=123, ctime=456)
        db.did_upload_version(relpath1, 0, 'URI:LIT:1', 'URI:LIT:0', 0, pathinfo)

        c = db.cursor
        c.execute("SELECT size, mtime, ctime"
                  " FROM local_files"
                  " WHERE path=?",
                  (relpath1,))
        row = c.fetchone()
        self.failUnlessEqual(row, (pathinfo.size, pathinfo.mtime, pathinfo.ctime))

        # Second test uses db.is_new_file instead of SQL query directly
        # to confirm the previous upload entry in the db.
        relpath2 = u"myFile2"
        path2 = os.path.join(self.basedir, relpath2)
        fileutil.write(path2, "meow\n")
        pathinfo = fileutil.get_pathinfo(path2)
        db.did_upload_version(relpath2, 0, 'URI:LIT:2', 'URI:LIT:1', 0, pathinfo)
        self.failUnlessFalse(db.is_new_file(pathinfo, relpath2))

        different_pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
                                               exists=True, size=0, mtime=pathinfo.mtime, ctime=pathinfo.ctime)
        self.failUnlessTrue(db.is_new_file(different_pathinfo, relpath2))
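
    # The next test watches the 'uploader.dirs_monitored' counter: it should
    # be 0 before the magic-folder service is joined and started, 1 while the
    # local directory is being watched, and back to 0 after cleanup stops the
    # service.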

    def test_magicfolder_start_service(self):
        self.set_up_grid()

        self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
                                                    base=self.basedir)
        self.mkdir_nonascii(self.local_dir)

        d = defer.succeed(None)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))

        d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
        d.addCallback(self._restart_client)

        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 1))
        d.addBoth(self.cleanup)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))
        return d
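
    # In the tests below, uploader.set_hook('processed') and
    # downloader.set_hook('processed') return Deferreds that fire once the
    # corresponding queue has finished processing an item; ignore_count=N
    # skips the first N firings.  This is how the tests wait deterministically
    # for a particular upload or download to complete.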

    def test_move_tree(self):
        self.set_up_grid()

        self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
                                                    base=self.basedir)
        self.mkdir_nonascii(self.local_dir)

        empty_tree_name = self.unicode_or_fallback(u"empty_tr\u00EAe", u"empty_tree")
        empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.basedir)
        new_empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.local_dir)

        small_tree_name = self.unicode_or_fallback(u"small_tr\u00EAe", u"small_tree")
        small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.basedir)
        new_small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.local_dir)

        d = self.create_invite_join_magic_folder(u"Alice", self.local_dir)
        d.addCallback(self._restart_client)

        def _check_move_empty_tree(res):
            print "_check_move_empty_tree"
            downloaded_d = self.magicfolder.downloader.set_hook('processed')
            uploaded_d = self.magicfolder.uploader.set_hook('processed')
            self.mkdir_nonascii(empty_tree_dir)
            os.rename(empty_tree_dir, new_empty_tree_dir)
            self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_TO)

            return DeferredListShouldSucceed([downloaded_d, uploaded_d])
        d.addCallback(_check_move_empty_tree)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 1))

        # FIXME check that Bob downloaded/created the empty tree.

        def _check_move_small_tree(res):
            print "_check_move_small_tree"
            downloaded_d = self.magicfolder.downloader.set_hook('processed', ignore_count=1)
            uploaded_d = self.magicfolder.uploader.set_hook('processed', ignore_count=1)
            self.mkdir_nonascii(small_tree_dir)
            fileutil.write(abspath_expanduser_unicode(u"what", base=small_tree_dir), "say when")
            os.rename(small_tree_dir, new_small_tree_dir)
            self.notify(to_filepath(new_small_tree_dir), self.inotify.IN_MOVED_TO)

            return DeferredListShouldSucceed([downloaded_d, uploaded_d])
        d.addCallback(_check_move_small_tree)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 3))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 1))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))

        def _check_moved_tree_is_watched(res):
            print "_check_moved_tree_is_watched"
            downloaded_d = self.magicfolder.downloader.set_hook('processed', ignore_count=1)
            uploaded_d = self.magicfolder.uploader.set_hook('processed')
            fileutil.write(abspath_expanduser_unicode(u"another", base=new_small_tree_dir), "file")
            self.notify(to_filepath(abspath_expanduser_unicode(u"another", base=new_small_tree_dir)), self.inotify.IN_CLOSE_WRITE)

            return DeferredListShouldSucceed([downloaded_d, uploaded_d])
        d.addCallback(_check_moved_tree_is_watched)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))

        # Files that are moved out of the upload directory should no longer be watched.
        #def _move_dir_away(ign):
        #    os.rename(new_empty_tree_dir, empty_tree_dir)
        #    # Wuh? Why don't we get this event for the real test?
        #    #self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_FROM)
        #d.addCallback(_move_dir_away)
        #def create_file(val):
        #    test_file = abspath_expanduser_unicode(u"what", base=empty_tree_dir)
        #    fileutil.write(test_file, "meow")
        #    #self.notify(...)
        #    return
        #d.addCallback(create_file)
        #d.addCallback(lambda ign: time.sleep(1))  # XXX ICK
        #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
        #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
        #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))

        d.addBoth(self.cleanup)
        return d

    def test_persistence(self):
        """
        Perform an upload of a given file and then stop the client.
        Start a new client and magic-folder service... and verify that the
        file is NOT uploaded a second time.  This test exercises the database
        persistence along with the startup and shutdown code paths of the
        magic-folder service.
        """
        self.set_up_grid()
        self.local_dir = abspath_expanduser_unicode(u"test_persistence", base=self.basedir)
        self.mkdir_nonascii(self.local_dir)
        self.collective_dircap = ""

        d = defer.succeed(None)
        d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
        d.addCallback(self._restart_client)

        def create_test_file(filename):
            d2 = self.magicfolder.uploader.set_hook('processed')
            test_file = abspath_expanduser_unicode(filename, base=self.local_dir)
            fileutil.write(test_file, "meow %s" % filename)
            self.notify(to_filepath(test_file), self.inotify.IN_CLOSE_WRITE)
            return d2
        d.addCallback(lambda ign: create_test_file(u"what1"))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addCallback(self.cleanup)

        d.addCallback(self._restart_client)
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addCallback(lambda ign: create_test_file(u"what2"))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 2))
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        d.addBoth(self.cleanup)
        return d

    @defer.inlineCallbacks
    def test_delete(self):
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, u"local_dir")
        self.mkdir_nonascii(self.local_dir)

        yield self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
        yield self._restart_client(None)

        try:
            # create a file
            up_proc = self.magicfolder.uploader.set_hook('processed')
            # down_proc = self.magicfolder.downloader.set_hook('processed')
            path = os.path.join(self.local_dir, u'foo')
            with open(path, 'w') as f:
                f.write('foo\n')
            self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
            yield up_proc
            self.assertTrue(os.path.exists(path))

            # the real test part: delete the file
            up_proc = self.magicfolder.uploader.set_hook('processed')
            os.unlink(path)
            self.notify(to_filepath(path), self.inotify.IN_DELETE)
            yield up_proc
            self.assertFalse(os.path.exists(path))

            # ensure we still have a DB entry, and that the version is 1
            node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
            self.assertTrue(node is not None, "Failed to find '{}' in DMD".format(path))
            self.failUnlessEqual(metadata['version'], 1)

        finally:
            yield self.cleanup(None)

    @defer.inlineCallbacks
    def test_delete_and_restore(self):
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, u"local_dir")
        self.mkdir_nonascii(self.local_dir)

        yield self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
        yield self._restart_client(None)

        try:
            # create a file
            up_proc = self.magicfolder.uploader.set_hook('processed')
            # down_proc = self.magicfolder.downloader.set_hook('processed')
            path = os.path.join(self.local_dir, u'foo')
            with open(path, 'w') as f:
                f.write('foo\n')
            self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
            yield up_proc
            self.assertTrue(os.path.exists(path))

            # delete the file
            up_proc = self.magicfolder.uploader.set_hook('processed')
            os.unlink(path)
            self.notify(to_filepath(path), self.inotify.IN_DELETE)
            yield up_proc
            self.assertFalse(os.path.exists(path))

            # ensure we still have a DB entry, and that the version is 1
            node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
            self.assertTrue(node is not None, "Failed to find '{}' in DMD".format(path))
            self.failUnlessEqual(metadata['version'], 1)

            # restore the file, with different contents
            up_proc = self.magicfolder.uploader.set_hook('processed')
            path = os.path.join(self.local_dir, u'foo')
            with open(path, 'w') as f:
                f.write('bar\n')
            self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
            yield up_proc

            # ensure we still have a DB entry, and that the version is 2
            node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
            self.assertTrue(node is not None, "Failed to find '{}' in DMD".format(path))
            self.failUnlessEqual(metadata['version'], 2)

        finally:
            yield self.cleanup(None)
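
    # The Alice/Bob tests below drive the uploader and downloader loops with
    # twisted.internet.task.Clock instances instead of the real reactor, so
    # "time" only passes when a test calls clock.advance(...); advance(0) is
    # enough to run work that is already scheduled.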

    @defer.inlineCallbacks
    def test_alice_delete_bob_restore(self):
        alice_clock = task.Clock()
        bob_clock = task.Clock()
        yield self.setup_alice_and_bob(alice_clock, bob_clock)
        alice_dir = self.alice_magicfolder.uploader._local_path_u
        bob_dir = self.bob_magicfolder.uploader._local_path_u
        alice_fname = os.path.join(alice_dir, 'blam')
        bob_fname = os.path.join(bob_dir, 'blam')

        try:
            # alice creates a file, bob downloads it
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')

            with open(alice_fname, 'wb') as f:
                f.write('contents0\n')
            self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc  # alice uploads

            bob_clock.advance(0)
            yield bob_proc  # bob downloads

            # check the state
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
                0
            )
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
                1
            )

            print("BOB DELETE")
            # now bob deletes it (bob should upload, alice download)
            bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
            alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
            os.unlink(bob_fname)
            self.notify(to_filepath(bob_fname), self.inotify.IN_DELETE, magic=self.bob_magicfolder)

            bob_clock.advance(0)
            yield bob_proc
            alice_clock.advance(0)
            yield alice_proc

            # check versions
            node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
            self.assertTrue(metadata['deleted'])
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)

            print("ALICE RESTORE")
            # now alice restores it (alice should upload, bob download)
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
            with open(alice_fname, 'wb') as f:
                f.write('new contents\n')
            self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc
            bob_clock.advance(0)
            yield bob_proc

            # check versions
            node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
            self.assertTrue('deleted' not in metadata or not metadata['deleted'])
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2)
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2)

        finally:
            # cleanup
            d0 = self.alice_magicfolder.finish()
            alice_clock.advance(0)
            yield d0

            d1 = self.bob_magicfolder.finish()
            bob_clock.advance(0)
            yield d1

    @defer.inlineCallbacks
    def test_alice_create_bob_update(self):
        alice_clock = task.Clock()
        bob_clock = task.Clock()
        yield self.setup_alice_and_bob(alice_clock, bob_clock)
        alice_dir = self.alice_magicfolder.uploader._local_path_u
        bob_dir = self.bob_magicfolder.uploader._local_path_u
        alice_fname = os.path.join(alice_dir, 'blam')
        bob_fname = os.path.join(bob_dir, 'blam')

        try:
            # alice creates a file, bob downloads it
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')

            with open(alice_fname, 'wb') as f:
                f.write('contents0\n')
            self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc  # alice uploads

            bob_clock.advance(0)
            yield bob_proc  # bob downloads

            # check the state
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
                0
            )
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
                1
            )

            # now bob updates it (bob should upload, alice download)
            bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
            alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
            with open(bob_fname, 'wb') as f:
                f.write('bob wuz here\n')
            self.notify(to_filepath(bob_fname), self.inotify.IN_CLOSE_WRITE, magic=self.bob_magicfolder)

            bob_clock.advance(0)
            yield bob_proc
            alice_clock.advance(0)
            yield alice_proc

            # check the state
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)

        finally:
            # cleanup
            d0 = self.alice_magicfolder.finish()
            alice_clock.advance(0)
            yield d0

            d1 = self.bob_magicfolder.finish()
            bob_clock.advance(0)
            yield d1

    @defer.inlineCallbacks
    def test_alice_delete_and_restore(self):
        alice_clock = task.Clock()
        bob_clock = task.Clock()
        yield self.setup_alice_and_bob(alice_clock, bob_clock)
        alice_dir = self.alice_magicfolder.uploader._local_path_u
        bob_dir = self.bob_magicfolder.uploader._local_path_u
        alice_fname = os.path.join(alice_dir, 'blam')
        bob_fname = os.path.join(bob_dir, 'blam')

        try:
            # alice creates a file, bob downloads it
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')

            with open(alice_fname, 'wb') as f:
                f.write('contents0\n')
            self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc  # alice uploads

            bob_clock.advance(0)
            yield bob_proc  # bob downloads

            # check the state
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
                0
            )
            yield self.failUnlessReallyEqual(
                self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
                1
            )
            self.failUnless(os.path.exists(bob_fname))

            # now alice deletes it (alice should upload, bob download)
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
            os.unlink(alice_fname)
            self.notify(to_filepath(alice_fname), self.inotify.IN_DELETE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc
            bob_clock.advance(0)
            yield bob_proc

            # check the state
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
            self.failIf(os.path.exists(bob_fname))

            # now alice restores the file (with new contents)
            alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
            bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
            with open(alice_fname, 'wb') as f:
                f.write('alice wuz here\n')
            self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)

            alice_clock.advance(0)
            yield alice_proc
            bob_clock.advance(0)
            yield bob_proc

            # check the state
            yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2)
            yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2)
            yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2)
            yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2)
            self.failUnless(os.path.exists(bob_fname))

        finally:
            # cleanup
            d0 = self.alice_magicfolder.finish()
            alice_clock.advance(0)
            yield d0

            d1 = self.bob_magicfolder.finish()
            bob_clock.advance(0)
            yield d1
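
    # In the next test, "short enough for a LIT file" refers to Tahoe's
    # literal (LIT) caps, which embed very small file contents directly in the
    # cap string rather than uploading shares.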

    def test_magic_folder(self):
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir"))
        self.mkdir_nonascii(self.local_dir)

        d = self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
        d.addCallback(self._restart_client)

        # Write something short enough for a LIT file.
        d.addCallback(lambda ign: self._check_file(u"short", "test"))

        # Write to the same file again with different data.
        d.addCallback(lambda ign: self._check_file(u"short", "different"))

        # Test that temporary files are not uploaded.
        d.addCallback(lambda ign: self._check_file(u"tempfile", "test", temporary=True))

        # Test creation of a subdirectory.
        d.addCallback(lambda ign: self._check_mkdir(u"directory"))

        # Write something longer, and also try to test a Unicode name if the fs can represent it.
        name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long")
        d.addCallback(lambda ign: self._check_file(name_u, "test"*100))

        # TODO: test that causes an upload failure.
        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))

        d.addBoth(self.cleanup)
        return d

    def _check_mkdir(self, name_u):
        return self._check_file(name_u + u"/", "", directory=True)

    def _check_file(self, name_u, data, temporary=False, directory=False):
        precondition(not (temporary and directory), temporary=temporary, directory=directory)

        previously_uploaded = self._get_count('uploader.objects_succeeded')
        previously_disappeared = self._get_count('uploader.objects_disappeared')

        d = self.magicfolder.uploader.set_hook('processed')

        path_u = abspath_expanduser_unicode(name_u, base=self.local_dir)
        path = to_filepath(path_u)

        if directory:
            os.mkdir(path_u)
            event_mask = self.inotify.IN_CREATE | self.inotify.IN_ISDIR
        else:
            # We don't use FilePath.setContent() here because it creates a temporary file that
            # is renamed into place, which causes events that the test is not expecting.
            f = open(path_u, "wb")
            try:
                if temporary and sys.platform != "win32":
                    os.unlink(path_u)
                f.write(data)
            finally:
                f.close()
            if temporary and sys.platform == "win32":
                os.unlink(path_u)
                self.notify(path, self.inotify.IN_DELETE)
            event_mask = self.inotify.IN_CLOSE_WRITE

        fileutil.flush_volume(path_u)
        self.notify(path, event_mask)
        encoded_name_u = magicpath.path2magic(name_u)

        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
        if temporary:
            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_disappeared'),
                                                                 previously_disappeared + 1))
        else:
            d.addCallback(lambda ign: self.upload_dirnode.get(encoded_name_u))
            d.addCallback(download_to_data)
            d.addCallback(lambda actual_data: self.failUnlessReallyEqual(actual_data, data))
            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'),
                                                                 previously_uploaded + 1))

        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
        return d

    def _check_version_in_dmd(self, magicfolder, relpath_u, expected_version):
        encoded_name_u = magicpath.path2magic(relpath_u)
        d = magicfolder.downloader._get_collective_latest_file(encoded_name_u)
        def check_latest(result):
            # Note: if the file is not present in the collective DMD at all,
            # this check silently passes; only a present-but-wrong version
            # fails the test.
            if result[0] is not None:
                node, metadata = result
                d.addCallback(lambda ign: self.failUnlessEqual(metadata['version'], expected_version))
        d.addCallback(check_latest)
        return d

    def _check_version_in_local_db(self, magicfolder, relpath_u, expected_version):
        version = magicfolder._db.get_local_file_version(relpath_u)
        #print "_check_version_in_local_db: %r has version %s" % (relpath_u, version)
        self.failUnlessEqual(version, expected_version)

    def _check_file_gone(self, magicfolder, relpath_u):
        path = os.path.join(magicfolder.uploader._local_path_u, relpath_u)
        self.assertTrue(not os.path.exists(path))

    def test_alice_bob(self):
        alice_clock = task.Clock()
        bob_clock = task.Clock()
        d = self.setup_alice_and_bob(alice_clock, bob_clock)

        def _check_uploader_count(ign, name, expected):
            self.failUnlessReallyEqual(self._get_count('uploader.'+name, client=self.alice_magicfolder._client),
                                       expected)
        def _check_downloader_count(ign, name, expected):
            self.failUnlessReallyEqual(self._get_count('downloader.'+name, client=self.bob_magicfolder._client),
                                       expected)

        def _wait_for_Bob(ign, downloaded_d):
            print "Now waiting for Bob to download\n"
            bob_clock.advance(0)
            return downloaded_d

        def _wait_for(ign, something_to_do):
            downloaded_d = self.bob_magicfolder.downloader.set_hook('processed')
            uploaded_d = self.alice_magicfolder.uploader.set_hook('processed')
            something_to_do()
            print "Waiting for Alice to upload\n"
            alice_clock.advance(0)
            uploaded_d.addCallback(_wait_for_Bob, downloaded_d)
            return uploaded_d

        def Alice_to_write_a_file():
            print "Alice writes a file\n"
            self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
            fileutil.write(self.file_path, "meow, meow meow. meow? meow meow! meow.")
            self.magicfolder = self.alice_magicfolder
            self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
        d.addCallback(_wait_for, Alice_to_write_a_file)

        d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 0))
        d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 0))
        d.addCallback(_check_uploader_count, 'objects_failed', 0)
        d.addCallback(_check_uploader_count, 'objects_succeeded', 1)
        d.addCallback(_check_uploader_count, 'files_uploaded', 1)
        d.addCallback(_check_uploader_count, 'objects_queued', 0)
        d.addCallback(_check_uploader_count, 'directories_created', 0)

        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 0))
        d.addCallback(_check_downloader_count, 'objects_failed', 0)
        d.addCallback(_check_downloader_count, 'objects_downloaded', 1)

        def Alice_to_delete_file():
            print "Alice deletes the file!\n"
            os.unlink(self.file_path)
            self.magicfolder = self.alice_magicfolder
            self.notify(to_filepath(self.file_path), self.inotify.IN_DELETE)
        d.addCallback(_wait_for, Alice_to_delete_file)

        d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 1))
        d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 1))
        d.addCallback(_check_uploader_count, 'objects_failed', 0)
        d.addCallback(_check_uploader_count, 'objects_succeeded', 2)

        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 1))
        d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 1))
        d.addCallback(lambda ign: self._check_file_gone(self.bob_magicfolder, u"file1"))
        d.addCallback(_check_downloader_count, 'objects_failed', 0)
        d.addCallback(_check_downloader_count, 'objects_downloaded', 2)

        def Alice_to_rewrite_file():
            print "Alice rewrites file\n"
            self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
            fileutil.write(self.file_path, "Alice suddenly sees the white rabbit running into the forest.")
            self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
        d.addCallback(_wait_for, Alice_to_rewrite_file)

        d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 2))
        d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 2))
        d.addCallback(_check_uploader_count, 'objects_failed', 0)
        d.addCallback(_check_uploader_count, 'objects_succeeded', 3)
        d.addCallback(_check_uploader_count, 'files_uploaded', 3)
        d.addCallback(_check_uploader_count, 'objects_queued', 0)
        d.addCallback(_check_uploader_count, 'directories_created', 0)

        d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 2))
        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 2))
        d.addCallback(_check_downloader_count, 'objects_failed', 0)
        d.addCallback(_check_downloader_count, 'objects_downloaded', 3)

        path_u = u"/tmp/magic_folder_test"
        encoded_path_u = magicpath.path2magic(u"/tmp/magic_folder_test")
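
        # The path above is deliberately absolute: Bob's downloader is
        # expected to refuse to write it anywhere on his filesystem (the
        # #2506 issue) and to count it as excluded instead of downloaded.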

        def Alice_tries_to_p0wn_Bob(ign):
            print "Alice tries to p0wn Bob\n"
            processed_d = self.bob_magicfolder.downloader.set_hook('processed')

            # upload a file that would provoke the security bug from #2506
            uploadable = Data("", self.alice_magicfolder._client.convergence)
            alice_dmd = self.alice_magicfolder.uploader._upload_dirnode

            d2 = alice_dmd.add_file(encoded_path_u, uploadable, metadata={"version": 0}, overwrite=True)
            d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u)))
            d2.addCallback(_wait_for_Bob, processed_d)
            return d2
        d.addCallback(Alice_tries_to_p0wn_Bob)

        d.addCallback(lambda ign: self.failIf(os.path.exists(path_u)))
        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, encoded_path_u, None))
        d.addCallback(_check_downloader_count, 'objects_excluded', 1)
        d.addCallback(_check_downloader_count, 'objects_downloaded', 3)

        def _cleanup(ign, magicfolder, clock):
            if magicfolder is not None:
                d2 = magicfolder.finish()
                clock.advance(0)
                return d2

        def cleanup_Alice_and_Bob(result):
            print "cleanup alice bob test\n"
            d = defer.succeed(None)
            d.addCallback(_cleanup, self.alice_magicfolder, alice_clock)
            d.addCallback(_cleanup, self.bob_magicfolder, bob_clock)
            d.addCallback(lambda ign: result)
            return d
        d.addBoth(cleanup_Alice_and_Bob)
        return d


class MockTest(MagicFolderTestMixin, unittest.TestCase):
    """This can run on any platform, and even if twisted.internet.inotify can't be imported."""

    def setUp(self):
        MagicFolderTestMixin.setUp(self)
        self.inotify = fake_inotify
        self.patch(magic_folder, 'get_inotify_module', lambda: self.inotify)

    def notify(self, path, mask, magic=None):
        if magic is None:
            magic = self.magicfolder
        magic.uploader._notifier.event(path, mask)
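
    # With the fake notifier, filesystem events are only delivered when a test
    # calls self.notify(...) explicitly; RealTest overrides notify() as a
    # no-op because real filesystem writes generate the events themselves.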

    def test_errors(self):
        self.set_up_grid()

        errors_dir = abspath_expanduser_unicode(u"errors_dir", base=self.basedir)
        os.mkdir(errors_dir)
        not_a_dir = abspath_expanduser_unicode(u"NOT_A_DIR", base=self.basedir)
        fileutil.write(not_a_dir, "")
        magicfolderdb = abspath_expanduser_unicode(u"magicfolderdb", base=self.basedir)
        doesnotexist = abspath_expanduser_unicode(u"doesnotexist", base=self.basedir)

        client = self.g.clients[0]
        d = client.create_dirnode()
        def _check_errors(n):
            self.failUnless(IDirectoryNode.providedBy(n))
            upload_dircap = n.get_uri()
            readonly_dircap = n.get_readonly_uri()

            self.shouldFail(AssertionError, 'nonexistent local.directory', 'there is no directory',
                            MagicFolder, client, upload_dircap, '', doesnotexist, magicfolderdb)
            self.shouldFail(AssertionError, 'non-directory local.directory', 'is not a directory',
                            MagicFolder, client, upload_dircap, '', not_a_dir, magicfolderdb)
            self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory',
                            MagicFolder, client, 'bad', '', errors_dir, magicfolderdb)
            self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory',
                            MagicFolder, client, 'URI:LIT:foo', '', errors_dir, magicfolderdb)
            self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory',
                            MagicFolder, client, readonly_dircap, '', errors_dir, magicfolderdb)
            self.shouldFail(AssertionError, 'collective dircap',
                            "The URI in 'private/collective_dircap' is not a readonly cap to a directory.",
                            MagicFolder, client, upload_dircap, upload_dircap, errors_dir, magicfolderdb)

            def _not_implemented():
                raise NotImplementedError("blah")
            self.patch(magic_folder, 'get_inotify_module', _not_implemented)
            self.shouldFail(NotImplementedError, 'unsupported', 'blah',
                            MagicFolder, client, upload_dircap, '', errors_dir, magicfolderdb)
        d.addCallback(_check_errors)
        return d
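
    # The next test exercises WriteFileMixin's on-disk conventions as asserted
    # below: the previous local contents end up in "<name>.backup", a
    # conflicted download is written to "<name>.conflict", and no "<name>.tmp"
    # file is left behind.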

    def test_write_downloaded_file(self):
        workdir = u"cli/MagicFolder/write-downloaded-file"
        local_file = fileutil.abspath_expanduser_unicode(os.path.join(workdir, "foobar"))

        class TestWriteFileMixin(WriteFileMixin):
            def _log(self, msg):
                pass

        writefile = TestWriteFileMixin()

        # create a file with name "foobar" with content "foo"
        # write downloaded file content "bar" into "foobar" with is_conflict = False
        fileutil.make_dirs(workdir)
        fileutil.write(local_file, "foo")

        # if is_conflict is False, then the .conflict file shouldn't exist.
        writefile._write_downloaded_file(local_file, "bar", False, None)
        conflicted_path = local_file + u".conflict"
        self.failIf(os.path.exists(conflicted_path))

        # At this point, the backup file should exist with content "foo"
        backup_path = local_file + u".backup"
        self.failUnless(os.path.exists(backup_path))
        self.failUnlessEqual(fileutil.read(backup_path), "foo")

        # .tmp file shouldn't exist
        self.failIf(os.path.exists(local_file + u".tmp"))

        # .. and the original file should have the new content
        self.failUnlessEqual(fileutil.read(local_file), "bar")

        # now a test for conflicted case
        writefile._write_downloaded_file(local_file, "bar", True, None)
        self.failUnless(os.path.exists(conflicted_path))

        # .tmp file shouldn't exist
        self.failIf(os.path.exists(local_file + u".tmp"))


class RealTest(MagicFolderTestMixin, unittest.TestCase):
    """This is skipped unless both Twisted and the platform support inotify."""

    def setUp(self):
        MagicFolderTestMixin.setUp(self)
        self.inotify = magic_folder.get_inotify_module()

    def notify(self, path, mask, **kw):
        # Writing to the filesystem causes the notification.
        pass


try:
    magic_folder.get_inotify_module()
except NotImplementedError:
    RealTest.skip = "Magic Folder support can only be tested for-real on an OS that supports inotify or equivalent."
|