From e5abd358ae5d65bdc2d229e5f6b9293c32c92c52 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Jan 2020 15:33:14 -0500 Subject: [PATCH 0001/1472] news fragment --- newsfragments/2239.removed | 1 + 1 file changed, 1 insertion(+) create mode 100644 newsfragments/2239.removed diff --git a/newsfragments/2239.removed b/newsfragments/2239.removed new file mode 100644 index 000000000..2f46ffe8e --- /dev/null +++ b/newsfragments/2239.removed @@ -0,0 +1 @@ +Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. From f6e7721dadd7043b5ea8f078596455c52a819e34 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Jan 2020 15:33:21 -0500 Subject: [PATCH 0002/1472] Remove tahoesvc and pkgresutil --- .../test/test_python2_regressions.py | 1 - src/allmydata/util/pkgresutil.py | 46 ----- src/allmydata/windows/tahoesvc.py | 177 ------------------ 3 files changed, 224 deletions(-) delete mode 100644 src/allmydata/util/pkgresutil.py delete mode 100644 src/allmydata/windows/tahoesvc.py diff --git a/src/allmydata/test/test_python2_regressions.py b/src/allmydata/test/test_python2_regressions.py index 989f5e883..565e3ad80 100644 --- a/src/allmydata/test/test_python2_regressions.py +++ b/src/allmydata/test/test_python2_regressions.py @@ -19,7 +19,6 @@ BLACKLIST = { "allmydata.watchdog.inotify", "allmydata.windows.inotify", "allmydata.windows.registry", - "allmydata.windows.tahoesvc", } diff --git a/src/allmydata/util/pkgresutil.py b/src/allmydata/util/pkgresutil.py deleted file mode 100644 index bb1d985c6..000000000 --- a/src/allmydata/util/pkgresutil.py +++ /dev/null @@ -1,46 +0,0 @@ - -def install(): - """ - This installs a hook into setuptools' pkg_resources infrastructure, so that resource - files can be found in files relative to the runnin executable, in addition to the - usual egg and source lookup mechanisms. This overrides the ZipProvider, since that - is the lookup mechanism triggered within pkg_resources when running code out of a - py2exe or py2app build's library.zip. 
- """ - import os, sys - import pkg_resources, zipimport - - platform_libdirs = { - 'darwin': '../Resources/pkg_resources', - } - exedir = os.path.dirname(sys.executable) - libdir = platform_libdirs.get(sys.platform, 'pkg_resources') - - class Provider(pkg_resources.ZipProvider): - - def __init__(self, module): - self._module_name = module.__name__ - pkg_resources.ZipProvider.__init__(self, module) - - def get_resource_filename(self, manager, resource_name): - #print 'get_resource_filename(%s, %s)' % (manager, resource_name) - path = [exedir, libdir] + self._module_name.split('.') + [resource_name] - localfile = os.path.join(*path) - #print ' checking(%s)' % (localfile,) - if os.path.exists(localfile): - #print 'found locally' - return localfile - else: - try: - ret = pkg_resources.ZipProvider.get_resource_filename(self, manager, resource_name) - #print 'returning %s' % (ret,) - return ret - except NotImplementedError: - #print 'get_resource_filename(%s,%s): not found' % (self._module_name, resource_name) - #import traceback - #traceback.print_exc() - return '' - - pkg_resources.register_loader_type(zipimport.zipimporter, Provider) - - diff --git a/src/allmydata/windows/tahoesvc.py b/src/allmydata/windows/tahoesvc.py deleted file mode 100644 index efaa439d5..000000000 --- a/src/allmydata/windows/tahoesvc.py +++ /dev/null @@ -1,177 +0,0 @@ -import sys -reload(sys) -sys.setdefaultencoding("utf-8") - -import win32serviceutil -import win32service -import win32event -import win32evtlogutil - -import os -import thread -import time -import traceback - -# this logging should go away once service startup is considered debugged. -logfilehandle = file('c:\\tahoe_service.log', 'ab+') -def logmsg(msg): - logfilehandle.write("%s: %s\r\n" % (time.strftime('%Y%m%d_%H%M%S'), msg)) - logfilehandle.flush() -logmsg('service loaded') - -# -# Now with some bootstrap util functions in place, let's try and init things: -try: - from allmydata.util import pkgresutil # override pkg_resources zip provider for py2exe deployment - pkgresutil.install() # this is done before nevow is imported - - logmsg('loading base dir') - from allmydata.windows import registry - basedir = registry.get_base_dir_path() - logmsg("got base dir (%s)" % (basedir,)) - if not basedir: - regpth = "%s : %s " % (registry._AMD_KEY, registry._BDIR_KEY) - raise RuntimeError('"%s" not set in registry' % (regpth,)) - os.chdir(basedir) - logmsg("chdir(%s)" % (basedir,)) -except: - logmsg("exception") - traceback.print_exc(None, logfilehandle) - logfilehandle.flush() - logfilehandle.close() - raise - -class Tahoe(win32serviceutil.ServiceFramework): - _svc_name_ = "Tahoe" - _svc_display_name_ = "Tahoe-LAFS Node" - def __init__(self, args): - logmsg("init") - try: - # The exe-file has messages for the Event Log Viewer. - # Register the exe-file as event source. - # - # Probably it would be better if this is done at installation time, - # so that it also could be removed if the service is uninstalled. - # Unfortunately it cannot be done in the 'if __name__ == "__main__"' - # block below, because the 'frozen' exe-file does not run this code. 
- # - logmsg("service start") - win32evtlogutil.AddSourceToRegistry(self._svc_display_name_, - sys.executable, - "Application") - win32serviceutil.ServiceFramework.__init__(self, args) - self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) - except: - try: - logmsg("exception") - traceback.print_exc(None, logfilehandle) - logfilehandle.flush() - logfilehandle.close() - except: - os.abort() - - def SvcStop(self): - logmsg("service stop") - self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) - win32event.SetEvent(self.hWaitStop) - - def SvcDoRun(self): - try: - logmsg("service run") - import servicemanager - # Write a 'started' event to the event log... - win32evtlogutil.ReportEvent(self._svc_display_name_, - servicemanager.PYS_SERVICE_STARTED, - 0, # category - servicemanager.EVENTLOG_INFORMATION_TYPE, - (self._svc_name_, '')) - - reactor_type = registry.get_registry_value('reactor') - if reactor_type == 'iocp': - from twisted.internet import iocpreactor - iocpreactor.install() - else: - from twisted.internet import selectreactor - selectreactor.install() - from twisted.internet import reactor - - if os.path.exists('DISABLE_STARTUP'): - logmsg("DISABLE_STARTUP exists: exiting") - else: - logmsg("runing reactorthread") - - # launch main thread... - thread.start_new_thread(self.launch_node, ()) - - # ...and block until service stop request - win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE) - - logmsg("wake up") - - reactor.callFromThread(reactor.stop) - - time.sleep(2) # give the node/reactor a chance to cleanup - - # and write a 'stopped' event to the event log. - win32evtlogutil.ReportEvent(self._svc_display_name_, - servicemanager.PYS_SERVICE_STOPPED, - 0, # category - servicemanager.EVENTLOG_INFORMATION_TYPE, - (self._svc_name_, '')) - except: - try: - logmsg("exception") - traceback.print_exc(None, logfilehandle) - logfilehandle.flush() - logfilehandle.close() - except: - os.abort() - - def launch_node(self): - try: - logmsg("main thread startup") - - # import dependencies so that py2exe finds them - # nevow requires all these for its voodoo module import time adaptor registrations - from nevow import accessors, appserver, static, rend, url, util, query, i18n, flat - from nevow import guard, stan, testutil, context - from nevow.flat import flatmdom, flatstan, twist - from formless import webform, processors, annotate, iformless - from decimal import Decimal - - import allmydata.web - - # junk to appease pyflakes's outrage at py2exe's needs - [ - accessors, appserver, static, rend, url, util, query, i18n, flat, guard, stan, testutil, - context, flatmdom, flatstan, twist, webform, processors, annotate, iformless, Decimal, - allmydata, - ] - - from twisted.internet import reactor - from twisted.python import log, logfile - from allmydata import client - - # set up twisted logging. this will become part of the node rsn. 
- logdir = os.path.join(basedir, 'logs') - if not os.path.exists(logdir): - os.makedirs(logdir) - lf = logfile.LogFile('tahoesvc.log', logdir) - log.startLogging(lf) - - # run the node itself - c = client.Client(basedir) - reactor.callLater(0, c.startService) # after reactor startup - reactor.run(installSignalHandlers=False) - - logmsg("main thread shutdown") - except: - logmsg("exception") - traceback.print_exc(None, logfilehandle) - logfilehandle.flush() - os.abort() - -if __name__ == '__main__': - logmsg("service main") - win32serviceutil.HandleCommandLine(Tahoe) - From b5805ff63349e5bbc9c64e5ed5a6996c4de95293 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 15 Jan 2020 15:45:47 -0500 Subject: [PATCH 0003/1472] news fragment --- newsfragments/3274.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3274.minor diff --git a/newsfragments/3274.minor b/newsfragments/3274.minor new file mode 100644 index 000000000..e69de29bb From 7c575c8add4f0060a3afcc03ff0f8b217f09a125 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 15 Jan 2020 15:46:07 -0500 Subject: [PATCH 0004/1472] skip the relevant Eliot test --- nix/eliot.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nix/eliot.nix b/nix/eliot.nix index f6d6b3061..04344ff79 100644 --- a/nix/eliot.nix +++ b/nix/eliot.nix @@ -14,6 +14,10 @@ buildPythonPackage rec { --replace "boltons >= 19.0.1" boltons # depends on eliot.prettyprint._main which we don't have here. rm eliot/tests/test_prettyprint.py + + # Fails intermittently. + substituteInPlace eliot/tests/test_validation.py \ + --replace "def test_omitLoggerFromActionType" "def xtest_omitLoggerFromActionType" ''; checkInputs = [ testtools pytest hypothesis ]; From d10a5be11d6eef0198436e471e7e4ef3b81a2c1f Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 4 Feb 2020 19:50:03 -0500 Subject: [PATCH 0005/1472] make bdist_wheel update the version first --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 8e653f7d1..de40c9e23 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,3 +4,4 @@ sdist = update_version sdist install = update_version install develop = update_version develop bdist_egg = update_version bdist_egg +bdist_wheel = update_version bdist_wheel From 5d9df98c0329c97487f8b4e2f7ca1304fcc2a833 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 4 Feb 2020 19:54:01 -0500 Subject: [PATCH 0006/1472] news fragment --- newsfragments/3281.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3281.minor diff --git a/newsfragments/3281.minor b/newsfragments/3281.minor new file mode 100644 index 000000000..e69de29bb From 461f42e7bc560e6abceff135728a1bcda1913c3c Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:14:25 -0500 Subject: [PATCH 0007/1472] news fragment --- newsfragments/3284.removed | 1 + 1 file changed, 1 insertion(+) create mode 100644 newsfragments/3284.removed diff --git a/newsfragments/3284.removed b/newsfragments/3284.removed new file mode 100644 index 000000000..7e31d352a --- /dev/null +++ b/newsfragments/3284.removed @@ -0,0 +1 @@ +The Magic Folder frontend has been split out into a stand-alone project. The functionality is no longer part of Tahoe-LAFS itself. Learn more at . 
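The ``[aliases]`` mechanism extended by patch 0005 deserves a brief illustration. setuptools expands each alias only once, so an entry such as ``bdist_wheel = update_version bdist_wheel`` can safely mention the command it shadows: the named custom command runs first, then the real build command. A minimal, hypothetical stand-in for the ``update_version`` command is sketched below (the real one in Tahoe's ``setup.py`` is more elaborate and derives the version from version control; the file path and version string here are placeholders)::

    from setuptools import Command, setup

    class UpdateVersion(Command):
        """Stand-in: regenerate the version file before a build."""
        description = "update the generated version file"
        user_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            # The real command derives the version from git; this
            # placeholder just writes a fixed string.
            with open("src/allmydata/_version.py", "w") as f:
                f.write('__version__ = "0.0.0+example"\n')

    setup(
        name="example",
        cmdclass={"update_version": UpdateVersion},
    )

With the ``[aliases]`` section shown in the patch, ``python setup.py bdist_wheel`` then behaves like ``python setup.py update_version bdist_wheel``, and the single-expansion rule prevents the self-referencing alias from looping.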
From 76d667865e8f10449a541b1427952670fc225c70 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:21:45 -0500 Subject: [PATCH 0008/1472] Remove some top-level packaging related to magic-folders --- Makefile | 6 ------ setup.py | 5 +---- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 63d9db980..6e0009ddf 100644 --- a/Makefile +++ b/Makefile @@ -42,12 +42,6 @@ upload-osx-pkg: # echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \ # fi -.PHONY: smoketest -smoketest: - -python ./src/allmydata/test/check_magicfolder_smoke.py kill - -rm -rf smoke_magicfolder/ - python ./src/allmydata/test/check_magicfolder_smoke.py - # code coverage-based testing is disabled temporarily, as we switch to tox. # This will eventually be added to a tox environment. The following comments # and variable settings are retained as notes for that future effort. diff --git a/setup.py b/setup.py index 72561a9d9..caa37f16e 100644 --- a/setup.py +++ b/setup.py @@ -62,9 +62,7 @@ install_requires = [ # version of cryptography will *really* be installed. "cryptography >= 2.6", - # * On Linux we need at least Twisted 10.1.0 for inotify support - # used by the drop-upload frontend. - # * We also need Twisted 10.1.0 for the FTP frontend in order for + # * We need Twisted 10.1.0 for the FTP frontend in order for # Twisted's FTP server to support asynchronous close. # * The SFTP frontend depends on Twisted 11.0.0 to fix the SSH server # rekeying bug @@ -354,7 +352,6 @@ setup(name="tahoe-lafs", # also set in __init__.py # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2392 for some # discussion. ':sys_platform=="win32"': ["pywin32 != 226"], - ':sys_platform!="win32" and sys_platform!="linux2"': ["watchdog"], # For magic-folder on "darwin" (macOS) and the BSDs "test": [ # Pin a specific pyflakes so we don't have different folks # disagreeing on what is or is not a lint issue. We can bump From e3a6d759627ad76bf28eaf84dbdbd8a1719f2302 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:23:27 -0500 Subject: [PATCH 0009/1472] Remove the obvious documentation references --- docs/configuration.rst | 7 - docs/frontends/magic-folder.rst | 148 --- docs/index.rst | 2 - docs/magic-folder-howto.rst | 176 ---- docs/magic-wormhole-invites.rst | 4 +- docs/proposed/index.rst | 4 - .../magic-folder/filesystem-integration.rst | 118 --- .../multi-party-conflict-detection.rst | 373 ------- .../magic-folder/remote-to-local-sync.rst | 951 ------------------ .../magic-folder/user-interface-design.rst | 205 ---- 10 files changed, 1 insertion(+), 1987 deletions(-) delete mode 100644 docs/frontends/magic-folder.rst delete mode 100644 docs/magic-folder-howto.rst delete mode 100644 docs/proposed/magic-folder/filesystem-integration.rst delete mode 100644 docs/proposed/magic-folder/multi-party-conflict-detection.rst delete mode 100644 docs/proposed/magic-folder/remote-to-local-sync.rst delete mode 100644 docs/proposed/magic-folder/user-interface-design.rst diff --git a/docs/configuration.rst b/docs/configuration.rst index 02a97d2b8..ab4751a04 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -82,7 +82,6 @@ Client/server nodes provide one or more of the following services: * web-API service * SFTP service * FTP service -* Magic Folder service * helper service * storage service. 
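For context on the ``setup.py`` hunk in patch 0008 above: the removed ``watchdog`` line used a PEP 508 environment marker as an ``extras_require`` key, a setuptools pattern in which a key beginning with ``:`` attaches its requirements to every installation on platforms matching the marker. A sketch of the pattern, reusing the two markers visible in that hunk (one removed, one retained)::

    from setuptools import setup

    setup(
        name="example",
        extras_require={
            # the line the patch removes: watchdog only on macOS/BSD
            # (platforms that are neither Windows nor Linux)
            ':sys_platform!="win32" and sys_platform!="linux2"': ["watchdog"],
            # the retained win32-only dependency uses the same mechanism
            ':sys_platform=="win32"': ["pywin32 != 226"],
        },
    )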
@@ -719,12 +718,6 @@ SFTP, FTP for instructions on configuring these services, and the ``[sftpd]`` and ``[ftpd]`` sections of ``tahoe.cfg``. -Magic Folder - - A node running on Linux or Windows can be configured to automatically - upload files that are created or changed in a specified local directory. - See :doc:`frontends/magic-folder` for details. - Storage Server Configuration ============================ diff --git a/docs/frontends/magic-folder.rst b/docs/frontends/magic-folder.rst deleted file mode 100644 index 694958563..000000000 --- a/docs/frontends/magic-folder.rst +++ /dev/null @@ -1,148 +0,0 @@ -.. -*- coding: utf-8-with-signature -*- - -================================ -Tahoe-LAFS Magic Folder Frontend -================================ - -1. `Introduction`_ -2. `Configuration`_ -3. `Known Issues and Limitations With Magic-Folder`_ - - -Introduction -============ - -The Magic Folder frontend synchronizes local directories on two or more -clients, using a Tahoe-LAFS grid for storage. Whenever a file is created -or changed under the local directory of one of the clients, the change is -propagated to the grid and then to the other clients. - -The implementation of the "drop-upload" frontend, on which Magic Folder is -based, was written as a prototype at the First International Tahoe-LAFS -Summit in June 2011. In 2015, with the support of a grant from the -`Open Technology Fund`_, it was redesigned and extended to support -synchronization between clients. It currently works on Linux and Windows. - -Magic Folder is not currently in as mature a state as the other frontends -(web, CLI, SFTP and FTP). This means that you probably should not rely on -all changes to files in the local directory to result in successful uploads. -There might be (and have been) incompatible changes to how the feature is -configured. - -We are very interested in feedback on how well this feature works for you, and -suggestions to improve its usability, functionality, and reliability. - -.. _`Open Technology Fund`: https://www.opentech.fund/ - - -Configuration -============= - -The Magic Folder frontend runs as part of a gateway node. To set it up, you -must use the tahoe magic-folder CLI. For detailed information see our -:doc:`Magic-Folder CLI design -documentation<../proposed/magic-folder/user-interface-design>`. For a -given Magic-Folder collective directory you need to run the ``tahoe -magic-folder create`` command. After that the ``tahoe magic-folder invite`` -command must used to generate an *invite code* for each member of the -magic-folder collective. A confidential, authenticated communications channel -should be used to transmit the invite code to each member, who will be -joining using the ``tahoe magic-folder join`` command. - -These settings are persisted in the ``[magic_folder]`` section of the -gateway's ``tahoe.cfg`` file. - -``[magic_folder]`` - -``enabled = (boolean, optional)`` - - If this is ``True``, Magic Folder will be enabled. The default value is - ``False``. - -``local.directory = (UTF-8 path)`` - - This specifies the local directory to be monitored for new or changed - files. If the path contains non-ASCII characters, it should be encoded - in UTF-8 regardless of the system's filesystem encoding. Relative paths - will be interpreted starting from the node's base directory. - -You should not normally need to set these fields manually because they are -set by the ``tahoe magic-folder create`` and/or ``tahoe magic-folder join`` -commands. 
Use the ``--help`` option to these commands for more information. - -After setting up a Magic Folder collective and starting or restarting each -gateway, you can confirm that the feature is working by copying a file into -any local directory, and checking that it appears on other clients. -Large files may take some time to appear. - -The 'Operational Statistics' page linked from the Welcome page shows counts -of the number of files uploaded, the number of change events currently -queued, and the number of failed uploads. The 'Recent Uploads and Downloads' -page and the node :doc:`log<../logging>` may be helpful to determine the -cause of any failures. - - -.. _Known Issues in Magic-Folder: - -Known Issues and Limitations With Magic-Folder -============================================== - -This feature only works on Linux and Windows. There is a ticket to add -support for Mac OS X and BSD-based systems (`#1432`_). - -The only way to determine whether uploads have failed is to look at the -'Operational Statistics' page linked from the Welcome page. This only shows -a count of failures, not the names of files. Uploads are never retried. - -The Magic Folder frontend performs its uploads sequentially (i.e. it waits -until each upload is finished before starting the next), even when there -would be enough memory and bandwidth to efficiently perform them in parallel. -A Magic Folder upload can occur in parallel with an upload by a different -frontend, though. (`#1459`_) - -On Linux, if there are a large number of near-simultaneous file creation or -change events (greater than the number specified in the file -``/proc/sys/fs/inotify/max_queued_events``), it is possible that some events -could be missed. This is fairly unlikely under normal circumstances, because -the default value of ``max_queued_events`` in most Linux distributions is -16384, and events are removed from this queue immediately without waiting for -the corresponding upload to complete. (`#1430`_) - -The Windows implementation might also occasionally miss file creation or -change events, due to limitations of the underlying Windows API -(ReadDirectoryChangesW). We do not know how likely or unlikely this is. -(`#1431`_) - -Some filesystems may not support the necessary change notifications. -So, it is recommended for the local directory to be on a directly attached -disk-based filesystem, not a network filesystem or one provided by a virtual -machine. - -The ``private/magic_folder_dircap`` and ``private/collective_dircap`` files -cannot use an alias or path to specify the upload directory. (`#1711`_) - -If a file in the upload directory is changed (actually relinked to a new -file), then the old file is still present on the grid, and any other caps -to it will remain valid. Eventually it will be possible to use -:doc:`../garbage-collection` to reclaim the space used by these files; however -currently they are retained indefinitely. (`#2440`_) - -Unicode filenames are supported on both Linux and Windows, but on Linux, the -local name of a file must be encoded correctly in order for it to be uploaded. -The expected encoding is that printed by -``python -c "import sys; print sys.getfilesystemencoding()"``. - -On Windows, local directories with non-ASCII names are not currently working. -(`#2219`_) - -On Windows, when a node has Magic Folder enabled, it is unresponsive to Ctrl-C -(it can only be killed using Task Manager or similar). (`#2218`_) - -.. _`#1430`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1430 -.. 
_`#1431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1431 -.. _`#1432`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1432 -.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459 -.. _`#1711`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1711 -.. _`#2218`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2218 -.. _`#2219`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2219 -.. _`#2440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2440 diff --git a/docs/index.rst b/docs/index.rst index 566e78d4f..98b4f241b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,7 +20,6 @@ Contents: frontends/CLI frontends/webapi frontends/FTP-and-SFTP - frontends/magic-folder frontends/download-status known_issues @@ -37,7 +36,6 @@ Contents: expenses cautions write_coordination - magic-folder-howto backupdb anonymity-configuration diff --git a/docs/magic-folder-howto.rst b/docs/magic-folder-howto.rst deleted file mode 100644 index b368972b8..000000000 --- a/docs/magic-folder-howto.rst +++ /dev/null @@ -1,176 +0,0 @@ -.. _magic-folder-howto: - -========================= -Magic Folder Set-up Howto -========================= - -#. `This document`_ -#. `Setting up a local test grid`_ -#. `Setting up Magic Folder`_ -#. `Testing`_ - - -This document -============= - -This is preliminary documentation of how to set up Magic Folder using a test -grid on a single Linux or Windows machine, with two clients and one server. -It is aimed at a fairly technical audience. - -For an introduction to Magic Folder and how to configure it -more generally, see :doc:`frontends/magic-folder`. - -It it possible to adapt these instructions to run the nodes on -different machines, to synchronize between three or more clients, -to mix Windows and Linux clients, and to use multiple servers -(if the Tahoe-LAFS encoding parameters are changed). - - -Setting up a local test grid -============================ - -Linux ------ - -Run these commands:: - - mkdir ../grid - bin/tahoe create-introducer ../grid/introducer - bin/tahoe start ../grid/introducer - export FURL=`cat ../grid/introducer/private/introducer.furl` - bin/tahoe create-node --introducer="$FURL" ../grid/server - bin/tahoe create-client --introducer="$FURL" ../grid/alice - bin/tahoe create-client --introducer="$FURL" ../grid/bob - - -Windows -------- - -Run:: - - mkdir ..\grid - bin\tahoe create-introducer ..\grid\introducer - bin\tahoe start ..\grid\introducer - -Leave the introducer running in that Command Prompt, -and in a separate Command Prompt (with the same current -directory), run:: - - set /p FURL=<..\grid\introducer\private\introducer.furl - bin\tahoe create-node --introducer=%FURL% ..\grid\server - bin\tahoe create-client --introducer=%FURL% ..\grid\alice - bin\tahoe create-client --introducer=%FURL% ..\grid\bob - - -Both Linux and Windows ----------------------- - -(Replace ``/`` with ``\`` for Windows paths.) - -Edit ``../grid/alice/tahoe.cfg``, and make the following -changes to the ``[node]`` and ``[client]`` sections:: - - [node] - nickname = alice - web.port = tcp:3457:interface=127.0.0.1 - - [client] - shares.needed = 1 - shares.happy = 1 - shares.total = 1 - -Edit ``../grid/bob/tahoe.cfg``, and make the following -change to the ``[node]`` section, and the same change as -above to the ``[client]`` section:: - - [node] - nickname = bob - web.port = tcp:3458:interface=127.0.0.1 - -Note that when running nodes on a single machine, -unique port numbers must be used for each node (and they -must not clash with ports used by other server software). 
-Here we have used the default of 3456 for the server, -3457 for alice, and 3458 for bob. - -Now start all of the nodes (the introducer should still be -running from above):: - - bin/tahoe start ../grid/server - bin/tahoe start ../grid/alice - bin/tahoe start ../grid/bob - -On Windows, a separate Command Prompt is needed to run each -node. - -Open a web browser on http://127.0.0.1:3457/ and verify that -alice is connected to the introducer and one storage server. -Then do the same for http://127.0.0.1:3568/ to verify that -bob is connected. Leave all of the nodes running for the -next stage. - - -Setting up Magic Folder -======================= - -Linux ------ - -Run:: - - mkdir -p ../local/alice ../local/bob - bin/tahoe -d ../grid/alice magic-folder create magic: alice ../local/alice - bin/tahoe -d ../grid/alice magic-folder invite magic: bob >invitecode - export INVITECODE=`cat invitecode` - bin/tahoe -d ../grid/bob magic-folder join "$INVITECODE" ../local/bob - - bin/tahoe restart ../grid/alice - bin/tahoe restart ../grid/bob - -Windows -------- - -Run:: - - mkdir ..\local\alice ..\local\bob - bin\tahoe -d ..\grid\alice magic-folder create magic: alice ..\local\alice - bin\tahoe -d ..\grid\alice magic-folder invite magic: bob >invitecode - set /p INVITECODE=`) invites and joins work this way -as well. +Introducer with new clients. This is a two-part process. Alice runs a grid and wishes to have her friend Bob use it as a client. She runs ``tahoe invite bob`` which diff --git a/docs/proposed/index.rst b/docs/proposed/index.rst index a052baeff..d01d92d2d 100644 --- a/docs/proposed/index.rst +++ b/docs/proposed/index.rst @@ -14,8 +14,4 @@ index only lists the files that are in .rst format. :maxdepth: 2 leasedb - magic-folder/filesystem-integration - magic-folder/remote-to-local-sync - magic-folder/user-interface-design - magic-folder/multi-party-conflict-detection http-storage-node-protocol diff --git a/docs/proposed/magic-folder/filesystem-integration.rst b/docs/proposed/magic-folder/filesystem-integration.rst deleted file mode 100644 index 589db2322..000000000 --- a/docs/proposed/magic-folder/filesystem-integration.rst +++ /dev/null @@ -1,118 +0,0 @@ -Magic Folder local filesystem integration design -================================================ - -*Scope* - -This document describes how to integrate the local filesystem with Magic -Folder in an efficient and reliable manner. For now we ignore Remote to -Local synchronization; the design and implementation of this is scheduled -for a later time. We also ignore multiple writers for the same Magic -Folder, which may or may not be supported in future. The design here will -be updated to account for those features in later Objectives. Objective 3 -may require modifying the database schema or operation, and Objective 5 -may modify the User interface. - -Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective2`_ -keyword are within the scope of the local filesystem integration for -Objective 2. - -.. _otf-magic-folder-objective2: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective2 - -.. _filesystem_integration-local-scanning-and-database: - -*Local scanning and database* - -When a Magic-Folder-enabled node starts up, it scans all directories -under the local directory and adds every file to a first-in first-out -"scan queue". 
When processing the scan queue, redundant uploads are -avoided by using the same mechanism the Tahoe backup command uses: we -keep track of previous uploads by recording each file's metadata such as -size, ``ctime`` and ``mtime``. This information is stored in a database, -referred to from now on as the magic folder db. Using this recorded -state, we ensure that when Magic Folder is subsequently started, the -local directory tree can be scanned quickly by comparing current -filesystem metadata with the previously recorded metadata. Each file -referenced in the scan queue is uploaded only if its metadata differs at -the time it is processed. If a change event is detected for a file that -is already queued (and therefore will be processed later), the redundant -event is ignored. - -To implement the magic folder db, we will use an SQLite schema that -initially is the existing Tahoe-LAFS backup schema. This schema may -change in later objectives; this will cause no backward compatibility -problems, because this new feature will be developed on a branch that -makes no compatibility guarantees. However we will have a separate SQLite -database file and separate mutex lock just for Magic Folder. This avoids -usability problems related to mutual exclusion. (If a single file and -lock were used, a backup would block Magic Folder updates for a long -time, and a user would not be able to tell when backups are possible -because Magic Folder would acquire a lock at arbitrary times.) - - -*Eventual consistency property* - -During the process of reading a file in order to upload it, it is not -possible to prevent further local writes. Such writes will result in -temporary inconsistency (that is, the uploaded file will not reflect -what the contents of the local file were at any specific time). Eventual -consistency is reached when the queue of pending uploads is empty. That -is, a consistent snapshot will be achieved eventually when local writes -to the target folder cease for a sufficiently long period of time. - - -*Detecting filesystem changes* - -For the Linux implementation, we will use the `inotify`_ Linux kernel -subsystem to gather events on the local Magic Folder directory tree. This -implementation was already present in Tahoe-LAFS 1.9.0, but needs to be -changed to gather directory creation and move events, in addition to the -events indicating that a file has been written that are gathered by the -current code. - -.. _`inotify`: https://en.wikipedia.org/wiki/Inotify - -For the Windows implementation, we will use the ``ReadDirectoryChangesW`` -Win32 API. The prototype implementation simulates a Python interface to -the inotify API in terms of ``ReadDirectoryChangesW``, allowing most of -the code to be shared across platforms. - -The alternative of using `NTFS Change Journals`_ for Windows was -considered, but appears to be more complicated and does not provide any -additional functionality over the scanning approach described above. -The Change Journal mechanism is also only available for NTFS filesystems, -but FAT32 filesystems are still common in user installations of Windows. - -.. _`NTFS Change Journals`: https://msdn.microsoft.com/en-us/library/aa363803%28VS.85%29.aspx - -When we detect the creation of a new directory below the local Magic -Folder directory, we create it in the Tahoe-LAFS filesystem, and also -scan the new local directory for new files. 
This scan is necessary to -avoid missing events for creation of files in a new directory before it -can be watched, and to correctly handle cases where an existing directory -is moved to be under the local Magic Folder directory. - - -*User interface* - -The Magic Folder local filesystem integration will initially have a -provisional configuration file-based interface that may not be ideal from -a usability perspective. Creating our local filesystem integration in -this manner will allow us to use and test it independently of the rest of -the Magic Folder software components. We will focus greater attention on -user interface design as a later milestone in our development roadmap. - -The configuration file, ``tahoe.cfg``, must define a target local -directory to be synchronized. Provisionally, this configuration will -replace the current ``[drop_upload]`` section:: - - [magic_folder] - enabled = true - local.directory = "/home/human" - -When a filesystem directory is first configured for Magic Folder, the user -needs to create the remote Tahoe-LAFS directory using ``tahoe mkdir``, -and configure the Magic-Folder-enabled node with its URI (e.g. by putting -it in a file ``private/magic_folder_dircap``). If there are existing -files in the local directory, they will be uploaded as a result of the -initial scan described earlier. - diff --git a/docs/proposed/magic-folder/multi-party-conflict-detection.rst b/docs/proposed/magic-folder/multi-party-conflict-detection.rst deleted file mode 100644 index fb1ae8339..000000000 --- a/docs/proposed/magic-folder/multi-party-conflict-detection.rst +++ /dev/null @@ -1,373 +0,0 @@ -Multi-party Conflict Detection -============================== - -The current Magic-Folder remote conflict detection design does not properly detect remote conflicts -for groups of three or more parties. This design is specified in the "Fire Dragon" section of this document: -https://github.com/tahoe-lafs/tahoe-lafs/blob/2551.wip.2/docs/proposed/magic-folder/remote-to-local-sync.rst#fire-dragons-distinguishing-conflicts-from-overwrites - -This Tahoe-LAFS trac ticket comment outlines a scenario with -three parties in which a remote conflict is falsely detected: - -.. _`ticket comment`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2551#comment:22 - - -Summary and definitions -======================= - -Abstract file: a file being shared by a Magic Folder. - -Local file: a file in a client's local filesystem corresponding to an abstract file. - -Relative path: the path of an abstract or local file relative to the Magic Folder root. - -Version: a snapshot of an abstract file, with associated metadata, that is uploaded by a Magic Folder client. - -A version is associated with the file's relative path, its contents, and -mtime and ctime timestamps. Versions also have a unique identity. - -Follows relation: -* If and only if a change to a client's local file at relative path F that results in an upload of version V', -was made when the client already had version V of that file, then we say that V' directly follows V. -* The follows relation is the irreflexive transitive closure of the "directly follows" relation. - -The follows relation is transitive and acyclic, and therefore defines a DAG called the -Version DAG. Different abstract files correspond to disconnected sets of nodes in the Version DAG -(in other words there are no "follows" relations between different files). - -The DAG is only ever extended, not mutated. 
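To make the follows relation concrete, here is a small Python sketch (illustrative only, not taken from the implementation; the ``Version`` class and ``follows`` function are invented for this example). Each version records which versions it directly follows, and V' follows V exactly when V is reachable from V' through one or more such edges::

    class Version(object):
        def __init__(self, name, parents=()):
            self.name = name
            self.parents = tuple(parents)  # versions this one directly follows

    def follows(v_new, v_old):
        """True iff v_new follows v_old: the irreflexive transitive
        closure of the "directly follows" relation."""
        seen = set()
        frontier = list(v_new.parents)
        while frontier:
            v = frontier.pop()
            if v is v_old:
                return True
            if v in seen:
                continue
            seen.add(v)
            frontier.extend(v.parents)
        return False

    # Concurrent edits of the same version V:
    V = Version("V")
    VA = Version("VA", [V])   # Alice's new version
    VB = Version("VB", [V])   # Bob's new version
    assert follows(VA, V) and follows(VB, V)            # each overwrites V
    assert not follows(VA, VB) and not follows(VB, VA)  # but they conflict

Two versions that both follow a common ancestor while neither follows the other — ``VA`` and ``VB`` above — are exactly the situation classified as a conflict in the behaviour specification that follows.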
- -The desired behaviour for initially classifying overwrites and conflicts is as follows: - -* if a client Bob currently has version V of a file at relative path F, and it sees a new version V' - of that file in another client Alice's DMD, such that V' follows V, then the write of the new version - is initially an overwrite and should be to the same filename. -* if, in the same situation, V' does not follow V, then the write of the new version should be - classified as a conflict. - -The existing :doc:`remote-to-local-sync` document defines when an initial -overwrite should be reclassified as a conflict. - -The above definitions completely specify the desired solution of the false -conflict behaviour described in the `ticket comment`_. However, they do not give -a concrete algorithm to compute the follows relation, or a representation in the -Tahoe-LAFS file store of the metadata needed to compute it. - -We will consider two alternative designs, proposed by Leif Ryge and -Zooko Wilcox-O'Hearn, that aim to fill this gap. - - - -Leif's Proposal: Magic-Folder "single-file" snapshot design -=========================================================== - -Abstract --------- - -We propose a relatively simple modification to the initial Magic Folder design which -adds merkle DAGs of immutable historical snapshots for each file. The full history -does not necessarily need to be retained, and the choice of how much history to retain -can potentially be made on a per-file basis. - -Motivation: ------------ - -no SPOFs, no admins -``````````````````` - -Additionally, the initial design had two cases of excess authority: - -1. The magic folder administrator (inviter) has everyone's write-caps and is thus essentially "root" -2. Each client shares ambient authority and can delete anything or everything and - (assuming there is not a conflict) the data will be deleted from all clients. So, each client - is effectively "root" too. - -Thus, while it is useful for file synchronization, the initial design is a much less safe place -to store data than in a single mutable tahoe directory (because more client computers have the -possibility to delete it). - - -Glossary --------- - -- merkle DAG: like a merkle tree but with multiple roots, and with each node potentially having multiple parents -- magic folder: a logical directory that can be synchronized between many clients - (devices, users, ...) using a Tahoe-LAFS storage grid -- client: a Magic-Folder-enabled Tahoe-LAFS client instance that has access to a magic folder -- DMD: "distributed mutable directory", a physical Tahoe-LAFS mutable directory. - Each client has the write cap to their own DMD, and read caps to all other client's DMDs - (as in the original Magic Folder design). -- snapshot: a reference to a version of a file; represented as an immutable directory containing - an entry called "content" (pointing to the immutable file containing the file's contents), - and an entry called "parent0" (pointing to a parent snapshot), and optionally parent1 through - parentN pointing at other parents. The Magic Folder snapshot object is conceptually very similar - to a git commit object, except for that it is created automatically and it records the history of an - individual file rather than an entire repository. Also, commits do not need to have authors - (although an author field could be easily added later). 
-- deletion snapshot: immutable directory containing no content entry (only one or more parents) -- capability: a Tahoe-LAFS diminishable cryptographic capability -- cap: short for capability -- conflict: the situation when another client's current snapshot for a file is different than our current snapshot, and is not a descendant of ours. -- overwrite: the situation when another client's current snapshot for a file is a (not necessarily direct) descendant of our current snapshot. - - -Overview --------- - -This new design will track the history of each file using "snapshots" which are -created at each upload. Each snapshot will specify one or more parent snapshots, -forming a directed acyclic graph. A Magic-Folder user's DMD uses a flattened directory -hierarchy naming scheme, as in the original design. But, instead of pointing directly -at file contents, each file name will link to that user's latest snapshot for that file. - -Inside the dmd there will also be an immutable directory containing the client's subscriptions -(read-caps to other clients' dmds). - -Clients periodically poll each other's DMDs. When they see the current snapshot for a file is -different than their own current snapshot for that file, they immediately begin downloading its -contents and then walk backwards through the DAG from the new snapshot until they find their own -snapshot or a common ancestor. - -For the common ancestor search to be efficient, the client will need to keep a local store (in the magic folder db) of all of the snapshots -(but not their contents) between the oldest current snapshot of any of their subscriptions and their own current snapshot. -See "local cache purging policy" below for more details. - -If the new snapshot is a descendant of the client's existing snapshot, then this update -is an "overwrite" - like a git fast-forward. So, when the download of the new file completes it can overwrite -the existing local file with the new contents and update its dmd to point at the new snapshot. - -If the new snapshot is not a descendant of the client's current snapshot, then the update is a -conflict. The new file is downloaded and named $filename.conflict-$user1,$user2 (including a list -of other subscriptions who have that version as their current version). - -Changes to the local .conflict- file are not tracked. When that file disappears -(either by deletion, or being renamed) a new snapshot for the conflicting file is -created which has two parents - the client's snapshot prior to the conflict, and the -new conflicting snapshot. If multiple .conflict files are deleted or renamed in a short -period of time, a single conflict-resolving snapshot with more than two parents can be created. - -! I think this behavior will confuse users. - -Tahoe-LAFS snapshot objects ---------------------------- - -These Tahoe-LAFS snapshot objects only track the history of a single file, not a directory hierarchy. -Snapshot objects contain only two field types: -- ``Content``: an immutable capability of the file contents (omitted if deletion snapshot) -- ``Parent0..N``: immutable capabilities representing parent snapshots - -Therefore in this system an interesting side effect of this Tahoe snapshot object is that there is no -snapshot author. The only notion of an identity in the Magic-Folder system is the write capability of the user's DMD. 
- -The snapshot object is an immutable directory which looks like this: -content -> immutable cap to file content -parent0 -> immutable cap to a parent snapshot object -parent1..N -> more parent snapshots - - -Snapshot Author Identity ------------------------- - -Snapshot identity might become an important feature so that bad actors -can be recognized and other clients can stop "subscribing" to (polling for) updates from them. - -Perhaps snapshots could be signed by the user's Magic-Folder write key for this purpose? Probably a bad idea to reuse the write-cap key for this. Better to introduce ed25519 identity keys which can (optionally) sign snapshot contents and store the signature as another member of the immutable directory. - - -Conflict Resolution -------------------- - -detection of conflicts -`````````````````````` - -A Magic-Folder client updates a given file's current snapshot link to a snapshot which is a descendent -of the previous snapshot. For a given file, let's say "file1", Alice can detect that Bob's DMD has a "file1" -that links to a snapshot which conflicts. Two snapshots conflict if one is not an ancestor of the other. - - -a possible UI for resolving conflicts -````````````````````````````````````` - -If Alice links a conflicting snapshot object for a file named "file1", -Bob and Carole will see a file in their Magic-Folder called "file1.conflicted.Alice". -Alice conversely will see an additional file called "file1.conflicted.previous". -If Alice wishes to resolve the conflict with her new version of the file then -she simply deletes the file called "file1.conflicted.previous". If she wants to -choose the other version then she moves it into place: - - mv file1.conflicted.previous file1 - - -This scheme works for N number of conflicts. Bob for instance could choose -the same resolution for the conflict, like this: - - mv file1.Alice file1 - - -Deletion propagation and eventual Garbage Collection ----------------------------------------------------- - -When a user deletes a file, this is represented by a link from their DMD file -object to a deletion snapshot. Eventually all users will link this deletion -snapshot into their DMD. When all users have the link then they locally cache -the deletion snapshot and remove the link to that file in their DMD. -Deletions can of course be undeleted; this means creating a new snapshot -object that specifies itself a descent of the deletion snapshot. - -Clients periodically renew leases to all capabilities recursively linked -to in their DMD. Files which are unlinked by ALL the users of a -given Magic-Folder will eventually be garbage collected. - -Lease expirey duration must be tuned properly by storage servers such that -Garbage Collection does not occur too frequently. - - - -Performance Considerations --------------------------- - -local changes -````````````` - -Our old scheme requires two remote Tahoe-LAFS operations per local file modification: -1. upload new file contents (as an immutable file) -2. modify mutable directory (DMD) to link to the immutable file cap - -Our new scheme requires three remote operations: -1. upload new file contents (as in immutable file) -2. upload immutable directory representing Tahoe-LAFS snapshot object -3. modify mutable directory (DMD) to link to the immutable snapshot object - -remote changes -`````````````` - -Our old scheme requires one remote Tahoe-LAFS operation per remote file modification (not counting the polling of the dmd): -1. 
Download new file content - -Our new scheme requires a minimum of two remote operations (not counting the polling of the dmd) for conflicting downloads, or three remote operations for overwrite downloads: -1. Download new snapshot object -2. Download the content it points to -3. If the download is an overwrite, modify the DMD to indicate that the downloaded version is their current version. - -If the new snapshot is not a direct descendant of our current snapshot or the other party's previous snapshot we saw, we will also need to download more snapshots to determine if it is a conflict or an overwrite. However, those can be done in -parallel with the content download since we will need to download the content in either case. - -While the old scheme is obviously more efficient, we think that the properties provided by the new scheme make it worth the additional cost. - -Physical updates to the DMD overiouslly need to be serialized, so multiple logical updates should be combined when an update is already in progress. - -conflict detection and local caching -```````````````````````````````````` - -Local caching of snapshots is important for performance. -We refer to the client's local snapshot cache as the ``magic-folder db``. - -Conflict detection can be expensive because it may require the client -to download many snapshots from the other user's DMD in order to try -and find it's own current snapshot or a descendent. The cost of scanning -the remote DMDs should not be very high unless the client conducting the -scan has lots of history to download because of being offline for a long -time while many new snapshots were distributed. - - -local cache purging policy -`````````````````````````` - -The client's current snapshot for each file should be cached at all times. -When all clients' views of a file are synchronized (they all have the same -snapshot for that file), no ancestry for that file needs to be cached. -When clients' views of a file are *not* synchronized, the most recent -common ancestor of all clients' snapshots must be kept cached, as must -all intermediate snapshots. - - -Local Merge Property --------------------- - -Bob can in fact, set a pre-existing directory (with files) as his new Magic-Folder directory, resulting -in a merge of the Magic-Folder with Bob's local directory. Filename collisions will result in conflicts -because Bob's new snapshots are not descendent's of the existing Magic-Folder file snapshots. - - -Example: simultaneous update with four parties: - -1. A, B, C, D are in sync for file "foo" at snapshot X -2. A and B simultaneously change the file, creating snapshots XA and XB (both descendants of X). -3. C hears about XA first, and D hears about XB first. Both accept an overwrite. -4. All four parties hear about the other update they hadn't heard about yet. -5. Result: - - everyone's local file "foo" has the content pointed to by the snapshot in their DMD's "foo" entry - - A and C's DMDs each have the "foo" entry pointing at snapshot XA - - B and D's DMDs each have the "foo" entry pointing at snapshot XB - - A and C have a local file called foo.conflict-B,D with XB's content - - B and D have a local file called foo.conflict-A,C with XA's content - -Later: - - - Everyone ignores the conflict, and continue updating their local "foo". but slowly enough that there are no further conflicts, so that A and C remain in sync with eachother, and B and D remain in sync with eachother. 
- - - A and C's foo.conflict-B,D file continues to be updated with the latest version of the file B and D are working on, and vice-versa. - - - A and C edit the file at the same time again, causing a new conflict. - - - Local files are now: - - A: "foo", "foo.conflict-B,D", "foo.conflict-C" - - C: "foo", "foo.conflict-B,D", "foo.conflict-A" - - B and D: "foo", "foo.conflict-A", "foo.conflict-C" - - - Finally, D decides to look at "foo.conflict-A" and "foo.conflict-C", and they manually integrate (or decide to ignore) the differences into their own local file "foo". - - - D deletes their conflict files. - - - D's DMD now points to a snapshot that is a descendant of everyone else's current snapshot, resolving all conflicts. - - - The conflict files on A, B, and C disappear, and everyone's local file "foo" contains D's manually-merged content. - - -Daira: I think it is too complicated to include multiple nicknames in the .conflict files -(e.g. "foo.conflict-B,D"). It should be sufficient to have one file for each other client, -reflecting that client's latest version, regardless of who else it conflicts with. - - -Zooko's Design (as interpreted by Daira) -======================================== - -A version map is a mapping from client nickname to version number. - -Definition: a version map M' strictly-follows a mapping M iff for every entry c->v -in M, there is an entry c->v' in M' such that v' > v. - - -Each client maintains a 'local version map' and a 'conflict version map' for each file -in its magic folder db. -If it has never written the file, then the entry for its own nickname in the local version -map is zero. The conflict version map only contains entries for nicknames B where -"$FILENAME.conflict-$B" exists. - -When a client A uploads a file, it increments the version for its own nickname in its -local version map for the file, and includes that map as metadata with its upload. - -A download by client A from client B is an overwrite iff the downloaded version map -strictly-follows A's local version map for that file; in this case A replaces its local -version map with the downloaded version map. Otherwise it is a conflict, and the -download is put into "$FILENAME.conflict-$B"; in this case A's -local version map remains unchanged, and the entry B->v taken from the downloaded -version map is added to its conflict version map. - -If client A deletes or renames a conflict file "$FILENAME.conflict-$B", then A copies -the entry for B from its conflict version map to its local version map, deletes -the entry for B in its conflict version map, and performs another upload (with -incremented version number) of $FILENAME. - - -Example: - A, B, C = (10, 20, 30) everyone agrees. - A updates: (11, 20, 30) - B updates: (10, 21, 30) - -C will see either A or B first. Both would be an overwrite, if considered alone. - - - diff --git a/docs/proposed/magic-folder/remote-to-local-sync.rst b/docs/proposed/magic-folder/remote-to-local-sync.rst deleted file mode 100644 index 32c2fd5da..000000000 --- a/docs/proposed/magic-folder/remote-to-local-sync.rst +++ /dev/null @@ -1,951 +0,0 @@ -Magic Folder design for remote-to-local sync -============================================ - -Scope ------ - -In this Objective we will design remote-to-local synchronization: - -* How to efficiently determine which objects (files and directories) have - to be downloaded in order to bring the current local filesystem into sync - with the newly-discovered version of the remote filesystem. 
-* How to distinguish overwrites, in which the remote side was aware of - your most recent version and overwrote it with a new version, from - conflicts, in which the remote side was unaware of your most recent - version when it published its new version. The latter needs to be raised - to the user as an issue the user will have to resolve and the former must - not bother the user. -* How to overwrite the (stale) local versions of those objects with the - newly acquired objects, while preserving backed-up versions of those - overwritten objects in case the user didn't want this overwrite and wants - to recover the old version. - -Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective4`_ -keyword are within the scope of the remote-to-local synchronization -design. - -.. _otf-magic-folder-objective4: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective4 - - -Glossary -'''''''' - -Object: a file or directory - -DMD: distributed mutable directory - -Folder: an abstract directory that is synchronized between clients. -(A folder is not the same as the directory corresponding to it on -any particular client, nor is it the same as a DMD.) - -Collective: the set of clients subscribed to a given Magic Folder. - -Descendant: a direct or indirect child in a directory or folder tree - -Subfolder: a folder that is a descendant of a magic folder - -Subpath: the path from a magic folder to one of its descendants - -Write: a modification to a local filesystem object by a client - -Read: a read from a local filesystem object by a client - -Upload: an upload of a local object to the Tahoe-LAFS file store - -Download: a download from the Tahoe-LAFS file store to a local object - -Pending notification: a local filesystem change that has been detected -but not yet processed. - - -Representing the Magic Folder in Tahoe-LAFS -------------------------------------------- - -Unlike the local case where we use inotify or ReadDirectoryChangesW to -detect filesystem changes, we have no mechanism to register a monitor for -changes to a Tahoe-LAFS directory. Therefore, we must periodically poll -for changes. - -An important constraint on the solution is Tahoe-LAFS' ":doc:`write -coordination directive<../../write_coordination>`", which prohibits -concurrent writes by different storage clients to the same mutable object: - - Tahoe does not provide locking of mutable files and directories. If - there is more than one simultaneous attempt to change a mutable file - or directory, then an UncoordinatedWriteError may result. This might, - in rare cases, cause the file or directory contents to be accidentally - deleted. The user is expected to ensure that there is at most one - outstanding write or update request for a given file or directory at - a time. One convenient way to accomplish this is to make a different - file or directory for each person or process that wants to write. - -Since it is a goal to allow multiple users to write to a Magic Folder, -if the write coordination directive remains the same as above, then we -will not be able to implement the Magic Folder as a single Tahoe-LAFS -DMD. In general therefore, we will have multiple DMDs —spread across -clients— that together represent the Magic Folder. Each client in a -Magic Folder collective polls the other clients' DMDs in order to detect -remote changes. - -Six possible designs were considered for the representation of subfolders -of the Magic Folder: - -1. 
All subfolders written by a given Magic Folder client are collapsed -into a single client DMD, containing immutable files. The child name of -each file encodes the full subpath of that file relative to the Magic -Folder. - -2. The DMD tree under a client DMD is a direct copy of the folder tree -written by that client to the Magic Folder. Not all subfolders have -corresponding DMDs; only those to which that client has written files or -child subfolders. - -3. The directory tree under a client DMD is a ``tahoe backup`` structure -containing immutable snapshots of the folder tree written by that client -to the Magic Folder. As in design 2, only objects written by that client -are present. - -4. *Each* client DMD contains an eventually consistent mirror of all -files and folders written by *any* Magic Folder client. Thus each client -must also copy changes made by other Magic Folder clients to its own -client DMD. - -5. *Each* client DMD contains a ``tahoe backup`` structure containing -immutable snapshots of all files and folders written by *any* Magic -Folder client. Thus each client must also create another snapshot in its -own client DMD when changes are made by another client. (It can potentially -batch changes, subject to latency requirements.) - -6. The write coordination problem is solved by implementing `two-phase -commit`_. Then, the representation consists of a single DMD tree which is -written by all clients. - -.. _`two-phase commit`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1755 - -Here is a summary of advantages and disadvantages of each design: - -+----------------------------+ -| Key | -+=======+====================+ -| \+\+ | major advantage | -+-------+--------------------+ -| \+ | minor advantage | -+-------+--------------------+ -| ‒ | minor disadvantage | -+-------+--------------------+ -| ‒ ‒ | major disadvantage | -+-------+--------------------+ -| ‒ ‒ ‒ | showstopper | -+-------+--------------------+ - - -123456+: All designs have the property that a recursive add-lease operation -starting from a *collective directory* containing all of the client DMDs, -will find all of the files and directories used in the Magic Folder -representation. Therefore the representation is compatible with :doc:`garbage -collection <../../garbage-collection>`, even when a pre-Magic-Folder client -does the lease marking. - -123456+: All designs avoid "breaking" pre-Magic-Folder clients that read -a directory or file that is part of the representation. - -456++: Only these designs allow a readcap to one of the client -directories —or one of their subdirectories— to be directly shared -with other Tahoe-LAFS clients (not necessarily Magic Folder clients), -so that such a client sees all of the contents of the Magic Folder. -Note that this was not a requirement of the OTF proposal, although it -is useful. - -135+: A Magic Folder client has only one mutable Tahoe-LAFS object to -monitor per other client. This minimizes communication bandwidth for -polling, or alternatively the latency possible for a given polling -bandwidth. - -1236+: A client does not need to make changes to its own DMD that repeat -changes that another Magic Folder client had previously made. This reduces -write bandwidth and complexity. - -1‒: If the Magic Folder has many subfolders, their files will all be -collapsed into the same DMD, which could get quite large. In practice a -single DMD can easily handle the number of files expected to be written -by a client, so this is unlikely to be a significant issue. 
- -123‒ ‒: In these designs, the set of files in a Magic Folder is -represented as the union of the files in all client DMDs. However, -when a file is modified by more than one client, it will be linked -from multiple client DMDs. We therefore need a mechanism, such as a -version number or a monotonically increasing timestamp, to determine -which copy takes priority. - -35‒ ‒: When a Magic Folder client detects a remote change, it must -traverse an immutable directory structure to see what has changed. -Completely unchanged subtrees will have the same URI, allowing some of -this traversal to be shortcutted. - -24‒ ‒ ‒: When a Magic Folder client detects a remote change, it must -traverse a mutable directory structure to see what has changed. This is -more complex and less efficient than traversing an immutable structure, -because shortcutting is not possible (each DMD retains the same URI even -if a descendant object has changed), and because the structure may change -while it is being traversed. Also the traversal needs to be robust -against cycles, which can only occur in mutable structures. - -45‒ ‒: When a change occurs in one Magic Folder client, it will propagate -to all the other clients. Each client will therefore see multiple -representation changes for a single logical change to the Magic Folder -contents, and must suppress the duplicates. This is particularly -problematic for design 4 where it interacts with the preceding issue. - -4‒ ‒ ‒, 5‒ ‒: There is the potential for client DMDs to get "out of sync" -with each other, potentially for long periods if errors occur. Thus each -client must be able to "repair" its client directory (and its -subdirectory structure) concurrently with performing its own writes. This -is a significant complexity burden and may introduce failure modes that -could not otherwise happen. - -6‒ ‒ ‒: While two-phase commit is a well-established protocol, its -application to Tahoe-LAFS requires significant design work, and may still -leave some corner cases of the write coordination problem unsolved. 
- - -+------------------------------------------------+-----------------------------------------+ -| Design Property | Designs Proposed | -+================================================+======+======+======+======+======+======+ -| **advantages** | *1* | *2* | *3* | *4* | *5* | *6* | -+------------------------------------------------+------+------+------+------+------+------+ -| Compatible with garbage collection |\+ |\+ |\+ |\+ |\+ |\+ | -+------------------------------------------------+------+------+------+------+------+------+ -| Does not break old clients |\+ |\+ |\+ |\+ |\+ |\+ | -+------------------------------------------------+------+------+------+------+------+------+ -| Allows direct sharing | | | |\+\+ |\+\+ |\+\+ | -+------------------------------------------------+------+------+------+------+------+------+ -| Efficient use of bandwidth |\+ | |\+ | |\+ | | -+------------------------------------------------+------+------+------+------+------+------+ -| No repeated changes |\+ |\+ |\+ | | |\+ | -+------------------------------------------------+------+------+------+------+------+------+ -| **disadvantages** | *1* | *2* | *3* | *4* | *5* | *6* | -+------------------------------------------------+------+------+------+------+------+------+ -| Can result in large DMDs |‒ | | | | | | -+------------------------------------------------+------+------+------+------+------+------+ -| Need version number to determine priority |‒ ‒ |‒ ‒ |‒ ‒ | | | | -+------------------------------------------------+------+------+------+------+------+------+ -| Must traverse immutable directory structure | | |‒ ‒ | |‒ ‒ | | -+------------------------------------------------+------+------+------+------+------+------+ -| Must traverse mutable directory structure | |‒ ‒ ‒ | |‒ ‒ ‒ | | | -+------------------------------------------------+------+------+------+------+------+------+ -| Must suppress duplicate representation changes | | | |‒ ‒ |‒ ‒ | | -+------------------------------------------------+------+------+------+------+------+------+ -| "Out of sync" problem | | | |‒ ‒ ‒ |‒ ‒ | | -+------------------------------------------------+------+------+------+------+------+------+ -| Unsolved design problems | | | | | |‒ ‒ ‒ | -+------------------------------------------------+------+------+------+------+------+------+ - - -Evaluation of designs -''''''''''''''''''''' - -Designs 2 and 3 have no significant advantages over design 1, while -requiring higher polling bandwidth and greater complexity due to the need -to create subdirectories. These designs were therefore rejected. - -Design 4 was rejected due to the out-of-sync problem, which is severe -and possibly unsolvable for mutable structures. - -For design 5, the out-of-sync problem is still present but possibly -solvable. However, design 5 is substantially more complex, less efficient -in bandwidth/latency, and less scalable in number of clients and -subfolders than design 1. It only gains over design 1 on the ability to -share directory readcaps to the Magic Folder (or subfolders), which was -not a requirement. It would be possible to implement this feature in -future by switching to design 6. - -For the time being, however, design 6 was considered out-of-scope for -this project. - -Therefore, design 1 was chosen. That is: - - All subfolders written by a given Magic Folder client are collapsed - into a single client DMD, containing immutable files. The child name - of each file encodes the full subpath of that file relative to the - Magic Folder. 
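-
-To make the chosen flattening concrete, here is a minimal sketch of
-one plausible reversible encoding of subpaths into flat DMD child
-names; the ``@``-based escape convention is an illustrative
-assumption for this sketch, not necessarily the exact encoding the
-implementation will use::
-
-    def path2magic(subpath):
-        # Escape '@' first, then encode the path separator, so that
-        # the mapping from subpaths to child names is reversible.
-        return subpath.replace("@", "@@").replace("/", "@_")
-
-    def magic2path(childname):
-        # Invert the encoding: '@_' decodes to '/', '@@' to '@'.
-        # Well-formed input never ends with an unpaired '@'.
-        out, i = [], 0
-        while i < len(childname):
-            if childname[i] == "@":
-                out.append("/" if childname[i + 1] == "_" else "@")
-                i += 2
-            else:
-                out.append(childname[i])
-                i += 1
-        return "".join(out)
-
-    assert magic2path(path2magic("music/jazz/track01.mp3")) == "music/jazz/track01.mp3"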
- -Each directory entry in a DMD also stores a version number, so that the -latest version of a file is well-defined when it has been modified by -multiple clients. - -To enable representing empty directories, a client that creates a -directory should link a corresponding zero-length file in its DMD, -at a name that ends with the encoded directory separator character. - -We want to enable dynamic configuration of the membership of a Magic -Folder collective, without having to reconfigure or restart each client -when another client joins. To support this, we have a single collective -directory that links to all of the client DMDs, named by their client -nicknames. If the collective directory is mutable, then it is possible -to change its contents in order to add clients. Note that a client DMD -should not be unlinked from the collective directory unless all of its -files are first copied to some other client DMD. - -A client needs to be able to write to its own DMD, and read from other DMDs. -To be consistent with the `Principle of Least Authority`_, each client's -reference to its own DMD is a write capability, whereas its reference -to the collective directory is a read capability. The latter transitively -grants read access to all of the other client DMDs and the files linked -from them, as required. - -.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf - -Design and implementation of the user interface for maintaining this -DMD structure and configuration will be addressed in Objectives 5 and 6. - -During operation, each client will poll for changes on other clients -at a predetermined frequency. On each poll, it will reread the collective -directory (to allow for added or removed clients), and then read each -client DMD linked from it. - -"Hidden" files, and files with names matching the patterns used for backup, -temporary, and conflicted files, will be ignored, i.e. not synchronized -in either direction. A file is hidden if it has a filename beginning with -"." (on any platform), or has the hidden or system attribute on Windows. - - -Conflict Detection and Resolution ---------------------------------- - -The combination of local filesystems and distributed objects is -an example of shared state concurrency, which is highly error-prone -and can result in race conditions that are complex to analyze. -Unfortunately we have no option but to use shared state in this -situation. - -We call the resulting design issues "dragons" (as in "Here be dragons"), -which as a convenient mnemonic we have named after the classical -Greek elements Earth, Fire, Air, and Water. - -Note: all filenames used in the following sections are examples, -and the filename patterns we use in the actual implementation may -differ. The actual patterns will probably include timestamps, and -for conflicted files, the nickname of the client that last changed -the file. - - -Earth Dragons: Collisions between local filesystem operations and downloads -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Write/download collisions -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Suppose that Alice's Magic Folder client is about to write a -version of ``foo`` that it has downloaded in response to a remote -change. - -The criteria for distinguishing overwrites from conflicts are -described later in the `Fire Dragons`_ section. Suppose that the -remote change has been initially classified as an overwrite. -(As we will see, it may be reclassified in some circumstances.) - -.. 
_`Fire Dragons`: #fire-dragons-distinguishing-conflicts-from-overwrites
-
-Note that writing a file that does not already have an entry in the
-:ref:`magic folder db` is
-initially classed as an overwrite.
-
-A *write/download collision* occurs when another program writes
-to ``foo`` in the local filesystem, concurrently with the new
-version being written by the Magic Folder client. We need to
-ensure that this does not cause data loss, as far as possible.
-
-An important constraint on the design is that on Windows, it is
-not possible to rename a file to the same name as an existing
-file in that directory. Also, on Windows it may not be possible to
-delete or rename a file that has been opened by another process
-(depending on the sharing flags specified by that process).
-Therefore we need to consider carefully how to handle failure
-conditions.
-
-In our proposed design, Alice's Magic Folder client follows
-this procedure for an overwrite in response to a remote change:
-
-1. Write a temporary file, say ``.foo.tmp``.
-2. Use the procedure described in the `Fire Dragons`_ section
-   to obtain an initial classification as an overwrite or a
-   conflict. (This takes as input the ``last_downloaded_uri``
-   field from the directory entry of the changed ``foo``.)
-3. Set the ``mtime`` of the replacement file to be at least *T* seconds
-   before the current local time. Stat the replacement file
-   to obtain its ``mtime`` and ``ctime`` as stored in the local
-   filesystem, and update the file's last-seen statinfo in
-   the magic folder db with this information. (Note that the
-   retrieved ``mtime`` may differ from the one that was set due
-   to rounding.)
-4. Perform a *file replacement* operation (explained below)
-   with backup filename ``foo.backup``, replaced file ``foo``,
-   and replacement file ``.foo.tmp``. If any step of this
-   operation fails, reclassify as a conflict and stop.
-
-To reclassify as a conflict, attempt to rename ``.foo.tmp`` to
-``foo.conflicted``, suppressing errors.
-
-The implementation of file replacement differs between Unix
-and Windows. On Unix, it can be implemented as follows:
-
-* 4a. Stat the replaced path, and set the permissions of the
-  replacement file to be the same as the replaced file,
-  bitwise-or'd with octal 600 (``rw-------``). If the replaced
-  file does not exist, set the permissions according to the
-  user's umask. If there is a directory at the replaced path,
-  fail.
-* 4b. Attempt to move the replaced file (``foo``) to the
-  backup filename (``foo.backup``). If an ``ENOENT`` error
-  occurs because the replaced file does not exist, ignore this
-  error and continue with steps 4c and 4d.
-* 4c. Attempt to create a hard link at the replaced filename
-  (``foo``) pointing to the replacement file (``.foo.tmp``).
-* 4d. Attempt to unlink the replacement file (``.foo.tmp``),
-  suppressing errors.
-
-Note that, if there is no conflict, the entry for ``foo``
-recorded in the :ref:`magic folder
-db` will
-reflect the ``mtime`` set in step 3. The move operation in step
-4b will cause a ``MOVED_FROM`` event for ``foo``, and the link
-operation in step 4c will cause an ``IN_CREATE`` event for
-``foo``. However, these events will not trigger an upload,
-because they are guaranteed to be processed only after the file
-replacement has finished, at which point the last-seen statinfo
-recorded in the database entry will exactly match the metadata
-for the file's inode on disk. 
(The two hard links — ``foo``
-and, while it still exists, ``.foo.tmp`` — share the same inode
-and therefore the same metadata.)
-
-On Windows, file replacement can be implemented by a call to
-the `ReplaceFileW`_ API (with the
-``REPLACEFILE_IGNORE_MERGE_ERRORS`` flag). If an error occurs
-because the replaced file does not exist, then we ignore this
-error and attempt to move the replacement file to the replaced
-file.
-
-Similar to the Unix case, the `ReplaceFileW`_ operation will
-cause one or more change notifications for ``foo``. The replaced
-``foo`` has the same ``mtime`` as the replacement file, and so any
-such notification(s) will not trigger an unwanted upload.
-
-.. _`ReplaceFileW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365512%28v=vs.85%29.aspx
-
-To determine whether this procedure adequately protects against data
-loss, we need to consider what happens if another process attempts to
-update ``foo``, for example by renaming ``foo.other`` to ``foo``.
-This requires us to analyze all possible interleavings between the
-operations performed by the Magic Folder client and the other process.
-(Note that atomic operations on a directory are totally ordered.)
-The set of possible interleavings differs between Windows and Unix.
-
-On Unix, for the case where the replaced file already exists, we have:
-
-* Interleaving A: the other process' rename precedes our rename in
-  step 4b, and we get an ``IN_MOVED_TO`` event for its rename by
-  step 2. Then we reclassify as a conflict; its changes end up at
-  ``foo`` and ours end up at ``foo.conflicted``. This avoids data
-  loss.
-
-* Interleaving B: its rename precedes ours in step 4b, and we do
-  not get an event for its rename by step 2. Its changes end up at
-  ``foo.backup``, and ours end up at ``foo`` after being linked there
-  in step 4c. This avoids data loss.
-
-* Interleaving C: its rename happens between our rename in step 4b,
-  and our link operation in step 4c of the file replacement. The
-  latter fails with an ``EEXIST`` error because ``foo`` already
-  exists. We reclassify as a conflict; the old version ends up at
-  ``foo.backup``, the other process' changes end up at ``foo``, and
-  ours at ``foo.conflicted``. This avoids data loss.
-
-* Interleaving D: its rename happens after our link in step 4c, and
-  causes an ``IN_MOVED_TO`` event for ``foo``. Its rename also changes
-  the ``mtime`` for ``foo`` so that it is different from the ``mtime``
-  calculated in step 3, and therefore different from the metadata
-  recorded for ``foo`` in the magic folder db. (Assuming no system
-  clock changes, its rename will set an ``mtime`` timestamp
-  corresponding to a time after step 4c, which is after the timestamp
-  *T* seconds before step 4a, provided that *T* seconds is
-  sufficiently greater than the timestamp granularity.) Therefore, an
-  upload will be triggered for ``foo`` after its change, which is
-  correct and avoids data loss.
-
-If the replaced file did not already exist, an ``ENOENT`` error
-occurs at step 4b, and we continue with steps 4c and 4d. The other
-process' rename races with our link operation in step 4c. If the
-other process wins the race then the effect is similar to
-Interleaving C, and if we win the race then it is similar to
-Interleaving D. Either case avoids data loss.
-
-
-On Windows, the internal implementation of `ReplaceFileW`_ is similar
-to what we have described above for Unix; it works like this:
-
-* 4a′. 
Copy metadata (which does not include ``mtime``) from the
-  replaced file (``foo``) to the replacement file (``.foo.tmp``).
-
-* 4b′. Attempt to move the replaced file (``foo``) onto the
-  backup filename (``foo.backup``), deleting the latter if it
-  already exists.
-
-* 4c′. Attempt to move the replacement file (``.foo.tmp``) to the
-  replaced filename (``foo``); fail if the destination already
-  exists.
-
-Notice that this is essentially the same as the algorithm we use
-for Unix, but steps 4c and 4d on Unix are combined into a single
-step 4c′. (If there is a failure at step 4c′ after step 4b′ has
-completed, the `ReplaceFileW`_ call will fail with return code
-``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``. However, it is still
-preferable to use this API over two `MoveFileExW`_ calls, because
-it retains the attributes and ACLs of ``foo`` where possible.
-Also note that if the `ReplaceFileW`_ call fails with
-``ERROR_FILE_NOT_FOUND`` because the replaced file does not exist,
-then the replacement operation ignores this error and continues with
-the equivalent of step 4c′, as on Unix.)
-
-However, on Windows the other application will not be able to
-directly rename ``foo.other`` onto ``foo`` (which would fail because
-the destination already exists); it will have to rename or delete
-``foo`` first. Without loss of generality, let's say ``foo`` is
-deleted. This complicates the interleaving analysis, because we
-have two operations done by the other process interleaving with
-three done by the magic folder process (rather than one operation
-interleaving with four as on Unix).
-
-So on Windows, for the case where the replaced file already exists,
-we have:
-
-* Interleaving A′: the other process' deletion of ``foo`` and its
-  rename of ``foo.other`` to ``foo`` both precede our rename in
-  step 4b′. We get an event corresponding to its rename by step 2.
-  Then we reclassify as a conflict; its changes end up at ``foo``
-  and ours end up at ``foo.conflicted``. This avoids data loss.
-
-* Interleaving B′: the other process' deletion of ``foo`` and its
-  rename of ``foo.other`` to ``foo`` both precede our rename in
-  step 4b′. We do not get an event for its rename by step 2.
-  Its changes end up at ``foo.backup``, and ours end up at ``foo``
-  after being moved there in step 4c′. This avoids data loss.
-
-* Interleaving C′: the other process' deletion of ``foo`` precedes
-  our rename of ``foo`` to ``foo.backup`` done by `ReplaceFileW`_,
-  but its rename of ``foo.other`` to ``foo`` does not, so we get
-  an ``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_ indicating
-  that the replaced file does not exist. We ignore this error and
-  attempt to move ``.foo.tmp`` to ``foo``, racing with the other
-  process which is attempting to move ``foo.other`` to ``foo``.
-  If we win the race, then our changes end up at ``foo``, and the
-  other process' move fails. If the other process wins the race,
-  then its changes end up at ``foo``, our move fails, and we
-  reclassify as a conflict, so that our changes end up at
-  ``foo.conflicted``. Either possibility avoids data loss.
-
-* Interleaving D′: the other process' deletion and/or rename happen
-  during the call to `ReplaceFileW`_, causing the latter to fail.
-  There are two subcases:
-
-  * if the error is ``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``, then
-    ``foo`` is renamed to ``foo.backup`` and ``.foo.tmp`` remains
-    at its original name after the call.
-  * for all other errors, ``foo`` and ``.foo.tmp`` both remain at
-    their original names after the call. 
-
-  In both subcases, we reclassify as a conflict and rename ``.foo.tmp``
-  to ``foo.conflicted``. This avoids data loss.
-
-* Interleaving E′: the other process' deletion of ``foo`` and attempt
-  to rename ``foo.other`` to ``foo`` both happen after all internal
-  operations of `ReplaceFileW`_ have completed. This causes deletion
-  and rename events for ``foo`` (which will in practice be merged due
-  to the pending delay, although we don't rely on that for
-  correctness). The rename also changes the ``mtime`` for ``foo`` so
-  that it is different from the ``mtime`` calculated in step 3, and
-  therefore different from the metadata recorded for ``foo`` in the
-  magic folder db. (Assuming no system clock changes, its rename will
-  set an ``mtime`` timestamp corresponding to a time after the
-  internal operations of `ReplaceFileW`_ have completed, which is
-  after the timestamp *T* seconds before `ReplaceFileW`_ is called,
-  provided that *T* seconds is sufficiently greater than the timestamp
-  granularity.) Therefore, an upload will be triggered for ``foo``
-  after its change, which is correct and avoids data loss.
-
-.. _`MoveFileExW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx
-
-If the replaced file did not already exist, we get an
-``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_, and attempt to
-move ``.foo.tmp`` to ``foo``. This is similar to Interleaving C′, and
-either possibility for the resulting race avoids data loss.
-
-We also need to consider what happens if another process opens ``foo``
-and writes to it directly, rather than renaming another file onto it:
-
-* On Unix, open file handles refer to inodes, not paths. If the other
-  process opens ``foo`` before it has been renamed to ``foo.backup``,
-  and then closes the file, changes will have been written to the file
-  at the same inode, even if that inode is now linked at ``foo.backup``.
-  This avoids data loss.
-
-* On Windows, we have two subcases, depending on whether the sharing
-  flags specified by the other process when it opened its file handle
-  included ``FILE_SHARE_DELETE``. (This flag covers both deletion and
-  rename operations.)
-
-  i. If the sharing flags *do not* allow deletion/renaming, the
-     `ReplaceFileW`_ operation will fail without renaming ``foo``.
-     In this case we will end up with ``foo`` changed by the other
-     process, and the downloaded file still in ``.foo.tmp``.
-     This avoids data loss.
-
-  ii. If the sharing flags *do* allow deletion/renaming, then
-      data loss or corruption may occur. This is unavoidable and
-      can be attributed to the other process making a poor choice of
-      sharing flags (either explicitly if it used `CreateFile`_, or
-      via whichever higher-level API it used).
-
-.. _`CreateFile`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858%28v=vs.85%29.aspx
-
-Note that it is possible that another process tries to open the file
-between steps 4b and 4c (or 4b′ and 4c′ on Windows). In this case the
-open will fail because ``foo`` does not exist. Nevertheless, no data
-will be lost, and in many cases the user will be able to retry the
-operation.
-
-Above we only described the case where the download was initially
-classified as an overwrite. If it was classed as a conflict, the
-procedure is the same except that we choose a unique filename
-for the conflicted file (say, ``foo.conflicted_unique``). 
We write
-the new contents to ``.foo.tmp`` and then rename it to
-``foo.conflicted_unique`` in such a way that the rename will fail
-if the destination already exists. (On Windows this is a simple
-rename; on Unix it can be implemented as a link operation followed
-by an unlink, similar to steps 4c and 4d above.) If this fails
-because another process wrote ``foo.conflicted_unique`` after we
-chose the filename, then we retry with a different filename.
-
-
-Read/download collisions
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-A *read/download collision* occurs when another program reads
-from ``foo`` in the local filesystem, concurrently with the new
-version being written by the Magic Folder client. We want to
-ensure that any successful attempt to read the file by the other
-program obtains a consistent view of its contents.
-
-On Unix, the above procedure for writing downloads is sufficient
-to achieve this. There are three cases:
-
-* A. The other process opens ``foo`` for reading before it is
-  renamed to ``foo.backup``. Then the file handle will continue to
-  refer to the old file across the rename, and the other process
-  will read the old contents.
-
-* B. The other process attempts to open ``foo`` after it has been
-  renamed to ``foo.backup``, and before it is linked in step 4c.
-  The open call fails, which is acceptable.
-
-* C. The other process opens ``foo`` after it has been linked to
-  the new file. Then it will read the new contents.
-
-On Windows, the analysis is very similar, but case A′ needs to
-be split into two subcases, depending on the sharing mode the other
-process uses when opening the file for reading:
-
-* A′. The other process opens ``foo`` before the Magic Folder
-  client's attempt to rename ``foo`` to ``foo.backup`` (as part
-  of the implementation of `ReplaceFileW`_). The subcases are:
-
-  i. The other process uses sharing flags that deny deletion and
-     renames. The `ReplaceFileW`_ call fails, and the download is
-     reclassified as a conflict. The downloaded file ends up at
-     ``foo.conflicted``, which is correct.
-
-  ii. The other process uses sharing flags that allow deletion
-      and renames. The `ReplaceFileW`_ call succeeds, and the
-      other process reads inconsistent data. This can be attributed
-      to a poor choice of sharing flags by the other process.
-
-* B′. The other process attempts to open ``foo`` at the point
-  during the `ReplaceFileW`_ call where it does not exist.
-  The open call fails, which is acceptable.
-
-* C′. The other process opens ``foo`` after the new version has
-  been moved into place. Then it will read the new contents.
-
-
-For both write/download and read/download collisions, we have
-considered only interleavings with a single other process, and
-only the most common possibilities for the other process'
-interaction with the file. If multiple other processes are
-involved, or if a process performs operations other than those
-considered, then we cannot say much about the outcome in general;
-however, we believe that such cases will be much less common.
-
-
-
-Fire Dragons: Distinguishing conflicts from overwrites
-''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-When synchronizing a file that has changed remotely, the Magic Folder
-client needs to distinguish between overwrites, in which the remote
-side was aware of your most recent version (if any) and overwrote it
-with a new version, and conflicts, in which the remote side was unaware
-of your most recent version when it published its new version. 
Those two
-cases have to be handled differently — the latter needs to be raised
-to the user as an issue the user will have to resolve, while the former
-must not bother the user.
-
-For example, suppose that Alice's Magic Folder client sees a change
-to ``foo`` in Bob's DMD. If the version it downloads from Bob's DMD
-is "based on" the version currently in Alice's local filesystem at
-the time Alice's client attempts to write the downloaded file ‒or if
-there is no existing version in Alice's local filesystem at that time‒
-then it is an overwrite. Otherwise it is initially classified as a
-conflict.
-
-This initial classification is used by the procedure for writing a
-file described in the `Earth Dragons`_ section above. As explained
-in that section, we may reclassify an overwrite as a conflict if an
-error occurs during the write procedure.
-
-.. _`Earth Dragons`: #earth-dragons-collisions-between-local-filesystem-operations-and-downloads
-
-In order to implement this policy, we need to specify how the
-"based on" relation between file versions is recorded and updated.
-
-We propose to record this information:
-
-* in the :ref:`magic folder
-  db`, for
-  local files;
-* in the Tahoe-LAFS directory metadata, for files stored in the
-  Magic Folder.
-
-In the magic folder db we will add a *last-downloaded record*,
-consisting of ``last_downloaded_uri`` and ``last_downloaded_timestamp``
-fields, for each path stored in the database. Whenever a Magic Folder
-client downloads a file, it stores the downloaded version's URI and
-the current local timestamp in this record. Since only immutable
-files are used, the URI will be an immutable file URI, which is
-deterministically and uniquely derived from the file contents and
-the Tahoe-LAFS node's :doc:`convergence secret<../../convergence-secret>`.
-
-(Note that the last-downloaded record is updated regardless of
-whether the download is an overwrite or a conflict. The rationale
-for this is to avoid "conflict loops" between clients, where every
-new version after the first conflict would be considered another
-conflict.)
-
-Later, in response to a local filesystem change at a given path, the
-Magic Folder client reads the last-downloaded record associated with
-that path (if any) from the database and then uploads the current
-file. When it links the uploaded file into its client DMD, it
-includes the ``last_downloaded_uri`` field in the metadata of the
-directory entry, overwriting any existing field of that name. If
-there was no last-downloaded record associated with the path, this
-field is omitted.
-
-Note that the ``last_downloaded_uri`` field does *not* record the URI
-of the uploaded file (which would be redundant); it records the URI of
-the last download before the local change that caused the upload.
-The field will be absent if the file has never been downloaded by
-this client (i.e. if it was created on this client and no change
-by any other client has been detected).
-
-A possible refinement also takes into account the
-``last_downloaded_timestamp`` field from the magic folder db, and
-compares it to the timestamp of the change that caused the upload
-(which should be later, assuming no system clock changes).
-If the duration between these timestamps is very short, then we
-are uncertain about whether the process on Bob's system that wrote
-the local file could have taken into account the last download.
-We can use this information to be conservative about treating
-changes as conflicts. 
So, if the duration is less than a configured -threshold, we omit the ``last_downloaded_uri`` field from the -metadata. This will have the effect of making other clients treat -this change as a conflict whenever they already have a copy of the -file. - -Conflict/overwrite decision algorithm -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now we are ready to describe the algorithm for determining whether a -download for the file ``foo`` is an overwrite or a conflict (refining -step 2 of the procedure from the `Earth Dragons`_ section). - -Let ``last_downloaded_uri`` be the field of that name obtained from -the directory entry metadata for ``foo`` in Bob's DMD (this field -may be absent). Then the algorithm is: - -* 2a. Attempt to "stat" ``foo`` to get its *current statinfo* (size - in bytes, ``mtime``, and ``ctime``). If Alice has no local copy - of ``foo``, classify as an overwrite. - -* 2b. Read the following information for the path ``foo`` from the - local magic folder db: - - * the *last-seen statinfo*, if any (this is the size in - bytes, ``mtime``, and ``ctime`` stored in the ``local_files`` - table when the file was last uploaded); - * the ``last_uploaded_uri`` field of the ``local_files`` table - for this file, which is the URI under which the file was last - uploaded. - -* 2c. If any of the following are true, then classify as a conflict: - - * i. there are pending notifications of changes to ``foo``; - * ii. the last-seen statinfo is either absent (i.e. there is - no entry in the database for this path), or different from the - current statinfo; - * iii. either ``last_downloaded_uri`` or ``last_uploaded_uri`` - (or both) are absent, or they are different. - - Otherwise, classify as an overwrite. - - -Air Dragons: Collisions between local writes and uploads -'''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -Short of filesystem-specific features on Unix or the `shadow copy service`_ -on Windows (which is per-volume and therefore difficult to use in this -context), there is no way to *read* the whole contents of a file -atomically. Therefore, when we read a file in order to upload it, we -may read an inconsistent version if it was also being written locally. - -.. _`shadow copy service`: https://technet.microsoft.com/en-us/library/ee923636%28v=ws.10%29.aspx - -A well-behaved application can avoid this problem for its writes: - -* On Unix, if another process modifies a file by renaming a temporary - file onto it, then we will consistently read either the old contents - or the new contents. -* On Windows, if the other process uses sharing flags to deny reads - while it is writing a file, then we will consistently read either - the old contents or the new contents, unless a sharing error occurs. - In the case of a sharing error we should retry later, up to a - maximum number of retries. - -In the case of a not-so-well-behaved application writing to a file -at the same time we read from it, the magic folder will still be -eventually consistent, but inconsistent versions may be visible to -other users' clients. - -In Objective 2 we implemented a delay, called the *pending delay*, -after the notification of a filesystem change and before the file is -read in order to upload it (Tahoe-LAFS ticket `#1440`_). If another -change notification occurs within the pending delay time, the delay -is restarted. 
This helps to some extent because it means that if
-each burst of writes to a file completes more quickly than the
-pending delay, and distinct bursts are separated by more than the
-pending delay, we shouldn't encounter this inconsistency.
-
-.. _`#1440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1440
-
-The likelihood of inconsistency could be further reduced, even for
-writes by not-so-well-behaved applications, by delaying the actual
-upload for a further period —called the *stability delay*— after the
-file has finished being read. If a notification occurs between the
-end of the pending delay and the end of the stability delay, then
-the read would be aborted and the notification requeued.
-
-This would have the effect of ensuring that no write notifications
-have been received for the file during a time window that brackets
-the period when it was being read, with margin before and after
-this period defined by the pending and stability delays. The delays
-are intended to account for asynchronous notification of events, and
-caching in the filesystem.
-
-Note however that we cannot guarantee that the delays will be long
-enough to prevent inconsistency in any particular case. Also, the
-stability delay would potentially affect performance significantly
-because (unlike the pending delay) it is not overlapped when there
-are multiple files on the upload queue. This performance impact
-could be mitigated by uploading files in parallel where possible
-(Tahoe-LAFS ticket `#1459`_).
-
-We have not yet decided whether to implement the stability delay, and
-it is not planned to be implemented for the OTF objective 4 milestone.
-Ticket `#2431`_ has been opened to track this idea.
-
-.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459
-.. _`#2431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2431
-
-Note that the situation of both a local process and the Magic Folder
-client reading a file at the same time cannot cause any inconsistency.
-
-
-Water Dragons: Handling deletion and renames
-''''''''''''''''''''''''''''''''''''''''''''
-
-Deletion of a file
-~~~~~~~~~~~~~~~~~~
-
-When a file is deleted from the filesystem of a Magic Folder client,
-the most intuitive behavior is for it also to be deleted under that
-name from other clients. To avoid data loss, the other clients should
-actually rename their copies to a backup filename.
-
-It would not be sufficient for a Magic Folder client that deletes
-a file to implement this simply by removing the directory entry from
-its DMD. Indeed, the entry may not exist in the client's DMD if it
-has never previously changed the file.
-
-Instead, the client links a zero-length file into its DMD and sets
-``deleted: true`` in the directory entry metadata. Other clients
-take this as a signal to rename their copies to the backup filename.
-
-Note that the entry for this zero-length file has a version number as
-usual, and later versions may restore the file.
-
-When the downloader deletes a file (or renames it to a filename
-ending in ``.backup``) in response to a remote change, a local
-filesystem notification will occur, and we must make sure that this
-is not treated as a local change. To do this we have the downloader
-set the ``size`` field in the magic folder db to ``None`` (SQL NULL)
-just before deleting the file, and suppress notifications for which
-the local file does not exist, and the recorded ``size`` field is
-``None``. 
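-
-As a sketch, the suppression check might look like the following
-(``db_entry`` and its ``size`` attribute are illustrative names,
-not necessarily the schema the implementation uses)::
-
-    import os
-
-    def is_self_inflicted_deletion(db_entry, path):
-        # Suppress the notification caused by our own deletion: the
-        # local file is gone, and we recorded size = None (SQL NULL)
-        # just before deleting it.
-        return (db_entry is not None
-                and db_entry.size is None
-                and not os.path.exists(path))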
- -When a Magic Folder client restarts, we can detect files that had -been downloaded but were deleted while it was not running, because -their paths will have last-downloaded records in the magic folder db -with a ``size`` other than ``None``, and without any corresponding -local file. - -Deletion of a directory -~~~~~~~~~~~~~~~~~~~~~~~ - -Local filesystems (unlike a Tahoe-LAFS filesystem) normally cannot -unlink a directory that has any remaining children. Therefore a -Magic Folder client cannot delete local copies of directories in -general, because they will typically contain backup files. This must -be done manually on each client if desired. - -Nevertheless, a Magic Folder client that deletes a directory should -set ``deleted: true`` on the metadata entry for the corresponding -zero-length file. This avoids the directory being recreated after -it has been manually deleted from a client. - -Renaming -~~~~~~~~ - -It is sufficient to handle renaming of a file by treating it as a -deletion and an addition under the new name. - -This also applies to directories, although users may find the -resulting behavior unintuitive: all of the files under the old name -will be renamed to backup filenames, and a new directory structure -created under the new name. We believe this is the best that can be -done without imposing unreasonable implementation complexity. - - -Summary -------- - -This completes the design of remote-to-local synchronization. -We realize that it may seem very complicated. Anecdotally, proprietary -filesystem synchronization designs we are aware of, such as Dropbox, -are said to incur similar or greater design complexity. diff --git a/docs/proposed/magic-folder/user-interface-design.rst b/docs/proposed/magic-folder/user-interface-design.rst deleted file mode 100644 index ea976bb00..000000000 --- a/docs/proposed/magic-folder/user-interface-design.rst +++ /dev/null @@ -1,205 +0,0 @@ -Magic Folder user interface design -================================== - -Scope ------ - -In this Objective we will design a user interface to allow users to conveniently -and securely indicate which folders on some devices should be "magically" linked -to which folders on other devices. - -This is a critical usability and security issue for which there is no known perfect -solution, but which we believe is amenable to a "good enough" trade-off solution. -This document explains the design and justifies its trade-offs in terms of security, -usability, and time-to-market. - -Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective6`_ -keyword are within the scope of the user interface design. - -.. _otf-magic-folder-objective6: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective6 - -Glossary -'''''''' - -Object: a file or directory - -DMD: distributed mutable directory - -Folder: an abstract directory that is synchronized between clients. -(A folder is not the same as the directory corresponding to it on -any particular client, nor is it the same as a DMD.) - -Collective: the set of clients subscribed to a given Magic Folder. - -Diminishing: the process of deriving, from an existing capability, -another capability that gives less authority (for example, deriving a -read cap from a read/write cap). 
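-
-As an illustration of the last definition, diminishing can be done
-programmatically; a minimal sketch using the ``allmydata.uri``
-module (assuming ``writecap`` holds a valid write cap string)::
-
-    from allmydata import uri
-
-    def diminish(writecap):
-        # Parse the cap and derive the corresponding read-only cap.
-        return uri.from_string(writecap).get_readonly().to_string()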
- - -Design Constraints ------------------- - -The design of the Tahoe-side representation of a Magic Folder, and the -polling mechanism that the Magic Folder clients will use to detect remote -changes was discussed in :doc:`remote-to-local-sync`, -and we will not revisit that here. The assumption made by that design was -that each client would be configured with the following information: - -* a write cap to its own *client DMD*. -* a read cap to a *collective directory*. - -The collective directory contains links to each client DMD named by the -corresponding client's nickname. - -This design was chosen to allow straightforward addition of clients without -requiring each existing client to change its configuration. - -Note that each client in a Magic Folder collective has the authority to add, -modify or delete any object within the Magic Folder. It is also able to control -to some extent whether its writes will be treated by another client as overwrites -or as conflicts. However, there is still a reliability benefit to preventing a -client from accidentally modifying another client's DMD, or from accidentally -modifying the collective directory in a way that would lose data. This motivates -ensuring that each client only has access to the caps above, rather than, say, -every client having a write cap to the collective directory. - -Another important design constraint is that we cannot violate the :doc:`write -coordination directive<../../write_coordination>`; that is, we cannot write to -the same mutable directory from multiple clients, even during the setup phase -when adding a client. - -Within these constraints, for usability we want to minimize the number of steps -required to configure a Magic Folder collective. - - -Proposed Design ---------------- - -Three ``tahoe`` subcommands are added:: - - tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR] - - Create an empty Magic Folder. The MAGIC: local alias is set - to a write cap which can be used to refer to this Magic Folder - in future ``tahoe magic-folder invite`` commands. - - If MY_NICKNAME and LOCAL_DIR are given, the current client - immediately joins the newly created Magic Folder with that - nickname and local directory. - - - tahoe magic-folder invite MAGIC: THEIR_NICKNAME - - Print an "invitation" that can be used to invite another - client to join a Magic Folder, with the given nickname. - - The invitation must be sent to the user of the other client - over a secure channel (e.g. PGP email, OTR, or ssh). - - This command will normally be run by the same client that - created the Magic Folder. However, it may be run by a - different client if the ``MAGIC:`` alias is copied to - the ``private/aliases`` file of that other client, or if - ``MAGIC:`` is replaced by the write cap to which it points. - - - tahoe magic-folder join INVITATION LOCAL_DIR - - Accept an invitation created by ``tahoe magic-folder invite``. - The current client joins the specified Magic Folder, which will - appear in the local filesystem at the given directory. - - -There are no commands to remove a client or to revoke an -invitation, although those are possible features that could -be added in future. (When removing a client, it is necessary -to copy each file it added to some other client's DMD, if it -is the most recent version of that file.) - - -Implementation -'''''''''''''' - -For "``tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR]``" : - -1. Run "``tahoe create-alias MAGIC:``". -2. 
If ``MY_NICKNAME`` and ``LOCAL_DIR`` are given, do the equivalent of::
-
-   INVITATION=`tahoe magic-folder invite MAGIC: MY_NICKNAME`
-   tahoe magic-folder join INVITATION LOCAL_DIR
-
-
-For "``tahoe magic-folder invite COLLECTIVE_WRITECAP NICKNAME``" :
-
-(``COLLECTIVE_WRITECAP`` can, as a special case, be an alias such as ``MAGIC:``.)
-
-1. Create an empty client DMD. Let its write URI be ``CLIENT_WRITECAP``.
-2. Diminish ``CLIENT_WRITECAP`` to ``CLIENT_READCAP``, and
-   diminish ``COLLECTIVE_WRITECAP`` to ``COLLECTIVE_READCAP``.
-3. Run "``tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME``".
-4. Print "``COLLECTIVE_READCAP+CLIENT_WRITECAP``" as the invitation,
-   accompanied by instructions on how to accept the invitation and
-   the need to send it over a secure channel.
-
-
-For "``tahoe magic-folder join INVITATION LOCAL_DIR``" :
-
-1. Parse ``INVITATION`` as ``COLLECTIVE_READCAP+CLIENT_WRITECAP``.
-2. Write ``CLIENT_WRITECAP`` to the file ``magic_folder_dircap``
-   under the client's ``private`` directory.
-3. Write ``COLLECTIVE_READCAP`` to the file ``collective_dircap``
-   under the client's ``private`` directory.
-4. Edit the client's ``tahoe.cfg`` to set
-   ``[magic_folder] enabled = True`` and
-   ``[magic_folder] local.directory = LOCAL_DIR``.
-
-
-Discussion
-----------
-
-The proposed design has a minor violation of the
-`Principle of Least Authority`_ in order to reduce the number
-of steps needed. The invoker of "``tahoe magic-folder invite``"
-creates the client DMD on behalf of the invited client, and
-could retain its write cap (which is part of the invitation).
-
-.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf
-
-A possible alternative design would be for the invited client
-to create its own client DMD, and send it back to the inviter
-to be linked into the collective directory. However, this would
-require another secure communication and another command
-invocation per client. Given that, as mentioned earlier, each
-client in a Magic Folder collective already has the authority
-to add, modify or delete any object within the Magic Folder,
-we considered the potential security/reliability improvement
-here not to be worth the loss of usability.
-
-We also considered a design where each client had write access to
-the collective directory. This would arguably be a more serious
-violation of the Principle of Least Authority than the one above
-(because all clients would have excess authority rather than just
-the inviter). In any case, it was not clear how to make such a
-design satisfy the :doc:`write coordination
-directive<../../write_coordination>`, because the collective
-directory would have needed to be written to by multiple clients.
-
-The reliance on a secure channel to send the invitation to its
-intended recipient is not ideal, since it may involve additional
-software such as clients for PGP, OTR, ssh, etc. However, we believe
-that this complexity is necessary rather than incidental, because
-there must be some way to distinguish the intended recipient from
-potential attackers who would try to become members of the Magic
-Folder collective without authorization. By making use of existing
-channels that have likely already been set up by security-conscious
-users, we avoid reinventing the wheel or imposing substantial extra
-implementation costs.
-
-The length of an invitation will be approximately the combined
-length of a Tahoe-LAFS read cap and write cap. 
This is several -lines long, but still short enough to be cut-and-pasted successfully -if care is taken. Errors in copying the invitation can be detected -since Tahoe-LAFS cap URIs are self-authenticating. - -The implementation of the ``tahoe`` subcommands is straightforward -and raises no further difficult design issues. From 6bd2cc47b30d0f230b4c084cb7f50acd7c9ed228 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:26:48 -0500 Subject: [PATCH 0010/1472] Remove some git configuration related to magic-folder --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index bd76fc2ce..ee3b02b8f 100644 --- a/.gitignore +++ b/.gitignore @@ -43,7 +43,6 @@ zope.interface-*.egg /.tox/ /docs/_build/ /coverage.xml -/smoke_magicfolder/ /.hypothesis/ # This is the plaintext of the private environment needed for some CircleCI From 1fba2d6ea2d6ca59c427a58f034d8ad9c1081c6c Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:26:57 -0500 Subject: [PATCH 0011/1472] Remove the magic-folder integration tests --- integration/conftest.py | 91 ------ integration/test_aaa_aardvark.py | 4 - integration/test_magic_folder.py | 462 ------------------------------- integration/test_tor.py | 2 +- integration/util.py | 4 - 5 files changed, 1 insertion(+), 562 deletions(-) delete mode 100644 integration/test_magic_folder.py diff --git a/integration/conftest.py b/integration/conftest.py index 10927f358..a85bc15f6 100644 --- a/integration/conftest.py +++ b/integration/conftest.py @@ -367,97 +367,6 @@ def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, reques return process -@pytest.fixture(scope='session') -@log_call(action_type=u"integration:alice:invite", include_args=["temp_dir"]) -def alice_invite(reactor, alice, temp_dir, request): - node_dir = join(temp_dir, 'alice') - - with start_action(action_type=u"integration:alice:magic_folder:create"): - # FIXME XXX by the time we see "client running" in the logs, the - # storage servers aren't "really" ready to roll yet (uploads fairly - # consistently fail if we don't hack in this pause...) - proto = _CollectOutputProtocol() - _tahoe_runner_optional_coverage( - proto, - reactor, - request, - [ - 'magic-folder', 'create', - '--poll-interval', '2', - '--basedir', node_dir, 'magik:', 'alice', - join(temp_dir, 'magic-alice'), - ] - ) - pytest_twisted.blockon(proto.done) - - with start_action(action_type=u"integration:alice:magic_folder:invite") as a: - proto = _CollectOutputProtocol() - _tahoe_runner_optional_coverage( - proto, - reactor, - request, - [ - 'magic-folder', 'invite', - '--basedir', node_dir, 'magik:', 'bob', - ] - ) - pytest_twisted.blockon(proto.done) - invite = proto.output.getvalue() - a.add_success_fields(invite=invite) - - with start_action(action_type=u"integration:alice:magic_folder:restart"): - # before magic-folder works, we have to stop and restart (this is - # crappy for the tests -- can we fix it in magic-folder?) 
- try: - alice.transport.signalProcess('TERM') - pytest_twisted.blockon(alice.transport.exited) - except ProcessExitedAlready: - pass - with start_action(action_type=u"integration:alice:magic_folder:magic-text"): - magic_text = 'Completed initial Magic Folder scan successfully' - pytest_twisted.blockon(_run_node(reactor, node_dir, request, magic_text)) - await_client_ready(alice) - return invite - - -@pytest.fixture(scope='session') -@log_call( - action_type=u"integration:magic_folder", - include_args=["alice_invite", "temp_dir"], -) -def magic_folder(reactor, alice_invite, alice, bob, temp_dir, request): - print("pairing magic-folder") - bob_dir = join(temp_dir, 'bob') - proto = _CollectOutputProtocol() - _tahoe_runner_optional_coverage( - proto, - reactor, - request, - [ - 'magic-folder', 'join', - '--poll-interval', '1', - '--basedir', bob_dir, - alice_invite, - join(temp_dir, 'magic-bob'), - ] - ) - pytest_twisted.blockon(proto.done) - - # before magic-folder works, we have to stop and restart (this is - # crappy for the tests -- can we fix it in magic-folder?) - try: - print("Sending TERM to Bob") - bob.transport.signalProcess('TERM') - pytest_twisted.blockon(bob.transport.exited) - except ProcessExitedAlready: - pass - - magic_text = 'Completed initial Magic Folder scan successfully' - pytest_twisted.blockon(_run_node(reactor, bob_dir, request, magic_text)) - await_client_ready(bob) - return (join(temp_dir, 'magic-alice'), join(temp_dir, 'magic-bob')) - - @pytest.fixture(scope='session') def chutney(reactor, temp_dir): chutney_dir = join(temp_dir, 'chutney') diff --git a/integration/test_aaa_aardvark.py b/integration/test_aaa_aardvark.py index e4698a12a..4a2ef71a6 100644 --- a/integration/test_aaa_aardvark.py +++ b/integration/test_aaa_aardvark.py @@ -16,7 +16,3 @@ def test_create_introducer(introducer): def test_create_storage(storage_nodes): print("Created {} storage nodes".format(len(storage_nodes))) - - -def test_create_alice_bob_magicfolder(magic_folder): - print("Alice and Bob have paired magic-folders") diff --git a/integration/test_magic_folder.py b/integration/test_magic_folder.py deleted file mode 100644 index 32323cb64..000000000 --- a/integration/test_magic_folder.py +++ /dev/null @@ -1,462 +0,0 @@ -import sys -import time -import shutil -from os import mkdir, unlink, utime -from os.path import join, exists, getmtime - -import util - -import pytest_twisted - - -# tests converted from check_magicfolder_smoke.py -# see "conftest.py" for the fixtures (e.g. "magic_folder") - -def test_eliot_logs_are_written(alice, bob, temp_dir): - # The integration test configuration arranges for this logging - # configuration. Verify it actually does what we want. - # - # The alice and bob arguments looks unused but they actually tell pytest - # to set up all the magic-folder stuff. The assertions here are about - # side-effects of that setup. - assert exists(join(temp_dir, "alice", "logs", "eliot.json")) - assert exists(join(temp_dir, "bob", "logs", "eliot.json")) - - -def test_alice_writes_bob_receives(magic_folder): - alice_dir, bob_dir = magic_folder - - with open(join(alice_dir, "first_file"), "w") as f: - f.write("alice wrote this") - - util.await_file_contents(join(bob_dir, "first_file"), "alice wrote this") - return - - -def test_alice_writes_bob_receives_multiple(magic_folder): - """ - When Alice does a series of updates, Bob should just receive them - with no .backup or .conflict files being produced. 
-
-    """
-    alice_dir, bob_dir = magic_folder
-
-    unwanted_files = [
-        join(bob_dir, "multiple.backup"),
-        join(bob_dir, "multiple.conflict")
-    ]
-
-    # first update
-    with open(join(alice_dir, "multiple"), "w") as f:
-        f.write("alice wrote this")
-
-    util.await_file_contents(
-        join(bob_dir, "multiple"), "alice wrote this",
-        error_if=unwanted_files,
-    )
-
-    # second update
-    with open(join(alice_dir, "multiple"), "w") as f:
-        f.write("someone changed their mind")
-
-    util.await_file_contents(
-        join(bob_dir, "multiple"), "someone changed their mind",
-        error_if=unwanted_files,
-    )
-
-    # third update
-    with open(join(alice_dir, "multiple"), "w") as f:
-        f.write("absolutely final version ship it")
-
-    util.await_file_contents(
-        join(bob_dir, "multiple"), "absolutely final version ship it",
-        error_if=unwanted_files,
-    )
-
-    # fourth update, but both "at once" so one should conflict
-    time.sleep(2)
-    with open(join(alice_dir, "multiple"), "w") as f:
-        f.write("okay one more attempt")
-    with open(join(bob_dir, "multiple"), "w") as f:
-        f.write("...but just let me add")
-
-    bob_conflict = join(bob_dir, "multiple.conflict")
-    alice_conflict = join(alice_dir, "multiple.conflict")
-
-    found = util.await_files_exist([
-        bob_conflict,
-        alice_conflict,
-    ])
-
-    assert len(found) > 0, "Should have found a conflict"
-    print("conflict found (as expected)")
-
-
-def test_alice_writes_bob_receives_old_timestamp(magic_folder):
-    alice_dir, bob_dir = magic_folder
-    fname = join(alice_dir, "ts_file")
-    ts = time.time() - (60 * 60 * 36)  # 36 hours ago
-
-    with open(fname, "w") as f:
-        f.write("alice wrote this")
-    utime(fname, (time.time(), ts))
-
-    fname = join(bob_dir, "ts_file")
-    util.await_file_contents(fname, "alice wrote this")
-    # make sure the timestamp is correct
-    assert int(getmtime(fname)) == int(ts)
-    return
-
-
-def test_bob_writes_alice_receives(magic_folder):
-    alice_dir, bob_dir = magic_folder
-
-    with open(join(bob_dir, "second_file"), "w") as f:
-        f.write("bob wrote this")
-
-    util.await_file_contents(join(alice_dir, "second_file"), "bob wrote this")
-    return
-
-
-def test_alice_deletes(magic_folder):
-    # alice writes a file, waits for bob to get it and then deletes it. 
-
-    alice_dir, bob_dir = magic_folder
-
-    with open(join(alice_dir, "delfile"), "w") as f:
-        f.write("alice wrote this")
-
-    util.await_file_contents(join(bob_dir, "delfile"), "alice wrote this")
-
-    # bob has the file; now alice deletes it
-    unlink(join(alice_dir, "delfile"))
-
-    # bob should remove his copy, but preserve a backup
-    util.await_file_vanishes(join(bob_dir, "delfile"))
-    util.await_file_contents(join(bob_dir, "delfile.backup"), "alice wrote this")
-    return
-
-
-def test_alice_creates_bob_edits(magic_folder):
-    alice_dir, bob_dir = magic_folder
-
-    # alice writes a file
-    with open(join(alice_dir, "editfile"), "w") as f:
-        f.write("alice wrote this")
-
-    util.await_file_contents(join(bob_dir, "editfile"), "alice wrote this")
-
-    # now bob edits it
-    with open(join(bob_dir, "editfile"), "w") as f:
-        f.write("bob says foo")
-
-    util.await_file_contents(join(alice_dir, "editfile"), "bob says foo")
-
-
-def test_bob_creates_sub_directory(magic_folder):
-    alice_dir, bob_dir = magic_folder
-
-    # bob makes a sub-dir, with a file in it
-    mkdir(join(bob_dir, "subdir"))
-    with open(join(bob_dir, "subdir", "a_file"), "w") as f:
-        f.write("bob wuz here")
-
-    # alice gets it
-    util.await_file_contents(join(alice_dir, "subdir", "a_file"), "bob wuz here")
-
-    # now bob deletes it again
-    shutil.rmtree(join(bob_dir, "subdir"))
-
-    # alice should delete it as well
-    util.await_file_vanishes(join(alice_dir, "subdir", "a_file"))
-    # i *think* it's by design that the subdir won't disappear,
-    # because an "a_file.backup" should appear...
-    util.await_file_contents(join(alice_dir, "subdir", "a_file.backup"), "bob wuz here")
-
-
-def test_bob_creates_alice_deletes_bob_restores(magic_folder):
-    alice_dir, bob_dir = magic_folder
-
-    # bob creates a file
-    with open(join(bob_dir, "boom"), "w") as f:
-        f.write("bob wrote this")
-
-    util.await_file_contents(
-        join(alice_dir, "boom"),
-        "bob wrote this"
-    )
-
-    # alice deletes it (so bob should as well .. but keep a backup)
-    unlink(join(alice_dir, "boom"))
-    util.await_file_vanishes(join(bob_dir, "boom"))
-    assert exists(join(bob_dir, "boom.backup"))
-
-    # bob restores it, with new contents
-    unlink(join(bob_dir, "boom.backup"))
-    with open(join(bob_dir, "boom"), "w") as f:
-        f.write("bob wrote this again, because reasons")
-
-    # XXX double-check this behavior is correct!
-
-    # alice sees bob's update, but marks it as a conflict (because
-    # .. she previously deleted it? does that really make sense)
-
-    util.await_file_contents(
-        join(alice_dir, "boom"),
-        "bob wrote this again, because reasons",
-    )
-
-
-def test_bob_creates_alice_deletes_alice_restores(magic_folder):
-    alice_dir, bob_dir = magic_folder
-
-    # bob creates a file
-    with open(join(bob_dir, "boom2"), "w") as f:
-        f.write("bob wrote this")
-
-    util.await_file_contents(
-        join(alice_dir, "boom2"),
-        "bob wrote this"
-    )
-
-    # alice deletes it (so bob should as well)
-    unlink(join(alice_dir, "boom2"))
-    util.await_file_vanishes(join(bob_dir, "boom2"))
-
-    # alice restores it, with new contents
-    with open(join(alice_dir, "boom2"), "w") as f:
-        f.write("alice re-wrote this again, because reasons")
-
-    util.await_file_contents(
-        join(bob_dir, "boom2"),
-        "alice re-wrote this again, because reasons"
-    )
-
-
-def test_bob_conflicts_with_alice_fresh(magic_folder):
-    # both alice and bob make a file at "the same time".
-    alice_dir, bob_dir = magic_folder
-
-    # either alice or bob will "win" by uploading to the DMD first. 
-def test_bob_conflicts_with_alice_fresh(magic_folder):
-    # both alice and bob make a file at "the same time".
-    alice_dir, bob_dir = magic_folder
-
-    # either alice or bob will "win" by uploading to the DMD first.
-    with open(join(bob_dir, 'alpha'), 'w') as f0, open(join(alice_dir, 'alpha'), 'w') as f1:
-        f0.write("this is bob's alpha\n")
-        f1.write("this is alice's alpha\n")
-
-    # there should be conflicts
-    _bob_conflicts_alice_await_conflicts('alpha', alice_dir, bob_dir)
-
-
-def test_bob_conflicts_with_alice_preexisting(magic_folder):
-    # both alice and bob edit a file at "the same time" (similar to
-    # above, but the file already exists before the edits)
-    alice_dir, bob_dir = magic_folder
-
-    # have bob create the file
-    with open(join(bob_dir, 'beta'), 'w') as f:
-        f.write("original beta (from bob)\n")
-    util.await_file_contents(join(alice_dir, 'beta'), "original beta (from bob)\n")
-
-    # both alice and bob now have a "beta" file, at version 0
-
-    # either alice or bob will "win" by uploading to the DMD first
-    # (however, they should both detect a conflict)
-    with open(join(bob_dir, 'beta'), 'w') as f:
-        f.write("this is bob's beta\n")
-    with open(join(alice_dir, 'beta'), 'w') as f:
-        f.write("this is alice's beta\n")
-
-    # both alice and bob should see a conflict
-    _bob_conflicts_alice_await_conflicts("beta", alice_dir, bob_dir)
-
-
-def _bob_conflicts_alice_await_conflicts(name, alice_dir, bob_dir):
-    """
-    shared code between the _fresh and _preexisting conflict tests
-    """
-    found = util.await_files_exist(
-        [
-            join(bob_dir, '{}.conflict'.format(name)),
-            join(alice_dir, '{}.conflict'.format(name)),
-        ],
-    )
-
-    assert len(found) >= 1, "should be at least one conflict"
-    assert open(join(bob_dir, name), 'r').read() == "this is bob's {}\n".format(name)
-    assert open(join(alice_dir, name), 'r').read() == "this is alice's {}\n".format(name)
-
-    alice_conflict = join(alice_dir, '{}.conflict'.format(name))
-    bob_conflict = join(bob_dir, '{}.conflict'.format(name))
-    if exists(bob_conflict):
-        assert open(bob_conflict, 'r').read() == "this is alice's {}\n".format(name)
-    if exists(alice_conflict):
-        assert open(alice_conflict, 'r').read() == "this is bob's {}\n".format(name)
-
-
-@pytest_twisted.inlineCallbacks
-def test_edmond_uploads_then_restarts(reactor, request, temp_dir, introducer_furl, flog_gatherer, storage_nodes):
-    """
-    ticket 2880: if a magic-folder client uploads something and then
-    re-starts, a spurious .backup file should not appear
-    """
-
-    edmond_dir = join(temp_dir, 'edmond')
-    edmond = yield util._create_node(
-        reactor, request, temp_dir, introducer_furl, flog_gatherer,
-        "edmond", web_port="tcp:9985:interface=localhost",
-        storage=False,
-    )
-
-
-    magic_folder = join(temp_dir, 'magic-edmond')
-    mkdir(magic_folder)
-    created = False
-    # create a magic-folder
-    # (how can we know that the grid is ready?)
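There is no programmatic "grid is ready" signal here, so the deleted test below simply retries the magic-folder create CLI invocation until it succeeds. A generic retry helper in the same spirit might look like the following sketch (hypothetical; the test inlines the loop instead):

    import time

    def retry(fn, attempts=10, delay=1.0):
        # Call fn() until it succeeds, sleeping `delay` seconds between tries.
        for i in range(attempts):
            try:
                return fn()
            except Exception as e:
                print("attempt {} failed: {}".format(i + 1, e))
                time.sleep(delay)
        raise RuntimeError("gave up after {} attempts".format(attempts))
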
- for _ in range(10): # try 10 times - try: - proto = util._CollectOutputProtocol() - transport = reactor.spawnProcess( - proto, - sys.executable, - [ - sys.executable, '-m', 'allmydata.scripts.runner', - 'magic-folder', 'create', - '--poll-interval', '2', - '--basedir', edmond_dir, - 'magik:', - 'edmond_magic', - magic_folder, - ] - ) - yield proto.done - created = True - break - except Exception as e: - print("failed to create magic-folder: {}".format(e)) - time.sleep(1) - - assert created, "Didn't create a magic-folder" - - # to actually-start the magic-folder we have to re-start - edmond.transport.signalProcess('TERM') - yield edmond.transport.exited - edmond = yield util._run_node(reactor, edmond.node_dir, request, 'Completed initial Magic Folder scan successfully') - util.await_client_ready(edmond) - - # add a thing to the magic-folder - with open(join(magic_folder, "its_a_file"), "w") as f: - f.write("edmond wrote this") - - # fixme, do status-update attempts in a loop below - time.sleep(5) - - # let it upload; poll the HTTP magic-folder status API until it is - # uploaded - from allmydata.scripts.magic_folder_cli import _get_json_for_fragment - - with open(join(edmond_dir, u'private', u'api_auth_token'), 'rb') as f: - token = f.read() - - uploaded = False - for _ in range(10): - options = { - "node-url": open(join(edmond_dir, u'node.url'), 'r').read().strip(), - } - try: - magic_data = _get_json_for_fragment( - options, - 'magic_folder?t=json', - method='POST', - post_args=dict( - t='json', - name='default', - token=token, - ) - ) - for mf in magic_data: - if mf['status'] == u'success' and mf['path'] == u'its_a_file': - uploaded = True - break - except Exception as e: - time.sleep(1) - - assert uploaded, "expected to upload 'its_a_file'" - - # re-starting edmond right now would "normally" trigger the 2880 bug - - # kill edmond - edmond.transport.signalProcess('TERM') - yield edmond.transport.exited - time.sleep(1) - edmond = yield util._run_node(reactor, edmond.node_dir, request, 'Completed initial Magic Folder scan successfully') - util.await_client_ready(edmond) - - # XXX how can we say for sure if we've waited long enough? look at - # tail of logs for magic-folder ... somethingsomething? - print("waiting 20 seconds to see if a .backup appears") - for _ in range(20): - assert exists(join(magic_folder, "its_a_file")) - assert not exists(join(magic_folder, "its_a_file.backup")) - time.sleep(1) - - -@pytest_twisted.inlineCallbacks -def test_alice_adds_files_while_bob_is_offline(reactor, request, temp_dir, magic_folder): - """ - Alice can add new files to a magic folder while Bob is offline. When Bob - comes back online his copy is updated to reflect the new files. - """ - alice_magic_dir, bob_magic_dir = magic_folder - alice_node_dir = join(temp_dir, "alice") - bob_node_dir = join(temp_dir, "bob") - - # Take Bob offline. - yield util.cli(request, reactor, bob_node_dir, "stop") - - # Create a couple files in Alice's local directory. - some_files = list( - (name * 3) + ".added-while-offline" - for name - in "xyz" - ) - for name in some_files: - with open(join(alice_magic_dir, name), "w") as f: - f.write(name + " some content") - - good = False - for i in range(15): - status = yield util.magic_folder_cli(request, reactor, alice_node_dir, "status") - good = status.count(".added-while-offline (36 B): good, version=0") == len(some_files) * 2 - if good: - # We saw each file as having a local good state and a remote good - # state. That means we're ready to involve Bob. 
- break - else: - time.sleep(1.0) - - assert good, ( - "Timed out waiting for good Alice state. Last status:\n{}".format(status) - ) - - # Start Bob up again - magic_text = 'Completed initial Magic Folder scan successfully' - yield util._run_node(reactor, bob_node_dir, request, magic_text) - - yield util.await_files_exist( - list( - join(bob_magic_dir, name) - for name - in some_files - ), - await_all=True, - ) - # Let it settle. It would be nicer to have a readable status output we - # could query. Parsing the current text format is more than I want to - # deal with right now. - time.sleep(1.0) - conflict_files = list(name + ".conflict" for name in some_files) - assert all( - list( - not exists(join(bob_magic_dir, name)) - for name - in conflict_files - ), - ) diff --git a/integration/test_tor.py b/integration/test_tor.py index 187754f08..633def8de 100644 --- a/integration/test_tor.py +++ b/integration/test_tor.py @@ -14,7 +14,7 @@ import pytest_twisted import util -# see "conftest.py" for the fixtures (e.g. "magic_folder") +# see "conftest.py" for the fixtures (e.g. "tor_network") @pytest_twisted.inlineCallbacks def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl): diff --git a/integration/util.py b/integration/util.py index aef9ba544..bbcf5efc6 100644 --- a/integration/util.py +++ b/integration/util.py @@ -498,7 +498,3 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2): tahoe, ) ) - - -def magic_folder_cli(request, reactor, node_dir, *argv): - return cli(request, reactor, node_dir, "magic-folder", *argv) From ef73e18f3df31fb2f274ecdb9e8336f857d9734e Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 10:43:50 -0500 Subject: [PATCH 0012/1472] Remove the greater part of the implementation --- src/allmydata/client.py | 48 - src/allmydata/frontends/magic_folder.py | 2111 -------------- src/allmydata/magicfolderdb.py | 204 -- src/allmydata/magicpath.py | 32 - src/allmydata/scripts/magic_folder_cli.py | 610 ----- src/allmydata/scripts/runner.py | 7 +- src/allmydata/test/cli/test_magic_folder.py | 814 ------ src/allmydata/test/test_client.py | 99 - src/allmydata/test/test_inotify.py | 171 -- src/allmydata/test/test_magic_folder.py | 2425 ----------------- src/allmydata/test/test_magicpath.py | 28 - .../test/test_python2_regressions.py | 3 - src/allmydata/test/test_websocket_logs.py | 2 +- src/allmydata/test/web/test_web.py | 98 - src/allmydata/util/eliotutil.py | 83 - src/allmydata/util/fake_inotify.py | 109 - src/allmydata/watchdog/__init__.py | 0 src/allmydata/watchdog/_watchdog_541.py | 16 - src/allmydata/watchdog/inotify.py | 212 -- src/allmydata/web/magic_folder.py | 52 - src/allmydata/web/root.py | 29 +- src/allmydata/web/static/css/new-tahoe.css | 5 - src/allmydata/web/statistics.xhtml | 12 - src/allmydata/web/status.py | 30 - src/allmydata/web/welcome.xhtml | 7 - src/allmydata/windows/inotify.py | 379 --- 26 files changed, 3 insertions(+), 7583 deletions(-) delete mode 100644 src/allmydata/frontends/magic_folder.py delete mode 100644 src/allmydata/magicfolderdb.py delete mode 100644 src/allmydata/magicpath.py delete mode 100644 src/allmydata/scripts/magic_folder_cli.py delete mode 100644 src/allmydata/test/cli/test_magic_folder.py delete mode 100644 src/allmydata/test/test_inotify.py delete mode 100644 src/allmydata/test/test_magic_folder.py delete mode 100644 src/allmydata/test/test_magicpath.py delete mode 100644 src/allmydata/util/fake_inotify.py delete mode 100644 
src/allmydata/watchdog/__init__.py delete mode 100644 src/allmydata/watchdog/_watchdog_541.py delete mode 100644 src/allmydata/watchdog/inotify.py delete mode 100644 src/allmydata/web/magic_folder.py delete mode 100644 src/allmydata/windows/inotify.py diff --git a/src/allmydata/client.py b/src/allmydata/client.py index e45666049..5e8bf0d6d 100644 --- a/src/allmydata/client.py +++ b/src/allmydata/client.py @@ -85,9 +85,6 @@ _client_config = configutil.ValidConfiguration( "stats_gatherer.furl", "storage.plugins", ), - "drop_upload": ( # deprecated already? - "enabled", - ), "ftpd": ( "accounts.file", "accounts.url", @@ -121,12 +118,6 @@ _client_config = configutil.ValidConfiguration( "helper": ( "enabled", ), - "magic_folder": ( - "download.umask", - "enabled", - "local.directory", - "poll_interval", - ), }, is_valid_section=_is_valid_section, # Anything in a valid section is a valid item, for now. @@ -681,7 +672,6 @@ class _Client(node.Node, pollmixin.PollMixin): """ node.Node.__init__(self, config, main_tub, control_tub, i2p_provider, tor_provider) - self._magic_folders = dict() self.started_timestamp = time.time() self.logSource = "Client" self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy() @@ -707,7 +697,6 @@ class _Client(node.Node, pollmixin.PollMixin): self.init_helper() self.init_ftp_server() self.init_sftp_server() - self.init_magic_folder() # If the node sees an exit_trigger file, it will poll every second to see # whether the file still exists, and what its mtime is. If the file does not @@ -968,9 +957,6 @@ class _Client(node.Node, pollmixin.PollMixin): This returns a local authentication token, which is just some random data in "api_auth_token" which must be echoed to API calls. - - Currently only the URI '/magic' for magic-folder status; other - endpoints are invited to include this as well, as appropriate. """ return self.config.get_private_config('api_auth_token') @@ -1088,40 +1074,6 @@ class _Client(node.Node, pollmixin.PollMixin): sftp_portstr, pubkey_file, privkey_file) s.setServiceParent(self) - def init_magic_folder(self): - #print "init_magic_folder" - if self.config.get_config("drop_upload", "enabled", False, boolean=True): - raise node.OldConfigOptionError( - "The [drop_upload] section must be renamed to [magic_folder].\n" - "See docs/frontends/magic-folder.rst for more information." 
- ) - - if self.config.get_config("magic_folder", "enabled", False, boolean=True): - from allmydata.frontends import magic_folder - - try: - magic_folders = magic_folder.load_magic_folders(self.config._basedir) - except Exception as e: - log.msg("Error loading magic-folder config: {}".format(e)) - raise - - # start processing the upload queue when we've connected to - # enough servers - threshold = min(self.encoding_params["k"], - self.encoding_params["happy"] + 1) - - for (name, mf_config) in magic_folders.items(): - self.log("Starting magic_folder '{}'".format(name)) - s = magic_folder.MagicFolder.from_config(self, name, mf_config) - self._magic_folders[name] = s - s.setServiceParent(self) - - connected_d = self.storage_broker.when_connected_enough(threshold) - def connected_enough(ign, mf): - mf.ready() # returns a Deferred we ignore - return None - connected_d.addCallback(connected_enough, s) - def _check_exit_trigger(self, exit_trigger_file): if os.path.exists(exit_trigger_file): mtime = os.stat(exit_trigger_file)[stat.ST_MTIME] diff --git a/src/allmydata/frontends/magic_folder.py b/src/allmydata/frontends/magic_folder.py deleted file mode 100644 index bb2edce99..000000000 --- a/src/allmydata/frontends/magic_folder.py +++ /dev/null @@ -1,2111 +0,0 @@ -import six -import sys, os -import os.path -from errno import EEXIST -from collections import deque -from datetime import datetime -import time -import ConfigParser - -from twisted.python.log import msg as twmsg -from twisted.python.filepath import FilePath -from twisted.python.monkey import MonkeyPatcher -from twisted.internet import defer, reactor, task -from twisted.internet.error import AlreadyCancelled -from twisted.python.failure import Failure -from twisted.python import runtime -from twisted.application import service - -from zope.interface import Interface, Attribute, implementer - -from eliot import ( - Field, - Message, - start_action, - ActionType, - MessageType, - write_failure, - write_traceback, - log_call, -) -from eliot.twisted import ( - DeferredContext, -) - -from allmydata.util import ( - fileutil, - configutil, - yamlutil, - eliotutil, -) -from allmydata.interfaces import IDirectoryNode -from allmydata.util import log -from allmydata.util.fileutil import ( - precondition_abspath, - get_pathinfo, - ConflictError, - abspath_expanduser_unicode, -) -from allmydata.util.assertutil import precondition, _assert -from allmydata.util.deferredutil import HookMixin -from allmydata.util.progress import PercentProgress -from allmydata.util.encodingutil import listdir_filepath, to_filepath, \ - extend_filepath, unicode_from_filepath, unicode_segments_from, \ - quote_filepath, quote_local_unicode_path, FilenameEncodingError -from allmydata.util.time_format import format_time -from allmydata.immutable.upload import FileName, Data -from allmydata import magicfolderdb, magicpath - -if six.PY3: - long = int - - -# Mask off all non-owner permissions for magic-folders files by default. -_DEFAULT_DOWNLOAD_UMASK = 0o077 - -IN_EXCL_UNLINK = long(0x04000000) - - -class ConfigurationError(Exception): - """ - There was something wrong with some magic-folder configuration. 
- """ - - -def _get_inotify_module(): - try: - if sys.platform == "win32": - from allmydata.windows import inotify - elif runtime.platform.supportsINotify(): - from twisted.internet import inotify - elif not sys.platform.startswith("linux"): - from allmydata.watchdog import inotify - else: - raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n" - "This currently requires Linux, Windows, or macOS.") - return inotify - except (ImportError, AttributeError) as e: - log.msg(e) - if sys.platform == "win32": - raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n" - "Windows support requires at least Vista, and has only been tested on Windows 7.") - raise - - -def get_inotify_module(): - # Until Twisted #9579 is fixed, the Docker check just screws things up. - # Disable it. - monkey = MonkeyPatcher() - monkey.addPatch(runtime.platform, "isDocker", lambda: False) - return monkey.runWithPatches(_get_inotify_module) - - -def is_new_file(pathinfo, db_entry): - if db_entry is None: - return True - - if not pathinfo.exists and db_entry.size is None: - return False - - return ((pathinfo.size, pathinfo.ctime_ns, pathinfo.mtime_ns) != - (db_entry.size, db_entry.ctime_ns, db_entry.mtime_ns)) - - -def _upgrade_magic_folder_config(basedir): - """ - Helper that upgrades from single-magic-folder-only configs to - multiple magic-folder configuration style (in YAML) - """ - config_fname = os.path.join(basedir, "tahoe.cfg") - config = configutil.get_config(config_fname) - - collective_fname = os.path.join(basedir, "private", "collective_dircap") - upload_fname = os.path.join(basedir, "private", "magic_folder_dircap") - magic_folders = { - u"default": { - u"directory": config.get("magic_folder", "local.directory").decode("utf-8"), - u"collective_dircap": fileutil.read(collective_fname), - u"upload_dircap": fileutil.read(upload_fname), - u"poll_interval": int(config.get("magic_folder", "poll_interval")), - }, - } - fileutil.move_into_place( - source=os.path.join(basedir, "private", "magicfolderdb.sqlite"), - dest=os.path.join(basedir, "private", "magicfolder_default.sqlite"), - ) - save_magic_folders(basedir, magic_folders) - config.remove_option("magic_folder", "local.directory") - config.remove_option("magic_folder", "poll_interval") - configutil.write_config(os.path.join(basedir, 'tahoe.cfg'), config) - fileutil.remove_if_possible(collective_fname) - fileutil.remove_if_possible(upload_fname) - - -def maybe_upgrade_magic_folders(node_directory): - """ - If the given node directory is not already using the new-style - magic-folder config it will be upgraded to do so. (This should - only be done if the user is running a command that needs to modify - the config) - """ - yaml_fname = os.path.join(node_directory, u"private", u"magic_folders.yaml") - if os.path.exists(yaml_fname): - # we already have new-style magic folders - return - - config_fname = os.path.join(node_directory, "tahoe.cfg") - config = configutil.get_config(config_fname) - - # we have no YAML config; if we have config in tahoe.cfg then we - # can upgrade it to the YAML-based configuration - if config.has_option("magic_folder", "local.directory"): - _upgrade_magic_folder_config(node_directory) - - -def load_magic_folders(node_directory): - """ - Loads existing magic-folder configuration and returns it as a dict - mapping name -> dict of config. 
This will NOT upgrade from - old-style to new-style config (but WILL read old-style config and - return in the same way as if it was new-style). - - :param node_directory: path where node data is stored - :returns: dict mapping magic-folder-name to its config (also a dict) - """ - yaml_fname = os.path.join(node_directory, u"private", u"magic_folders.yaml") - folders = dict() - - config_fname = os.path.join(node_directory, "tahoe.cfg") - config = configutil.get_config(config_fname) - - if not os.path.exists(yaml_fname): - # there will still be a magic_folder section in a "new" - # config, but it won't have local.directory nor poll_interval - # in it. - if config.has_option("magic_folder", "local.directory"): - up_fname = os.path.join(node_directory, "private", "magic_folder_dircap") - coll_fname = os.path.join(node_directory, "private", "collective_dircap") - directory = config.get("magic_folder", "local.directory").decode('utf8') - try: - interval = int(config.get("magic_folder", "poll_interval")) - except ConfigParser.NoOptionError: - interval = 60 - - if config.has_option("magic_folder", "download.umask"): - umask = int(config.get("magic_folder", "download.umask"), 8) - else: - umask = _DEFAULT_DOWNLOAD_UMASK - - folders[u"default"] = { - u"directory": directory, - u"upload_dircap": fileutil.read(up_fname), - u"collective_dircap": fileutil.read(coll_fname), - u"poll_interval": interval, - u"umask": umask, - } - else: - # without any YAML file AND no local.directory option it's - # an error if magic-folder is "enabled" because we don't - # actually have enough config for any magic-folders at all - if config.has_section("magic_folder") \ - and config.getboolean("magic_folder", "enabled") \ - and not folders: - raise Exception( - "[magic_folder] is enabled but has no YAML file and no " - "'local.directory' option." - ) - - elif os.path.exists(yaml_fname): # yaml config-file exists - if config.has_option("magic_folder", "local.directory"): - raise Exception( - "magic-folder config has both old-style configuration" - " and new-style configuration; please remove the " - "'local.directory' key from tahoe.cfg or remove " - "'magic_folders.yaml' from {}".format(node_directory) - ) - with open(yaml_fname, "r") as f: - magic_folders = yamlutil.safe_load(f.read()) - if not isinstance(magic_folders, dict): - raise Exception( - "'{}' should contain a dict".format(yaml_fname) - ) - - folders = magic_folders['magic-folders'] - if not isinstance(folders, dict): - raise Exception( - "'magic-folders' in '{}' should be a dict".format(yaml_fname) - ) - - # check configuration - folders = dict( - (name, fix_magic_folder_config(yaml_fname, name, config)) - for (name, config) - in folders.items() - ) - return folders - - -def fix_magic_folder_config(yaml_fname, name, config): - """ - Check the given folder configuration for validity. - - If it refers to a local directory which does not exist, create that - directory with the configured permissions. - - :param unicode yaml_fname: The configuration file from which the - configuration was read. - - :param unicode name: The name of the magic-folder this particular - configuration blob is associated with. - - :param config: The configuration for a single magic-folder. This is - expected to be a ``dict`` with certain keys and values of certain - types but these properties will be checked. - - :raise ConfigurationError: If the given configuration object does not - conform to some magic-folder configuration requirement. 
- """ - if not isinstance(config, dict): - raise ConfigurationError( - "Each item in '{}' must itself be a dict".format(yaml_fname) - ) - - for k in ['collective_dircap', 'upload_dircap', 'directory', 'poll_interval']: - if k not in config: - raise ConfigurationError( - "Config for magic folder '{}' is missing '{}'".format( - name, k - ) - ) - - if not isinstance( - config.setdefault(u"umask", _DEFAULT_DOWNLOAD_UMASK), - int, - ): - raise Exception("magic-folder download umask must be an integer") - - # make sure directory for magic folder exists - dir_fp = to_filepath(config['directory']) - umask = config.setdefault('umask', 0o077) - - try: - os.mkdir(dir_fp.path, 0o777 & (~ umask)) - except OSError as e: - if EEXIST != e.errno: - # Report some unknown problem. - raise ConfigurationError( - "magic-folder {} configured path {} could not be created: " - "{}".format( - name, - dir_fp.path, - str(e), - ), - ) - elif not dir_fp.isdir(): - # Tell the user there's a collision. - raise ConfigurationError( - "magic-folder {} configured path {} exists and is not a " - "directory".format( - name, dir_fp.path, - ), - ) - - result_config = config.copy() - for k in ['collective_dircap', 'upload_dircap']: - if isinstance(config[k], unicode): - result_config[k] = config[k].encode('ascii') - return result_config - - - -def save_magic_folders(node_directory, folders): - fileutil.write_atomically( - os.path.join(node_directory, u"private", u"magic_folders.yaml"), - yamlutil.safe_dump({u"magic-folders": folders}), - ) - - config = configutil.get_config(os.path.join(node_directory, u"tahoe.cfg")) - configutil.set_config(config, "magic_folder", "enabled", "True") - configutil.write_config(os.path.join(node_directory, u"tahoe.cfg"), config) - - -class MagicFolder(service.MultiService): - - @classmethod - def from_config(cls, client_node, name, config): - """ - Create a ``MagicFolder`` from a client node and magic-folder - configuration. - - :param _Client client_node: The client node the magic-folder is - attached to. - - :param dict config: Magic-folder configuration like that in the list - returned by ``load_magic_folders``. 
- """ - db_filename = client_node.config.get_private_path("magicfolder_{}.sqlite".format(name)) - local_dir_config = config['directory'] - try: - poll_interval = int(config["poll_interval"]) - except ValueError: - raise ValueError("'poll_interval' option must be an int") - - return cls( - client=client_node, - upload_dircap=config["upload_dircap"], - collective_dircap=config["collective_dircap"], - # XXX surely a better way for this local_path_u business - local_path_u=abspath_expanduser_unicode( - local_dir_config, - base=client_node.config.get_config_path(), - ), - dbfile=abspath_expanduser_unicode(db_filename), - umask=config["umask"], - name=name, - downloader_delay=poll_interval, - ) - - def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask, - name, uploader_delay=1.0, clock=None, downloader_delay=60): - precondition_abspath(local_path_u) - if not os.path.exists(local_path_u): - raise ValueError("'{}' does not exist".format(local_path_u)) - if not os.path.isdir(local_path_u): - raise ValueError("'{}' is not a directory".format(local_path_u)) - # this is used by 'service' things and must be unique in this Service hierarchy - self.name = 'magic-folder-{}'.format(name) - - service.MultiService.__init__(self) - - clock = clock or reactor - db = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1)) - if db is None: - raise Exception('ERROR: Unable to load magic folder db.') - - # for tests - self._client = client - self._db = db - - upload_dirnode = self._client.create_node_from_uri(upload_dircap) - collective_dirnode = self._client.create_node_from_uri(collective_dircap) - - self.uploader = Uploader(client, local_path_u, db, upload_dirnode, uploader_delay, clock) - self.downloader = Downloader(client, local_path_u, db, collective_dirnode, - upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending, umask, - self.set_public_status, poll_interval=downloader_delay) - self._public_status = (False, ['Magic folder has not yet started']) - - def get_public_status(self): - """ - For the web UI, basically. - """ - return self._public_status - - def set_public_status(self, status, *messages): - self._public_status = (status, messages) - - def startService(self): - service.MultiService.startService(self) - return self.uploader.start_monitoring() - - def stopService(self): - with MAGIC_FOLDER_STOP(nickname=self.name).context(): - d = DeferredContext(self._finish()) - d.addBoth( - lambda ign: service.MultiService.stopService(self) - ) - return d.addActionFinish() - - def ready(self): - """ready is used to signal us to start - processing the upload and download items... 
- """ - self.uploader.start_uploading() # synchronous, returns None - return self.downloader.start_downloading() - - def _finish(self): - d0 = self.downloader.stop() - d1 = self.uploader.stop() - return defer.DeferredList(list( - DeferredContext(d).addErrback(write_failure).result - for d in [d0, d1] - )) - - -_NICKNAME = Field.for_types( - u"nickname", - [unicode, bytes], - u"A Magic-Folder participant nickname.", -) - -_DIRECTION = Field.for_types( - u"direction", - [unicode], - u"A synchronization direction: uploader or downloader.", - eliotutil.validateSetMembership({u"uploader", u"downloader"}), -) - -PROCESSING_LOOP = ActionType( - u"magic-folder:processing-loop", - [_NICKNAME, _DIRECTION], - [], - u"A Magic-Folder is processing uploads or downloads.", -) - -ITERATION = ActionType( - u"magic-folder:iteration", - [_NICKNAME, _DIRECTION], - [], - u"A step towards synchronization in one direction.", -) - -_COUNT = Field.for_types( - u"count", - [int, long], - u"The number of items in the processing queue.", -) - -PROCESS_QUEUE = ActionType( - u"magic-folder:process-queue", - [_COUNT], - [], - u"A Magic-Folder is working through an item queue.", -) - -SCAN_REMOTE_COLLECTIVE = ActionType( - u"magic-folder:scan-remote-collective", - [], - [], - u"The remote collective is being scanned for peer DMDs.", -) - -_DMDS = Field( - u"dmds", - # The children of the collective directory are the participant DMDs. The - # keys in this dict give us the aliases of the participants. - lambda collective_directory_listing: collective_directory_listing.keys(), - u"The (D)istributed (M)utable (D)irectories belonging to each participant are being scanned for changes.", -) - -COLLECTIVE_SCAN = MessageType( - u"magic-folder:downloader:get-latest-file:collective-scan", - [_DMDS], - u"Participants in the collective are being scanned.", -) - - -SCAN_REMOTE_DMD = ActionType( - u"magic-folder:scan-remote-dmd", - [_NICKNAME], - [], - u"A peer DMD is being scanned for changes.", -) - -REMOTE_VERSION = Field.for_types( - u"remote_version", - [int, long], - u"The version of a path found in a peer DMD.", -) - -REMOTE_URI = Field.for_types( - u"remote_uri", - [bytes], - u"The filecap of a path found in a peer DMD.", -) - -REMOTE_DMD_ENTRY = MessageType( - u"magic-folder:remote-dmd-entry", - [eliotutil.RELPATH, magicfolderdb.PATHENTRY, REMOTE_VERSION, REMOTE_URI], - u"A single entry found by scanning a peer DMD.", -) - -ADD_TO_DOWNLOAD_QUEUE = MessageType( - u"magic-folder:add-to-download-queue", - [eliotutil.RELPATH], - u"An entry was found to be changed and is being queued for download.", -) - -MAGIC_FOLDER_STOP = ActionType( - u"magic-folder:stop", - [_NICKNAME], - [], - u"A Magic-Folder is being stopped.", -) - -MAYBE_UPLOAD = MessageType( - u"magic-folder:maybe-upload", - [eliotutil.RELPATH], - u"A decision is being made about whether to upload a file.", -) - -PENDING = Field( - u"pending", - lambda s: list(s), - u"The paths which are pending processing.", - eliotutil.validateInstanceOf(set), -) - -REMOVE_FROM_PENDING = ActionType( - u"magic-folder:remove-from-pending", - [eliotutil.RELPATH, PENDING], - [], - u"An item being processed is being removed from the pending set.", -) - -PATH = Field( - u"path", - lambda fp: fp.asTextMode().path, - u"A local filesystem path.", - eliotutil.validateInstanceOf(FilePath), -) - -NOTIFIED_OBJECT_DISAPPEARED = MessageType( - u"magic-folder:notified-object-disappeared", - [PATH], - u"A path which generated a notification was not found on the filesystem. 
This is normal.", -) - -PROPAGATE_DIRECTORY_DELETION = ActionType( - u"magic-folder:propagate-directory-deletion", - [], - [], - u"Children of a deleted directory are being queued for upload processing.", -) - -NO_DATABASE_ENTRY = MessageType( - u"magic-folder:no-database-entry", - [], - u"There is no local database entry for a particular relative path in the magic folder.", -) - -NOT_UPLOADING = MessageType( - u"magic-folder:not-uploading", - [], - u"An item being processed is not going to be uploaded.", -) - -SYMLINK = MessageType( - u"magic-folder:symlink", - [PATH], - u"An item being processed was a symlink and is being skipped", -) - -CREATED_DIRECTORY = Field.for_types( - u"created_directory", - [unicode], - u"The relative path of a newly created directory in a magic-folder.", -) - -PROCESS_DIRECTORY = ActionType( - u"magic-folder:process-directory", - [], - [CREATED_DIRECTORY], - u"An item being processed was a directory.", -) - -DIRECTORY_PATHENTRY = MessageType( - u"magic-folder:directory-dbentry", - [magicfolderdb.PATHENTRY], - u"Local database state relating to an item possibly being uploaded.", -) - -NOT_NEW_DIRECTORY = MessageType( - u"magic-folder:not-new-directory", - [], - u"A directory item being processed was found to not be new.", -) - -NOT_NEW_FILE = MessageType( - u"magic-folder:not-new-file", - [], - u"A file item being processed was found to not be new (or changed).", -) - -SPECIAL_FILE = MessageType( - u"magic-folder:special-file", - [], - u"An item being processed was found to be of a special type which is not supported.", -) - -_COUNTER_NAME = Field.for_types( - u"counter_name", - # Should really only be unicode - [unicode, bytes], - u"The name of a counter.", -) - -_DELTA = Field.for_types( - u"delta", - [int, long], - u"An amount of a specific change in a counter.", -) - -_VALUE = Field.for_types( - u"value", - [int, long], - u"The new value of a counter after a change.", -) - -COUNT_CHANGED = MessageType( - u"magic-folder:count", - [_COUNTER_NAME, _DELTA, _VALUE], - u"The value of a counter has changed.", -) - -START_MONITORING = ActionType( - u"magic-folder:start-monitoring", - [_NICKNAME, _DIRECTION], - [], - u"Uploader is beginning to monitor the filesystem for uploadable changes.", -) - -STOP_MONITORING = ActionType( - u"magic-folder:stop-monitoring", - [_NICKNAME, _DIRECTION], - [], - u"Uploader is terminating filesystem monitoring operation.", -) - -START_UPLOADING = ActionType( - u"magic-folder:start-uploading", - [_NICKNAME, _DIRECTION], - [], - u"Uploader is performing startup-time inspection of known files.", -) - -_IGNORED = Field.for_types( - u"ignored", - [bool], - u"A file proposed for queueing for processing is instead being ignored by policy.", -) - -_ALREADY_PENDING = Field.for_types( - u"already_pending", - [bool], - u"A file proposed for queueing for processing is already in the queue.", -) - -_SIZE = Field.for_types( - u"size", - [int, long, type(None)], - u"The size of a file accepted into the processing queue.", -) - -ADD_PENDING = ActionType( - u"magic-folder:add-pending", - [eliotutil.RELPATH], - [_IGNORED, _ALREADY_PENDING, _SIZE], - u"Uploader is adding a path to the processing queue.", -) - -FULL_SCAN = ActionType( - u"magic-folder:full-scan", - [_NICKNAME, _DIRECTION], - [], - u"A complete brute-force scan of the local directory is being performed.", -) - -SCAN = ActionType( - u"magic-folder:scan", - [eliotutil.RELPATH], - [], - u"A brute-force scan of a subset of the local directory is being performed.", -) - -NOTIFIED = ActionType( 
- u"magic-folder:notified", - [PATH, _NICKNAME, _DIRECTION], - [], - u"Magic-Folder received a notification of a local filesystem change for a certain path.", -) - -_NON_DIR_CREATED = Field.for_types( - u"non_dir_created", - [bool], - u"A creation event was for a non-directory and requires no further inspection.", -) - - -REACT_TO_INOTIFY = ActionType( - u"magic-folder:react-to-inotify", - [eliotutil.INOTIFY_EVENTS], - [_IGNORED, _NON_DIR_CREATED, _ALREADY_PENDING], - u"Magic-Folder is processing a notification from inotify(7) (or a clone) about a filesystem event.", -) - -_ABSPATH = Field.for_types( - u"abspath", - [unicode], - u"The absolute path of a file being written in a local directory.", -) - -_IS_CONFLICT = Field.for_types( - u"is_conflict", - [bool], - u"An indication of whether a file being written in a local directory is in a conflicted state.", -) - -_NOW = Field.for_types( - u"now", - [int, long, float], - u"The time at which a file is being written in a local directory.", -) - -_MTIME = Field.for_types( - u"mtime", - [int, long, float, type(None)], - u"A modification time to put into the metadata of a file being written in a local directory.", -) - -WRITE_DOWNLOADED_FILE = ActionType( - u"magic-folder:write-downloaded-file", - [_ABSPATH, _SIZE, _IS_CONFLICT, _NOW, _MTIME], - [], - u"A downloaded file is being written to the filesystem.", -) - -ALREADY_GONE = MessageType( - u"magic-folder:rename:already-gone", - [], - u"A deleted file could not be rewritten to a backup path because it no longer exists.", -) - -_REASON = Field( - u"reason", - lambda e: str(e), - u"An exception which may describe the form of the conflict.", - eliotutil.validateInstanceOf(Exception), -) - -OVERWRITE_BECOMES_CONFLICT = MessageType( - u"magic-folder:overwrite-becomes-conflict", - [_REASON], - u"An attempt to overwrite an existing file failed because that file is now conflicted.", -) - -_FILES = Field( - u"files", - lambda file_set: list(file_set), - u"All of the relative paths belonging to a Magic-Folder that are locally known.", -) - -ALL_FILES = MessageType( - u"magic-folder:all-files", - [_FILES], - u"A record of the rough state of the local database at the time of downloader start up.", -) - -_ITEMS = Field( - u"items", - lambda deque: list(dict(relpath=item.relpath_u, kind=item.kind) for item in deque), - u"Items in a processing queue.", -) - -ITEM_QUEUE = MessageType( - u"magic-folder:item-queue", - [_ITEMS], - u"A report of the items in the processing queue at this point.", -) - -_BATCH = Field( - u"batch", - # Just report the paths for now. Perhaps something from the values would - # also be useful, though? Consider it. - lambda batch: batch.keys(), - u"A batch of scanned items.", - eliotutil.validateInstanceOf(dict), -) - -SCAN_BATCH = MessageType( - u"magic-folder:scan-batch", - [_BATCH], - u"Items in a batch of files which were scanned from the DMD.", -) - -START_DOWNLOADING = ActionType( - u"magic-folder:start-downloading", - [_NICKNAME, _DIRECTION], - [], - u"A Magic-Folder downloader is initializing and beginning to manage downloads.", -) - -PERFORM_SCAN = ActionType( - u"magic-folder:perform-scan", - [], - [], - u"Remote storage is being scanned for changes which need to be synchronized.", -) - -_STATUS = Field.for_types( - u"status", - # Should just be unicode... 
- [unicode, bytes], - u"The status of an item in a processing queue.", -) - -QUEUED_ITEM_STATUS_CHANGE = MessageType( - u"magic-folder:item:status-change", - [eliotutil.RELPATH, _STATUS], - u"A queued item changed status.", -) - -_CONFLICT_REASON = Field.for_types( - u"conflict_reason", - [unicode, type(None)], - u"A human-readable explanation of why a file was in conflict.", - eliotutil.validateSetMembership({ - u"dbentry mismatch metadata", - u"dbentry newer version", - u"last_downloaded_uri mismatch", - u"file appeared", - None, - }), -) - -CHECKING_CONFLICTS = ActionType( - u"magic-folder:item:checking-conflicts", - [], - [_IS_CONFLICT, _CONFLICT_REASON], - u"A potential download item is being checked to determine if it is in a conflicted state.", -) - -REMOTE_DIRECTORY_CREATED = MessageType( - u"magic-folder:remote-directory-created", - [], - u"The downloader found a new directory in the DMD.", -) - -REMOTE_DIRECTORY_DELETED = MessageType( - u"magic-folder:remote-directory-deleted", - [], - u"The downloader found a directory has been deleted from the DMD.", -) - -class QueueMixin(HookMixin): - """ - A parent class for Uploader and Downloader that handles putting - IQueuedItem instances into a work queue and processing - them. Tracks some history of recent items processed (for the - "status" API). - - Subclasses implement _scan_delay, _perform_scan and _process - - :ivar unicode _name: Either "uploader" or "downloader". - - :ivar _deque: IQueuedItem instances to process - - :ivar _process_history: the last 20 items we processed - - :ivar _in_progress: current batch of items which are currently - being processed; chunks of work are removed from _deque and - worked on. As each finishes, it is added to _process_history - (with oldest items falling off the end). - """ - - def __init__(self, client, local_path_u, db, name, clock): - self._client = client - self._local_path_u = local_path_u - self._local_filepath = to_filepath(local_path_u) - self._db = db - self._name = name - self._clock = clock - self._log_fields = dict( - nickname=self._client.nickname, - direction=self._name, - ) - self._hooks = { - 'processed': None, - 'started': None, - 'iteration': None, - 'inotify': None, - 'item_processed': None, - } - self.started_d = self.set_hook('started') - - # we should have gotten nice errors already while loading the - # config, but just to be safe: - assert self._local_filepath.exists() - assert self._local_filepath.isdir() - - self._deque = deque() - # do we also want to bound on "maximum age"? - self._process_history = deque(maxlen=20) - self._in_progress = [] - - def get_status(self): - """ - Returns an iterable of instances that implement IQueuedItem - """ - for item in self._deque: - yield item - for item in self._in_progress: - yield item - for item in self._process_history: - yield item - - def _get_filepath(self, relpath_u): - return extend_filepath(self._local_filepath, relpath_u.split(u"/")) - - def stop(self): - """ - Don't process queued items anymore. - - :return Deferred: A ``Deferred`` that fires when processing has - completely stopped. - """ - d = self._processing - self._processing_loop.stop() - self._processing = None - self._processing_loop = None - return d - - def _begin_processing(self): - """ - Start a loop that looks for work to do and then does it. - """ - action = PROCESSING_LOOP(**self._log_fields) - - # Note that we don't put the processing iterations into the logging - # action because we expect this loop to run for the whole lifetime of - # the process. 
The tooling for dealing with incomplete action trees
-        # is still somewhat lacking.  Putting the iterations into the overall
-        # loop action would hamper reading those logs for now.
-        self._processing_loop = task.LoopingCall(self._processing_iteration)
-        self._processing_loop.clock = self._clock
-        self._processing = self._processing_loop.start(self._scan_delay(), now=True)
-
-        with action.context():
-            # We do make sure errors appear in the loop action though.
-            d = DeferredContext(self._processing)
-            d.addActionFinish()
-
-    def _processing_iteration(self):
-        """
-        One iteration runs self._process_deque which calls _perform_scan() and
-        then completely drains the _deque (processing each item).
-        """
-        action = ITERATION(**self._log_fields)
-        with action.context():
-            d = DeferredContext(defer.Deferred())
-
-            # During startup we scanned the collective for items to download.
-            # If we found work to do, we do not need to perform another scan
-            # here.  More importantly, the logic for determining which items
-            # to download is *not correct* in the case where two scans are
-            # performed with no intermediate emptying of the work queue.
-            # Therefore, skip the scan any time there is queued work.  The
-            # only time we expect there to be any, though, is on the first
-            # time through this loop.
-            if not self._deque:
-                # adds items to our deque
-                d.addCallback(lambda ignored: self._perform_scan())
-
-            # process anything in our queue
-            d.addCallback(lambda ignored: self._process_deque())
-
-            # Let the tests know we've made it this far.
-            d.addCallback(lambda ignored: self._call_hook(None, 'iteration'))
-
-            # Get it out of the Eliot context
-            result = d.addActionFinish()
-
-            # Kick it off
-            result.callback(None)
-
-            # Give it back to LoopingCall so it can wait on us.
-            return result
-
-    def _scan_delay(self):
-        raise NotImplementedError
-
-    def _perform_scan(self):
-        return
-
-    @eliotutil.inline_callbacks
-    def _process_deque(self):
-        # process everything currently in the queue. we're turning it
-        # into a list so that if any new items get added while we're
-        # processing, they'll not run until next time.
-        to_process = list(self._deque)
-        self._deque.clear()
-        self._count('objects_queued', -len(to_process))
-
-        # we want to include all these in the next status request, so
-        # we must put them 'somewhere' before the next yield (and it's
-        # not in _process_history because that gets trimmed and we
-        # don't want anything to disappear until after it is
-        # completed)
-        self._in_progress.extend(to_process)
-
-        with PROCESS_QUEUE(count=len(to_process)):
-            for item in to_process:
-                self._process_history.appendleft(item)
-                self._in_progress.remove(item)
-                try:
-                    proc = yield self._process(item)
-                    if not proc:
-                        self._process_history.remove(item)
-                    self._call_hook(item, 'item_processed')
-                except:
-                    write_traceback()
-                    item.set_status('failed', self._clock.seconds())
-                    proc = Failure()
-
-                self._call_hook(proc, 'processed')
-
-    def _get_relpath(self, filepath):
-        segments = unicode_segments_from(filepath, self._local_filepath)
-        return u"/".join(segments)
-
-    def _count(self, counter_name, delta=1):
-        ctr = 'magic_folder.%s.%s' % (self._name, counter_name)
-        self._client.stats_provider.count(ctr, delta)
-        COUNT_CHANGED.log(
-            counter_name=counter_name,
-            delta=delta,
-            value=self._client.stats_provider.counters[ctr],
-        )
-
-# this isn't in interfaces.py because it's very specific to QueueMixin
-class IQueuedItem(Interface):
-    relpath_u = Attribute("The path this item represents")
-    progress = Attribute("A PercentProgress instance")
-
-    def set_status(self, status, current_time=None):
-        """
-        """
-
-    def status_time(self, state):
-        """
-        Get the time of a particular state change, or None
-        """
-
-    def status_history(self):
-        """
-        All status changes, sorted latest -> oldest
-        """
-
-
-@implementer(IQueuedItem)
-class QueuedItem(object):
-    kind = None
-
-    def __init__(self, relpath_u, progress, size):
-        self.relpath_u = relpath_u
-        self.progress = progress
-        self._status_history = dict()
-        self.size = size
-
-    def set_status(self, status, current_time=None):
-        if current_time is None:
-            current_time = time.time()
-        self._status_history[status] = current_time
-        QUEUED_ITEM_STATUS_CHANGE.log(
-            relpath=self.relpath_u,
-            status=status,
-        )
-
-    def status_time(self, state):
-        """
-        Returns None if there's no status-update for 'state', else returns
-        the timestamp when that state was reached.
-        """
-        return self._status_history.get(state, None)
-
-    def status_history(self):
-        """
-        Returns a list of 2-tuples of (state, timestamp) sorted by timestamp
-        """
-        hist = self._status_history.items()
-        hist.sort(lambda a, b: cmp(a[1], b[1]))
-        return hist
-
-    def __eq__(self, other):
-        return (
-            other.relpath_u == self.relpath_u and
-            other.status_history() == self.status_history()
-        )
-
-
-class UploadItem(QueuedItem):
-    """
-    Represents a single item in the _deque of the Uploader
-    """
-    kind = u"upload"
-
-
-_ITEM = Field(
-    u"item",
-    lambda i: {
-        u"relpath": i.relpath_u,
-        u"size": i.size,
-    },
-    u"An item to be uploaded or downloaded.",
-    eliotutil.validateInstanceOf(QueuedItem),
-)
-
-PROCESS_ITEM = ActionType(
-    u"magic-folder:process-item",
-    [_ITEM],
-    [],
-    u"A path which was found wanting of an update is receiving an update.",
-)
-
-DOWNLOAD_BEST_VERSION = ActionType(
-    u"magic-folder:download-best-version",
-    [],
-    [],
-    u"The content of a file in the Magic Folder is being downloaded.",
-)
-
-class Uploader(QueueMixin):
-
-    def __init__(self, client, local_path_u, db, upload_dirnode, pending_delay, clock):
-        QueueMixin.__init__(self, client, local_path_u, db, u'uploader', clock)
-
-        self.is_ready = False
-
-        if not IDirectoryNode.providedBy(upload_dirnode):
-            raise AssertionError("'upload_dircap' does not refer to a directory")
-        if upload_dirnode.is_unknown() or upload_dirnode.is_readonly():
-            raise AssertionError("'upload_dircap' is not a writecap to a directory")
-
-        self._upload_dirnode = upload_dirnode
-        self._inotify = get_inotify_module()
-        self._notifier = self._inotify.INotify()
-
-        self._pending = set()  # of unicode relpaths
-        self._pending_delay = pending_delay
-        self._periodic_full_scan_duration = 10 * 60  # perform a full scan every 10 minutes
-        self._periodic_callid = None
-
-        if hasattr(self._notifier, 'set_pending_delay'):
-            self._notifier.set_pending_delay(pending_delay)
-
-        # TODO: what about IN_MOVE_SELF and IN_UNMOUNT?
- # - self.mask = ( self._inotify.IN_CREATE - | self._inotify.IN_CLOSE_WRITE - | self._inotify.IN_MOVED_TO - | self._inotify.IN_MOVED_FROM - | self._inotify.IN_DELETE - | self._inotify.IN_ONLYDIR - | IN_EXCL_UNLINK - ) - - def _add_watch(self, filepath): - self._notifier.watch( - filepath, - mask=self.mask, - callbacks=[self._notify], - recursive=True, - ) - - def start_monitoring(self): - action = START_MONITORING(**self._log_fields) - with action.context(): - d = DeferredContext(defer.succeed(None)) - - d.addCallback(lambda ign: self._add_watch(self._local_filepath)) - d.addCallback(lambda ign: self._notifier.startReading()) - d.addCallback(lambda ign: self._count('dirs_monitored')) - d.addBoth(self._call_hook, 'started') - return d.addActionFinish() - - def stop(self): - action = STOP_MONITORING(**self._log_fields) - with action.context(): - self._notifier.stopReading() - self._count('dirs_monitored', -1) - if self._periodic_callid: - try: - self._periodic_callid.cancel() - except AlreadyCancelled: - pass - - if hasattr(self._notifier, 'wait_until_stopped'): - d = DeferredContext(self._notifier.wait_until_stopped()) - else: - d = DeferredContext(defer.succeed(None)) - - d.addCallback(lambda ignored: QueueMixin.stop(self)) - return d.addActionFinish() - - def start_uploading(self): - action = START_UPLOADING(**self._log_fields) - with action: - self.is_ready = True - - all_relpaths = self._db.get_all_relpaths() - - for relpath_u in all_relpaths: - self._add_pending(relpath_u) - - self._full_scan() - self._begin_processing() - - def _scan_delay(self): - return self._pending_delay - - def _full_scan(self): - with FULL_SCAN(**self._log_fields): - self._periodic_callid = self._clock.callLater(self._periodic_full_scan_duration, self._full_scan) - self._scan(u"") - - def _add_pending(self, relpath_u): - with ADD_PENDING(relpath=relpath_u) as action: - if magicpath.should_ignore_file(relpath_u): - action.add_success_fields(ignored=True, already_pending=False, size=None) - return - if self.is_pending(relpath_u): - action.add_success_fields(ignored=False, already_pending=True, size=None) - return - - self._pending.add(relpath_u) - fp = self._get_filepath(relpath_u) - pathinfo = get_pathinfo(unicode_from_filepath(fp)) - progress = PercentProgress() - action.add_success_fields(ignored=False, already_pending=False, size=pathinfo.size) - item = UploadItem(relpath_u, progress, pathinfo.size) - item.set_status('queued', self._clock.seconds()) - self._deque.append(item) - self._count('objects_queued') - - def _scan(self, reldir_u): - # Scan a directory by (synchronously) adding the paths of all its children to self._pending. 
-        # Note that this doesn't add them to the deque -- that will
-        with SCAN(relpath=reldir_u):
-            fp = self._get_filepath(reldir_u)
-            try:
-                children = listdir_filepath(fp)
-            except EnvironmentError:
-                raise Exception("WARNING: magic folder: permission denied on directory %s"
-                                % quote_filepath(fp))
-            except FilenameEncodingError:
-                raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
-                                % quote_filepath(fp))
-
-            for child in children:
-                _assert(isinstance(child, unicode), child=child)
-                self._add_pending("%s/%s" % (reldir_u, child) if reldir_u != u"" else child)
-
-    def is_pending(self, relpath_u):
-        return relpath_u in self._pending
-
-    def _notify(self, opaque, path, events_mask):
-        with NOTIFIED(path=path, **self._log_fields):
-            try:
-                return self._real_notify(opaque, path, events_mask)
-            except Exception:
-                write_traceback()
-
-    def _real_notify(self, opaque, path, events_mask):
-        action = REACT_TO_INOTIFY(
-            # We could think about logging opaque here but ... it's opaque.
-            # All we can do is id() or repr() it and neither of those actually
-            # produces very illuminating results.  We drop opaque on the
-            # floor, anyway.
-            inotify_events=events_mask,
-        )
-        success_fields = dict(non_dir_created=False, already_pending=False, ignored=False)
-
-        with action:
-            relpath_u = self._get_relpath(path)
-
-            # We filter out IN_CREATE events not associated with a directory.
-            # Acting on IN_CREATE for files could cause us to read and upload
-            # a possibly-incomplete file before the application has closed it.
-            # There should always be an IN_CLOSE_WRITE after an IN_CREATE, I think.
-            # It isn't possible to avoid watching for IN_CREATE at all, because
-            # it is the only event notified for a directory creation.
-
-            if ((events_mask & self._inotify.IN_CREATE) != 0 and
-                (events_mask & self._inotify.IN_ISDIR) == 0):
-                success_fields[u"non_dir_created"] = True
-            elif relpath_u in self._pending:
-                success_fields[u"already_pending"] = True
-            elif magicpath.should_ignore_file(relpath_u):
-                success_fields[u"ignored"] = True
-            else:
-                self._add_pending(relpath_u)
-
-            # Always fire the inotify hook. If an accident of timing causes a
-            # second inotify event for a particular path before the first has
-            # been processed, the expectation is still that any code that was
-            # waiting for the second inotify event should be notified.
-            self._call_hook(path, 'inotify')
-            action.add_success_fields(**success_fields)
-
-    def _process(self, item):
-        """
-        Possibly upload a single QueuedItem. If this returns False, the item is
-        removed from _process_history.
-        """
-        # Uploader
-        with PROCESS_ITEM(item=item).context():
-            relpath_u = item.relpath_u
-            precondition(isinstance(relpath_u, unicode), relpath_u)
-            precondition(not relpath_u.endswith(u'/'), relpath_u)
-            encoded_path_u = magicpath.path2magic(relpath_u)
-
-            d = DeferredContext(defer.succeed(False))
-            if relpath_u is None:
-                item.set_status('invalid_path', self._clock.seconds())
-                return d.addActionFinish()
-            item.set_status('started', self._clock.seconds())
-
-            try:
-                # Take this item out of the pending set before we do any
-                # I/O-based processing related to it. If a further change
-                # takes place after we remove it from this set, we want it to
-                # end up in the set again. If we haven't gotten around to
-                # doing the I/O-based processing yet then the worst that will
-                # happen is we'll do a little redundant processing.
- # - # If we did it the other way around, the sequence of events - # might be something like: we do some I/O, someone else does - # some I/O, a notification gets discarded because the path is - # still in the pending set, _then_ we remove it from the - # pending set. In such a circumstance, we've missed some I/O - # that we should have responded to. - with REMOVE_FROM_PENDING(relpath=relpath_u, pending=self._pending): - self._pending.remove(relpath_u) - except KeyError: - pass - - fp = self._get_filepath(relpath_u) - pathinfo = get_pathinfo(unicode_from_filepath(fp)) - - db_entry_is_dir = False - db_entry = self._db.get_db_entry(relpath_u) - if db_entry is None: - # Maybe it was a directory! - db_entry = self._db.get_db_entry(relpath_u + u"/") - if db_entry is None: - NO_DATABASE_ENTRY.log() - else: - db_entry_is_dir = True - - def _maybe_upload(ign, now=None): - MAYBE_UPLOAD.log(relpath=relpath_u) - if now is None: - now = time.time() - - if not pathinfo.exists: - # FIXME merge this with the 'isfile' case. - NOTIFIED_OBJECT_DISAPPEARED.log(path=fp) - self._count('objects_disappeared') - - if db_entry is None: - # If it exists neither on the filesystem nor in the - # database, it's neither a creation nor a deletion and - # there's nothing more to do. - return False - - if pathinfo.isdir or db_entry_is_dir: - with PROPAGATE_DIRECTORY_DELETION(): - for localpath in self._db.get_direct_children(relpath_u): - self._add_pending(localpath.relpath_u) - - last_downloaded_timestamp = now # is this correct? - - if is_new_file(pathinfo, db_entry): - new_version = db_entry.version + 1 - else: - NOT_UPLOADING.log() - self._count('objects_not_uploaded') - return False - - # look out! there's another place we set a "metadata" - # object like this (for new, not deleted files) - metadata = { - 'version': new_version, - 'deleted': True, - 'last_downloaded_timestamp': last_downloaded_timestamp, - 'user_mtime': pathinfo.ctime_ns / 1000000000.0, # why are we using ns in PathInfo?? - } - - # from the Fire Dragons part of the spec: - # Later, in response to a local filesystem change at a given path, the - # Magic Folder client reads the last-downloaded record associated with - # that path (if any) from the database and then uploads the current - # file. When it links the uploaded file into its client DMD, it - # includes the ``last_downloaded_uri`` field in the metadata of the - # directory entry, overwriting any existing field of that name. If - # there was no last-downloaded record associated with the path, this - # field is omitted. - # Note that ``last_downloaded_uri`` field does *not* record the URI of - # the uploaded file (which would be redundant); it records the URI of - # the last download before the local change that caused the upload. - # The field will be absent if the file has never been downloaded by - # this client (i.e. if it was created on this client and no change - # by any other client has been detected). - - # XXX currently not actually true: it will record the - # LAST THING we wrote to (or saw on) disk (not - # necessarily downloaded?) 
- - if db_entry.last_downloaded_uri is not None: - metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri - if db_entry.last_uploaded_uri is not None: - metadata['last_uploaded_uri'] = db_entry.last_uploaded_uri - - if db_entry_is_dir: - real_encoded_path_u = encoded_path_u + magicpath.path2magic(u"/") - real_relpath_u = relpath_u + u"/" - else: - real_encoded_path_u = encoded_path_u - real_relpath_u = relpath_u - - empty_uploadable = Data("", self._client.convergence) - d2 = DeferredContext(self._upload_dirnode.add_file( - real_encoded_path_u, - empty_uploadable, - metadata=metadata, - overwrite=True, - progress=item.progress, - )) - - def _add_db_entry(filenode): - filecap = filenode.get_uri() - # if we're uploading a file, we want to set - # last_downloaded_uri to the filecap so that we don't - # immediately re-download it when we start up next - last_downloaded_uri = metadata.get('last_downloaded_uri', filecap) - self._db.did_upload_version( - real_relpath_u, - new_version, - filecap, - last_downloaded_uri, - last_downloaded_timestamp, - pathinfo, - ) - self._count('files_uploaded') - d2.addCallback(_add_db_entry) - d2.addCallback(lambda ign: True) - return d2.result - elif pathinfo.islink: - SYMLINK.log(path=fp) - return False - elif pathinfo.isdir: - if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False): - self._add_watch(fp) - - DIRECTORY_PATHENTRY.log(pathentry=db_entry) - if not is_new_file(pathinfo, db_entry): - NOT_NEW_DIRECTORY.log() - return False - - uploadable = Data("", self._client.convergence) - with PROCESS_DIRECTORY().context() as action: - upload_d = DeferredContext(self._upload_dirnode.add_file( - encoded_path_u + magicpath.path2magic(u"/"), - uploadable, - metadata={"version": 0}, - overwrite=True, - progress=item.progress, - )) - def _dir_succeeded(dirnode): - action.add_success_fields(created_directory=relpath_u) - self._count('directories_created') - self._db.did_upload_version( - relpath_u + u"/", - version=0, - last_uploaded_uri=dirnode.get_uri(), - last_downloaded_uri=None, - last_downloaded_timestamp=now, - pathinfo=pathinfo, - ) - upload_d.addCallback(_dir_succeeded) - upload_d.addCallback(lambda ign: self._scan(relpath_u)) - upload_d.addCallback(lambda ign: True) - return upload_d.addActionFinish() - elif pathinfo.isfile: - last_downloaded_timestamp = now - - if db_entry is None: - new_version = 0 - elif is_new_file(pathinfo, db_entry): - new_version = db_entry.version + 1 - else: - NOT_NEW_FILE.log() - self._count('objects_not_uploaded') - return False - - metadata = { - 'version': new_version, - 'last_downloaded_timestamp': last_downloaded_timestamp, - 'user_mtime': pathinfo.mtime_ns / 1000000000.0, # why are we using ns in PathInfo?? 
- } - if db_entry is not None: - if db_entry.last_downloaded_uri is not None: - metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri - if db_entry.last_uploaded_uri is not None: - metadata['last_uploaded_uri'] = db_entry.last_uploaded_uri - - uploadable = FileName(unicode_from_filepath(fp), self._client.convergence) - d2 = DeferredContext(self._upload_dirnode.add_file( - encoded_path_u, uploadable, - metadata=metadata, - overwrite=True, - progress=item.progress, - )) - - def _add_db_entry(filenode): - filecap = filenode.get_uri() - # if we're uploading a file, we want to set - # last_downloaded_uri to the filecap so that we don't - # immediately re-download it when we start up next - last_downloaded_uri = filecap - self._db.did_upload_version( - relpath_u, - new_version, - filecap, - last_downloaded_uri, - last_downloaded_timestamp, - pathinfo - ) - self._count('files_uploaded') - return True - d2.addCallback(_add_db_entry) - return d2.result - else: - SPECIAL_FILE.log() - return False - - d.addCallback(_maybe_upload) - - def _succeeded(res): - if res: - self._count('objects_succeeded') - # TODO: maybe we want the status to be 'ignored' if res is False - item.set_status('success', self._clock.seconds()) - return res - def _failed(f): - self._count('objects_failed') - item.set_status('failure', self._clock.seconds()) - return f - d.addCallbacks(_succeeded, _failed) - return d.addActionFinish() - - def _get_metadata(self, encoded_path_u): - try: - d = self._upload_dirnode.get_metadata_for(encoded_path_u) - except KeyError: - return Failure() - return d - - def _get_filenode(self, encoded_path_u): - try: - d = self._upload_dirnode.get(encoded_path_u) - except KeyError: - return Failure() - return d - - -class WriteFileMixin(object): - FUDGE_SECONDS = 10.0 - - def _get_conflicted_filename(self, abspath_u): - return abspath_u + u".conflict" - - def _write_downloaded_file(self, local_path_u, abspath_u, file_contents, - is_conflict=False, now=None, mtime=None): - if now is None: - now = time.time() - action = WRITE_DOWNLOADED_FILE( - abspath=abspath_u, - size=len(file_contents), - is_conflict=is_conflict, - now=now, - mtime=mtime, - ) - with action: - return self._write_downloaded_file_logged( - local_path_u, - abspath_u, - file_contents, - is_conflict, - now, - mtime, - ) - - def _write_downloaded_file_logged(self, local_path_u, abspath_u, - file_contents, is_conflict, now, mtime): - # 1. Write a temporary file, say .foo.tmp. - # 2. is_conflict determines whether this is an overwrite or a conflict. - # 3. Set the mtime of the replacement file to be T seconds before the - # current local time, or mtime whichever is oldest - # 4. Perform a file replacement with backup filename foo.backup, - # replaced file foo, and replacement file .foo.tmp. If any step of - # this operation fails, reclassify as a conflict and stop. - # - # Returns the path of the destination file. - precondition_abspath(abspath_u) - replacement_path_u = abspath_u + u".tmp" # FIXME more unique - - initial_path_u = os.path.dirname(abspath_u) - fileutil.make_dirs_with_absolute_mode(local_path_u, initial_path_u, (~ self._umask) & 0o777) - fileutil.write(replacement_path_u, file_contents) - os.chmod(replacement_path_u, (~ self._umask) & 0o666) - - # FUDGE_SECONDS is used to determine if another process has - # written to the same file concurrently. 
This is described in - # the Earth Dragon section of our design document ("T" in the - # spec is FUDGE_SECONDS here): - # docs/proposed/magic-folder/remote-to-local-sync.rst - fudge_time = now - self.FUDGE_SECONDS - modified_time = min(fudge_time, mtime) if mtime else fudge_time - os.utime(replacement_path_u, (now, modified_time)) - if is_conflict: - return self._rename_conflicted_file(abspath_u, replacement_path_u) - else: - try: - fileutil.replace_file(abspath_u, replacement_path_u) - return abspath_u - except fileutil.ConflictError as e: - OVERWRITE_BECOMES_CONFLICT.log(reason=e) - return self._rename_conflicted_file(abspath_u, replacement_path_u) - - @log_call( - action_type=u"magic-folder:rename-conflicted", - include_args=["abspath_u", "replacement_path_u"], - ) - def _rename_conflicted_file(self, abspath_u, replacement_path_u): - conflict_path_u = self._get_conflicted_filename(abspath_u) - fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u) - return conflict_path_u - - @log_call( - action_type=u"magic-folder:rename-deleted", - include_args=["abspath_u"], - ) - def _rename_deleted_file(self, abspath_u): - try: - fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup') - except OSError: - ALREADY_GONE.log() - return abspath_u - - -def _is_empty_filecap(client, cap): - """ - Internal helper. - - :param cap: a capability URI - - :returns: True if "cap" represents an empty file - """ - node = client.create_node_from_uri( - None, - cap.encode('ascii'), - ) - return (not node.get_size()) - - -class DownloadItem(QueuedItem): - """ - Represents a single item in the _deque of the Downloader - """ - kind = u"download" - - def __init__(self, relpath_u, progress, filenode, metadata, size): - super(DownloadItem, self).__init__(relpath_u, progress, size) - self.file_node = filenode - self.metadata = metadata - - -class Downloader(QueueMixin, WriteFileMixin): - - def __init__(self, client, local_path_u, db, collective_dirnode, - upload_readonly_dircap, clock, is_upload_pending, umask, - status_reporter, poll_interval=60): - QueueMixin.__init__(self, client, local_path_u, db, u'downloader', clock) - - if not IDirectoryNode.providedBy(collective_dirnode): - raise AssertionError("'collective_dircap' does not refer to a directory") - if collective_dirnode.is_unknown() or not collective_dirnode.is_readonly(): - raise AssertionError("'collective_dircap' is not a readonly cap to a directory") - - self._collective_dirnode = collective_dirnode - self._upload_readonly_dircap = upload_readonly_dircap - self._is_upload_pending = is_upload_pending - self._umask = umask - self._status_reporter = status_reporter - self._poll_interval = poll_interval - - @eliotutil.inline_callbacks - def start_downloading(self): - action = START_DOWNLOADING(**self._log_fields) - with action: - ALL_FILES.log(files=self._db.get_all_relpaths()) - - while True: - try: - yield self._scan_remote_collective(scan_self=True) - # The integration tests watch for this log message (in the - # Twisted log) to decide when it is safe to proceed. - # Clearly, we need better programmatic interrogation of - # magic-folder state. 
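The ``_write_downloaded_file_logged`` method above boils down to: write the bytes to a sibling ``.tmp`` file, back-date its mtime by ``FUDGE_SECONDS`` (or to the remote mtime if that is older) so that a later local write is detectable as newer than the download, then move the file into place. A condensed sketch of just that sequence, using the standard library's ``os.replace`` as a stand-in for ``fileutil.replace_file`` and omitting the conflict handling:

    import os
    import time

    FUDGE_SECONDS = 10.0

    def write_with_fudged_mtime(abspath, contents, mtime=None, now=None):
        if now is None:
            now = time.time()
        tmp = abspath + u".tmp"
        with open(tmp, "wb") as f:
            f.write(contents)
        # Back-date the mtime ("T" in the spec) so a subsequent local
        # modification stands out against what we just downloaded.
        fudge_time = now - FUDGE_SECONDS
        modified_time = min(fudge_time, mtime) if mtime else fudge_time
        os.utime(tmp, (now, modified_time))
        os.replace(tmp, abspath)  # stand-in for fileutil.replace_file
        return abspath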
- twmsg("Completed initial Magic Folder scan successfully ({})".format(self))
- self._begin_processing()
- return
- except Exception:
- self._status_reporter(
- False, "Initial scan has failed",
- "Last tried at %s" % self.nice_current_time(),
- )
- write_traceback()
- yield task.deferLater(self._clock, self._scan_delay(), lambda: None)
-
- def nice_current_time(self):
- return format_time(datetime.fromtimestamp(self._clock.seconds()).timetuple())
-
- def _should_download(self, relpath_u, remote_version, remote_uri):
- """
- _should_download returns a bool indicating whether a remote object should be downloaded.
- We check the remote metadata version against our magic-folder db version number;
- the latest version wins.
- """
- if magicpath.should_ignore_file(relpath_u):
- return False
- db_entry = self._db.get_db_entry(relpath_u)
- if db_entry is None:
- return True
- if db_entry.version < remote_version:
- return True
- if db_entry.last_downloaded_uri is None and _is_empty_filecap(self._client, remote_uri):
- pass
- elif db_entry.last_downloaded_uri != remote_uri:
- return True
- return False
-
- def _get_local_latest(self, relpath_u):
- """
- _get_local_latest takes a unicode path string and checks whether the file
- exists locally; if not, it returns None.
- Otherwise it returns the magic-folder db entry for that path.
- """
- if not self._get_filepath(relpath_u).exists():
- return None
- return self._db.get_db_entry(relpath_u)
-
- def _get_collective_latest_file(self, filename):
- """
- _get_collective_latest_file takes a file path pointing to a file managed by
- magic-folder and returns a deferred that fires with a two-tuple containing a
- file node and metadata for the latest version of the file located in the
- magic-folder collective directory.
- """
- action = start_action(
- action_type=u"magic-folder:downloader:get-latest-file",
- name=filename,
- )
- with action.context():
- collective_dirmap_d = DeferredContext(self._collective_dirnode.list())
- def scan_collective(result):
- COLLECTIVE_SCAN.log(dmds=result)
- list_of_deferreds = []
- for dir_name in result:
- # XXX make sure it's a directory
- d = DeferredContext(defer.succeed(None))
- d.addCallback(lambda x, dir_name=dir_name: result[dir_name][0].get_child_and_metadata(filename))
- list_of_deferreds.append(d)
- deferList = defer.DeferredList(list_of_deferreds, consumeErrors=True)
- return deferList
- collective_dirmap_d.addCallback(scan_collective)
- def highest_version(deferredList):
- max_version = 0
- metadata = None
- node = None
- for success, result in deferredList:
- if success:
- Message.log(
- message_type=u"magic-folder:downloader:get-latest-file:version",
- version=result[1]['version'],
- )
- if node is None or result[1]['version'] > max_version:
- node, metadata = result
- max_version = result[1]['version']
- else:
- Message.log(
- message_type="magic-folder:downloader:get-latest-file:failed",
- )
- return node, metadata
- collective_dirmap_d.addCallback(highest_version)
- return collective_dirmap_d.addActionFinish()
-
- def _scan_remote_dmd(self, nickname, dirnode, scan_batch):
- """
- Read the contents of a single DMD into the given batch.
-
- :param unicode nickname: The nickname for the participant owning the
- DMD to scan.
-
- :param IDirectoryNode dirnode: The node representing the chosen
- participant's DMD.
-
- :param dict scan_batch: A dictionary into which to collect the results
- of the scan. This is mutated to add the results in-place. Keys
- are the unicode relative paths of contents of the DMD. Values are
- a list of two-tuples. The first element of each two-tuple is the
- ``IFilesystemNode`` for the content. The second element is a
- ``dict`` of metadata.
-
- :return Deferred: A ``Deferred`` which fires when the scan is
- complete.
- """
- with SCAN_REMOTE_DMD(nickname=nickname).context():
- d = DeferredContext(dirnode.list())
- def scan_listing(listing_map):
- for encoded_relpath_u, (file_node, metadata) in listing_map.iteritems():
- relpath_u = magicpath.magic2path(encoded_relpath_u)
- local_dbentry = self._get_local_latest(relpath_u)
-
- # XXX FIXME this is *awfully* similar to the
- # _should_download code -- can we share it?
- remote_version = metadata.get('version', None)
- remote_uri = file_node.get_readonly_uri()
- REMOTE_DMD_ENTRY.log(
- relpath=relpath_u,
- pathentry=local_dbentry,
- remote_version=remote_version,
- remote_uri=remote_uri,
- )
-
- if (local_dbentry is None or remote_version is None or
- local_dbentry.version < remote_version or
- (local_dbentry.version == remote_version and local_dbentry.last_downloaded_uri != remote_uri)):
- ADD_TO_DOWNLOAD_QUEUE.log(relpath=relpath_u)
-
- # The scan_batch is shared across the scan of multiple
- # DMDs. It is expected the DMDs will most often be mostly
- # synchronized with each other. The common case, then, is
- # that there is already an entry for relpath_u. So try to
- # make that the fast path: assume there is a value already
- # and extend it. If there's not, we'll do an extra lookup
- # to initialize it.
- try:
- scan_batch[relpath_u] += [(file_node, metadata)]
- except KeyError:
- scan_batch[relpath_u] = [(file_node, metadata)]
- self._status_reporter(
- True, 'Magic folder is working',
- 'Last scan: %s' % self.nice_current_time(),
- )
-
- d.addCallback(scan_listing)
- return d.addActionFinish()
-
- @eliotutil.log_call_deferred(SCAN_REMOTE_COLLECTIVE.action_type)
- def _scan_remote_collective(self, scan_self=False):
- precondition(not self._deque, "Items in _deque invalidate should_download logic")
- scan_batch = {} # path -> [(filenode, metadata)]
- d = DeferredContext(self._collective_dirnode.list())
- def scan_collective(dirmap):
- d2 = DeferredContext(defer.succeed(None))
- for dir_name in dirmap:
- (dirnode, metadata) = dirmap[dir_name]
- if scan_self or dirnode.get_readonly_uri() != self._upload_readonly_dircap:
- d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode:
- self._scan_remote_dmd(dir_name, dirnode, scan_batch))
- # XXX what should we do to make this failure more visible to users?
- d2.addErrback(write_traceback)
- return d2.result
- d.addCallback(scan_collective)
-
- @log_call(
- action_type=u"magic-folder:filter-batch-to-deque",
- include_args=[],
- include_result=False,
- )
- def _filter_batch_to_deque(ign):
- ITEM_QUEUE.log(items=self._deque)
- SCAN_BATCH.log(batch=scan_batch)
- for relpath_u, versions in scan_batch.iteritems():
- file_node, metadata = max(versions, key=lambda x: x[1]['version'])
-
- if self._should_download(relpath_u, metadata['version'], file_node.get_readonly_uri()):
- to_dl = DownloadItem(
- relpath_u,
- PercentProgress(file_node.get_size()),
- file_node,
- metadata,
- file_node.get_size(),
- )
- to_dl.set_status('queued', self._clock.seconds())
- self._deque.append(to_dl)
- self._count("objects_queued")
- else:
- self._call_hook(None, 'processed', async=True) # await this maybe-Deferred??
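The queueing decision above applies the same version/URI rule as ``_should_download`` (the XXX comment in ``_scan_remote_dmd`` notes the duplication). Restated as a self-contained predicate, with ``DbEntry`` as an illustrative stand-in for the local db record and the path-ignoring and empty-filecap checks reduced to a boolean argument:

    from collections import namedtuple

    # Illustrative stand-in for the local magic-folder db record.
    DbEntry = namedtuple('DbEntry', 'version last_downloaded_uri')

    def should_download(db_entry, remote_version, remote_uri, remote_is_empty):
        if db_entry is None:
            return True   # never seen this path: fetch it
        if db_entry.version < remote_version:
            return True   # a strictly newer remote version wins
        if db_entry.last_downloaded_uri is None and remote_is_empty:
            return False  # nothing downloaded yet and the remote file is empty
        return db_entry.last_downloaded_uri != remote_uri

    # An equal version under a different filecap still triggers a fetch:
    # should_download(DbEntry(2, u"URI:CHK:a"), 2, u"URI:CHK:b", False) --> True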
- - d.addCallback(_filter_batch_to_deque) - return d.result - - def _scan_delay(self): - return self._poll_interval - - @eliotutil.log_call_deferred(PERFORM_SCAN.action_type) - @eliotutil.inline_callbacks - def _perform_scan(self): - try: - yield self._scan_remote_collective() - self._status_reporter( - True, 'Magic folder is working', - 'Last scan: %s' % self.nice_current_time(), - ) - except Exception as e: - write_traceback() - self._status_reporter( - False, 'Remote scan has failed: %s' % str(e), - 'Last attempted at %s' % self.nice_current_time(), - ) - - def _process(self, item): - """ - Possibly upload a single QueuedItem. If this returns False, the item is - removed from _process_history. - """ - # Downloader - now = self._clock.seconds() - - item.set_status('started', now) - fp = self._get_filepath(item.relpath_u) - abspath_u = unicode_from_filepath(fp) - conflict_path_u = self._get_conflicted_filename(abspath_u) - last_uploaded_uri = item.metadata.get('last_uploaded_uri', None) - - with PROCESS_ITEM(item=item): - d = DeferredContext(defer.succeed(False)) - - def do_update_db(written_abspath_u): - filecap = item.file_node.get_uri() - if not item.file_node.get_size(): - filecap = None # ^ is an empty file - last_downloaded_uri = filecap - last_downloaded_timestamp = now - written_pathinfo = get_pathinfo(written_abspath_u) - - if not written_pathinfo.exists and not item.metadata.get('deleted', False): - raise Exception("downloaded object %s disappeared" % quote_local_unicode_path(written_abspath_u)) - - self._db.did_upload_version( - item.relpath_u, - item.metadata['version'], - last_uploaded_uri, - last_downloaded_uri, - last_downloaded_timestamp, - written_pathinfo, - ) - self._count('objects_downloaded') - item.set_status('success', self._clock.seconds()) - return True - - def failed(f): - item.set_status('failure', self._clock.seconds()) - self._count('objects_failed') - return f - - if os.path.isfile(conflict_path_u): - def fail(res): - raise ConflictError("download failed: already conflicted: %r" % (item.relpath_u,)) - d.addCallback(fail) - else: - - # Let ``last_downloaded_uri`` be the field of that name obtained from - # the directory entry metadata for ``foo`` in Bob's DMD (this field - # may be absent). Then the algorithm is: - - # * 2a. Attempt to "stat" ``foo`` to get its *current statinfo* (size - # in bytes, ``mtime``, and ``ctime``). If Alice has no local copy - # of ``foo``, classify as an overwrite. - - current_statinfo = get_pathinfo(abspath_u) - - is_conflict = False - db_entry = self._db.get_db_entry(item.relpath_u) - dmd_last_downloaded_uri = item.metadata.get('last_downloaded_uri', None) - - # * 2b. Read the following information for the path ``foo`` from the - # local magic folder db: - # * the *last-seen statinfo*, if any (this is the size in - # bytes, ``mtime``, and ``ctime`` stored in the ``local_files`` - # table when the file was last uploaded); - # * the ``last_uploaded_uri`` field of the ``local_files`` table - # for this file, which is the URI under which the file was last - # uploaded. - - with CHECKING_CONFLICTS() as action: - conflict_reason = None - if db_entry: - # * 2c. If any of the following are true, then classify as a conflict: - # * i. there are pending notifications of changes to ``foo``; - # * ii. the last-seen statinfo is either absent (i.e. 
there is - # no entry in the database for this path), or different from the - # current statinfo; - - if current_statinfo.exists: - if (db_entry.mtime_ns != current_statinfo.mtime_ns or \ - db_entry.ctime_ns != current_statinfo.ctime_ns or \ - db_entry.size != current_statinfo.size): - is_conflict = True - conflict_reason = u"dbentry mismatch metadata" - - if db_entry.last_downloaded_uri is None \ - or db_entry.last_uploaded_uri is None \ - or dmd_last_downloaded_uri is None: - # we've never downloaded anything before for this - # file, but the other side might have created a new - # file "at the same time" - if db_entry.version >= item.metadata['version']: - is_conflict = True - conflict_reason = u"dbentry newer version" - elif dmd_last_downloaded_uri != db_entry.last_downloaded_uri: - is_conflict = True - conflict_reason = u"last_downloaded_uri mismatch" - - else: # no local db_entry .. but has the file appeared locally meantime? - if current_statinfo.exists: - is_conflict = True - conflict_reason = u"file appeared" - - action.add_success_fields( - is_conflict=is_conflict, - conflict_reason=conflict_reason, - ) - - if is_conflict: - self._count('objects_conflicted') - - if item.relpath_u.endswith(u"/"): - if item.metadata.get('deleted', False): - REMOTE_DIRECTORY_DELETED.log() - else: - REMOTE_DIRECTORY_CREATED.log() - d.addCallback(lambda ign: fileutil.make_dirs(abspath_u)) - d.addCallback(lambda ign: abspath_u) - else: - if item.metadata.get('deleted', False): - d.addCallback(lambda ign: self._rename_deleted_file(abspath_u)) - else: - @eliotutil.log_call_deferred(DOWNLOAD_BEST_VERSION.action_type) - def download_best_version(ignored): - d = DeferredContext(item.file_node.download_best_version(progress=item.progress)) - d.addCallback(lambda contents: self._write_downloaded_file( - self._local_path_u, abspath_u, contents, - is_conflict=is_conflict, - mtime=item.metadata.get('user_mtime', item.metadata.get('tahoe', {}).get('linkmotime')), - )) - return d.result - - d.addCallback(download_best_version) - - d.addCallback(do_update_db) - d.addErrback(failed) - - def trap_conflicts(f): - f.trap(ConflictError) - return False - d.addErrback(trap_conflicts) - return d.addActionFinish() diff --git a/src/allmydata/magicfolderdb.py b/src/allmydata/magicfolderdb.py deleted file mode 100644 index 9b09f6c95..000000000 --- a/src/allmydata/magicfolderdb.py +++ /dev/null @@ -1,204 +0,0 @@ -from __future__ import print_function - -import sys -from collections import namedtuple - -from allmydata.util.dbutil import get_db, DBError -from allmydata.util.eliotutil import ( - RELPATH, - VERSION, - LAST_UPLOADED_URI, - LAST_DOWNLOADED_URI, - LAST_DOWNLOADED_TIMESTAMP, - PATHINFO, - validateSetMembership, - validateInstanceOf, -) -from eliot import ( - Field, - ActionType, -) - -PathEntry = namedtuple('PathEntry', 'size mtime_ns ctime_ns version last_uploaded_uri ' - 'last_downloaded_uri last_downloaded_timestamp') - -PATHENTRY = Field( - u"pathentry", - lambda v: None if v is None else { - "size": v.size, - "mtime_ns": v.mtime_ns, - "ctime_ns": v.ctime_ns, - "version": v.version, - "last_uploaded_uri": v.last_uploaded_uri, - "last_downloaded_uri": v.last_downloaded_uri, - "last_downloaded_timestamp": v.last_downloaded_timestamp, - }, - u"The local database state of a file.", - validateInstanceOf((type(None), PathEntry)), -) - -_INSERT_OR_UPDATE = Field.for_types( - u"insert_or_update", - [unicode], - u"An indication of whether the record for this upload was new or an update to a previous entry.", - 
validateSetMembership({u"insert", u"update"}), -) - -UPDATE_ENTRY = ActionType( - u"magic-folder-db:update-entry", - [RELPATH, VERSION, LAST_UPLOADED_URI, LAST_DOWNLOADED_URI, LAST_DOWNLOADED_TIMESTAMP, PATHINFO], - [_INSERT_OR_UPDATE], - u"Record some metadata about a relative path in the magic-folder.", -) - - -# magic-folder db schema version 1 -SCHEMA_v1 = """ -CREATE TABLE version -( - version INTEGER -- contains one row, set to 1 -); - -CREATE TABLE local_files -( - path VARCHAR(1024) PRIMARY KEY, -- UTF-8 filename relative to local magic folder dir - size INTEGER, -- ST_SIZE, or NULL if the file has been deleted - mtime_ns INTEGER, -- ST_MTIME in nanoseconds - ctime_ns INTEGER, -- ST_CTIME in nanoseconds - version INTEGER, - last_uploaded_uri VARCHAR(256), -- URI:CHK:... - last_downloaded_uri VARCHAR(256), -- URI:CHK:... - last_downloaded_timestamp TIMESTAMP -); -""" - - -def get_magicfolderdb(dbfile, stderr=sys.stderr, - create_version=(SCHEMA_v1, 1), just_create=False): - # Open or create the given backupdb file. The parent directory must - # exist. - try: - (sqlite3, db) = get_db(dbfile, stderr, create_version, - just_create=just_create, dbname="magicfolderdb") - if create_version[1] in (1, 2): - return MagicFolderDB(sqlite3, db) - else: - print("invalid magicfolderdb schema version specified", file=stderr) - return None - except DBError as e: - print(e, file=stderr) - return None - -class LocalPath(object): - @classmethod - def fromrow(self, row): - p = LocalPath() - p.relpath_u = row[0] - p.entry = PathEntry(*row[1:]) - return p - - -class MagicFolderDB(object): - VERSION = 1 - - def __init__(self, sqlite_module, connection): - self.sqlite_module = sqlite_module - self.connection = connection - self.cursor = connection.cursor() - - def close(self): - self.connection.close() - - def get_db_entry(self, relpath_u): - """ - Retrieve the entry in the database for a given path, or return None - if there is no such entry. - """ - c = self.cursor - c.execute("SELECT size, mtime_ns, ctime_ns, version, last_uploaded_uri," - " last_downloaded_uri, last_downloaded_timestamp" - " FROM local_files" - " WHERE path=?", - (relpath_u,)) - row = self.cursor.fetchone() - if not row: - return None - else: - (size, mtime_ns, ctime_ns, version, last_uploaded_uri, - last_downloaded_uri, last_downloaded_timestamp) = row - return PathEntry(size=size, mtime_ns=mtime_ns, ctime_ns=ctime_ns, version=version, - last_uploaded_uri=last_uploaded_uri, - last_downloaded_uri=last_downloaded_uri, - last_downloaded_timestamp=last_downloaded_timestamp) - - def get_direct_children(self, relpath_u): - """ - Given the relative path to a directory, return ``LocalPath`` instances - representing all direct children of that directory. - """ - # It would be great to not be interpolating data into query - # statements. However, query parameters are not supported in the - # position where we need them. - sqlitesafe_relpath_u = relpath_u.replace(u"'", u"''") - statement = ( - """ - SELECT - path, size, mtime_ns, ctime_ns, version, last_uploaded_uri, - last_downloaded_uri, last_downloaded_timestamp - FROM - local_files - WHERE - -- The "_" used here ensures there is at least one character - -- after the /. This prevents matching the path itself. - path LIKE '{path}/_%' AND - - -- The "_" used here serves a similar purpose. This allows - -- matching directory children but avoids matching their - -- children. 
- path NOT LIKE '{path}/_%/_%' - """ - ).format(path=sqlitesafe_relpath_u) - - self.cursor.execute(statement) - rows = self.cursor.fetchall() - return list( - LocalPath.fromrow(row) - for row - in rows - ) - - def get_all_relpaths(self): - """ - Retrieve a set of all relpaths of files that have had an entry in magic folder db - (i.e. that have been downloaded at least once). - """ - self.cursor.execute("SELECT path FROM local_files") - rows = self.cursor.fetchall() - return set([r[0] for r in rows]) - - def did_upload_version(self, relpath_u, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, pathinfo): - action = UPDATE_ENTRY( - relpath=relpath_u, - version=version, - last_uploaded_uri=last_uploaded_uri, - last_downloaded_uri=last_downloaded_uri, - last_downloaded_timestamp=last_downloaded_timestamp, - pathinfo=pathinfo, - ) - with action: - try: - self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?,?,?)", - (relpath_u, pathinfo.size, pathinfo.mtime_ns, pathinfo.ctime_ns, - version, last_uploaded_uri, last_downloaded_uri, - last_downloaded_timestamp)) - action.add_success_fields(insert_or_update=u"insert") - except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError): - self.cursor.execute("UPDATE local_files" - " SET size=?, mtime_ns=?, ctime_ns=?, version=?, last_uploaded_uri=?," - " last_downloaded_uri=?, last_downloaded_timestamp=?" - " WHERE path=?", - (pathinfo.size, pathinfo.mtime_ns, pathinfo.ctime_ns, version, - last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, - relpath_u)) - action.add_success_fields(insert_or_update=u"update") - self.connection.commit() diff --git a/src/allmydata/magicpath.py b/src/allmydata/magicpath.py deleted file mode 100644 index 1f46330d4..000000000 --- a/src/allmydata/magicpath.py +++ /dev/null @@ -1,32 +0,0 @@ -import re -import os.path - -from allmydata.util.assertutil import precondition, _assert - -def path2magic(path): - return re.sub(u'[/@]', lambda m: {u'/': u'@_', u'@': u'@@'}[m.group(0)], path) - -def magic2path(path): - return re.sub(u'@[_@]', lambda m: {u'@_': u'/', u'@@': u'@'}[m.group(0)], path) - - -IGNORE_SUFFIXES = [u'.backup', u'.tmp', u'.conflict'] -IGNORE_PREFIXES = [u'.'] - -def should_ignore_file(path_u): - precondition(isinstance(path_u, unicode), path_u=path_u) - - for suffix in IGNORE_SUFFIXES: - if path_u.endswith(suffix): - return True - - while path_u != u"": - oldpath_u = path_u - path_u, tail_u = os.path.split(path_u) - if tail_u.startswith(u"."): - return True - if path_u == oldpath_u: - return True # the path was absolute - _assert(len(path_u) < len(oldpath_u), path_u=path_u, oldpath_u=oldpath_u) - - return False diff --git a/src/allmydata/scripts/magic_folder_cli.py b/src/allmydata/scripts/magic_folder_cli.py deleted file mode 100644 index 4165dec49..000000000 --- a/src/allmydata/scripts/magic_folder_cli.py +++ /dev/null @@ -1,610 +0,0 @@ -from __future__ import print_function - -import os -import urllib -from types import NoneType -from six.moves import cStringIO as StringIO -from datetime import datetime -import json - - -from twisted.python import usage - -from allmydata.util.assertutil import precondition - -from .common import BaseOptions, BasedirOptions, get_aliases -from .cli import MakeDirectoryOptions, LnOptions, CreateAliasOptions -import tahoe_mv -from allmydata.util.encodingutil import argv_to_abspath, argv_to_unicode, to_str, \ - quote_local_unicode_path -from allmydata.scripts.common_http import do_http, BadResponse -from 
allmydata.util import fileutil -from allmydata import uri -from allmydata.util.abbreviate import abbreviate_space, abbreviate_time -from allmydata.frontends.magic_folder import load_magic_folders -from allmydata.frontends.magic_folder import save_magic_folders -from allmydata.frontends.magic_folder import maybe_upgrade_magic_folders - - -INVITE_SEPARATOR = "+" - -class CreateOptions(BasedirOptions): - nickname = None # NOTE: *not* the "name of this magic-folder" - local_dir = None - synopsis = "MAGIC_ALIAS: [NICKNAME LOCAL_DIR]" - optParameters = [ - ("poll-interval", "p", "60", "How often to ask for updates"), - ("name", "n", "default", "The name of this magic-folder"), - ] - description = ( - "Create a new magic-folder. If you specify NICKNAME and " - "LOCAL_DIR, this client will also be invited and join " - "using the given nickname. A new alias (see 'tahoe list-aliases') " - "will be added with the master folder's writecap." - ) - - def parseArgs(self, alias, nickname=None, local_dir=None): - BasedirOptions.parseArgs(self) - alias = argv_to_unicode(alias) - if not alias.endswith(u':'): - raise usage.UsageError("An alias must end with a ':' character.") - self.alias = alias[:-1] - self.nickname = None if nickname is None else argv_to_unicode(nickname) - try: - if int(self['poll-interval']) <= 0: - raise ValueError("should be positive") - except ValueError: - raise usage.UsageError( - "--poll-interval must be a positive integer" - ) - - # Expand the path relative to the current directory of the CLI command, not the node. - self.local_dir = None if local_dir is None else argv_to_abspath(local_dir, long_path=False) - - if self.nickname and not self.local_dir: - raise usage.UsageError("If NICKNAME is specified then LOCAL_DIR must also be specified.") - node_url_file = os.path.join(self['node-directory'], u"node.url") - self['node-url'] = fileutil.read(node_url_file).strip() - -def _delegate_options(source_options, target_options): - target_options.aliases = get_aliases(source_options['node-directory']) - target_options["node-url"] = source_options["node-url"] - target_options["node-directory"] = source_options["node-directory"] - target_options["name"] = source_options["name"] - target_options.stdin = StringIO("") - target_options.stdout = StringIO() - target_options.stderr = StringIO() - return target_options - -def create(options): - precondition(isinstance(options.alias, unicode), alias=options.alias) - precondition(isinstance(options.nickname, (unicode, NoneType)), nickname=options.nickname) - precondition(isinstance(options.local_dir, (unicode, NoneType)), local_dir=options.local_dir) - - # make sure we don't already have a magic-folder with this name before we create the alias - maybe_upgrade_magic_folders(options["node-directory"]) - folders = load_magic_folders(options["node-directory"]) - if options['name'] in folders: - print("Already have a magic-folder named '{}'".format(options['name']), file=options.stderr) - return 1 - - # create an alias; this basically just remembers the cap for the - # master directory - from allmydata.scripts import tahoe_add_alias - create_alias_options = _delegate_options(options, CreateAliasOptions()) - create_alias_options.alias = options.alias - - rc = tahoe_add_alias.create_alias(create_alias_options) - if rc != 0: - print(create_alias_options.stderr.getvalue(), file=options.stderr) - return rc - print(create_alias_options.stdout.getvalue(), file=options.stdout) - - if options.nickname is not None: - print(u"Inviting myself as client 
'{}':".format(options.nickname), file=options.stdout) - invite_options = _delegate_options(options, InviteOptions()) - invite_options.alias = options.alias - invite_options.nickname = options.nickname - invite_options['name'] = options['name'] - rc = invite(invite_options) - if rc != 0: - print(u"magic-folder: failed to invite after create\n", file=options.stderr) - print(invite_options.stderr.getvalue(), file=options.stderr) - return rc - invite_code = invite_options.stdout.getvalue().strip() - print(u" created invite code", file=options.stdout) - join_options = _delegate_options(options, JoinOptions()) - join_options['poll-interval'] = options['poll-interval'] - join_options.nickname = options.nickname - join_options.local_dir = options.local_dir - join_options.invite_code = invite_code - rc = join(join_options) - if rc != 0: - print(u"magic-folder: failed to join after create\n", file=options.stderr) - print(join_options.stderr.getvalue(), file=options.stderr) - return rc - print(u" joined new magic-folder", file=options.stdout) - print( - u"Successfully created magic-folder '{}' with alias '{}:' " - u"and client '{}'\nYou must re-start your node before the " - u"magic-folder will be active." - .format(options['name'], options.alias, options.nickname), file=options.stdout) - return 0 - - -class ListOptions(BasedirOptions): - description = ( - "List all magic-folders this client has joined" - ) - optFlags = [ - ("json", "", "Produce JSON output") - ] - - -def list_(options): - folders = load_magic_folders(options["node-directory"]) - if options["json"]: - _list_json(options, folders) - return 0 - _list_human(options, folders) - return 0 - - -def _list_json(options, folders): - """ - List our magic-folders using JSON - """ - info = dict() - for name, details in folders.items(): - info[name] = { - u"directory": details["directory"], - } - print(json.dumps(info), file=options.stdout) - return 0 - - -def _list_human(options, folders): - """ - List our magic-folders for a human user - """ - if folders: - print("This client has the following magic-folders:", file=options.stdout) - biggest = max([len(nm) for nm in folders.keys()]) - fmt = " {:>%d}: {}" % (biggest, ) - for name, details in folders.items(): - print(fmt.format(name, details["directory"]), file=options.stdout) - else: - print("No magic-folders", file=options.stdout) - - -class InviteOptions(BasedirOptions): - nickname = None - synopsis = "MAGIC_ALIAS: NICKNAME" - stdin = StringIO("") - optParameters = [ - ("name", "n", "default", "The name of this magic-folder"), - ] - description = ( - "Invite a new participant to a given magic-folder. The resulting " - "invite-code that is printed is secret information and MUST be " - "transmitted securely to the invitee." 
- ) - - def parseArgs(self, alias, nickname=None): - BasedirOptions.parseArgs(self) - alias = argv_to_unicode(alias) - if not alias.endswith(u':'): - raise usage.UsageError("An alias must end with a ':' character.") - self.alias = alias[:-1] - self.nickname = argv_to_unicode(nickname) - node_url_file = os.path.join(self['node-directory'], u"node.url") - self['node-url'] = open(node_url_file, "r").read().strip() - aliases = get_aliases(self['node-directory']) - self.aliases = aliases - - -def invite(options): - precondition(isinstance(options.alias, unicode), alias=options.alias) - precondition(isinstance(options.nickname, unicode), nickname=options.nickname) - - from allmydata.scripts import tahoe_mkdir - mkdir_options = _delegate_options(options, MakeDirectoryOptions()) - mkdir_options.where = None - - rc = tahoe_mkdir.mkdir(mkdir_options) - if rc != 0: - print("magic-folder: failed to mkdir\n", file=options.stderr) - return rc - - # FIXME this assumes caps are ASCII. - dmd_write_cap = mkdir_options.stdout.getvalue().strip() - dmd_readonly_cap = uri.from_string(dmd_write_cap).get_readonly().to_string() - if dmd_readonly_cap is None: - print("magic-folder: failed to diminish dmd write cap\n", file=options.stderr) - return 1 - - magic_write_cap = get_aliases(options["node-directory"])[options.alias] - magic_readonly_cap = uri.from_string(magic_write_cap).get_readonly().to_string() - - # tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME - ln_options = _delegate_options(options, LnOptions()) - ln_options.from_file = unicode(dmd_readonly_cap, 'utf-8') - ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap, 'utf-8'), options.nickname) - rc = tahoe_mv.mv(ln_options, mode="link") - if rc != 0: - print("magic-folder: failed to create link\n", file=options.stderr) - print(ln_options.stderr.getvalue(), file=options.stderr) - return rc - - # FIXME: this assumes caps are ASCII. - print("%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap), file=options.stdout) - return 0 - -class JoinOptions(BasedirOptions): - synopsis = "INVITE_CODE LOCAL_DIR" - dmd_write_cap = "" - magic_readonly_cap = "" - optParameters = [ - ("poll-interval", "p", "60", "How often to ask for updates"), - ("name", "n", "default", "Name of the magic-folder"), - ] - - def parseArgs(self, invite_code, local_dir): - BasedirOptions.parseArgs(self) - - try: - if int(self['poll-interval']) <= 0: - raise ValueError("should be positive") - except ValueError: - raise usage.UsageError( - "--poll-interval must be a positive integer" - ) - # Expand the path relative to the current directory of the CLI command, not the node. 
- self.local_dir = None if local_dir is None else argv_to_abspath(local_dir, long_path=False) - self.invite_code = to_str(argv_to_unicode(invite_code)) - -def join(options): - fields = options.invite_code.split(INVITE_SEPARATOR) - if len(fields) != 2: - raise usage.UsageError("Invalid invite code.") - magic_readonly_cap, dmd_write_cap = fields - - maybe_upgrade_magic_folders(options["node-directory"]) - existing_folders = load_magic_folders(options["node-directory"]) - - if options['name'] in existing_folders: - print("This client already has a magic-folder named '{}'".format(options['name']), file=options.stderr) - return 1 - - db_fname = os.path.join( - options["node-directory"], - u"private", - u"magicfolder_{}.sqlite".format(options['name']), - ) - if os.path.exists(db_fname): - print("Database '{}' already exists; not overwriting".format(db_fname), file=options.stderr) - return 1 - - folder = { - u"directory": options.local_dir.encode('utf-8'), - u"collective_dircap": magic_readonly_cap, - u"upload_dircap": dmd_write_cap, - u"poll_interval": options["poll-interval"], - } - existing_folders[options["name"]] = folder - - save_magic_folders(options["node-directory"], existing_folders) - return 0 - - -class LeaveOptions(BasedirOptions): - synopsis = "Remove a magic-folder and forget all state" - optParameters = [ - ("name", "n", "default", "Name of magic-folder to leave"), - ] - - -def leave(options): - from ConfigParser import SafeConfigParser - - existing_folders = load_magic_folders(options["node-directory"]) - - if not existing_folders: - print("No magic-folders at all", file=options.stderr) - return 1 - - if options["name"] not in existing_folders: - print("No such magic-folder '{}'".format(options["name"]), file=options.stderr) - return 1 - - privdir = os.path.join(options["node-directory"], u"private") - db_fname = os.path.join(privdir, u"magicfolder_{}.sqlite".format(options["name"])) - - # delete from YAML file and re-write it - del existing_folders[options["name"]] - save_magic_folders(options["node-directory"], existing_folders) - - # delete the database file - try: - fileutil.remove(db_fname) - except Exception as e: - print("Warning: unable to remove %s due to %s: %s" - % (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)), file=options.stderr) - - # if this was the last magic-folder, disable them entirely - if not existing_folders: - parser = SafeConfigParser() - parser.read(os.path.join(options["node-directory"], u"tahoe.cfg")) - parser.remove_section("magic_folder") - with open(os.path.join(options["node-directory"], u"tahoe.cfg"), "w") as f: - parser.write(f) - - return 0 - - -class StatusOptions(BasedirOptions): - synopsis = "" - stdin = StringIO("") - optParameters = [ - ("name", "n", "default", "Name for the magic-folder to show status"), - ] - - def parseArgs(self): - BasedirOptions.parseArgs(self) - node_url_file = os.path.join(self['node-directory'], u"node.url") - with open(node_url_file, "r") as f: - self['node-url'] = f.read().strip() - - -def _get_json_for_fragment(options, fragment, method='GET', post_args=None): - nodeurl = options['node-url'] - if nodeurl.endswith('/'): - nodeurl = nodeurl[:-1] - - url = u'%s/%s' % (nodeurl, fragment) - if method == 'POST': - if post_args is None: - raise ValueError("Must pass post_args= for POST method") - body = urllib.urlencode(post_args) - else: - body = '' - if post_args is not None: - raise ValueError("post_args= only valid for POST method") - resp = do_http(method, url, body=body) - if isinstance(resp, 
BadResponse): - # specifically NOT using format_http_error() here because the - # URL is pretty sensitive (we're doing /uri/). - raise RuntimeError( - "Failed to get json from '%s': %s" % (nodeurl, resp.error) - ) - - data = resp.read() - parsed = json.loads(data) - if parsed is None: - raise RuntimeError("No data from '%s'" % (nodeurl,)) - return parsed - - -def _get_json_for_cap(options, cap): - return _get_json_for_fragment( - options, - 'uri/%s?t=json' % urllib.quote(cap), - ) - -def _print_item_status(item, now, longest): - paddedname = (' ' * (longest - len(item['path']))) + item['path'] - if 'failure_at' in item: - ts = datetime.fromtimestamp(item['started_at']) - prog = 'Failed %s (%s)' % (abbreviate_time(now - ts), ts) - elif item['percent_done'] < 100.0: - if 'started_at' not in item: - prog = 'not yet started' - else: - so_far = now - datetime.fromtimestamp(item['started_at']) - if so_far.seconds > 0.0: - rate = item['percent_done'] / so_far.seconds - if rate != 0: - time_left = (100.0 - item['percent_done']) / rate - prog = '%2.1f%% done, around %s left' % ( - item['percent_done'], - abbreviate_time(time_left), - ) - else: - time_left = None - prog = '%2.1f%% done' % (item['percent_done'],) - else: - prog = 'just started' - else: - prog = '' - for verb in ['finished', 'started', 'queued']: - keyname = verb + '_at' - if keyname in item: - when = datetime.fromtimestamp(item[keyname]) - prog = '%s %s' % (verb, abbreviate_time(now - when)) - break - - print(" %s: %s" % (paddedname, prog)) - - -def status(options): - nodedir = options["node-directory"] - stdout, stderr = options.stdout, options.stderr - magic_folders = load_magic_folders(os.path.join(options["node-directory"])) - - with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f: - token = f.read() - - print("Magic-folder status for '{}':".format(options["name"]), file=stdout) - - if options["name"] not in magic_folders: - raise Exception( - "No such magic-folder '{}'".format(options["name"]) - ) - - dmd_cap = magic_folders[options["name"]]["upload_dircap"] - collective_readcap = magic_folders[options["name"]]["collective_dircap"] - - # do *all* our data-retrievals first in case there's an error - try: - dmd_data = _get_json_for_cap(options, dmd_cap) - remote_data = _get_json_for_cap(options, collective_readcap) - magic_data = _get_json_for_fragment( - options, - 'magic_folder?t=json', - method='POST', - post_args=dict( - t='json', - name=options["name"], - token=token, - ) - ) - except Exception as e: - print("failed to retrieve data: %s" % str(e), file=stderr) - return 2 - - for d in [dmd_data, remote_data, magic_data]: - if isinstance(d, dict) and 'error' in d: - print("Error from server: %s" % d['error'], file=stderr) - print("This means we can't retrieve the remote shared directory.", file=stderr) - return 3 - - captype, dmd = dmd_data - if captype != 'dirnode': - print("magic_folder_dircap isn't a directory capability", file=stderr) - return 2 - - now = datetime.now() - - print("Local files:", file=stdout) - for (name, child) in dmd['children'].items(): - captype, meta = child - status = 'good' - size = meta['size'] - created = datetime.fromtimestamp(meta['metadata']['tahoe']['linkcrtime']) - version = meta['metadata']['version'] - nice_size = abbreviate_space(size) - nice_created = abbreviate_time(now - created) - if captype != 'filenode': - print("%20s: error, should be a filecap" % name, file=stdout) - continue - print(" %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, 
nice_created), file=stdout) - - print(file=stdout) - print("Remote files:", file=stdout) - - captype, collective = remote_data - for (name, data) in collective['children'].items(): - if data[0] != 'dirnode': - print("Error: '%s': expected a dirnode, not '%s'" % (name, data[0]), file=stdout) - print(" %s's remote:" % name, file=stdout) - dmd = _get_json_for_cap(options, data[1]['ro_uri']) - if isinstance(dmd, dict) and 'error' in dmd: - print(" Error: could not retrieve directory", file=stdout) - continue - if dmd[0] != 'dirnode': - print("Error: should be a dirnode", file=stdout) - continue - for (n, d) in dmd[1]['children'].items(): - if d[0] != 'filenode': - print("Error: expected '%s' to be a filenode." % (n,), file=stdout) - - meta = d[1] - status = 'good' - size = meta['size'] - created = datetime.fromtimestamp(meta['metadata']['tahoe']['linkcrtime']) - version = meta['metadata']['version'] - nice_size = abbreviate_space(size) - nice_created = abbreviate_time(now - created) - print(" %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created), file=stdout) - - if len(magic_data): - uploads = [item for item in magic_data if item['kind'] == 'upload'] - downloads = [item for item in magic_data if item['kind'] == 'download'] - longest = max([len(item['path']) for item in magic_data]) - - # maybe gate this with --show-completed option or something? - uploads = [item for item in uploads if item['status'] != 'success'] - downloads = [item for item in downloads if item['status'] != 'success'] - - if len(uploads): - print() - print("Uploads:", file=stdout) - for item in uploads: - _print_item_status(item, now, longest) - - if len(downloads): - print() - print("Downloads:", file=stdout) - for item in downloads: - _print_item_status(item, now, longest) - - for item in magic_data: - if item['status'] == 'failure': - print("Failed:", item, file=stdout) - - return 0 - - -class MagicFolderCommand(BaseOptions): - subCommands = [ - ["create", None, CreateOptions, "Create a Magic Folder."], - ["invite", None, InviteOptions, "Invite someone to a Magic Folder."], - ["join", None, JoinOptions, "Join a Magic Folder."], - ["leave", None, LeaveOptions, "Leave a Magic Folder."], - ["status", None, StatusOptions, "Display status of uploads/downloads."], - ["list", None, ListOptions, "List Magic Folders configured in this client."], - ] - optFlags = [ - ["debug", "d", "Print full stack-traces"], - ] - description = ( - "A magic-folder has an owner who controls the writecap " - "containing a list of nicknames and readcaps. The owner can invite " - "new participants. Every participant has the writecap for their " - "own folder (the corresponding readcap is in the master folder). " - "All clients download files from all other participants using the " - "readcaps contained in the master magic-folder directory." - ) - - def postOptions(self): - if not hasattr(self, 'subOptions'): - raise usage.UsageError("must specify a subcommand") - def getSynopsis(self): - return "Usage: tahoe [global-options] magic-folder" - def getUsage(self, width=None): - t = BaseOptions.getUsage(self, width) - t += ( - "Please run e.g. 
'tahoe magic-folder create --help' for more " - "details on each subcommand.\n" - ) - return t - -subDispatch = { - "create": create, - "invite": invite, - "join": join, - "leave": leave, - "status": status, - "list": list_, -} - -def do_magic_folder(options): - so = options.subOptions - so.stdout = options.stdout - so.stderr = options.stderr - f = subDispatch[options.subCommand] - try: - return f(so) - except Exception as e: - print("Error: %s" % (e,), file=options.stderr) - if options['debug']: - raise - -subCommands = [ - ["magic-folder", None, MagicFolderCommand, - "Magic Folder subcommands: use 'tahoe magic-folder' for a list."], -] - -dispatch = { - "magic-folder": do_magic_folder, -} diff --git a/src/allmydata/scripts/runner.py b/src/allmydata/scripts/runner.py index 60322c426..705d3193b 100644 --- a/src/allmydata/scripts/runner.py +++ b/src/allmydata/scripts/runner.py @@ -9,7 +9,7 @@ from twisted.internet import defer, task, threads from allmydata.version_checks import get_package_versions_string from allmydata.scripts.common import get_default_nodedir from allmydata.scripts import debug, create_node, cli, \ - stats_gatherer, admin, magic_folder_cli, tahoe_daemonize, tahoe_start, \ + stats_gatherer, admin, tahoe_daemonize, tahoe_start, \ tahoe_stop, tahoe_restart, tahoe_run, tahoe_invite from allmydata.util.encodingutil import quote_output, quote_local_unicode_path, get_io_encoding from allmydata.util.eliotutil import ( @@ -61,7 +61,6 @@ class Options(usage.Options): + process_control_commands + debug.subCommands + cli.subCommands - + magic_folder_cli.subCommands + tahoe_invite.subCommands ) @@ -154,10 +153,6 @@ def dispatch(config, # these are blocking, and must be run in a thread f0 = cli.dispatch[command] f = lambda so: threads.deferToThread(f0, so) - elif command in magic_folder_cli.dispatch: - # same - f0 = magic_folder_cli.dispatch[command] - f = lambda so: threads.deferToThread(f0, so) elif command in tahoe_invite.dispatch: f = tahoe_invite.dispatch[command] else: diff --git a/src/allmydata/test/cli/test_magic_folder.py b/src/allmydata/test/cli/test_magic_folder.py deleted file mode 100644 index b8d26b893..000000000 --- a/src/allmydata/test/cli/test_magic_folder.py +++ /dev/null @@ -1,814 +0,0 @@ -import json -import shutil -import os.path -import mock -import re -import time -from datetime import datetime - -from eliot import ( - log_call, - start_action, -) -from eliot.twisted import ( - DeferredContext, -) - -from twisted.trial import unittest -from twisted.internet import defer -from twisted.internet import reactor -from twisted.python import usage - -from allmydata.util.assertutil import precondition -from allmydata.util import fileutil -from allmydata.scripts.common import get_aliases -from ..no_network import GridTestMixin -from ..common_util import parse_cli -from .common import CLITestMixin -from allmydata.test.common_util import NonASCIIPathMixin -from allmydata.scripts import magic_folder_cli -from allmydata.util.fileutil import abspath_expanduser_unicode -from allmydata.util.encodingutil import unicode_to_argv -from allmydata.frontends.magic_folder import MagicFolder -from allmydata import uri -from ...util.eliotutil import ( - log_call_deferred, -) - -class MagicFolderCLITestMixin(CLITestMixin, GridTestMixin, NonASCIIPathMixin): - def setUp(self): - GridTestMixin.setUp(self) - self.alice_nickname = self.unicode_or_fallback(u"Alice\u00F8", u"Alice", io_as_well=True) - self.bob_nickname = self.unicode_or_fallback(u"Bob\u00F8", u"Bob", io_as_well=True) - - def 
do_create_magic_folder(self, client_num): - with start_action(action_type=u"create-magic-folder", client_num=client_num).context(): - d = DeferredContext( - self.do_cli( - "magic-folder", "--debug", "create", "magic:", - client_num=client_num, - ) - ) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0, stdout + stderr) - self.assertIn("Alias 'magic' created", stdout) -# self.failUnlessIn("joined new magic-folder", stdout) -# self.failUnlessIn("Successfully created magic-folder", stdout) - self.failUnlessEqual(stderr, "") - aliases = get_aliases(self.get_clientdir(i=client_num)) - self.assertIn("magic", aliases) - self.failUnless(aliases["magic"].startswith("URI:DIR2:")) - d.addCallback(_done) - return d.addActionFinish() - - def do_invite(self, client_num, nickname): - nickname_arg = unicode_to_argv(nickname) - action = start_action( - action_type=u"invite-to-magic-folder", - client_num=client_num, - nickname=nickname, - ) - with action.context(): - d = DeferredContext( - self.do_cli( - "magic-folder", - "invite", - "magic:", - nickname_arg, - client_num=client_num, - ) - ) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - return (rc, stdout, stderr) - d.addCallback(_done) - return d.addActionFinish() - - def do_list(self, client_num, json=False): - args = ("magic-folder", "list",) - if json: - args = args + ("--json",) - d = self.do_cli(*args, client_num=client_num) - def _done(args): - (rc, stdout, stderr) = args - return (rc, stdout, stderr) - d.addCallback(_done) - return d - - def do_status(self, client_num, name=None): - args = ("magic-folder", "status",) - if name is not None: - args = args + ("--name", name) - d = self.do_cli(*args, client_num=client_num) - def _done(args): - (rc, stdout, stderr) = args - return (rc, stdout, stderr) - d.addCallback(_done) - return d - - def do_join(self, client_num, local_dir, invite_code): - action = start_action( - action_type=u"join-magic-folder", - client_num=client_num, - local_dir=local_dir, - invite_code=invite_code, - ) - with action.context(): - precondition(isinstance(local_dir, unicode), local_dir=local_dir) - precondition(isinstance(invite_code, str), invite_code=invite_code) - local_dir_arg = unicode_to_argv(local_dir) - d = DeferredContext( - self.do_cli( - "magic-folder", - "join", - invite_code, - local_dir_arg, - client_num=client_num, - ) - ) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - self.failUnlessEqual(stdout, "") - self.failUnlessEqual(stderr, "") - return (rc, stdout, stderr) - d.addCallback(_done) - return d.addActionFinish() - - def do_leave(self, client_num): - d = self.do_cli("magic-folder", "leave", client_num=client_num) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - return (rc, stdout, stderr) - d.addCallback(_done) - return d - - def check_joined_config(self, client_num, upload_dircap): - """Tests that our collective directory has the readonly cap of - our upload directory. 
- """ - action = start_action(action_type=u"check-joined-config") - with action.context(): - collective_readonly_cap = self.get_caps_from_files(client_num)[0] - d = DeferredContext( - self.do_cli( - "ls", "--json", - collective_readonly_cap, - client_num=client_num, - ) - ) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - return (rc, stdout, stderr) - d.addCallback(_done) - def test_joined_magic_folder(args): - (rc, stdout, stderr) = args - readonly_cap = unicode(uri.from_string(upload_dircap).get_readonly().to_string(), 'utf-8') - s = re.search(readonly_cap, stdout) - self.failUnless(s is not None) - return None - d.addCallback(test_joined_magic_folder) - return d.addActionFinish() - - def get_caps_from_files(self, client_num): - from allmydata.frontends.magic_folder import load_magic_folders - folders = load_magic_folders(self.get_clientdir(i=client_num)) - mf = folders["default"] - return mf['collective_dircap'], mf['upload_dircap'] - - @log_call - def check_config(self, client_num, local_dir): - client_config = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "tahoe.cfg")) - mf_yaml = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "private", "magic_folders.yaml")) - local_dir_utf8 = local_dir.encode('utf-8') - magic_folder_config = "[magic_folder]\nenabled = True" - self.assertIn(magic_folder_config, client_config) - self.assertIn(local_dir_utf8, mf_yaml) - - def create_invite_join_magic_folder(self, nickname, local_dir): - nickname_arg = unicode_to_argv(nickname) - local_dir_arg = unicode_to_argv(local_dir) - # the --debug means we get real exceptions on failures - d = self.do_cli("magic-folder", "--debug", "create", "magic:", nickname_arg, local_dir_arg) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0, stdout + stderr) - - client = self.get_client() - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - self.collective_dirnode = client.create_node_from_uri(self.collective_dircap) - self.upload_dirnode = client.create_node_from_uri(self.upload_dircap) - d.addCallback(_done) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, local_dir)) - return d - - # XXX should probably just be "tearDown"... 
- @log_call_deferred(action_type=u"test:cli:magic-folder:cleanup") - def cleanup(self, res): - d = DeferredContext(defer.succeed(None)) - def _clean(ign): - return self.magicfolder.disownServiceParent() - - d.addCallback(_clean) - d.addCallback(lambda ign: res) - return d.result - - def init_magicfolder(self, client_num, upload_dircap, collective_dircap, local_magic_dir, clock): - dbfile = abspath_expanduser_unicode(u"magicfolder_default.sqlite", base=self.get_clientdir(i=client_num)) - magicfolder = MagicFolder( - client=self.get_client(client_num), - upload_dircap=upload_dircap, - collective_dircap=collective_dircap, - local_path_u=local_magic_dir, - dbfile=dbfile, - umask=0o077, - name='default', - clock=clock, - uploader_delay=0.2, - downloader_delay=0, - ) - - magicfolder.setServiceParent(self.get_client(client_num)) - magicfolder.ready() - return magicfolder - - def setup_alice_and_bob(self, alice_clock=reactor, bob_clock=reactor): - self.set_up_grid(num_clients=2, oneshare=True) - - self.alice_magicfolder = None - self.bob_magicfolder = None - - alice_magic_dir = abspath_expanduser_unicode(u"Alice-magic", base=self.basedir) - self.mkdir_nonascii(alice_magic_dir) - bob_magic_dir = abspath_expanduser_unicode(u"Bob-magic", base=self.basedir) - self.mkdir_nonascii(bob_magic_dir) - - # Alice creates a Magic Folder, invites herself and joins. - d = self.do_create_magic_folder(0) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code(result): - self.invite_code = result[1].strip() - d.addCallback(get_invite_code) - d.addCallback(lambda ign: self.do_join(0, alice_magic_dir, self.invite_code)) - def get_alice_caps(ign): - self.alice_collective_dircap, self.alice_upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_alice_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.alice_upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, alice_magic_dir)) - def get_Alice_magicfolder(result): - self.alice_magicfolder = self.init_magicfolder(0, self.alice_upload_dircap, - self.alice_collective_dircap, - alice_magic_dir, alice_clock) - return result - d.addCallback(get_Alice_magicfolder) - - # Alice invites Bob. Bob joins. 
- d.addCallback(lambda ign: self.do_invite(0, self.bob_nickname)) - def get_invite_code(result): - self.invite_code = result[1].strip() - d.addCallback(get_invite_code) - d.addCallback(lambda ign: self.do_join(1, bob_magic_dir, self.invite_code)) - def get_bob_caps(ign): - self.bob_collective_dircap, self.bob_upload_dircap = self.get_caps_from_files(1) - d.addCallback(get_bob_caps) - d.addCallback(lambda ign: self.check_joined_config(1, self.bob_upload_dircap)) - d.addCallback(lambda ign: self.check_config(1, bob_magic_dir)) - def get_Bob_magicfolder(result): - self.bob_magicfolder = self.init_magicfolder(1, self.bob_upload_dircap, - self.bob_collective_dircap, - bob_magic_dir, bob_clock) - return result - d.addCallback(get_Bob_magicfolder) - return d - - -class ListMagicFolder(MagicFolderCLITestMixin, unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - yield super(ListMagicFolder, self).setUp() - self.basedir="mf_list" - self.set_up_grid(oneshare=True) - self.local_dir = os.path.join(self.basedir, "magic") - os.mkdir(self.local_dir) - self.abs_local_dir_u = abspath_expanduser_unicode(unicode(self.local_dir), long_path=False) - - yield self.do_create_magic_folder(0) - (rc, stdout, stderr) = yield self.do_invite(0, self.alice_nickname) - invite_code = stdout.strip() - yield self.do_join(0, unicode(self.local_dir), invite_code) - - @defer.inlineCallbacks - def tearDown(self): - yield super(ListMagicFolder, self).tearDown() - shutil.rmtree(self.basedir) - - @defer.inlineCallbacks - def test_list(self): - rc, stdout, stderr = yield self.do_list(0) - self.failUnlessEqual(rc, 0) - self.assertIn("default:", stdout) - - @defer.inlineCallbacks - def test_list_none(self): - yield self.do_leave(0) - rc, stdout, stderr = yield self.do_list(0) - self.failUnlessEqual(rc, 0) - self.assertIn("No magic-folders", stdout) - - @defer.inlineCallbacks - def test_list_json(self): - rc, stdout, stderr = yield self.do_list(0, json=True) - self.failUnlessEqual(rc, 0) - res = json.loads(stdout) - self.assertEqual( - dict(default=dict(directory=self.abs_local_dir_u)), - res, - ) - - -class StatusMagicFolder(MagicFolderCLITestMixin, unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - yield super(StatusMagicFolder, self).setUp() - self.basedir="mf_list" - self.set_up_grid(oneshare=True) - self.local_dir = os.path.join(self.basedir, "magic") - os.mkdir(self.local_dir) - self.abs_local_dir_u = abspath_expanduser_unicode(unicode(self.local_dir), long_path=False) - - yield self.do_create_magic_folder(0) - (rc, stdout, stderr) = yield self.do_invite(0, self.alice_nickname) - invite_code = stdout.strip() - yield self.do_join(0, unicode(self.local_dir), invite_code) - - @defer.inlineCallbacks - def tearDown(self): - yield super(StatusMagicFolder, self).tearDown() - shutil.rmtree(self.basedir) - - @defer.inlineCallbacks - def test_status(self): - now = datetime.now() - then = now.replace(year=now.year - 5) - five_year_interval = (now - then).total_seconds() - - def json_for_cap(options, cap): - if cap.startswith('URI:DIR2:'): - return ( - 'dirnode', - { - "children": { - "foo": ('filenode', { - "size": 1234, - "metadata": { - "tahoe": { - "linkcrtime": (time.time() - five_year_interval), - }, - "version": 1, - }, - "ro_uri": "read-only URI", - }) - } - } - ) - else: - return ('dirnode', {"children": {}}) - jc = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_cap", - side_effect=json_for_cap, - ) - - def json_for_frag(options, fragment, method='GET', post_args=None): - return {} - 
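    # A note on the mocked shapes above: the values returned by json_for_cap
    # follow Tahoe's web-API "?t=json" response format, a two-element
    # (nodetype, data) structure in which dirnode data carries a "children"
    # mapping and each child is itself a (nodetype, data) pair with "size",
    # "metadata" and "ro_uri" fields; that is the shape the patched
    # _get_json_for_cap caller in the status command expects to unpack.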
jf = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_fragment", - side_effect=json_for_frag, - ) - - with jc, jf: - rc, stdout, stderr = yield self.do_status(0) - self.failUnlessEqual(rc, 0) - self.assertIn("default", stdout) - - self.assertIn( - "foo (1.23 kB): good, version=1, created 5 years ago", - stdout, - ) - - @defer.inlineCallbacks - def test_status_child_not_dirnode(self): - def json_for_cap(options, cap): - if cap.startswith('URI:DIR2'): - return ( - 'dirnode', - { - "children": { - "foo": ('filenode', { - "size": 1234, - "metadata": { - "tahoe": { - "linkcrtime": 0.0, - }, - "version": 1, - }, - "ro_uri": "read-only URI", - }) - } - } - ) - elif cap == "read-only URI": - return { - "error": "bad stuff", - } - else: - return ('dirnode', {"children": {}}) - jc = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_cap", - side_effect=json_for_cap, - ) - - def json_for_frag(options, fragment, method='GET', post_args=None): - return {} - jf = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_fragment", - side_effect=json_for_frag, - ) - - with jc, jf: - rc, stdout, stderr = yield self.do_status(0) - self.failUnlessEqual(rc, 0) - - self.assertIn( - "expected a dirnode", - stdout + stderr, - ) - - @defer.inlineCallbacks - def test_status_error_not_dircap(self): - def json_for_cap(options, cap): - if cap.startswith('URI:DIR2:'): - return ( - 'filenode', - {} - ) - else: - return ('dirnode', {"children": {}}) - jc = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_cap", - side_effect=json_for_cap, - ) - - def json_for_frag(options, fragment, method='GET', post_args=None): - return {} - jf = mock.patch( - "allmydata.scripts.magic_folder_cli._get_json_for_fragment", - side_effect=json_for_frag, - ) - - with jc, jf: - rc, stdout, stderr = yield self.do_status(0) - self.failUnlessEqual(rc, 2) - self.assertIn( - "magic_folder_dircap isn't a directory capability", - stdout + stderr, - ) - - @defer.inlineCallbacks - def test_status_nothing(self): - rc, stdout, stderr = yield self.do_status(0, name="blam") - self.assertIn("No such magic-folder 'blam'", stderr) - - -class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase): - def test_create_and_then_invite_join(self): - self.basedir = "cli/MagicFolder/create-and-then-invite-join" - self.set_up_grid(oneshare=True) - local_dir = os.path.join(self.basedir, "magic") - os.mkdir(local_dir) - abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False) - - d = self.do_create_magic_folder(0) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code_and_join(args): - (rc, stdout, stderr) = args - invite_code = stdout.strip() - return self.do_join(0, unicode(local_dir), invite_code) - d.addCallback(get_invite_code_and_join) - def get_caps(ign): - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - return d - - def test_create_error(self): - self.basedir = "cli/MagicFolder/create-error" - self.set_up_grid(oneshare=True) - - d = self.do_cli("magic-folder", "create", "m a g i c:", client_num=0) - def _done(args): - (rc, stdout, stderr) = args - self.failIfEqual(rc, 0) - self.failUnlessIn("Alias names cannot contain spaces.", stderr) - d.addCallback(_done) - return d - - @defer.inlineCallbacks - def test_create_duplicate_name(self): - self.basedir = 
"cli/MagicFolder/create-dup" - self.set_up_grid(oneshare=True) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "create", "magic:", "--name", "foo", - client_num=0, - ) - self.assertEqual(rc, 0) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "create", "magic:", "--name", "foo", - client_num=0, - ) - self.assertEqual(rc, 1) - self.assertIn( - "Already have a magic-folder named 'default'", - stderr - ) - - @defer.inlineCallbacks - def test_leave_wrong_folder(self): - self.basedir = "cli/MagicFolder/leave_wrong_folders" - yield self.set_up_grid(oneshare=True) - magic_dir = os.path.join(self.basedir, 'magic') - os.mkdir(magic_dir) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "create", "--name", "foo", "magic:", "my_name", magic_dir, - client_num=0, - ) - self.assertEqual(rc, 0) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "leave", "--name", "bar", - client_num=0, - ) - self.assertNotEqual(rc, 0) - self.assertIn( - "No such magic-folder 'bar'", - stdout + stderr, - ) - - @defer.inlineCallbacks - def test_leave_no_folder(self): - self.basedir = "cli/MagicFolder/leave_no_folders" - yield self.set_up_grid(oneshare=True) - magic_dir = os.path.join(self.basedir, 'magic') - os.mkdir(magic_dir) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "create", "--name", "foo", "magic:", "my_name", magic_dir, - client_num=0, - ) - self.assertEqual(rc, 0) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "leave", "--name", "foo", - client_num=0, - ) - self.assertEqual(rc, 0) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "leave", "--name", "foo", - client_num=0, - ) - self.assertEqual(rc, 1) - self.assertIn( - "No magic-folders at all", - stderr, - ) - - @defer.inlineCallbacks - def test_leave_no_folders_at_all(self): - self.basedir = "cli/MagicFolder/leave_no_folders_at_all" - yield self.set_up_grid(oneshare=True) - - rc, stdout, stderr = yield self.do_cli( - "magic-folder", "leave", - client_num=0, - ) - self.assertEqual(rc, 1) - self.assertIn( - "No magic-folders at all", - stderr, - ) - - def test_create_invite_join(self): - self.basedir = "cli/MagicFolder/create-invite-join" - self.set_up_grid(oneshare=True) - local_dir = os.path.join(self.basedir, "magic") - abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False) - - d = self.do_cli("magic-folder", "create", "magic:", "Alice", local_dir) - def _done(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(_done) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - return d - - def test_help_synopsis(self): - self.basedir = "cli/MagicFolder/help_synopsis" - os.makedirs(self.basedir) - - o = magic_folder_cli.CreateOptions() - o.parent = magic_folder_cli.MagicFolderCommand() - o.parent.getSynopsis() - - def test_create_invite_join_failure(self): - self.basedir = "cli/MagicFolder/create-invite-join-failure" - os.makedirs(self.basedir) - - o = magic_folder_cli.CreateOptions() - o.parent = magic_folder_cli.MagicFolderCommand() - o.parent['node-directory'] = self.basedir - try: - o.parseArgs("magic:", "Alice", "-foo") - except usage.UsageError as e: - self.failUnlessIn("cannot start with '-'", str(e)) - else: - self.fail("expected UsageError") - - def test_join_failure(self): - self.basedir = "cli/MagicFolder/create-join-failure" - 
os.makedirs(self.basedir) - - o = magic_folder_cli.JoinOptions() - o.parent = magic_folder_cli.MagicFolderCommand() - o.parent['node-directory'] = self.basedir - try: - o.parseArgs("URI:invite+URI:code", "-foo") - except usage.UsageError as e: - self.failUnlessIn("cannot start with '-'", str(e)) - else: - self.fail("expected UsageError") - - def test_join_twice_failure(self): - self.basedir = "cli/MagicFolder/create-join-twice-failure" - os.makedirs(self.basedir) - self.set_up_grid(oneshare=True) - local_dir = os.path.join(self.basedir, "magic") - abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False) - - d = self.do_create_magic_folder(0) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code_and_join(args): - (rc, stdout, stderr) = args - self.invite_code = stdout.strip() - return self.do_join(0, unicode(local_dir), self.invite_code) - d.addCallback(get_invite_code_and_join) - def get_caps(ign): - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - def join_again(ignore): - return self.do_cli("magic-folder", "join", self.invite_code, local_dir, client_num=0) - d.addCallback(join_again) - def get_results(result): - (rc, out, err) = result - self.failUnlessEqual(out, "") - self.failUnlessIn("This client already has a magic-folder", err) - self.failIfEqual(rc, 0) - d.addCallback(get_results) - return d - - def test_join_leave_join(self): - self.basedir = "cli/MagicFolder/create-join-leave-join" - os.makedirs(self.basedir) - self.set_up_grid(oneshare=True) - local_dir = os.path.join(self.basedir, "magic") - abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False) - - self.invite_code = None - d = self.do_create_magic_folder(0) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code_and_join(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - self.invite_code = stdout.strip() - return self.do_join(0, unicode(local_dir), self.invite_code) - d.addCallback(get_invite_code_and_join) - def get_caps(ign): - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - d.addCallback(lambda ign: self.do_leave(0)) - - d.addCallback(lambda ign: self.do_join(0, unicode(local_dir), self.invite_code)) - def get_caps(ign): - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - - return d - - def test_join_failures(self): - self.basedir = "cli/MagicFolder/create-join-failures" - os.makedirs(self.basedir) - self.set_up_grid(oneshare=True) - local_dir = os.path.join(self.basedir, "magic") - os.mkdir(local_dir) - abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False) - - self.invite_code = None - d = self.do_create_magic_folder(0) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code_and_join(args): - (rc, stdout, stderr) = args - self.failUnlessEqual(rc, 0) - self.invite_code = stdout.strip() - return self.do_join(0, unicode(local_dir), self.invite_code) 
- d.addCallback(get_invite_code_and_join) - def get_caps(ign): - self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u)) - - def check_success(result): - (rc, out, err) = result - self.failUnlessEqual(rc, 0, out + err) - def check_failure(result): - (rc, out, err) = result - self.failIfEqual(rc, 0) - - def leave(ign): - return self.do_cli("magic-folder", "leave", client_num=0) - d.addCallback(leave) - d.addCallback(check_success) - - magic_folder_db_file = os.path.join(self.get_clientdir(i=0), u"private", u"magicfolder_default.sqlite") - - def check_join_if_file(my_file): - fileutil.write(my_file, "my file data") - d2 = self.do_cli("magic-folder", "join", self.invite_code, local_dir, client_num=0) - d2.addCallback(check_failure) - return d2 - - for my_file in [magic_folder_db_file]: - d.addCallback(lambda ign, my_file: check_join_if_file(my_file), my_file) - d.addCallback(leave) - # we didn't successfully join, so leaving should be an error - d.addCallback(check_failure) - - return d - -class CreateErrors(unittest.TestCase): - def test_poll_interval(self): - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "create", "--poll-interval=frog", - "alias:") - self.assertEqual(str(e), "--poll-interval must be a positive integer") - - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "create", "--poll-interval=-4", - "alias:") - self.assertEqual(str(e), "--poll-interval must be a positive integer") - - def test_alias(self): - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "create", "no-colon") - self.assertEqual(str(e), "An alias must end with a ':' character.") - - def test_nickname(self): - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "create", "alias:", "nickname") - self.assertEqual(str(e), "If NICKNAME is specified then LOCAL_DIR must also be specified.") - -class InviteErrors(unittest.TestCase): - def test_alias(self): - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "invite", "no-colon") - self.assertEqual(str(e), "An alias must end with a ':' character.") - -class JoinErrors(unittest.TestCase): - def test_poll_interval(self): - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "join", "--poll-interval=frog", - "code", "localdir") - self.assertEqual(str(e), "--poll-interval must be a positive integer") - - e = self.assertRaises(usage.UsageError, parse_cli, - "magic-folder", "join", "--poll-interval=-2", - "code", "localdir") - self.assertEqual(str(e), "--poll-interval must be a positive integer") diff --git a/src/allmydata/test/test_client.py b/src/allmydata/test/test_client.py index 824db82fe..85d508093 100644 --- a/src/allmydata/test/test_client.py +++ b/src/allmydata/test/test_client.py @@ -37,7 +37,6 @@ from testtools.twistedsupport import ( ) import allmydata -import allmydata.frontends.magic_folder import allmydata.util.log from allmydata.node import OldConfigError, OldConfigOptionError, UnescapedHashError, _Config, create_node_dir @@ -658,104 +657,6 @@ class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.Test yield _check("helper.furl = None", None) yield _check("helper.furl = pb://blah\n", "pb://blah") - @defer.inlineCallbacks - def test_create_magic_folder_service(self): - """ - providing magic-folder options actually creates a MagicFolder service - 
""" - boom = False - class Boom(Exception): - pass - - class MockMagicFolder(allmydata.frontends.magic_folder.MagicFolder): - name = 'magic-folder' - - def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask, name, - inotify=None, uploader_delay=1.0, clock=None, downloader_delay=3): - if boom: - raise Boom() - - service.MultiService.__init__(self) - self.client = client - self._umask = umask - self.upload_dircap = upload_dircap - self.collective_dircap = collective_dircap - self.local_dir = local_path_u - self.dbfile = dbfile - self.inotify = inotify - - def startService(self): - self.running = True - - def stopService(self): - self.running = False - - def ready(self): - pass - - self.patch(allmydata.frontends.magic_folder, 'MagicFolder', MockMagicFolder) - - upload_dircap = "URI:DIR2:blah" - local_dir_u = self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir") - local_dir_utf8 = local_dir_u.encode('utf-8') - config = (BASECONFIG + - "[storage]\n" + - "enabled = false\n" + - "[magic_folder]\n" + - "enabled = true\n") - - basedir1 = "test_client.Basic.test_create_magic_folder_service1" - os.mkdir(basedir1) - os.mkdir(local_dir_u) - - # which config-entry should be missing? - fileutil.write(os.path.join(basedir1, "tahoe.cfg"), - config + "local.directory = " + local_dir_utf8 + "\n") - with self.assertRaises(IOError): - yield client.create_client(basedir1) - - # local.directory entry missing .. but that won't be an error - # now, it'll just assume there are not magic folders - # .. hrm...should we make that an error (if enabled=true but - # there's not yaml AND no local.directory?) - fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config) - fileutil.write(os.path.join(basedir1, "private", "magic_folder_dircap"), "URI:DIR2:blah") - fileutil.write(os.path.join(basedir1, "private", "collective_dircap"), "URI:DIR2:meow") - - fileutil.write(os.path.join(basedir1, "tahoe.cfg"), - config.replace("[magic_folder]\n", "[drop_upload]\n")) - - with self.assertRaises(OldConfigOptionError): - yield client.create_client(basedir1) - - fileutil.write(os.path.join(basedir1, "tahoe.cfg"), - config + "local.directory = " + local_dir_utf8 + "\n") - c1 = yield client.create_client(basedir1) - magicfolder = c1.getServiceNamed('magic-folder') - self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder) - self.failUnlessReallyEqual(magicfolder.client, c1) - self.failUnlessReallyEqual(magicfolder.upload_dircap, upload_dircap) - self.failUnlessReallyEqual(os.path.basename(magicfolder.local_dir), local_dir_u) - self.failUnless(magicfolder.inotify is None, magicfolder.inotify) - # It doesn't start until the client starts. - self.assertFalse(magicfolder.running) - - # See above. 
- boom = True - - basedir2 = "test_client.Basic.test_create_magic_folder_service2" - os.mkdir(basedir2) - os.mkdir(os.path.join(basedir2, "private")) - fileutil.write(os.path.join(basedir2, "tahoe.cfg"), - BASECONFIG + - "[magic_folder]\n" + - "enabled = true\n" + - "local.directory = " + local_dir_utf8 + "\n") - fileutil.write(os.path.join(basedir2, "private", "magic_folder_dircap"), "URI:DIR2:blah") - fileutil.write(os.path.join(basedir2, "private", "collective_dircap"), "URI:DIR2:meow") - with self.assertRaises(Boom): - yield client.create_client(basedir2) - def flush_but_dont_ignore(res): d = flushEventualQueue() diff --git a/src/allmydata/test/test_inotify.py b/src/allmydata/test/test_inotify.py deleted file mode 100644 index 9f618a34a..000000000 --- a/src/allmydata/test/test_inotify.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) Twisted Matrix Laboratories. -# See LICENSE for details. - -""" -Tests for the inotify-alike implementation L{allmydata.watchdog}. -""" - -# Note: See https://twistedmatrix.com/trac/ticket/8915 for a proposal -# to avoid all of this duplicated code from Twisted. - -from twisted.internet import defer, reactor -from twisted.python import filepath, runtime - -from allmydata.frontends.magic_folder import get_inotify_module -from .common import ( - AsyncTestCase, - skipIf, -) -inotify = get_inotify_module() - - -@skipIf(runtime.platformType == "win32", "inotify does not yet work on windows") -class INotifyTests(AsyncTestCase): - """ - Define all the tests for the basic functionality exposed by - L{inotify.INotify}. - """ - def setUp(self): - self.dirname = filepath.FilePath(self.mktemp()) - self.dirname.createDirectory() - self.inotify = inotify.INotify() - self.inotify.startReading() - self.addCleanup(self.inotify.stopReading) - return super(INotifyTests, self).setUp() - - - def _notificationTest(self, mask, operation, expectedPath=None): - """ - Test notification from some filesystem operation. - - @param mask: The event mask to use when setting up the watch. - - @param operation: A function which will be called with the - name of a file in the watched directory and which should - trigger the event. - - @param expectedPath: Optionally, the name of the path which is - expected to come back in the notification event; this will - also be passed to C{operation} (primarily useful when the - operation is being done to the directory itself, not a - file in it). - - @return: A L{Deferred} which fires successfully when the - expected event has been received or fails otherwise. - """ - if expectedPath is None: - expectedPath = self.dirname.child("foo.bar") - notified = defer.Deferred() - def cbNotified(result): - (watch, filename, events) = result - self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode()) - self.assertTrue(events & mask) - self.inotify.ignore(self.dirname) - notified.addCallback(cbNotified) - - def notify_event(*args): - notified.callback(args) - self.inotify.watch( - self.dirname, mask=mask, - callbacks=[notify_event]) - operation(expectedPath) - return notified - - - def test_modify(self): - """ - Writing to a file in a monitored directory sends an - C{inotify.IN_MODIFY} event to the callback. - """ - def operation(path): - with path.open("w") as fObj: - fObj.write(b'foo') - - return self._notificationTest(inotify.IN_MODIFY, operation) - - - def test_attrib(self): - """ - Changing the metadata of a file in a monitored directory - sends an C{inotify.IN_ATTRIB} event to the callback. - """ - def operation(path): - # Create the file. 
- path.touch() - # Modify the file's attributes. - path.touch() - - return self._notificationTest(inotify.IN_ATTRIB, operation) - - - def test_closeWrite(self): - """ - Closing a file which was open for writing in a monitored - directory sends an C{inotify.IN_CLOSE_WRITE} event to the - callback. - """ - def operation(path): - path.open("w").close() - - return self._notificationTest(inotify.IN_CLOSE_WRITE, operation) - - - def test_delete(self): - """ - Deleting a file in a monitored directory sends an - C{inotify.IN_DELETE} event to the callback. - """ - expectedPath = self.dirname.child("foo.bar") - expectedPath.touch() - notified = defer.Deferred() - def cbNotified(result): - (watch, filename, events) = result - self.assertEqual(filename.asBytesMode(), expectedPath.asBytesMode()) - self.assertTrue(events & inotify.IN_DELETE) - notified.addCallback(cbNotified) - self.inotify.watch( - self.dirname, mask=inotify.IN_DELETE, - callbacks=[lambda *args: notified.callback(args)]) - expectedPath.remove() - return notified - - - def test_humanReadableMask(self): - """ - L{inotify.humanReadableMask} translates all the possible event masks to a - human readable string. - """ - for mask, value in inotify._FLAG_TO_HUMAN: - self.assertEqual(inotify.humanReadableMask(mask)[0], value) - - checkMask = ( - inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN) - self.assertEqual( - set(inotify.humanReadableMask(checkMask)), - set(['close_write', 'access', 'open'])) - - - def test_noAutoAddSubdirectory(self): - """ - L{inotify.INotify.watch} with autoAdd==False will stop inotify - from watching subdirectories created under the watched one. - """ - def _callback(wp, fp, mask): - # We are notified before we actually process new - # directories, so we need to defer this check. 
- def _(): - try: - self.assertFalse(self.inotify._isWatched(subdir)) - d.callback(None) - except Exception: - d.errback() - reactor.callLater(0, _) - - checkMask = inotify.IN_ISDIR | inotify.IN_CREATE - self.inotify.watch( - self.dirname, mask=checkMask, autoAdd=False, - callbacks=[_callback]) - subdir = self.dirname.child('test') - d = defer.Deferred() - subdir.createDirectory() - return d diff --git a/src/allmydata/test/test_magic_folder.py b/src/allmydata/test/test_magic_folder.py deleted file mode 100644 index c39027eea..000000000 --- a/src/allmydata/test/test_magic_folder.py +++ /dev/null @@ -1,2425 +0,0 @@ -from __future__ import print_function - -import os, sys, time -import stat, shutil, json -import mock -from os.path import join, exists, isdir -from errno import ENOENT - -from twisted.internet import defer, task, reactor -from twisted.python.runtime import platform -from twisted.python.filepath import FilePath - -from testtools.matchers import ( - Not, - Is, - ContainsDict, - Equals, -) - -from eliot import ( - Message, - start_action, - log_call, -) -from eliot.twisted import DeferredContext - -from allmydata.interfaces import ( - IDirectoryNode, - NoSharesError, -) -from allmydata.util.assertutil import precondition - -from allmydata.util import fake_inotify, fileutil, configutil, yamlutil -from allmydata.util.encodingutil import get_filesystem_encoding, to_filepath -from allmydata.util.consumer import download_to_data -from allmydata.test.no_network import GridTestMixin -from allmydata.test.common_util import ReallyEqualMixin -from .common import ( - ShouldFailMixin, - SyncTestCase, - AsyncTestCase, - skipIf, -) -from .cli.test_magic_folder import MagicFolderCLITestMixin - -from allmydata.frontends import magic_folder -from allmydata.frontends.magic_folder import ( - MagicFolder, WriteFileMixin, - ConfigurationError, -) -from allmydata import magicfolderdb, magicpath -from allmydata.util.fileutil import get_pathinfo -from allmydata.util.fileutil import abspath_expanduser_unicode -from allmydata.immutable.upload import Data -from allmydata.mutable.common import ( - UnrecoverableFileError, -) - -from ..util.eliotutil import ( - inline_callbacks, - log_call_deferred, -) - -_debug = False - -try: - magic_folder.get_inotify_module() -except NotImplementedError: - support_missing = True - support_message = ( - "Magic Folder support can only be tested for-real on an OS that " - "supports inotify or equivalent." - ) -else: - support_missing = False - support_message = None - -if platform.isMacOSX(): - def modified_mtime_barrier(path): - """ - macOS filesystem (HFS+) has one second resolution on filesystem - modification time metadata. Make sure that code running after this - function which modifies the file will produce a changed mtime on that - file. - """ - try: - mtime = path.getModificationTime() - except OSError as e: - if e.errno == ENOENT: - # If the file does not exist yet, there is no current mtime - # value that might match a future mtime value. We have - # nothing to do. - return - # Propagate any other errors as we don't know what's going on. - raise - if int(time.time()) == int(mtime): - # The current time matches the file's modification time, to the - # resolution of the filesystem metadata. Therefore, change the - # current time. 
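    # Concretely, with one-second mtime granularity: a write at t=1000.2
    # stores mtime 1000, and a second write at t=1000.8 leaves the stored
    # mtime at 1000, so mtime-based change detection would miss the second
    # write. Sleeping one second here pushes the upcoming write into a
    # later whole second, guaranteeing a changed mtime.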
- time.sleep(1) -else: - def modified_mtime_barrier(path): - """ - non-macOS platforms have sufficiently high-resolution file modification - time metadata that nothing in particular is required to ensure a - modified mtime as a result of a future write. - """ - - -class NewConfigUtilTests(SyncTestCase): - - def setUp(self): - # some tests look at the umask of created directories or files - # so we set an explicit one - old_umask = os.umask(0o022) - self.addCleanup(lambda: os.umask(old_umask)) - self.basedir = abspath_expanduser_unicode(unicode(self.mktemp())) - os.mkdir(self.basedir) - self.local_dir = abspath_expanduser_unicode(unicode(self.mktemp())) - os.mkdir(self.local_dir) - privdir = join(self.basedir, "private") - os.mkdir(privdir) - - self.poll_interval = 60 - self.collective_dircap = u"a" * 32 - self.magic_folder_dircap = u"b" * 32 - - self.folders = { - u"default": { - u"directory": self.local_dir, - u"upload_dircap": self.magic_folder_dircap, - u"collective_dircap": self.collective_dircap, - u"poll_interval": self.poll_interval, - } - } - - # we need a bit of tahoe.cfg - self.write_tahoe_config( - self.basedir, - u"[magic_folder]\n" - u"enabled = True\n", - ) - # ..and the yaml - self.write_magic_folder_config(self.basedir, self.folders) - return super(NewConfigUtilTests, self).setUp() - - def write_tahoe_config(self, basedir, tahoe_config): - with open(join(basedir, u"tahoe.cfg"), "w") as f: - f.write(tahoe_config) - - def write_magic_folder_config(self, basedir, folder_configuration): - yaml_fname = join(basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write(yamlutil.safe_dump({u"magic-folders": folder_configuration})) - - def test_load(self): - folders = magic_folder.load_magic_folders(self.basedir) - self.assertEqual(['default'], list(folders.keys())) - self.assertEqual(folders['default'][u'umask'], 0o077) - - def test_load_makes_directory(self): - """ - If the *directory* does not exist then it is created by - ``load_magic_folders``. - """ - os.rmdir(self.local_dir) - # Just pick some arbitrary bits. - # rwxr-xr-- - perm = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH - self.folders[u"default"][u"umask"] = (0o777 & ~perm) - self.write_magic_folder_config(self.basedir, self.folders) - - magic_folder.load_magic_folders(self.basedir) - - # It is created. - self.assertTrue( - isdir(self.local_dir), - "magic-folder local directory {} was not created".format( - self.local_dir, - ), - ) - # It has permissions determined by the configured umask. - if sys.platform != "win32": - self.assertEqual( - perm, - stat.S_IMODE(os.stat(self.local_dir).st_mode), - ) - else: - # Do directories even have permissions on Windows? - print("Not asserting directory-creation mode on windows") - - def test_directory_collision(self): - """ - If a non-directory already exists at the magic folder's configured local - directory path, ``load_magic_folders`` raises an exception. - """ - os.rmdir(self.local_dir) - open(self.local_dir, "w").close() - - with self.assertRaises(ConfigurationError) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "exists and is not a directory", - str(ctx.exception), - ) - - def test_directory_creation_error(self): - """ - If a directory at the magic folder's configured local directory path - cannot be created for some other reason, ``load_magic_folders`` raises - an exception. 
- """ - os.rmdir(self.local_dir) - open(self.local_dir, "w").close() - self.folders[u"default"][u"directory"] = self.local_dir + "/foo" - self.write_magic_folder_config(self.basedir, self.folders) - - with self.assertRaises(ConfigurationError) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "could not be created", - str(ctx.exception), - ) - - def test_both_styles_of_config(self): - os.unlink(join(self.basedir, u"private", u"magic_folders.yaml")) - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "[magic_folder] is enabled but has no YAML file and no 'local.directory' option", - str(ctx.exception) - ) - - def test_wrong_obj(self): - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write('----\n') - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "should contain a dict", - str(ctx.exception) - ) - - def test_no_magic_folders(self): - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write('') - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "should contain a dict", - str(ctx.exception) - ) - - def test_magic_folders_not_dict(self): - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write('magic-folders: "foo"\n') - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "should be a dict", - str(ctx.exception) - ) - self.assertIn( - "'magic-folders'", - str(ctx.exception) - ) - - def test_wrong_umask_obj(self): - """ - If a umask is given for a magic-folder that is not an integer, an - exception is raised. 
- """ - self.folders[u"default"][u"umask"] = "0077" - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write(yamlutil.safe_dump({u"magic-folders": self.folders})) - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "umask must be an integer", - str(ctx.exception) - ) - - def test_wrong_sub_obj(self): - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write("magic-folders:\n default: foo\n") - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "must itself be a dict", - str(ctx.exception) - ) - - def test_missing_interval(self): - del self.folders[u"default"]["poll_interval"] - yaml_fname = join(self.basedir, u"private", u"magic_folders.yaml") - with open(yaml_fname, "w") as f: - f.write(yamlutil.safe_dump({u"magic-folders": self.folders})) - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "missing 'poll_interval'", - str(ctx.exception) - ) - - -class LegacyConfigUtilTests(SyncTestCase): - - def setUp(self): - # create a valid 'old style' magic-folder configuration - self.basedir = abspath_expanduser_unicode(unicode(self.mktemp())) - os.mkdir(self.basedir) - self.local_dir = abspath_expanduser_unicode(unicode(self.mktemp())) - os.mkdir(self.local_dir) - privdir = join(self.basedir, "private") - os.mkdir(privdir) - - # state tests might need to know - self.poll_interval = 60 - self.collective_dircap = u"a" * 32 - self.magic_folder_dircap = u"b" * 32 - - # write fake config structure - with open(join(self.basedir, u"tahoe.cfg"), "w") as f: - f.write( - u"[magic_folder]\n" - u"enabled = True\n" - u"local.directory = {}\n" - u"poll_interval = {}\n".format( - self.local_dir, - self.poll_interval, - ) - ) - with open(join(privdir, "collective_dircap"), "w") as f: - f.write("{}\n".format(self.collective_dircap)) - with open(join(privdir, "magic_folder_dircap"), "w") as f: - f.write("{}\n".format(self.magic_folder_dircap)) - with open(join(privdir, "magicfolderdb.sqlite"), "w") as f: - pass - return super(LegacyConfigUtilTests, self).setUp() - - def test_load_legacy_no_dir(self): - expected = self.local_dir + 'foo' - with open(join(self.basedir, u"tahoe.cfg"), "w") as f: - f.write( - u"[magic_folder]\n" - u"enabled = True\n" - u"local.directory = {}\n" - u"poll_interval = {}\n".format( - expected, - self.poll_interval, - ) - ) - - magic_folder.load_magic_folders(self.basedir) - - self.assertTrue( - isdir(expected), - "magic-folder local directory {} was not created".format( - expected, - ), - ) - - def test_load_legacy_not_a_dir(self): - with open(join(self.basedir, u"tahoe.cfg"), "w") as f: - f.write( - u"[magic_folder]\n" - u"enabled = True\n" - u"local.directory = {}\n" - u"poll_interval = {}\n".format( - self.local_dir + "foo", - self.poll_interval, - ) - ) - with open(self.local_dir + "foo", "w") as f: - f.write("not a directory") - - with self.assertRaises(ConfigurationError) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "is not a directory", - str(ctx.exception) - ) - - def test_load_legacy_and_new(self): - with open(join(self.basedir, u"private", u"magic_folders.yaml"), "w") as f: - f.write("---") - - with self.assertRaises(Exception) as ctx: - magic_folder.load_magic_folders(self.basedir) - self.assertIn( - "both old-style configuration and new-style", - 
str(ctx.exception) - ) - - def test_upgrade(self): - # test data is created in setUp; upgrade config - magic_folder._upgrade_magic_folder_config(self.basedir) - - # ensure old stuff is gone - self.assertFalse( - exists(join(self.basedir, "private", "collective_dircap")) - ) - self.assertFalse( - exists(join(self.basedir, "private", "magic_folder_dircap")) - ) - self.assertFalse( - exists(join(self.basedir, "private", "magicfolderdb.sqlite")) - ) - - # ensure we've got the new stuff - self.assertTrue( - exists(join(self.basedir, "private", "magicfolder_default.sqlite")) - ) - # what about config? - config = configutil.get_config(join(self.basedir, u"tahoe.cfg")) - self.assertFalse(config.has_option("magic_folder", "local.directory")) - - def test_load_legacy(self): - folders = magic_folder.load_magic_folders(self.basedir) - - self.assertEqual(['default'], list(folders.keys())) - self.assertTrue( - exists(join(self.basedir, "private", "collective_dircap")) - ) - self.assertTrue( - exists(join(self.basedir, "private", "magic_folder_dircap")) - ) - self.assertTrue( - exists(join(self.basedir, "private", "magicfolderdb.sqlite")) - ) - - def test_load_legacy_upgrade(self): - magic_folder.maybe_upgrade_magic_folders(self.basedir) - folders = magic_folder.load_magic_folders(self.basedir) - - self.assertEqual(['default'], list(folders.keys())) - # 'legacy' files should be gone - self.assertFalse( - exists(join(self.basedir, "private", "collective_dircap")) - ) - self.assertFalse( - exists(join(self.basedir, "private", "magic_folder_dircap")) - ) - self.assertFalse( - exists(join(self.basedir, "private", "magicfolderdb.sqlite")) - ) - - - -class MagicFolderDbTests(SyncTestCase): - - def setUp(self): - self.temp = abspath_expanduser_unicode(unicode(self.mktemp())) - os.mkdir(self.temp) - self.addCleanup(lambda: shutil.rmtree(self.temp)) - dbfile = abspath_expanduser_unicode(u"testdb.sqlite", base=self.temp) - self.db = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1)) - self.addCleanup(lambda: self.db.close()) - self.failUnless(self.db, "unable to create magicfolderdb from %r" % (dbfile,)) - self.failUnlessEqual(self.db.VERSION, 1) - return super(MagicFolderDbTests, self).setUp() - - def test_create(self): - self.db.did_upload_version( - relpath_u=u'fake_path', - version=0, - last_uploaded_uri=None, - last_downloaded_uri='URI:foo', - last_downloaded_timestamp=1234.5, - pathinfo=get_pathinfo(self.temp), # a directory, but should be fine for test - ) - - entry = self.db.get_db_entry(u'fake_path') - self.assertTrue(entry is not None) - self.assertEqual(entry.last_downloaded_uri, 'URI:foo') - - def test_update(self): - self.db.did_upload_version( - relpath_u=u'fake_path', - version=0, - last_uploaded_uri=None, - last_downloaded_uri='URI:foo', - last_downloaded_timestamp=1234.5, - pathinfo=get_pathinfo(self.temp), # a directory, but should be fine for test - ) - self.db.did_upload_version( - relpath_u=u'fake_path', - version=1, - last_uploaded_uri=None, - last_downloaded_uri='URI:bar', - last_downloaded_timestamp=1234.5, - pathinfo=get_pathinfo(self.temp), # a directory, but should be fine for test - ) - - entry = self.db.get_db_entry(u'fake_path') - self.assertTrue(entry is not None) - self.assertEqual(entry.last_downloaded_uri, 'URI:bar') - self.assertEqual(entry.version, 1) - - def test_same_content_different_path(self): - content_uri = 'URI:CHK:27d2yruqwk6zb2w7hkbbfxxbue:ipmszjysmn4vdeaxz7rtxtv3gwv6vrqcg2ktrdmn4oxqqucltxxq:2:4:1052835840' - 
self.db.did_upload_version( - relpath_u=u'path0', - version=0, - last_uploaded_uri=None, - last_downloaded_uri=content_uri, - last_downloaded_timestamp=1234.5, - pathinfo=get_pathinfo(self.temp), # a directory, but should be fine for test - ) - self.db.did_upload_version( - relpath_u=u'path1', - version=0, - last_uploaded_uri=None, - last_downloaded_uri=content_uri, - last_downloaded_timestamp=1234.5, - pathinfo=get_pathinfo(self.temp), # a directory, but should be fine for test - ) - - entry = self.db.get_db_entry(u'path0') - self.assertTrue(entry is not None) - self.assertEqual(entry.last_downloaded_uri, content_uri) - - entry = self.db.get_db_entry(u'path1') - self.assertTrue(entry is not None) - self.assertEqual(entry.last_downloaded_uri, content_uri) - - def test_get_direct_children(self): - """ - ``get_direct_children`` returns a list of ``PathEntry`` representing each - local file in the database which is a direct child of the given path. - """ - def add_file(relpath_u): - self.db.did_upload_version( - relpath_u=relpath_u, - version=0, - last_uploaded_uri=None, - last_downloaded_uri=None, - last_downloaded_timestamp=1234, - pathinfo=get_pathinfo(self.temp), - ) - paths = [ - u"some_random_file", - u"the_target_directory_is_elsewhere", - u"the_target_directory_is_not_this/", - u"the_target_directory_is_not_this/and_not_in_here", - u"the_target_directory/", - u"the_target_directory/foo", - u"the_target_directory/bar", - u"the_target_directory/baz", - u"the_target_directory/quux/", - u"the_target_directory/quux/exclude_grandchildren", - u"the_target_directory/quux/and_great_grandchildren/", - u"the_target_directory/quux/and_great_grandchildren/foo", - u"the_target_directory_is_over/stuff", - u"please_ignore_this_for_sure", - ] - for relpath_u in paths: - add_file(relpath_u) - - expected_paths = [ - u"the_target_directory/foo", - u"the_target_directory/bar", - u"the_target_directory/baz", - u"the_target_directory/quux/", - ] - - actual_paths = list( - localpath.relpath_u - for localpath - in self.db.get_direct_children(u"the_target_directory") - ) - self.assertEqual(expected_paths, actual_paths) - - -def iterate_downloader(magic): - return magic.downloader._processing_iteration() - - -def iterate_uploader(magic): - return magic.uploader._processing_iteration() - -@inline_callbacks -def iterate(magic): - yield iterate_uploader(magic) - yield iterate_downloader(magic) - - -@inline_callbacks -def notify_when_pending(uploader, filename): - with start_action(action_type=u"notify-when-pending", filename=filename): - relpath = uploader._get_relpath(FilePath(filename)) - while not uploader.is_pending(relpath): - Message.log(message_type=u"not-pending") - yield uploader.set_hook('inotify') - - -class FileOperationsHelper(object): - """ - This abstracts all file operations we might do in magic-folder unit-tests. - - This is so we can correctly wait for inotify events to 'actually' - propagate. For the mock tests this is easy, since we're sending - them sychronously. For the Real tests we have to wait for the - actual inotify thing. - """ - _timeout = 30.0 - - def __init__(self, uploader, inject_events=False): - self._uploader = uploader - self._inotify = fake_inotify # fixme? - self._fake_inotify = inject_events - - @log_call_deferred(action_type=u"fileops:move") - def move(self, from_path_u, to_path_u): - from_fname = from_path_u - to_fname = to_path_u - d = self._uploader.set_hook('inotify') - os.rename(from_fname, to_fname) - - self._maybe_notify(to_fname, self._inotify.IN_MOVED_TO) - # hmm? 
we weren't faking IN_MOVED_FROM previously .. but seems like we should have been? - # self._uploader._notifier.event(to_filepath(from_fname), self._inotify.IN_MOVED_FROM) - return d.addTimeout(self._timeout, reactor) - - @log_call_deferred(action_type=u"fileops:write") - def write(self, path_u, contents): - fname = path_u - if not os.path.exists(fname): - self._maybe_notify(fname, self._inotify.IN_CREATE) - - d = notify_when_pending(self._uploader, path_u) - - modified_mtime_barrier(FilePath(fname)) - with open(fname, "wb") as f: - f.write(contents) - - self._maybe_notify(fname, self._inotify.IN_CLOSE_WRITE) - return d.addTimeout(self._timeout, reactor) - - @log_call_deferred(action_type=u"fileops:mkdir") - def mkdir(self, path_u): - fname = path_u - d = self._uploader.set_hook('inotify') - os.mkdir(fname) - self._maybe_notify(fname, self._inotify.IN_CREATE | self._inotify.IN_ISDIR) - return d.addTimeout(self._timeout, reactor) - - @log_call_deferred(action_type=u"fileops:delete") - def delete(self, path_u): - fname = path_u - d = self._uploader.set_hook('inotify') - if os.path.isdir(fname): - remove = os.rmdir - else: - remove = os.unlink - remove(fname) - - self._maybe_notify(fname, self._inotify.IN_DELETE) - return d.addTimeout(self._timeout, reactor) - - def _maybe_notify(self, fname, mask): - if self._fake_inotify: - self._uploader._notifier.event(to_filepath(fname), mask) - - -class CheckerMixin(object): - """ - Factored out of one of the many test classes. - - *Ideally* these should just be bare helper methods, but many of - them already depended upon self.* state. One major problem is that - they're using self.magicfolder *but* some of the alice/bob tests - use this, too, and they just do "self.magicfolder = - self.bob_magicfolder" or whatever before calling them, which is - *horrible*. - """ - def _check_mkdir(self, name_u): - return self._check_file(name_u + u"/", "", directory=True) - - @defer.inlineCallbacks - def _check_file(self, name_u, data, temporary=False, directory=False): - precondition(not (temporary and directory), temporary=temporary, directory=directory) - - # print "%r._check_file(%r, %r, temporary=%r, directory=%r)" % (self, name_u, data, temporary, directory) - previously_uploaded = self._get_count('uploader.objects_succeeded') - previously_disappeared = self._get_count('uploader.objects_disappeared') - - path_u = abspath_expanduser_unicode(name_u, base=self.local_dir) - - if directory: - yield self.fileops.mkdir(path_u) - else: - # We don't use FilePath.setContent() here because it creates a temporary file that - # is renamed into place, which causes events that the test is not expecting. 
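    # (FilePath.setContent() writes to a sibling temporary file and then
    # renames it over the target, so an inotify watcher would observe
    # IN_MOVED_TO on the target rather than the IN_CREATE/IN_CLOSE_WRITE
    # sequence that FileOperationsHelper.write simulates.)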
- yield self.fileops.write(path_u, data) - yield iterate(self.magicfolder) - if temporary: - yield iterate(self.magicfolder) - yield self.fileops.delete(path_u) - - yield iterate(self.magicfolder) - encoded_name_u = magicpath.path2magic(name_u) - - yield self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0) - if temporary: - yield self.failUnlessReallyEqual(self._get_count('uploader.objects_disappeared'), - previously_disappeared + 1) - else: - yield self.magicfolder.uploader._upload_dirnode.list() - x = yield self.magicfolder.uploader._upload_dirnode.get(encoded_name_u) - actual_data = yield download_to_data(x) - self.failUnlessReallyEqual(actual_data, data) - self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), - previously_uploaded + 1) - - self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0) - - @defer.inlineCallbacks - def _check_version_in_dmd(self, magicfolder, relpath_u, expected_version): - encoded_name_u = magicpath.path2magic(relpath_u) - result = yield magicfolder.downloader._get_collective_latest_file(encoded_name_u) - self.assertIsNot( - result, - None, - "collective_latest_file({}) is None".format(encoded_name_u), - ) - node, metadata = result - self.assertIsNot( - metadata, - None, - "collective_latest_file({}) metadata is None".format(encoded_name_u), - ) - self.failUnlessEqual(metadata['version'], expected_version) - - def _check_version_in_local_db(self, magicfolder, relpath_u, expected_version): - db_entry = magicfolder._db.get_db_entry(relpath_u) - if db_entry is not None: - #print "_check_version_in_local_db: %r has version %s" % (relpath_u, version) - self.failUnlessEqual(db_entry.version, expected_version) - - def _check_file_gone(self, magicfolder, relpath_u): - path = os.path.join(magicfolder.uploader._local_path_u, relpath_u) - self.assertTrue(not os.path.exists(path)) - - def _check_uploader_count(self, name, expected, magic=None): - if magic is None: - magic = self.alice_magicfolder - self.failUnlessReallyEqual( - self._get_count( - 'uploader.'+name, - client=magic._client, - ), - expected, - "Pending: {}\n" - "Deque: {}\n".format(magic.uploader._pending, magic.uploader._deque), - ) - - def _check_downloader_count(self, name, expected, magic=None): - self.failUnlessReallyEqual(self._get_count('downloader.'+name, client=(magic or self.bob_magicfolder)._client), - expected) - - def _get_count(self, name, client=None): - counters = (client or self.get_client()).stats_provider.get_stats()["counters"] - return counters.get('magic_folder.%s' % (name,), 0) - - - -class MagicFolderAliceBobTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqualMixin, CheckerMixin): - inject_inotify = False - - def setUp(self): - MagicFolderCLITestMixin.setUp(self) - temp = self.mktemp() - self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding())) - # set_up_grid depends on self.basedir existing - with start_action(action_type=u"set_up_grid"): - self.set_up_grid(num_clients=2, oneshare=True) - - self.alice_clock = task.Clock() - self.bob_clock = task.Clock() - - # this is all just .setup_alice_and_bob(), essentially - self.alice_magicfolder = None - self.bob_magicfolder = None - - self.alice_magic_dir = abspath_expanduser_unicode(u"Alice-magic", base=self.basedir) - self.mkdir_nonascii(self.alice_magic_dir) - self.bob_magic_dir = abspath_expanduser_unicode(u"Bob-magic", base=self.basedir) - self.mkdir_nonascii(self.bob_magic_dir) - - # Alice creates a Magic Folder, invites herself and joins. 
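    # The helper chain below drives the same flow as the CLI commands
    # exercised earlier in this patch: "tahoe magic-folder create magic:",
    # then "tahoe magic-folder invite magic: NICKNAME" (whose stdout is the
    # invite code), then "tahoe magic-folder join INVITE_CODE LOCAL_DIR".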
- d = DeferredContext(self.do_create_magic_folder(0)) - d.addCallback(lambda ign: self.do_invite(0, self.alice_nickname)) - def get_invite_code(result): - self.invite_code = result[1].strip() - d.addCallback(get_invite_code) - d.addCallback(lambda ign: self.do_join(0, self.alice_magic_dir, self.invite_code)) - def get_alice_caps(ign): - self.alice_collective_dircap, self.alice_upload_dircap = self.get_caps_from_files(0) - d.addCallback(get_alice_caps) - d.addCallback(lambda ign: self.check_joined_config(0, self.alice_upload_dircap)) - d.addCallback(lambda ign: self.check_config(0, self.alice_magic_dir)) - def get_Alice_magicfolder(result): - self.alice_magicfolder = self.init_magicfolder(0, self.alice_upload_dircap, - self.alice_collective_dircap, - self.alice_magic_dir, self.alice_clock) - self.alice_fileops = FileOperationsHelper(self.alice_magicfolder.uploader, self.inject_inotify) - d0 = self.alice_magicfolder.uploader.set_hook('iteration') - d1 = self.alice_magicfolder.downloader.set_hook('iteration') - self.alice_clock.advance(self.alice_magicfolder.uploader._pending_delay + 1) - d0.addCallback(lambda ign: d1) - d0.addCallback(lambda ign: result) - return d0 - d.addCallback(get_Alice_magicfolder) - - # Alice invites Bob. Bob joins. - d.addCallback(lambda ign: self.do_invite(0, self.bob_nickname)) - def get_invite_code(result): - self.invite_code = result[1].strip() - d.addCallback(get_invite_code) - d.addCallback(lambda ign: self.do_join(1, self.bob_magic_dir, self.invite_code)) - def get_bob_caps(ign): - self.bob_collective_dircap, self.bob_upload_dircap = self.get_caps_from_files(1) - d.addCallback(get_bob_caps) - d.addCallback(lambda ign: self.check_joined_config(1, self.bob_upload_dircap)) - d.addCallback(lambda ign: self.check_config(1, self.bob_magic_dir)) - def get_Bob_magicfolder(result): - self.bob_magicfolder = self.init_magicfolder(1, self.bob_upload_dircap, - self.bob_collective_dircap, - self.bob_magic_dir, self.bob_clock) - self.bob_fileops = FileOperationsHelper(self.bob_magicfolder.uploader, self.inject_inotify) - d0 = self.bob_magicfolder.uploader.set_hook('iteration') - d1 = self.bob_magicfolder.downloader.set_hook('iteration') - self.bob_clock.advance(self.alice_magicfolder.uploader._pending_delay + 1) - d0.addCallback(lambda ign: d1) - d0.addCallback(lambda ign: result) - return d0 - d.addCallback(get_Bob_magicfolder) - return d.result - - @defer.inlineCallbacks - def tearDown(self): - yield GridTestMixin.tearDown(self) - - for mf in [self.alice_magicfolder, self.bob_magicfolder]: - mf.uploader._clock.advance(mf.uploader._pending_delay + 1) - mf.downloader._clock.advance(mf.downloader._poll_interval + 1) - - @inline_callbacks - def test_alice_delete_bob_restore(self): - alice_fname = os.path.join(self.alice_magic_dir, 'blam') - bob_fname = os.path.join(self.bob_magic_dir, 'blam') - - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - - with start_action(action_type=u"alice:create"): - yield self.alice_fileops.write(alice_fname, 'contents0\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"alice:upload"): - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc - - with start_action(action_type=u"alice:check-upload"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0) - - with start_action(action_type=u"bob:download"): - yield iterate_downloader(self.bob_magicfolder) - - with 
start_action(action_type=u"alice:recheck-upload"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0) - - with start_action(action_type=u"bob:check-download"): - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 0) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0) - yield self.failUnlessReallyEqual( - self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client), - 0 - ) - yield self.failUnlessReallyEqual( - self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), - 1 - ) - - yield iterate(self.bob_magicfolder) # for windows - - - bob_proc = self.bob_magicfolder.uploader.set_hook('processed') - alice_proc = self.alice_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"bob:delete"): - yield self.bob_fileops.delete(bob_fname) - yield iterate(self.bob_magicfolder) # for windows - - with start_action(action_type=u"bob:upload"): - yield iterate_uploader(self.bob_magicfolder) - yield bob_proc - - with start_action(action_type=u"alice:download"): - yield iterate_downloader(self.alice_magicfolder) - yield alice_proc - - # check versions - with start_action(action_type=u"bob:check-upload"): - node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam') - self.assertTrue(metadata['deleted']) - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1) - - with start_action(action_type=u"alice:check-download"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1) - - with start_action(action_type=u"alice:mysterious-iterate"): - # not *entirely* sure why we need to iterate Alice for the - # real test here. But, we do. - yield iterate(self.alice_magicfolder) - - # now alice restores it (alice should upload, bob download) - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"alice:rewrite"): - yield self.alice_fileops.write(alice_fname, 'new contents\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"alice:reupload"): - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc - - with start_action(action_type=u"bob:redownload"): - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc - - # check versions - with start_action(action_type=u"bob:recheck-download"): - node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam') - self.assertTrue('deleted' not in metadata or not metadata['deleted']) - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2) - - with start_action(action_type=u"alice:final-check-upload"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2) - - @inline_callbacks - def test_alice_sees_bobs_delete_with_error(self): - # alice creates a file, bob deletes it -- and we also arrange - # for Alice's file to have "gone missing" as well. 
- alice_fname = os.path.join(self.alice_magic_dir, 'blam')
- bob_fname = os.path.join(self.bob_magic_dir, 'blam')
-
- # alice creates a file, bob downloads it
- alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
- bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
-
- with start_action(action_type=u"alice:create"):
- yield self.alice_fileops.write(alice_fname, 'contents0\n')
- yield iterate(self.alice_magicfolder) # for windows
-
- with start_action(action_type=u"alice:upload"):
- yield iterate_uploader(self.alice_magicfolder)
- yield alice_proc # alice uploads
-
- with start_action(action_type=u"bob:download"):
- yield iterate_downloader(self.bob_magicfolder)
- yield bob_proc # bob downloads
-
- with start_action(action_type=u"mysterious:iterate"):
- yield iterate(self.alice_magicfolder) # for windows
- yield iterate(self.bob_magicfolder) # for windows
-
- # check the state (XXX I had to switch the versions to 0; is that really right? why?)
- with start_action(action_type=u"alice:check"):
- yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0)
- yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
-
- with start_action(action_type=u"bob:check"):
- yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 0)
- yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
- self.failUnlessReallyEqual(
- self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
- 0
- )
- self.failUnlessReallyEqual(
- self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
- 1
- )
-
- bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
- alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
-
- with start_action(action_type=u"bob:delete"):
- yield self.bob_fileops.delete(bob_fname)
-
- with start_action(action_type=u"alice:delete"):
- # just after notifying bob, we also delete alice's,
- # covering the 'except' flow in _rename_deleted_file()
- yield self.alice_fileops.delete(alice_fname)
-
- with start_action(action_type=u"bob:upload-delete"):
- yield iterate_uploader(self.bob_magicfolder)
- yield bob_proc
-
- with start_action(action_type=u"alice:download-delete"):
- yield iterate_downloader(self.alice_magicfolder)
- yield alice_proc
-
- # check versions
- with start_action(action_type=u"bob:check"):
- node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
- self.assertTrue(metadata['deleted'])
- yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
- yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
-
- with start_action(action_type=u"alice:check"):
- yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
- yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
-
- @inline_callbacks
- def test_alice_create_bob_update(self):
- alice_fname = os.path.join(self.alice_magic_dir, 'blam')
- bob_fname = os.path.join(self.bob_magic_dir, 'blam')
-
- # alice creates a file, bob downloads it
- yield self.alice_fileops.write(alice_fname, 'contents0\n')
-
- yield iterate(self.alice_magicfolder)
- yield iterate(self.alice_magicfolder)
- yield iterate(self.bob_magicfolder)
-
- # check the state (XXX ditto, had to switch to version 0; right?)
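# The version numbers asserted above and below follow one rule, visible
# across all of these tests: the first upload of a path is version 0, and
# every subsequent change -- including a delete, which is recorded with
# metadata['deleted'] = True -- bumps the version by one. A compact
# restatement of the progression test_alice_delete_bob_restore asserts
# (plain data for illustration, not magic-folder code):

blam_history = [
    {"version": 0},                   # alice creates 'blam'
    {"version": 1, "deleted": True},  # bob deletes it
    {"version": 2},                   # alice restores it with new contents
]
assert [m["version"] for m in blam_history] == [0, 1, 2]
# ...so the XXX comments wondering about "version 0" are seeing the expected
# number for a freshly created file, not an off-by-one.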
- yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0) - self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0) - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 0) - self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0) - self.failUnlessReallyEqual( - self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client), - 0 - ) - self.failUnlessReallyEqual( - self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), - 1 - ) - - yield iterate(self.bob_magicfolder) - # now bob updates it (bob should upload, alice download) - yield self.bob_fileops.write(bob_fname, 'bob wuz here\n') - - yield iterate(self.bob_magicfolder) - yield iterate(self.bob_magicfolder) - yield iterate(self.alice_magicfolder) - - # check the state - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1) - self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1) - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1) - self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1) - - @inline_callbacks - def test_download_retry(self): - alice_fname = os.path.join(self.alice_magic_dir, 'blam') - # bob_fname = os.path.join(self.bob_magic_dir, 'blam') - - # Alice creates a file - yield self.alice_fileops.write(alice_fname, ''.join(['contents-%04d\n' % i for i in range(1024)])) - yield iterate(self.alice_magicfolder) - # check alice created the file - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0) - self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0) - - # now, we ONLY want to do the scan, not a full iteration of - # the process loop. So we do just the scan part "by hand" in - # Bob's downloader - with start_action(action_type=u"test:perform-scan"): - yield self.bob_magicfolder.downloader._perform_scan() - # while we're delving into internals, I guess we might as well - # confirm that we did queue up an item to download - self.assertEqual(1, len(self.bob_magicfolder.downloader._deque)) - - # break all the servers so the download fails. count=1 because we - # only want the download attempted by _process_deque to fail. After - # that, we want it to work again. - for server_id in self.g.get_all_serverids(): - self.g.break_server(server_id, count=1) - - # now let bob try to do the download. Reach in and call - # _process_deque directly because we are already half-way through a - # logical iteration thanks to the _perform_scan call above. 
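# The count=1 argument to break_server() above is what makes this retry test
# deterministic: each server fails exactly one request and then behaves
# normally again, so the first download attempt raises and the retry
# succeeds. A self-contained sketch of that fail-once shape (BrokenOnce is
# illustrative, not grid or server code):

class BrokenOnce(object):
    def __init__(self, count=1):
        self.remaining = count
    def fetch(self):
        if self.remaining:
            self.remaining -= 1
            raise IOError("server broken")
        return b"share data"

server = BrokenOnce(count=1)
try:
    server.fetch()   # first attempt fails, like _process_deque() below
except IOError:
    pass
assert server.fetch() == b"share data"   # the retry succeeds, like iterate()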
- with start_action(action_type=u"test:process-deque"): - yield self.bob_magicfolder.downloader._process_deque() - - self.eliot_logger.flushTracebacks(UnrecoverableFileError) - logged = self.eliot_logger.flushTracebacks(NoSharesError) - self.assertEqual( - 1, - len(logged), - "Got other than expected single NoSharesError: {}".format(logged), - ) - - # ...however Bob shouldn't have downloaded anything - self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0) - # bob should *not* have downloaded anything, as we failed all the servers - self.failUnlessReallyEqual( - self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), - 0 - ) - self.failUnlessReallyEqual( - self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client), - 1 - ) - - with start_action(action_type=u"test:iterate"): - # now we let Bob try again - yield iterate(self.bob_magicfolder) - - # ...and he should have succeeded - self.failUnlessReallyEqual( - self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), - 1 - ) - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 0) - - @inline_callbacks - def test_conflict_local_change_fresh(self): - alice_fname = os.path.join(self.alice_magic_dir, 'localchange0') - bob_fname = os.path.join(self.bob_magic_dir, 'localchange0') - - # alice creates a file, bob downloads it - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - yield self.alice_fileops.write(alice_fname, 'contents0\n') - yield iterate(self.alice_magicfolder) # for windows - - # before bob downloads, we make a local file for bob by the - # same name - with open(bob_fname, 'w') as f: - f.write("not the right stuff") - - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc # alice uploads - - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc # bob downloads - - # ...so now bob should produce a conflict - self.assertTrue(os.path.exists(bob_fname + '.conflict')) - - @inline_callbacks - def test_conflict_local_change_existing(self): - alice_fname = os.path.join(self.alice_magic_dir, 'localchange1') - bob_fname = os.path.join(self.bob_magic_dir, 'localchange1') - - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"alice:create"): - yield self.alice_fileops.write(alice_fname, 'contents0\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"alice:upload"): - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc # alice uploads - self.assertEqual( - 1, - self._get_count( - 'uploader.files_uploaded', - client=self.alice_magicfolder._client, - ), - ) - - with start_action(action_type=u"bob:download"): - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc # bob downloads - self.assertEqual( - 1, - self._get_count( - 'downloader.objects_downloaded', - client=self.bob_magicfolder._client, - ), - ) - - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"alice:rewrite"): - yield self.alice_fileops.write(alice_fname, 'contents1\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"bob:rewrite"): - # before bob downloads, make a local change - with open(bob_fname, "w") as f: - f.write("bob's local 
change") - - with start_action(action_type=u"alice:reupload"): - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc # alice uploads - self.assertEqual( - 2, - self._get_count( - 'uploader.files_uploaded', - client=self.alice_magicfolder._client, - ), - ) - - with start_action(action_type=u"bob:redownload-and-conflict"): - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc # bob downloads - - self.assertEqual( - 2, - self._get_count( - 'downloader.objects_downloaded', - client=self.bob_magicfolder._client, - ), - ) - self.assertEqual( - 1, - self._get_count( - 'downloader.objects_conflicted', - client=self.bob_magicfolder._client, - ), - ) - - # ...so now bob should produce a conflict - self.assertTrue(os.path.exists(bob_fname + '.conflict')) - - @inline_callbacks - def test_alice_delete_and_restore(self): - alice_fname = os.path.join(self.alice_magic_dir, 'blam') - bob_fname = os.path.join(self.bob_magic_dir, 'blam') - - # alice creates a file, bob downloads it - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"alice:create"): - yield self.alice_fileops.write(alice_fname, 'contents0\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"alice:upload"): - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc # alice uploads - - with start_action(action_type=u"bob:download"): - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc # bob downloads - - with start_action(action_type=u"alice:check"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 0) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0) - - with start_action(action_type=u"bob:check"): - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 0) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0) - yield self.failUnlessReallyEqual( - self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client), - 0 - ) - yield self.failUnlessReallyEqual( - self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), - 1 - ) - self.failUnless(os.path.exists(bob_fname)) - self.failUnless(not os.path.exists(bob_fname + '.backup')) - self.failUnless(not os.path.exists(bob_fname + '.conflict')) - - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = self.bob_magicfolder.downloader.set_hook('processed') - - with start_action(action_type=u"alice:delete"): - yield self.alice_fileops.delete(alice_fname) - yield iterate_uploader(self.alice_magicfolder) - yield alice_proc - - with start_action(action_type=u"bob:redownload"): - yield iterate_downloader(self.bob_magicfolder) - yield bob_proc - - with start_action(action_type=u"bob:recheck"): - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1) - self.assertFalse(os.path.exists(bob_fname)) - self.assertTrue(os.path.exists(bob_fname + '.backup')) - self.assertFalse(os.path.exists(bob_fname + '.conflict')) - - with start_action(action_type=u"alice:recheck"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1) - - with start_action(action_type=u"alice:restore"): - os.unlink(bob_fname + '.backup') - alice_proc = self.alice_magicfolder.uploader.set_hook('processed') - bob_proc = 
self.bob_magicfolder.downloader.set_hook('processed') - yield self.alice_fileops.write(alice_fname, 'alice wuz here\n') - yield iterate(self.alice_magicfolder) # for windows - - with start_action(action_type=u"alice:reupload"): - yield iterate_uploader(self.alice_magicfolder) - yield iterate_downloader(self.alice_magicfolder) # why? - yield alice_proc - - with start_action(action_type=u"bob:final-redownload"): - yield iterate_downloader(self.bob_magicfolder) - yield iterate_uploader(self.bob_magicfolder) - yield bob_proc - - with start_action(action_type=u"bob:final-check"): - yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2) - yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2) - self.failUnless(os.path.exists(bob_fname)) - - with start_action(action_type=u"alice:final-check"): - yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2) - yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2) - - # XXX this should be shortened -- as in, any cases not covered by - # the other tests in here should get their own minimal test-case. - @skipIf(sys.platform == "win32", "Still inotify problems on Windows (FIXME)") - def test_alice_bob(self): - d = DeferredContext(defer.succeed(None)) - - # XXX FIXME just quickly porting this test via aliases -- the - # "real" solution is to break out any relevant test-cases as - # their own (smaller!) tests. - alice_clock = self.alice_magicfolder.uploader._clock - bob_clock = self.bob_magicfolder.uploader._clock - - def _wait_for_Alice(ign, downloaded_d): - if _debug: print("Now waiting for Alice to download\n") - alice_clock.advance(4) - return downloaded_d - - def _wait_for_Bob(ign, downloaded_d): - if _debug: print("Now waiting for Bob to download\n") - bob_clock.advance(4) - return downloaded_d - - def _wait_for(ign, something_to_do, alice=True): - if alice: - downloaded_d = self.bob_magicfolder.downloader.set_hook('processed') - uploaded_d = self.alice_magicfolder.uploader.set_hook('processed') - else: - downloaded_d = self.alice_magicfolder.downloader.set_hook('processed') - uploaded_d = self.bob_magicfolder.uploader.set_hook('processed') - - d = something_to_do() - - def advance(ign): - if alice: - if _debug: print("Waiting for Alice to upload 3\n") - alice_clock.advance(4) - uploaded_d.addCallback(_wait_for_Bob, downloaded_d) - else: - if _debug: print("Waiting for Bob to upload\n") - bob_clock.advance(4) - uploaded_d.addCallback(_wait_for_Alice, downloaded_d) - return uploaded_d - d.addCallback(advance) - return d - - @inline_callbacks - def Alice_to_write_a_file(): - if _debug: print("Alice writes a file\n\n\n\n\n") - self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u) - yield self.alice_fileops.write(self.file_path, "meow, meow meow. meow? meow meow! 
meow.") - yield iterate(self.alice_magicfolder) - d.addCallback(_wait_for, Alice_to_write_a_file) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.alice_magicfolder, u"file1", 0) - self._check_version_in_local_db(self.alice_magicfolder, u"file1", 0) - self._check_uploader_count('objects_failed', 0) - self._check_uploader_count('objects_succeeded', 1) - self._check_uploader_count('files_uploaded', 1) - self._check_uploader_count('objects_queued', 0) - self._check_uploader_count('directories_created', 0) - self._check_uploader_count('objects_conflicted', 0) - self._check_uploader_count('objects_conflicted', 0, magic=self.bob_magicfolder) - - self._check_version_in_local_db(self.bob_magicfolder, u"file1", 0) - self._check_downloader_count('objects_failed', 0) - self._check_downloader_count('objects_downloaded', 1) - self._check_uploader_count('objects_succeeded', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_downloaded', 1, magic=self.bob_magicfolder) - d.addCallback(check_state) - - @inline_callbacks - def Alice_to_delete_file(): - if _debug: print("Alice deletes the file!\n\n\n\n") - yield self.alice_fileops.delete(self.file_path) - yield iterate(self.alice_magicfolder) - yield iterate(self.bob_magicfolder) - d.addCallback(_wait_for, Alice_to_delete_file) - - @inline_callbacks - def notify_bob_moved(ign): - # WARNING: this is just directly notifying for the mock - # tests, because in the Real* tests the .backup file will - # me moved into place (from the original) - p = abspath_expanduser_unicode(u"file1", base=self.bob_magicfolder.uploader._local_path_u) - if self.bob_fileops._fake_inotify: - self.bob_magicfolder.uploader._notifier.event(to_filepath(p + u'.backup'), fake_inotify.IN_MOVED_TO) - yield iterate(self.bob_magicfolder) - d.addCallback(notify_bob_moved) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.alice_magicfolder, u"file1", 1) - self._check_version_in_local_db(self.alice_magicfolder, u"file1", 1) - self._check_uploader_count('objects_failed', 0) - self._check_uploader_count('objects_succeeded', 2) - self._check_uploader_count('objects_succeeded', 0, magic=self.bob_magicfolder) - - self._check_version_in_local_db(self.bob_magicfolder, u"file1", 1) - self._check_version_in_dmd(self.bob_magicfolder, u"file1", 1) - self._check_file_gone(self.bob_magicfolder, u"file1") - self._check_downloader_count('objects_failed', 0) - self._check_downloader_count('objects_downloaded', 2) - self._check_downloader_count('objects_downloaded', 2, magic=self.bob_magicfolder) - d.addCallback(check_state) - - @inline_callbacks - def Alice_to_rewrite_file(): - if _debug: print("Alice rewrites file\n") - self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u) - yield self.alice_fileops.write( - self.file_path, - "Alice suddenly sees the white rabbit running into the forest.", - ) - yield iterate(self.alice_magicfolder) - d.addCallback(_wait_for, Alice_to_rewrite_file) - d.addCallback(lambda ign: iterate(self.bob_magicfolder)) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.alice_magicfolder, u"file1", 2) - self._check_version_in_local_db(self.alice_magicfolder, u"file1", 2) - self._check_uploader_count('objects_failed', 0) - 
self._check_uploader_count('objects_succeeded', 3) - self._check_uploader_count('files_uploaded', 3) - self._check_uploader_count('objects_queued', 0) - self._check_uploader_count('directories_created', 0) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0) - - self._check_version_in_dmd(self.bob_magicfolder, u"file1", 2) - self._check_version_in_local_db(self.bob_magicfolder, u"file1", 2) - self._check_downloader_count('objects_failed', 0) - self._check_downloader_count('objects_downloaded', 3) - self._check_uploader_count('objects_succeeded', 0, magic=self.bob_magicfolder) - d.addCallback(check_state) - - path_u = u"/tmp/magic_folder_test" - encoded_path_u = magicpath.path2magic(u"/tmp/magic_folder_test") - - def Alice_tries_to_p0wn_Bob(ign): - if _debug: print("Alice tries to p0wn Bob\n") - iter_d = iterate(self.bob_magicfolder) - processed_d = self.bob_magicfolder.downloader.set_hook('processed') - - # upload a file that would provoke the security bug from #2506 - uploadable = Data("", self.alice_magicfolder._client.convergence) - alice_dmd = self.alice_magicfolder.uploader._upload_dirnode - - d2 = alice_dmd.add_file(encoded_path_u, uploadable, metadata={"version": 0}, overwrite=True) - d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u))) - d2.addCallback(lambda ign: iter_d) - d2.addCallback(_wait_for_Bob, processed_d) - return d2 - d.addCallback(Alice_tries_to_p0wn_Bob) - - @log_call(action_type=u"check_state", include_args=[], include_result=False) - def check_state(ignored): - self.failIf(os.path.exists(path_u)) - self._check_version_in_local_db(self.bob_magicfolder, encoded_path_u, None) - self._check_downloader_count('objects_downloaded', 3) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0) - d.addCallback(check_state) - - @inline_callbacks - def Bob_to_rewrite_file(): - if _debug: print("Bob rewrites file\n") - self.file_path = abspath_expanduser_unicode(u"file1", base=self.bob_magicfolder.uploader._local_path_u) - if _debug: print("---- bob's file is %r" % (self.file_path,)) - yield self.bob_fileops.write(self.file_path, "No white rabbit to be found.") - yield iterate(self.bob_magicfolder) - d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file, alice=False)) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.bob_magicfolder, u"file1", 3) - self._check_version_in_local_db(self.bob_magicfolder, u"file1", 3) - self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 1, magic=self.bob_magicfolder) - self._check_uploader_count('files_uploaded', 1, magic=self.bob_magicfolder) - self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder) - self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.bob_magicfolder) - - self._check_version_in_dmd(self.alice_magicfolder, u"file1", 3) - self._check_version_in_local_db(self.alice_magicfolder, u"file1", 3) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_downloaded', 1, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - 
d.addCallback(check_state) - - def Alice_conflicts_with_Bobs_last_downloaded_uri(): - if _debug: print("Alice conflicts with Bob\n") - downloaded_d = self.bob_magicfolder.downloader.set_hook('processed') - uploadable = Data("do not follow the white rabbit", self.alice_magicfolder._client.convergence) - alice_dmd = self.alice_magicfolder.uploader._upload_dirnode - d2 = alice_dmd.add_file(u"file1", uploadable, - metadata={"version": 5, - "last_downloaded_uri" : "URI:LIT:" }, - overwrite=True) - if _debug: print("Waiting for Alice to upload\n") - d2.addCallback(lambda ign: bob_clock.advance(6)) - d2.addCallback(lambda ign: downloaded_d) - d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u))) - return d2 - d.addCallback(lambda ign: Alice_conflicts_with_Bobs_last_downloaded_uri()) - - @log_call(action_type=u"check_state", include_args=[], include_result=False) - def check_state(ignored): - self._check_downloader_count('objects_downloaded', 4, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 1, magic=self.bob_magicfolder) - self._check_downloader_count('objects_downloaded', 1, magic=self.alice_magicfolder) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_uploader_count('files_uploaded', 1, magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 1, magic=self.bob_magicfolder) - d.addCallback(check_state) - - # prepare to perform another conflict test - @log_call_deferred(action_type=u"alice:to-write:file2") - @inline_callbacks - def Alice_to_write_file2(): - if _debug: print("Alice writes a file2\n") - self.file_path = abspath_expanduser_unicode(u"file2", base=self.alice_magicfolder.uploader._local_path_u) - d = self.alice_fileops.write(self.file_path, "something") - self.bob_clock.advance(4) - yield d - d.addCallback(_wait_for, Alice_to_write_file2) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.alice_magicfolder, u"file2", 0) - self._check_version_in_local_db(self.alice_magicfolder, u"file2", 0) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_uploader_count('files_uploaded', 1, magic=self.bob_magicfolder) - d.addCallback(check_state) - - def advance(ign): - alice_clock.advance(4) - bob_clock.advance(4) - # we need to pause here, or make "is_new_file()" more - # robust, because this is now fast enough that the mtime - # of the allegedly-new file matches, so Bob decides not to - # upload (and the test hangs). Not sure why it worked - # before; must have been *just* slow enough? - # XXX FIXME for the new real-test had to jack this to 0.5; - # related to the 0.1 notify pause?? - return task.deferLater(reactor, 0.5, lambda: None) - d.addCallback(advance) - d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file2", 0)) - - @inline_callbacks - def Bob_to_rewrite_file2(): - if _debug: print("Bob rewrites file2\n") - self.file_path = abspath_expanduser_unicode(u"file2", base=self.bob_magicfolder.uploader._local_path_u) - if _debug: print("---- bob's file is %r" % (self.file_path,)) - yield iterate(self.bob_magicfolder) - yield self.bob_fileops.write(self.file_path, "roger roger. 
what vector?") - if _debug: print("---- bob rewrote file2") - yield iterate(self.bob_magicfolder) - if _debug: print("---- iterated bob's magicfolder") - d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file2, alice=False)) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.bob_magicfolder, u"file2", 1) - self._check_downloader_count('objects_downloaded', 5, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 1, magic=self.bob_magicfolder) - self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder) - self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder) - d.addCallback(check_state) - - # XXX here we advance the clock and then test again to make sure no values are monotonically increasing - # with each queue turn ;-p - alice_clock.advance(6) - bob_clock.advance(6) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.bob_magicfolder, u"file2", 1) - self._check_downloader_count('objects_downloaded', 5) - self._check_downloader_count('objects_conflicted', 1) - self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder) - self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder) - d.addCallback(check_state) - - def Alice_conflicts_with_Bobs_last_uploaded_uri(): - if _debug: print("Alice conflicts with Bob\n") - encoded_path_u = magicpath.path2magic(u"file2") - downloaded_d = self.bob_magicfolder.downloader.set_hook('processed') - uploadable = Data("rabbits with sharp fangs", self.alice_magicfolder._client.convergence) - alice_dmd = self.alice_magicfolder.uploader._upload_dirnode - d2 = alice_dmd.add_file(u"file2", uploadable, - metadata={"version": 5, - "last_uploaded_uri" : "URI:LIT:" }, - overwrite=True) - if _debug: print("Waiting for Alice to upload\n") - d2.addCallback(lambda ign: bob_clock.advance(6)) - d2.addCallback(lambda ign: downloaded_d) - d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u))) - return d2 - d.addCallback(lambda ign: Alice_conflicts_with_Bobs_last_uploaded_uri()) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.bob_magicfolder, u"file2", 5) - self._check_downloader_count('objects_downloaded', 6) - self._check_downloader_count('objects_conflicted', 1) - self._check_uploader_count('objects_failed', 0, 
magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder) - self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder) - d.addCallback(check_state) - - def foo(ign): - alice_clock.advance(6) - bob_clock.advance(6) - alice_clock.advance(6) - bob_clock.advance(6) - d.addCallback(foo) - - @log_call(action_type=u"check_state", include_args=[], include_result=False) - def check_state(ignored): - self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 1) - self._check_downloader_count('objects_downloaded', 6) - d.addCallback(check_state) - - # prepare to perform another conflict test - @inline_callbacks - def Alice_to_write_file3(): - if _debug: print("Alice writes a file\n") - self.file_path = abspath_expanduser_unicode(u"file3", base=self.alice_magicfolder.uploader._local_path_u) - yield self.alice_fileops.write(self.file_path, "something") - yield iterate(self.alice_magicfolder) - # Make sure Bob gets the file before we do anything else. - yield iterate(self.bob_magicfolder) - d.addCallback(_wait_for, Alice_to_write_file3) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.alice_magicfolder, u"file3", 0) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_downloaded', 7) - self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder) - self._check_downloader_count('objects_conflicted', 1) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - d.addCallback(check_state) - - @inline_callbacks - def Bob_to_rewrite_file3(): - if _debug: print("Bob rewrites file3\n") - self.file_path = abspath_expanduser_unicode(u"file3", base=self.bob_magicfolder.uploader._local_path_u) - if _debug: print("---- bob's file is %r" % (self.file_path,)) - yield iterate(self.bob_magicfolder) - yield self.bob_fileops.write(self.file_path, "roger roger") - yield iterate(self.bob_magicfolder) - d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file3, alice=False)) - - @log_call_deferred(action_type=u"check_state") - @inline_callbacks - def check_state(ignored): - yield self._check_version_in_dmd(self.bob_magicfolder, u"file3", 1) - self._check_downloader_count('objects_downloaded', 7) - self._check_downloader_count('objects_conflicted', 1) - self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder) - self._check_uploader_count('objects_succeeded', 3, magic=self.bob_magicfolder) - self._check_uploader_count('files_uploaded', 3, magic=self.bob_magicfolder) - self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder) - self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder) - self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder) - self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder) - 
self._check_downloader_count('objects_downloaded', 3, magic=self.alice_magicfolder) - d.addCallback(check_state) - - return d.addActionFinish() - - -class SingleMagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqualMixin, CheckerMixin): - """ - These tests will be run both with a mock notifier, and (on platforms that support it) - with the real INotify. - """ - - def setUp(self): - self.assertIs(None, super(SingleMagicFolderTestMixin, self).setUp()) - temp = self.mktemp() - self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding())) - self.magicfolder = None - self.set_up_grid(oneshare=True) - self.local_dir = os.path.join(self.basedir, u"local_dir") - self.mkdir_nonascii(self.local_dir) - - # Magic-folder implementation somehow manages to leave a DelayedCall - # in the reactor from the eventual queue by the end of the test. It - # may have something to do with the upload process but it's not - # entirely clear. It's difficult to track things through the eventual - # queue. It is almost certainly the case that some other Deferred - # involved in magic-folder that is already being waited on elsewhere - # *should* encompass this DelayedCall but I wasn't able to figure out - # where that association needs to be made. So, as a work-around, - # explicitly flush the eventual queue at the end of the test, too. - from foolscap.eventual import flushEventualQueue - self.addCleanup(flushEventualQueue) - - # Sometimes a collective scan fails with UnrecoverableFileError. It's - # not clear to me why. :/ This fixes the issue, though, and all other - # asserted-about behavior is provided whether this case is hit or not. - self.addCleanup( - lambda: self.eliot_logger.flushTracebacks(UnrecoverableFileError) - ) - - d = DeferredContext(self.create_invite_join_magic_folder(self.alice_nickname, self.local_dir)) - d.addCallback(self._restart_client) - # note: _restart_client ultimately sets self.magicfolder to not-None - return d.result - - def tearDown(self): - d = DeferredContext(super(SingleMagicFolderTestMixin, self).tearDown()) - d.addCallback(self.cleanup) - return d.result - - def _createdb(self): - dbfile = abspath_expanduser_unicode(u"magicfolder_default.sqlite", base=self.basedir) - mdb = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1)) - self.failUnless(mdb, "unable to create magicfolderdb from %r" % (dbfile,)) - self.failUnlessEqual(mdb.VERSION, 1) - return mdb - - @log_call_deferred(action_type=u"restart-client") - def _restart_client(self, ign): - #print "_restart_client" - d = DeferredContext(self.restart_client()) - d.addCallback(self._wait_until_started) - return d.result - - @log_call_deferred(action_type=u"wait-until-started") - def _wait_until_started(self, ign): - #print "_wait_until_started" - self.magicfolder = self.get_client().getServiceNamed('magic-folder-default') - self.fileops = FileOperationsHelper(self.magicfolder.uploader, self.inject_inotify) - self.up_clock = task.Clock() - self.down_clock = task.Clock() - self.magicfolder.uploader._clock = self.up_clock - self.magicfolder.downloader._clock = self.down_clock - - # XXX should probably be passing the reactor to instances when - # they're created, but that's a ton of re-factoring, so we - # side-step that issue by hacking it in here. However, we - # *have* to "hack it in" before we call ready() so that the - # first iteration of the loop doesn't call the "real" - # reactor's callLater. 
:( - return self.magicfolder.ready() - - def test_db_basic(self): - fileutil.make_dirs(self.basedir) - self._createdb() - - @inline_callbacks - def test_scan_once_on_startup(self): - # What is this test? Maybe it is just a stub and needs finishing. - self.magicfolder.uploader._clock.advance(99) - - yield self._check_uploader_count('files_uploaded', 0, magic=self.magicfolder) - yield self._check_uploader_count('objects_queued', 0, magic=self.magicfolder) - yield self._check_downloader_count('objects_conflicted', 0, magic=self.magicfolder) - yield self._check_uploader_count('objects_succeeded', 0, magic=self.magicfolder) - yield self._check_downloader_count('objects_failed', 0, magic=self.magicfolder) - yield self._check_downloader_count('objects_downloaded', 0, magic=self.magicfolder) - - def test_db_persistence(self): - """Test that a file upload creates an entry in the database.""" - - fileutil.make_dirs(self.basedir) - db = self._createdb() - - relpath1 = u"myFile1" - pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False, - exists=True, size=1, mtime_ns=123, ctime_ns=456) - db.did_upload_version(relpath1, 0, 'URI:LIT:1', 'URI:LIT:0', 0, pathinfo) - - c = db.cursor - c.execute("SELECT size, mtime_ns, ctime_ns" - " FROM local_files" - " WHERE path=?", - (relpath1,)) - row = c.fetchone() - self.failUnlessEqual(row, (pathinfo.size, pathinfo.mtime_ns, pathinfo.ctime_ns)) - - # Second test uses magic_folder.is_new_file instead of SQL query directly - # to confirm the previous upload entry in the db. - relpath2 = u"myFile2" - path2 = os.path.join(self.basedir, relpath2) - fileutil.write(path2, "meow\n") - pathinfo = fileutil.get_pathinfo(path2) - db.did_upload_version(relpath2, 0, 'URI:LIT:2', 'URI:LIT:1', 0, pathinfo) - db_entry = db.get_db_entry(relpath2) - self.assertFalse(magic_folder.is_new_file(pathinfo, db_entry)) - - different_pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False, - exists=True, size=0, mtime_ns=pathinfo.mtime_ns, - ctime_ns=pathinfo.ctime_ns) - self.assertTrue(magic_folder.is_new_file(different_pathinfo, db_entry)) - - def _test_magicfolder_start_service(self): - # what is this even testing? - d = defer.succeed(None) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0)) - - d.addCallback(lambda ign: self.create_invite_join_magic_folder(self.alice_nickname, self.local_dir)) - d.addCallback(self._restart_client) - - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 1)) - d.addBoth(self.cleanup) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0)) - return d - - @skipIf(sys.platform == "linux2", "fails on certain linux flavors: see ticket #2834") - def test_move_tree(self): - """ - create an empty directory tree and 'mv' it into the magic folder, - noting the new directory and uploading it. - - also creates a directory tree with one file in it and 'mv's it - into the magic folder, so we upload the file and record the - directory. 
(XXX split to separate test)
- """
- empty_tree_name = self.unicode_or_fallback(u"empty_tr\u00EAe", u"empty_tree")
- empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.basedir)
- new_empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.local_dir)
-
- small_tree_name = self.unicode_or_fallback(u"small_tr\u00EAe", u"small_tree")
- small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.basedir)
- new_small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.local_dir)
-
- d = DeferredContext(defer.succeed(None))
-
- @inline_callbacks
- def _check_move_empty_tree(res):
- self.mkdir_nonascii(empty_tree_dir)
- yield self.fileops.move(empty_tree_dir, new_empty_tree_dir)
- yield iterate(self.magicfolder)
-
- d.addCallback(_check_move_empty_tree)
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 1))
-
- @inline_callbacks
- def _check_move_small_tree(res):
- self.mkdir_nonascii(small_tree_dir)
- what_path = abspath_expanduser_unicode(u"what", base=small_tree_dir)
- fileutil.write(what_path, "say when")
- yield self.fileops.move(small_tree_dir, new_small_tree_dir)
- upstatus = list(self.magicfolder.uploader.get_status())
- downstatus = list(self.magicfolder.downloader.get_status())
-
- self.assertEqual(2, len(upstatus))
- self.assertEqual(0, len(downstatus))
- yield iterate(self.magicfolder)
-
- # when we add the dir, we queue a scan of it; so we want
- # the upload to "go" as well requiring 1 more iteration
- yield iterate(self.magicfolder)
-
- d.addCallback(_check_move_small_tree)
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 1))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 3))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
-
- @inline_callbacks
- def _check_moved_tree_is_watched(res):
- another_path = abspath_expanduser_unicode(u"another", base=new_small_tree_dir)
- yield self.fileops.write(another_path, "file")
- yield iterate(self.magicfolder)
- yield iterate(self.magicfolder) # windows; why?
-
- d.addCallback(_check_moved_tree_is_watched)
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
-
- return d.result
-
- def test_persistence(self):
- """
- Perform an upload of a given file and then stop the client.
- Start a new client and magic-folder service... and verify that the file is NOT uploaded - a second time. This test is meant to test the database persistence along with - the startup and shutdown code paths of the magic-folder service. - """ - self.collective_dircap = "" # XXX hmmm? - - d = DeferredContext(defer.succeed(None)) - - @inline_callbacks - def create_test_file(filename): - test_file = abspath_expanduser_unicode(filename, base=self.local_dir) - yield self.fileops.write(test_file, "meow %s" % filename) - yield iterate(self.magicfolder) - yield iterate(self.magicfolder) # windows; why? - - d.addCallback(lambda ign: create_test_file(u"what1")) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0)) - d.addCallback(self.cleanup) - - d.addCallback(self._restart_client) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 0)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0)) - d.addCallback(lambda ign: create_test_file(u"what2")) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1)) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0)) - return d.result - - # all this "self.*" state via 9000 mix-ins is really really - # hard to read, keep track of, etc. Very hard to understand - # what each test uses for setup, etc. :( - - @inline_callbacks - def test_delete(self): - # setup: create a file 'foo' - path = os.path.join(self.local_dir, u'foo') - yield self.fileops.write(path, 'foo\n') - yield iterate_uploader(self.magicfolder) - yield iterate_uploader(self.magicfolder) # req'd for windows; not sure why? 
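# test_persistence above hinges on the same database check that
# test_db_persistence exercises directly: after a restart, a file is only
# re-uploaded if its current PathInfo no longer matches the stored entry. A
# self-contained sketch of that decision (is_new_file_like is illustrative,
# not the real magic_folder.is_new_file):

from collections import namedtuple

PathInfo = namedtuple("PathInfo", "size mtime_ns ctime_ns")
DbEntry = namedtuple("DbEntry", "size mtime_ns ctime_ns")

def is_new_file_like(pathinfo, db_entry):
    # No record, or any drift in size or timestamps, means upload again.
    if db_entry is None:
        return True
    return ((pathinfo.size, pathinfo.mtime_ns, pathinfo.ctime_ns) !=
            (db_entry.size, db_entry.mtime_ns, db_entry.ctime_ns))

stored = DbEntry(size=5, mtime_ns=123, ctime_ns=456)
assert not is_new_file_like(PathInfo(5, 123, 456), stored)  # unchanged: skip
assert is_new_file_like(PathInfo(0, 123, 456), stored)      # size changed: upload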
- self.assertTrue(os.path.exists(path)) - node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo') - self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,)) - - # the test: delete the file (and do fake notifies) - yield self.fileops.delete(path) - - yield iterate_uploader(self.magicfolder) - self.assertFalse(os.path.exists(path)) - - yield iterate_downloader(self.magicfolder) - # ensure we still have a DB entry, and that the version is 1 - node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo') - self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,)) - self.failUnlessEqual(metadata['version'], 1) - - @inline_callbacks - def test_batched_process(self): - """ - status APIs correctly function when there are 2 items queued at - once for processing - """ - # setup: get at least two items into the deque - path0 = os.path.join(self.local_dir, u'foo') - yield self.fileops.write(path0, 'foo\n') - path1 = os.path.join(self.local_dir, u'bar') - yield self.fileops.write(path1, 'bar\n') - - # get the status before we've processed anything - upstatus0 = list(self.magicfolder.uploader.get_status()) - upstatus1 = [] - - def one_item(item): - # grab status after we've processed a single item - us = list(self.magicfolder.uploader.get_status()) - upstatus1.extend(us) - one_d = self.magicfolder.uploader.set_hook('item_processed') - # can't 'yield' here because the hook isn't called until - # inside iterate() - one_d.addCallbacks(one_item, self.fail) - - yield iterate_uploader(self.magicfolder) - yield iterate_uploader(self.magicfolder) # req'd for windows; not sure why? - - # no matter which part of the queue the items are in, we - # should see the same status from the outside - self.assertEqual(upstatus0, upstatus1) - - @inline_callbacks - def test_real_notify_failure(self): - """ - Simulate an exception from the _real_notify helper in - magic-folder's uploader, confirming error-handling works. - """ - - orig_notify = self.magicfolder.uploader._real_notify - - class BadStuff(Exception): - pass - - def bad_stuff(*args, **kw): - # call original method .. - orig_notify(*args, **kw) - # ..but then cause a special problem - raise BadStuff("the bad stuff") - - patch_notify = mock.patch.object( - self.magicfolder.uploader, - '_real_notify', - mock.Mock(side_effect=bad_stuff), - ) - with patch_notify: - path0 = os.path.join(self.local_dir, u'foo') - yield self.fileops.write(path0, 'foo\n') - # this actually triggers two notifies - - # do a reactor turn; this is necessary because our "bad_stuff" - # method calls the hook (so the above 'yield' resumes) right - # *before* it raises the exception; thus, we ensure all the - # pending callbacks including the exception are processed - # before we flush the errors. - yield task.deferLater(reactor, 0, lambda: None) - - errors = self.eliot_logger.flushTracebacks(BadStuff) - # it seems on Windows the "RealTest" variant only produces 1 - # notification for some reason.. - self.assertTrue(len(errors) >= 1) - - @inline_callbacks - def test_delete_and_restore(self): - # setup: create a file - path = os.path.join(self.local_dir, u'foo') - yield self.fileops.write(path, 'foo\n') - yield iterate_uploader(self.magicfolder) - yield iterate_uploader(self.magicfolder) # req'd for windows; why? 
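# test_real_notify_failure above uses a wrap-then-raise pattern worth naming:
# patch a method with a mock whose side effect first calls the real
# implementation and then raises, so the production code path still runs and
# the error handling around it is exercised too. A self-contained sketch of
# the same pattern (Notifier and Boom are illustrative stand-ins):

import mock

class Notifier(object):
    def __init__(self):
        self.events = []
    def notify(self, event):
        self.events.append(event)

class Boom(Exception):
    pass

notifier = Notifier()
orig = notifier.notify   # capture the real bound method before patching

def bad_stuff(event):
    orig(event)                  # real behavior still happens...
    raise Boom("the bad stuff")  # ...then the injected failure fires

with mock.patch.object(notifier, "notify", mock.Mock(side_effect=bad_stuff)):
    try:
        notifier.notify("created")
    except Boom:
        pass
assert notifier.events == ["created"]   # the real call ran before the raise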
- self.assertTrue(os.path.exists(path)) - - # ...and delete the file - yield self.fileops.delete(path) - yield iterate_uploader(self.magicfolder) - self.assertFalse(os.path.exists(path)) - - # ensure we still have a DB entry, and that the version is 1 - node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo') - self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,)) - self.failUnlessEqual(metadata['version'], 1) - - # restore the file, with different contents - path = os.path.join(self.local_dir, u'foo') - yield self.fileops.write(path, 'bar\n') - yield iterate_uploader(self.magicfolder) - - # ensure we still have a DB entry, and that the version is 2 - node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo') - self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,)) - self.failUnlessEqual(metadata['version'], 2) - - def test_write_short_file(self): - # Write something short enough for a LIT file. - return self._check_file(u"short", "test") - - def test_magic_folder(self): - d = DeferredContext(defer.succeed(None)) - # Write something short enough for a LIT file. - d.addCallback(lambda ign: self._check_file(u"short", "test")) - - # Write to the same file again with different data. - d.addCallback(lambda ign: self._check_file(u"short", "different")) - - # Test that temporary files are not uploaded. - d.addCallback(lambda ign: self._check_file(u"tempfile", "test", temporary=True)) - - # Test creation of a subdirectory. - d.addCallback(lambda ign: self._check_mkdir(u"directory")) - - # Write something longer, and also try to test a Unicode name if the fs can represent it. - name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long") - d.addCallback(lambda ign: self._check_file(name_u, "test"*100)) - - # TODO: test that causes an upload failure. - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0)) - - return d.result - - @inline_callbacks - def _create_directory_with_file(self, relpath_u, content): - path_f = os.path.join(self.local_dir, relpath_u) - path_d = os.path.dirname(path_f) - # Create a new directory in the monitored directory. - yield self.fileops.mkdir(path_d) - # Give the system a chance to notice and process it. - yield iterate(self.magicfolder) - # Create a new file in that new directory. - yield self.fileops.write(path_f, content) - # Another opportunity to process. - yield iterate(self.magicfolder) - - @inline_callbacks - def test_create_file_in_sub_directory(self): - reldir_u = u'subdir' - # The OS and the DMD may have conflicting conventions for directory - # the separator. Construct a value for each. - dmd_relpath_u = u'/'.join((reldir_u, u'some-file')) - platform_relpath_u = join(reldir_u, u'some-file') - content = u'some great content' - yield self._create_directory_with_file( - platform_relpath_u, - content, - ) - # The new directory and file should have been noticed and uploaded. 
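# The DMD is a flat Tahoe directory, so nested relative paths must be
# flattened into single child names; that is what the magicpath.path2magic()
# calls below do, with directories distinguished by a trailing "/" before
# encoding. A toy encoder to make the idea concrete (the escape scheme here
# is illustrative -- see magicpath.py for the real mapping):

def toy_path2magic(relpath_u):
    # Escape the escape character first, then the separator, so the mapping
    # stays reversible: u"a/b" and a literal u"a@_b" encode differently.
    return relpath_u.replace(u"@", u"@@").replace(u"/", u"@_")

assert toy_path2magic(u"subdir/some-file") == u"subdir@_some-file"
assert toy_path2magic(u"subdir/") == u"subdir@_"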
- downloader = self.magicfolder.downloader - encoded_dir_u = magicpath.path2magic(reldir_u + u"/") - encoded_path_u = magicpath.path2magic(dmd_relpath_u) - - with start_action(action_type=u"retrieve-metadata"): - dir_node, dir_meta = yield downloader._get_collective_latest_file( - encoded_dir_u, - ) - path_node, path_meta = yield downloader._get_collective_latest_file( - encoded_path_u, - ) - - self.expectThat(dir_node, Not(Is(None)), "dir node") - self.expectThat(dir_meta, ContainsDict({'version': Equals(0)}), "dir meta") - self.expectThat(path_node, Not(Is(None)), "path node") - self.expectThat(path_meta, ContainsDict({'version': Equals(0)}), "path meta") - - @inline_callbacks - def test_delete_file_in_sub_directory(self): - dmd_relpath_u = u'/'.join((u'subdir', u'some-file')) - platform_relpath_u = join(u'subdir', u'some-file') - content = u'some great content' - yield self._create_directory_with_file( - platform_relpath_u, - content, - ) - # Delete the file in the sub-directory. - yield self.fileops.delete(os.path.join(self.local_dir, platform_relpath_u)) - # Let the deletion be processed. - yield iterate(self.magicfolder) - # Verify the deletion was uploaded. - encoded_path_u = magicpath.path2magic(dmd_relpath_u) - downloader = self.magicfolder.downloader - node, metadata = yield downloader._get_collective_latest_file(encoded_path_u) - self.assertThat(node, Not(Is(None))) - self.assertThat(metadata['version'], Equals(1)) - self.assertThat(metadata['deleted'], Equals(True)) - - def test_delete_sub_directory_containing_file(self): - reldir_u = u'subdir' - relpath_u = os.path.join(reldir_u, u'some-file') - content = u'some great content' - yield self._create_directory_with_file( - relpath_u, - content, - ) - # Delete the sub-directory and the file in it. Don't wait in between - # because the case where all events are delivered before any - # processing happens is interesting. And don't use the fileops API to - # delete the contained file so that we don't necessarily generate a - # notification for that path at all. We require that the - # implementation behave correctly when receiving only the notification - # for the containing directory. - os.unlink(os.path.join(self.local_dir, relpath_u)) - yield self.fileops.delete(os.path.join(self.local_dir, reldir_u)) - - # Now allow processing. - yield iterate(self.magicfolder) - # Give it some extra time because of recursive directory processing. - yield iterate(self.magicfolder) - - # Deletion of both entities should have been uploaded. 
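# The checks below use testtools matchers instead of bare asserts: expectThat
# records a mismatch as a failure but lets the test continue (so both the
# directory and the file checks always run), and ContainsDict matches only
# the listed keys, ignoring the rest of the metadata. A minimal standalone
# illustration of the matcher behavior:

from testtools.matchers import ContainsDict, Equals

metadata = {"version": 1, "deleted": True, "last_uploaded_uri": "URI:..."}
matcher = ContainsDict({"version": Equals(1), "deleted": Equals(True)})
# match() returns None when satisfied, a Mismatch object otherwise
assert matcher.match(metadata) is None
assert matcher.match({"version": 2}) is not None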
- downloader = self.magicfolder.downloader - encoded_dir_u = magicpath.path2magic(reldir_u + u"/") - encoded_path_u = magicpath.path2magic(relpath_u) - - dir_node, dir_meta = yield downloader._get_collective_latest_file(encoded_dir_u) - path_node, path_meta = yield downloader._get_collective_latest_file(encoded_path_u) - - self.expectThat(dir_node, Not(Is(None)), "dir node") - self.expectThat(dir_meta, ContainsDict({ - "version": Equals(1), - "deleted": Equals(True), - }), "dir meta") - - self.expectThat(path_node, Not(Is(None)), "path node") - self.expectThat(path_meta, ContainsDict({ - "version": Equals(1), - "deleted": Equals(True), - }), "path meta") - - -@skipIf(support_missing, support_message) -class MockTestAliceBob(MagicFolderAliceBobTestMixin, AsyncTestCase): - inject_inotify = True - - def setUp(self): - self.inotify = fake_inotify - self.patch(magic_folder, 'get_inotify_module', lambda: self.inotify) - return super(MockTestAliceBob, self).setUp() - - -@skipIf(support_missing, support_message) -class MockTest(SingleMagicFolderTestMixin, AsyncTestCase): - """This can run on any platform, and even if twisted.internet.inotify can't be imported.""" - inject_inotify = True - - def setUp(self): - self.inotify = fake_inotify - self.patch(magic_folder, 'get_inotify_module', lambda: self.inotify) - return super(MockTest, self).setUp() - - def test_errors(self): - self.set_up_grid(oneshare=True) - - errors_dir = abspath_expanduser_unicode(u"errors_dir", base=self.basedir) - os.mkdir(errors_dir) - not_a_dir = abspath_expanduser_unicode(u"NOT_A_DIR", base=self.basedir) - fileutil.write(not_a_dir, "") - magicfolderdb = abspath_expanduser_unicode(u"magicfolderdb", base=self.basedir) - doesnotexist = abspath_expanduser_unicode(u"doesnotexist", base=self.basedir) - - client = self.g.clients[0] - d = DeferredContext(client.create_dirnode()) - def _check_errors(n): - self.failUnless(IDirectoryNode.providedBy(n)) - upload_dircap = n.get_uri() - readonly_dircap = n.get_readonly_uri() - - self.shouldFail(ValueError, 'does not exist', 'does not exist', - MagicFolder, client, upload_dircap, '', doesnotexist, magicfolderdb, 0o077, 'default') - self.shouldFail(ValueError, 'is not a directory', 'is not a directory', - MagicFolder, client, upload_dircap, '', not_a_dir, magicfolderdb, 0o077, 'default') - self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory', - MagicFolder, client, 'bad', '', errors_dir, magicfolderdb, 0o077, 'default') - self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory', - MagicFolder, client, 'URI:LIT:foo', '', errors_dir, magicfolderdb, 0o077, 'default') - self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory', - MagicFolder, client, readonly_dircap, '', errors_dir, magicfolderdb, 0o077, 'default') - self.shouldFail(AssertionError, 'collective dircap', 'is not a readonly cap to a directory', - MagicFolder, client, upload_dircap, upload_dircap, errors_dir, magicfolderdb, 0o077, 'default') - - def _not_implemented(): - raise NotImplementedError("blah") - self.patch(magic_folder, 'get_inotify_module', _not_implemented) - self.shouldFail(NotImplementedError, 'unsupported', 'blah', - MagicFolder, client, upload_dircap, '', errors_dir, magicfolderdb, 0o077, 'default') - d.addCallback(_check_errors) - return d.result - - def test_write_downloaded_file(self): - workdir = fileutil.abspath_expanduser_unicode(u"cli/MagicFolder/write-downloaded-file") - local_file = 
fileutil.abspath_expanduser_unicode(u"foobar", base=workdir) - - class TestWriteFileMixin(WriteFileMixin): - def _log(self, msg): - pass - - writefile = TestWriteFileMixin() - writefile._umask = 0o077 - - # create a file with name "foobar" with content "foo" - # write downloaded file content "bar" into "foobar" with is_conflict = False - fileutil.make_dirs(workdir) - fileutil.write(local_file, "foo") - - # if is_conflict is False, then the .conflict file shouldn't exist. - now = time.time() - writefile._write_downloaded_file(workdir, local_file, "bar", False, now=now) - conflicted_path = local_file + u".conflict" - self.failIf(os.path.exists(conflicted_path)) - - # no backup - backup_path = local_file + u".backup" - self.failIf(os.path.exists(backup_path)) - - # .tmp file shouldn't exist - self.failIf(os.path.exists(local_file + u".tmp")) - - # The original file should have the new content - self.failUnlessEqual(fileutil.read(local_file), "bar") - - # .. and approximately the correct timestamp. - pathinfo = fileutil.get_pathinfo(local_file) - error_ns = pathinfo.mtime_ns - fileutil.seconds_to_ns(now - WriteFileMixin.FUDGE_SECONDS) - permitted_error_ns = fileutil.seconds_to_ns(WriteFileMixin.FUDGE_SECONDS)/4 - self.failUnless(abs(error_ns) < permitted_error_ns, (error_ns, permitted_error_ns)) - - # now a test for conflicted case - writefile._write_downloaded_file(workdir, local_file, "bar", True, None) - self.failUnless(os.path.exists(conflicted_path)) - - # .tmp file shouldn't exist - self.failIf(os.path.exists(local_file + u".tmp")) - - def test_periodic_full_scan(self): - """ - Create a file in a subdir without doing a notify on it and - fast-forward time to prove we do a full scan periodically. - """ - sub_dir = abspath_expanduser_unicode(u"subdir", base=self.local_dir) - self.mkdir_nonascii(sub_dir) - - d = DeferredContext(defer.succeed(None)) - - def _create_file_without_event(res): - processed_d = self.magicfolder.uploader.set_hook('processed') - what_path = abspath_expanduser_unicode(u"what", base=sub_dir) - fileutil.write(what_path, "say when") - self.magicfolder.uploader._clock.advance(self.magicfolder.uploader._periodic_full_scan_duration + 1) - # this will have now done the full scan, so we have to do - # an iteration to process anything from it - iterate_d = iterate_uploader(self.magicfolder) - return processed_d.addCallback(lambda ignored: iterate_d) - d.addCallback(_create_file_without_event) - def _advance_clock(res): - processed_d = self.magicfolder.uploader.set_hook('processed') - self.magicfolder.uploader._clock.advance(4) - return processed_d - d.addCallback(_advance_clock) - d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 1)) - return d.result - - def test_statistics(self): - d = DeferredContext(defer.succeed(None)) - # Write something short enough for a LIT file. - d.addCallback(lambda ign: self._check_file(u"short", "test")) - - # test magic-folder statistics - d.addCallback(lambda res: self.GET("statistics")) - def _got_stats(res): - self.assertIn("Operational Statistics", res) - self.assertIn("Magic Folder", res) - self.assertIn("
<li>Local Directories Monitored: 1 directories</li>", res)
-            self.assertIn("<li>Files Uploaded: 1 files</li>", res)
-            self.assertIn("<li>Files Queued for Upload: 0 files</li>", res)
-            self.assertIn("<li>Failed Uploads: 0 files</li>", res)
-            self.assertIn("<li>Files Downloaded: 0 files</li>", res)
-            self.assertIn("<li>Files Queued for Download: 0 files</li>", res)
-            self.assertIn("<li>Failed Downloads: 0 files</li>
  • ", res) - d.addCallback(_got_stats) - d.addCallback(lambda res: self.GET("statistics?t=json")) - def _got_stats_json(res): - data = json.loads(res) - self.assertEqual(data["counters"]["magic_folder.uploader.dirs_monitored"], 1) - self.assertEqual(data["counters"]["magic_folder.uploader.objects_succeeded"], 1) - self.assertEqual(data["counters"]["magic_folder.uploader.files_uploaded"], 1) - self.assertEqual(data["counters"]["magic_folder.uploader.objects_queued"], 0) - d.addCallback(_got_stats_json) - return d.result - - -@skipIf(support_missing, support_message) -class RealTest(SingleMagicFolderTestMixin, AsyncTestCase): - """This is skipped unless both Twisted and the platform support inotify.""" - inject_inotify = False - - def setUp(self): - d = super(RealTest, self).setUp() - self.inotify = magic_folder.get_inotify_module() - return d - - -@skipIf(support_missing, support_message) -class RealTestAliceBob(MagicFolderAliceBobTestMixin, AsyncTestCase): - """This is skipped unless both Twisted and the platform support inotify.""" - inject_inotify = False - - def setUp(self): - d = super(RealTestAliceBob, self).setUp() - self.inotify = magic_folder.get_inotify_module() - return d diff --git a/src/allmydata/test/test_magicpath.py b/src/allmydata/test/test_magicpath.py deleted file mode 100644 index 1227a2c4d..000000000 --- a/src/allmydata/test/test_magicpath.py +++ /dev/null @@ -1,28 +0,0 @@ - -from twisted.trial import unittest - -from allmydata import magicpath - - -class MagicPath(unittest.TestCase): - tests = { - u"Documents/work/critical-project/qed.txt": u"Documents@_work@_critical-project@_qed.txt", - u"Documents/emails/bunnyfufu@hoppingforest.net": u"Documents@_emails@_bunnyfufu@@hoppingforest.net", - u"foo/@/bar": u"foo@_@@@_bar", - } - - def test_path2magic(self): - for test, expected in self.tests.items(): - self.failUnlessEqual(magicpath.path2magic(test), expected) - - def test_magic2path(self): - for expected, test in self.tests.items(): - self.failUnlessEqual(magicpath.magic2path(test), expected) - - def test_should_ignore(self): - self.failUnlessEqual(magicpath.should_ignore_file(u".bashrc"), True) - self.failUnlessEqual(magicpath.should_ignore_file(u"bashrc."), False) - self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/.bashrc"), True) - self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/.branch/bashrc"), True) - self.failUnlessEqual(magicpath.should_ignore_file(u"forest/.tree/branch/bashrc"), True) - self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/bashrc"), False) diff --git a/src/allmydata/test/test_python2_regressions.py b/src/allmydata/test/test_python2_regressions.py index 565e3ad80..84484f1cf 100644 --- a/src/allmydata/test/test_python2_regressions.py +++ b/src/allmydata/test/test_python2_regressions.py @@ -15,9 +15,6 @@ from testtools.matchers import ( BLACKLIST = { "allmydata.test.check_load", - "allmydata.watchdog._watchdog_541", - "allmydata.watchdog.inotify", - "allmydata.windows.inotify", "allmydata.windows.registry", } diff --git a/src/allmydata/test/test_websocket_logs.py b/src/allmydata/test/test_websocket_logs.py index 983613a28..e666a4902 100644 --- a/src/allmydata/test/test_websocket_logs.py +++ b/src/allmydata/test/test_websocket_logs.py @@ -40,7 +40,7 @@ class TestStreamingLogs(unittest.TestCase): messages.append(json.loads(msg)) proto.on("message", got_message) - @log_call(action_type=u"test:cli:magic-folder:cleanup") + @log_call(action_type=u"test:cli:some-exciting-action") def do_a_thing(): 
pass diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index 346e0e2c3..3e0967811 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -35,7 +35,6 @@ from allmydata.immutable import upload from allmydata.immutable.downloader.status import DownloadStatus from allmydata.dirnode import DirectoryNode from allmydata.nodemaker import NodeMaker -from allmydata.frontends.magic_folder import QueuedItem from allmydata.web import status from allmydata.web.common import WebError, MultiFormatPage from allmydata.util import fileutil, base32, hashutil @@ -125,29 +124,6 @@ class FakeUploader(service.Service): return (self.helper_furl, self.helper_connected) -def create_test_queued_item(relpath_u, history=[]): - progress = mock.Mock() - progress.progress = 100.0 - item = QueuedItem(relpath_u, progress, 1234) - for the_status, timestamp in history: - item.set_status(the_status, current_time=timestamp) - return item - - -class FakeMagicFolder(object): - def __init__(self): - self.uploader = FakeStatus() - self.downloader = FakeStatus() - - def get_public_status(self): - return ( - True, - [ - 'a magic-folder status message' - ], - ) - - def build_one_ds(): ds = DownloadStatus("storage_index", 1234) now = time.time() @@ -282,7 +258,6 @@ class FakeClient(_Client): # don't upcall to Client.__init__, since we only want to initialize a # minimal subset service.MultiService.__init__(self) - self._magic_folders = dict() self.all_contents = {} self.nodeid = "fake_nodeid" self.nickname = u"fake_nickname \u263A" @@ -997,79 +972,6 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi d.addCallback(_check) return d - @defer.inlineCallbacks - def test_magicfolder_status_bad_token(self): - with self.assertRaises(Error): - yield self.POST( - '/magic_folder?t=json', - t='json', - name='default', - token='not the token you are looking for', - ) - - @defer.inlineCallbacks - def test_magicfolder_status_wrong_folder(self): - with self.assertRaises(Exception) as ctx: - yield self.POST( - '/magic_folder?t=json', - t='json', - name='a non-existent magic-folder', - token=self.s.get_auth_token(), - ) - self.assertIn( - "Not Found", - str(ctx.exception) - ) - - @defer.inlineCallbacks - def test_magicfolder_status_success(self): - self.s._magic_folders['default'] = mf = FakeMagicFolder() - mf.uploader.status = [ - create_test_queued_item(u"rel/uppath", [('done', 12345)]) - ] - mf.downloader.status = [ - create_test_queued_item(u"rel/downpath", [('done', 23456)]) - ] - data = yield self.POST( - '/magic_folder?t=json', - t='json', - name='default', - token=self.s.get_auth_token(), - ) - data = json.loads(data) - self.assertEqual( - data, - [ - { - "status": "done", - "path": "rel/uppath", - "kind": "upload", - "percent_done": 100.0, - "done_at": 12345, - "size": 1234, - }, - { - "status": "done", - "path": "rel/downpath", - "kind": "download", - "percent_done": 100.0, - "done_at": 23456, - "size": 1234, - }, - ] - ) - - @defer.inlineCallbacks - def test_magicfolder_root_success(self): - self.s._magic_folders['default'] = mf = FakeMagicFolder() - mf.uploader.status = [ - create_test_queued_item(u"rel/path", [('done', 12345)]) - ] - data = yield self.GET( - '/', - ) - del data - def test_status(self): h = self.s.get_history() dl_num = h.list_all_download_statuses()[0].get_counter() diff --git a/src/allmydata/util/eliotutil.py b/src/allmydata/util/eliotutil.py index 7b36a27d1..16ab373e0 100644 --- a/src/allmydata/util/eliotutil.py +++ 
b/src/allmydata/util/eliotutil.py @@ -16,15 +16,6 @@ __all__ = [ "opt_help_eliot_destinations", "validateInstanceOf", "validateSetMembership", - "MAYBE_NOTIFY", - "CALLBACK", - "INOTIFY_EVENTS", - "RELPATH", - "VERSION", - "LAST_UPLOADED_URI", - "LAST_DOWNLOADED_URI", - "LAST_DOWNLOADED_TIMESTAMP", - "PATHINFO", ] from sys import ( @@ -86,14 +77,6 @@ from twisted.internet.defer import ( ) from twisted.application.service import Service - -from .fileutil import ( - PathInfo, -) -from .fake_inotify import ( - humanReadableMask, -) - def validateInstanceOf(t): """ Return an Eliot validator that requires values to be instances of ``t``. @@ -112,72 +95,6 @@ def validateSetMembership(s): raise ValidationError("{} not in {}".format(v, s)) return validator -RELPATH = Field.for_types( - u"relpath", - [unicode], - u"The relative path of a file in a magic-folder.", -) - -VERSION = Field.for_types( - u"version", - [int, long], - u"The version of the file.", -) - -LAST_UPLOADED_URI = Field.for_types( - u"last_uploaded_uri", - [unicode, bytes, None], - u"The filecap to which this version of this file was uploaded.", -) - -LAST_DOWNLOADED_URI = Field.for_types( - u"last_downloaded_uri", - [unicode, bytes, None], - u"The filecap from which the previous version of this file was downloaded.", -) - -LAST_DOWNLOADED_TIMESTAMP = Field.for_types( - u"last_downloaded_timestamp", - [float, int, long], - u"(XXX probably not really, don't trust this) The timestamp of the last download of this file.", -) - -PATHINFO = Field( - u"pathinfo", - lambda v: None if v is None else { - "isdir": v.isdir, - "isfile": v.isfile, - "islink": v.islink, - "exists": v.exists, - "size": v.size, - "mtime_ns": v.mtime_ns, - "ctime_ns": v.ctime_ns, - }, - u"The metadata for this version of this file.", - validateInstanceOf((type(None), PathInfo)), -) - -INOTIFY_EVENTS = Field( - u"inotify_events", - humanReadableMask, - u"Details about a filesystem event generating a notification event.", - validateInstanceOf((int, long)), -) - -MAYBE_NOTIFY = ActionType( - u"filesystem:notification:maybe-notify", - [], - [], - u"A filesystem event is being considered for dispatch to an application handler.", -) - -CALLBACK = ActionType( - u"filesystem:notification:callback", - [INOTIFY_EVENTS], - [], - u"A filesystem event is being dispatched to an application callback." -) - def eliot_logging_service(reactor, destinations): """ Parse the given Eliot destination descriptions and return an ``IService`` diff --git a/src/allmydata/util/fake_inotify.py b/src/allmydata/util/fake_inotify.py deleted file mode 100644 index c6d0b16e2..000000000 --- a/src/allmydata/util/fake_inotify.py +++ /dev/null @@ -1,109 +0,0 @@ - -# Most of this is copied from Twisted 11.0. The reason for this hack is that -# twisted.internet.inotify can't be imported when the platform does not support inotify. 
- -import six - -if six.PY3: - long = int - -# from /usr/src/linux/include/linux/inotify.h - -IN_ACCESS = long(0x00000001) # File was accessed -IN_MODIFY = long(0x00000002) # File was modified -IN_ATTRIB = long(0x00000004) # Metadata changed -IN_CLOSE_WRITE = long(0x00000008) # Writeable file was closed -IN_CLOSE_NOWRITE = long(0x00000010) # Unwriteable file closed -IN_OPEN = long(0x00000020) # File was opened -IN_MOVED_FROM = long(0x00000040) # File was moved from X -IN_MOVED_TO = long(0x00000080) # File was moved to Y -IN_CREATE = long(0x00000100) # Subfile was created -IN_DELETE = long(0x00000200) # Subfile was delete -IN_DELETE_SELF = long(0x00000400) # Self was deleted -IN_MOVE_SELF = long(0x00000800) # Self was moved -IN_UNMOUNT = long(0x00002000) # Backing fs was unmounted -IN_Q_OVERFLOW = long(0x00004000) # Event queued overflowed -IN_IGNORED = long(0x00008000) # File was ignored - -IN_ONLYDIR = 0x01000000 # only watch the path if it is a directory -IN_DONT_FOLLOW = 0x02000000 # don't follow a sym link -IN_MASK_ADD = 0x20000000 # add to the mask of an already existing watch -IN_ISDIR = 0x40000000 # event occurred against dir -IN_ONESHOT = 0x80000000 # only send event once - -IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # closes -IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO # moves -IN_CHANGED = IN_MODIFY | IN_ATTRIB # changes - -IN_WATCH_MASK = (IN_MODIFY | IN_ATTRIB | - IN_CREATE | IN_DELETE | - IN_DELETE_SELF | IN_MOVE_SELF | - IN_UNMOUNT | IN_MOVED_FROM | IN_MOVED_TO) - - -_FLAG_TO_HUMAN = [ - (IN_ACCESS, 'access'), - (IN_MODIFY, 'modify'), - (IN_ATTRIB, 'attrib'), - (IN_CLOSE_WRITE, 'close_write'), - (IN_CLOSE_NOWRITE, 'close_nowrite'), - (IN_OPEN, 'open'), - (IN_MOVED_FROM, 'moved_from'), - (IN_MOVED_TO, 'moved_to'), - (IN_CREATE, 'create'), - (IN_DELETE, 'delete'), - (IN_DELETE_SELF, 'delete_self'), - (IN_MOVE_SELF, 'move_self'), - (IN_UNMOUNT, 'unmount'), - (IN_Q_OVERFLOW, 'queue_overflow'), - (IN_IGNORED, 'ignored'), - (IN_ONLYDIR, 'only_dir'), - (IN_DONT_FOLLOW, 'dont_follow'), - (IN_MASK_ADD, 'mask_add'), - (IN_ISDIR, 'is_dir'), - (IN_ONESHOT, 'one_shot') -] - - - -def humanReadableMask(mask): - """ - Auxiliary function that converts an hexadecimal mask into a series - of human readable flags. - """ - s = [] - for k, v in _FLAG_TO_HUMAN: - if k & mask: - s.append(v) - return s - - -from eliot import start_action - -# This class is not copied from Twisted; it acts as a mock. 
-class INotify(object): - def startReading(self): - pass - - def stopReading(self): - pass - - def loseConnection(self): - pass - - def watch(self, filepath, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False): - self.callbacks = callbacks - - def event(self, filepath, mask): - with start_action(action_type=u"fake-inotify:event", path=filepath.asTextMode().path, mask=mask): - for cb in self.callbacks: - cb(None, filepath, mask) - - -__all__ = ["INotify", "humanReadableMask", "IN_WATCH_MASK", "IN_ACCESS", - "IN_MODIFY", "IN_ATTRIB", "IN_CLOSE_NOWRITE", "IN_CLOSE_WRITE", - "IN_OPEN", "IN_MOVED_FROM", "IN_MOVED_TO", "IN_CREATE", - "IN_DELETE", "IN_DELETE_SELF", "IN_MOVE_SELF", "IN_UNMOUNT", - "IN_Q_OVERFLOW", "IN_IGNORED", "IN_ONLYDIR", "IN_DONT_FOLLOW", - "IN_MASK_ADD", "IN_ISDIR", "IN_ONESHOT", "IN_CLOSE", - "IN_MOVED", "IN_CHANGED"] diff --git a/src/allmydata/watchdog/__init__.py b/src/allmydata/watchdog/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/allmydata/watchdog/_watchdog_541.py b/src/allmydata/watchdog/_watchdog_541.py deleted file mode 100644 index 87686ce6d..000000000 --- a/src/allmydata/watchdog/_watchdog_541.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Hotfix for https://github.com/gorakhargosh/watchdog/issues/541 -""" - -from watchdog.observers.fsevents import FSEventsEmitter - -# The class object has already been bundled up in the default arguments to -# FSEventsObserver.__init__. So mutate the class object (instead of replacing -# it with a safer version). -original_on_thread_stop = FSEventsEmitter.on_thread_stop -def safe_on_thread_stop(self): - if self.is_alive(): - return original_on_thread_stop(self) - -def patch(): - FSEventsEmitter.on_thread_stop = safe_on_thread_stop diff --git a/src/allmydata/watchdog/inotify.py b/src/allmydata/watchdog/inotify.py deleted file mode 100644 index 68a121a60..000000000 --- a/src/allmydata/watchdog/inotify.py +++ /dev/null @@ -1,212 +0,0 @@ - -""" -An implementation of an inotify-like interface on top of the ``watchdog`` library. 
-""" - -from __future__ import ( - unicode_literals, - print_function, - absolute_import, - division, -) - -__all__ = [ - "humanReadableMask", "INotify", - "IN_WATCH_MASK", "IN_ACCESS", "IN_MODIFY", "IN_ATTRIB", "IN_CLOSE_NOWRITE", - "IN_CLOSE_WRITE", "IN_OPEN", "IN_MOVED_FROM", "IN_MOVED_TO", "IN_CREATE", - "IN_DELETE", "IN_DELETE_SELF", "IN_MOVE_SELF", "IN_UNMOUNT", "IN_ONESHOT", - "IN_Q_OVERFLOW", "IN_IGNORED", "IN_ONLYDIR", "IN_DONT_FOLLOW", "IN_MOVED", - "IN_MASK_ADD", "IN_ISDIR", "IN_CLOSE", "IN_CHANGED", "_FLAG_TO_HUMAN", -] - -from watchdog.observers import Observer -from watchdog.events import ( - FileSystemEvent, - FileSystemEventHandler, DirCreatedEvent, FileCreatedEvent, - DirDeletedEvent, FileDeletedEvent, FileModifiedEvent -) - -from twisted.internet import reactor -from twisted.python.filepath import FilePath -from allmydata.util.fileutil import abspath_expanduser_unicode - -from eliot import ( - ActionType, - Message, - Field, - preserve_context, - start_action, -) - -from allmydata.util.pollmixin import PollMixin -from allmydata.util.assertutil import _assert, precondition -from allmydata.util import encodingutil -from allmydata.util.fake_inotify import humanReadableMask, \ - IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \ - IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \ - IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \ - IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED, \ - _FLAG_TO_HUMAN - -from ..util.eliotutil import ( - MAYBE_NOTIFY, - CALLBACK, - validateInstanceOf, -) - -from . import _watchdog_541 - -_watchdog_541.patch() - -NOT_STARTED = "NOT_STARTED" -STARTED = "STARTED" -STOPPING = "STOPPING" -STOPPED = "STOPPED" - -_PATH = Field.for_types( - u"path", - [bytes, unicode], - u"The path an inotify event concerns.", -) - -_EVENT = Field( - u"event", - lambda e: e.__class__.__name__, - u"The watchdog event that has taken place.", - validateInstanceOf(FileSystemEvent), -) - -ANY_INOTIFY_EVENT = ActionType( - u"watchdog:inotify:any-event", - [_PATH, _EVENT], - [], - u"An inotify event is being dispatched.", -) - -class INotifyEventHandler(FileSystemEventHandler): - def __init__(self, path, mask, callbacks, pending_delay): - FileSystemEventHandler.__init__(self) - self._path = path - self._mask = mask - self._callbacks = callbacks - self._pending_delay = pending_delay - self._pending = set() - - def _maybe_notify(self, path, event): - with MAYBE_NOTIFY(): - event_mask = IN_CHANGED - if isinstance(event, FileModifiedEvent): - event_mask = event_mask | IN_CLOSE_WRITE - event_mask = event_mask | IN_MODIFY - if isinstance(event, (DirCreatedEvent, FileCreatedEvent)): - # For our purposes, IN_CREATE is irrelevant. - event_mask = event_mask | IN_CLOSE_WRITE - if isinstance(event, (DirDeletedEvent, FileDeletedEvent)): - event_mask = event_mask | IN_DELETE - if event.is_directory: - event_mask = event_mask | IN_ISDIR - if not (self._mask & event_mask): - return - for cb in self._callbacks: - try: - with CALLBACK(inotify_events=event_mask): - cb(None, FilePath(path), event_mask) - except: - # Eliot already logged the exception for us. - # There's nothing else we can do about it here. 
- pass - - def process(self, event): - event_filepath_u = event.src_path.decode(encodingutil.get_filesystem_encoding()) - event_filepath_u = abspath_expanduser_unicode(event_filepath_u, base=self._path) - - if event_filepath_u == self._path: - # ignore events for parent directory - return - - self._maybe_notify(event_filepath_u, event) - - def on_any_event(self, event): - with ANY_INOTIFY_EVENT(path=event.src_path, event=event): - reactor.callFromThread( - preserve_context(self.process), - event, - ) - - -class INotify(PollMixin): - """ - I am a prototype INotify, made to work on Mac OS X (Darwin) - using the Watchdog python library. This is actually a simplified subset - of the twisted Linux INotify class because we do not utilize the watch mask - and only implement the following methods: - - watch - - startReading - - stopReading - - wait_until_stopped - - set_pending_delay - """ - def __init__(self): - self._pending_delay = 1.0 - self.recursive_includes_new_subdirectories = False - self._callbacks = {} - self._watches = {} - self._state = NOT_STARTED - self._observer = Observer(timeout=self._pending_delay) - - def set_pending_delay(self, delay): - Message.log(message_type=u"watchdog:inotify:set-pending-delay", delay=delay) - assert self._state != STARTED - self._pending_delay = delay - - def startReading(self): - with start_action(action_type=u"watchdog:inotify:start-reading"): - assert self._state != STARTED - try: - # XXX twisted.internet.inotify doesn't require watches to - # be set before startReading is called. - # _assert(len(self._callbacks) != 0, "no watch set") - self._observer.start() - self._state = STARTED - except: - self._state = STOPPED - raise - - def stopReading(self): - with start_action(action_type=u"watchdog:inotify:stop-reading"): - if self._state != STOPPED: - self._state = STOPPING - self._observer.unschedule_all() - self._observer.stop() - self._observer.join() - self._state = STOPPED - - def wait_until_stopped(self): - return self.poll(lambda: self._state == STOPPED) - - def _isWatched(self, path_u): - return path_u in self._callbacks.keys() - - def ignore(self, path): - path_u = path.path - self._observer.unschedule(self._watches[path_u]) - del self._callbacks[path_u] - del self._watches[path_u] - - def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False): - precondition(isinstance(autoAdd, bool), autoAdd=autoAdd) - precondition(isinstance(recursive, bool), recursive=recursive) - assert autoAdd == False - - path_u = path.path - if not isinstance(path_u, unicode): - path_u = path_u.decode('utf-8') - _assert(isinstance(path_u, unicode), path_u=path_u) - - if path_u not in self._callbacks.keys(): - self._callbacks[path_u] = callbacks or [] - self._watches[path_u] = self._observer.schedule( - INotifyEventHandler(path_u, mask, self._callbacks[path_u], self._pending_delay), - path=path_u, - recursive=False, - ) diff --git a/src/allmydata/web/magic_folder.py b/src/allmydata/web/magic_folder.py deleted file mode 100644 index 27d715d3a..000000000 --- a/src/allmydata/web/magic_folder.py +++ /dev/null @@ -1,52 +0,0 @@ -import json - -from allmydata.web.common import TokenOnlyWebApi, get_arg, WebError - - -class MagicFolderWebApi(TokenOnlyWebApi): - """ - I provide the web-based API for Magic Folder status etc. 
- """ - - def __init__(self, client): - TokenOnlyWebApi.__init__(self, client) - self.client = client - - def post_json(self, req): - req.setHeader("content-type", "application/json") - nick = get_arg(req, 'name', 'default') - - try: - magic_folder = self.client._magic_folders[nick] - except KeyError: - raise WebError( - "No such magic-folder '{}'".format(nick), - 404, - ) - - data = [] - for item in magic_folder.uploader.get_status(): - d = dict( - path=item.relpath_u, - status=item.status_history()[-1][0], - kind='upload', - ) - for (status, ts) in item.status_history(): - d[status + '_at'] = ts - d['percent_done'] = item.progress.progress - d['size'] = item.size - data.append(d) - - for item in magic_folder.downloader.get_status(): - d = dict( - path=item.relpath_u, - status=item.status_history()[-1][0], - kind='download', - ) - for (status, ts) in item.status_history(): - d[status + '_at'] = ts - d['percent_done'] = item.progress.progress - d['size'] = item.size - data.append(d) - - return json.dumps(data) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 4ededb8c9..8726cb00f 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -21,7 +21,7 @@ from allmydata.version_checks import get_package_versions_string from allmydata.util import log from allmydata.interfaces import IFileNode from allmydata.web import filenode, directory, unlinked, status -from allmydata.web import storage, magic_folder +from allmydata.web import storage from allmydata.web.common import ( abbreviate_size, getxmlfile, @@ -200,9 +200,6 @@ class Root(MultiFormatPage): self.putChild("uri", URIHandler(client)) self.putChild("cap", URIHandler(client)) - # handler for "/magic_folder" URIs - self.putChild("magic_folder", magic_folder.MagicFolderWebApi(client)) - # Handler for everything beneath "/private", an area of the resource # hierarchy which is only accessible with the private per-node API # auth token. @@ -299,30 +296,6 @@ class Root(MultiFormatPage): return description - def data_magic_folders(self, ctx, data): - return self.client._magic_folders.keys() - - def render_magic_folder_row(self, ctx, data): - magic_folder = self.client._magic_folders[data] - (ok, messages) = magic_folder.get_public_status() - ctx.fillSlots("magic_folder_name", data) - if ok: - ctx.fillSlots("magic_folder_status", "yes") - ctx.fillSlots("magic_folder_status_alt", "working") - else: - ctx.fillSlots("magic_folder_status", "no") - ctx.fillSlots("magic_folder_status_alt", "not working") - - status = T.ul(class_="magic-folder-status") - for msg in messages: - status[T.li[str(msg)]] - return ctx.tag[status] - - def render_magic_folder(self, ctx, data): - if not self.client._magic_folders: - return T.p() - return ctx.tag - def render_services(self, ctx, data): ul = T.ul() try: diff --git a/src/allmydata/web/static/css/new-tahoe.css b/src/allmydata/web/static/css/new-tahoe.css index caf540c07..38bd662e1 100644 --- a/src/allmydata/web/static/css/new-tahoe.css +++ b/src/allmydata/web/static/css/new-tahoe.css @@ -53,11 +53,6 @@ body { .connection-status { } -.magic-folder-status { - clear: left; - margin-left: 40px; /* width of status-indicator + margins */ -} - .furl { font-size: 0.8em; word-wrap: break-word; diff --git a/src/allmydata/web/statistics.xhtml b/src/allmydata/web/statistics.xhtml index fac0607e7..11f5c9643 100644 --- a/src/allmydata/web/statistics.xhtml +++ b/src/allmydata/web/statistics.xhtml @@ -20,18 +20,6 @@
     <li>Files Retrieved (mutable): …</li>
   </ul>
 
-  <h3>Magic Folder</h3>
-
-  <ul>
-    <li>Local Directories Monitored: <span n:render="magic_uploader_monitored" /></li>
-    <li>Files Uploaded: <span n:render="magic_uploader_succeeded" /></li>
-    <li>Files Queued for Upload: <span n:render="magic_uploader_queued" /></li>
-    <li>Failed Uploads: <span n:render="magic_uploader_failed" /></li>
-    <li>Files Downloaded: <span n:render="magic_downloader_succeeded" /></li>
-    <li>Files Queued for Download: <span n:render="magic_downloader_queued" /></li>
-    <li>Failed Downloads: <span n:render="magic_downloader_failed" /></li>
-  </ul>
-
   <h3>Raw Stats:</h3>
   <pre n:render="raw" />
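The statistics.xhtml hunk above and the renderer methods removed just below both consume the same "magic_folder.*" counters that the deleted test_statistics test reads from the node's JSON endpoint (GET /statistics?t=json, then data["counters"]). A minimal sketch of that consumption pattern, for reference only: the helper and the node URL/port are invented examples, not part of the patch.

    # Sketch only (not part of the patch): read the counters the removed
    # statistics markup used to render, via the JSON endpoint exercised by
    # _got_stats_json in the deleted test. The URL below is an assumed
    # example of a local node's web port.
    import json
    import urllib2  # Python 2, like the code these patches target

    def read_magic_folder_counters(node_url="http://127.0.0.1:3456"):
        body = urllib2.urlopen(node_url + "/statistics?t=json").read()
        counters = json.loads(body)["counters"]
        # Mirror the removed renderers: .get(key, 0), so an absent counter
        # reads as zero.
        return dict(
            dirs_monitored=counters.get("magic_folder.uploader.dirs_monitored", 0),
            objects_succeeded=counters.get("magic_folder.uploader.objects_succeeded", 0),
            objects_queued=counters.get("magic_folder.uploader.objects_queued", 0),
            objects_failed=counters.get("magic_folder.uploader.objects_failed", 0),
        )

After this series lands, the magic_folder.* keys disappear along with the subsystem, so the .get() defaults of 0 are what such a client would see when polling an upgraded node.
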
    diff --git a/src/allmydata/web/status.py b/src/allmydata/web/status.py
    index 50e5ec2f8..67c2fffdb 100644
    --- a/src/allmydata/web/status.py
    +++ b/src/allmydata/web/status.py
    @@ -1210,36 +1210,6 @@ class Statistics(MultiFormatPage):
             return "%s files / %s bytes (%s)" % (files, bytes,
                                                  abbreviate_size(bytes))
     
    -    def render_magic_uploader_monitored(self, ctx, data):
    -        dirs = data["counters"].get("magic_folder.uploader.dirs_monitored", 0)
    -        return "%s directories" % (dirs,)
    -
    -    def render_magic_uploader_succeeded(self, ctx, data):
    -        # TODO: bytes uploaded
    -        files = data["counters"].get("magic_folder.uploader.objects_succeeded", 0)
    -        return "%s files" % (files,)
    -
    -    def render_magic_uploader_queued(self, ctx, data):
    -        files = data["counters"].get("magic_folder.uploader.objects_queued", 0)
    -        return "%s files" % (files,)
    -
    -    def render_magic_uploader_failed(self, ctx, data):
    -        files = data["counters"].get("magic_folder.uploader.objects_failed", 0)
    -        return "%s files" % (files,)
    -
    -    def render_magic_downloader_succeeded(self, ctx, data):
    -        # TODO: bytes uploaded
    -        files = data["counters"].get("magic_folder.downloader.objects_succeeded", 0)
    -        return "%s files" % (files,)
    -
    -    def render_magic_downloader_queued(self, ctx, data):
    -        files = data["counters"].get("magic_folder.downloader.objects_queued", 0)
    -        return "%s files" % (files,)
    -
    -    def render_magic_downloader_failed(self, ctx, data):
    -        files = data["counters"].get("magic_folder.downloader.objects_failed", 0)
    -        return "%s files" % (files,)
    -
         def render_raw(self, ctx, data):
             raw = pprint.pformat(data)
             return ctx.tag[raw]
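
For orientation before the welcome.xhtml hunk below: the deleted welcome-page markup and the render_magic_folder_row/data_magic_folders methods removed from web/root.py earlier in this series cooperated through nevow's render/slot mechanism. Here is a self-contained sketch of that mechanism; the class name, template, and slot values are invented for illustration and are not the removed code.

    # Illustrative sketch, not the removed code: the nevow render/slot
    # pattern that connected welcome.xhtml to render_magic_folder_row in
    # web/root.py. Template and values are made up for the example.
    from nevow import loaders, rend

    TEMPLATE = (
        '<div xmlns:n="http://nevow.com/ns/nevow/0.1" n:render="magic_folder_row">'
        '<img><n:attr name="src">img/connected-<n:slot name="magic_folder_status" />.png'
        '</n:attr></img> <n:slot name="magic_folder_name" /></div>'
    )

    class FolderRow(rend.Page):
        docFactory = loaders.xmlstr(TEMPLATE)

        def render_magic_folder_row(self, ctx, data):
            # ctx.fillSlots() supplies values for the <n:slot /> placeholders,
            # the same call pattern the deleted welcome-page renderer used.
            ctx.fillSlots("magic_folder_name", "default")
            ctx.fillSlots("magic_folder_status", "yes")
            return ctx.tag

Rendering a FolderRow yields the <div> with both slots substituted, which is how each folder's name and its connected-yes.png or connected-no.png indicator reached the page.
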
    diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
    index 079a7cb21..5022bce45 100644
    --- a/src/allmydata/web/welcome.xhtml
    +++ b/src/allmydata/web/welcome.xhtml
@@ -159,13 +159,6 @@
-          <!-- placeholder: the exact markup of this removed hunk is not
-               recoverable. It deleted the welcome page's "Magic Folders"
-               section: a heading plus one row per folder, showing an
-               img/connected-<status>.png indicator and the folder name
-               through the magic_folder_status, magic_folder_status_alt, and
-               magic_folder_name slots filled by render_magic_folder_row. -->
    Connected to diff --git a/src/allmydata/windows/inotify.py b/src/allmydata/windows/inotify.py deleted file mode 100644 index 17bea3597..000000000 --- a/src/allmydata/windows/inotify.py +++ /dev/null @@ -1,379 +0,0 @@ - -# Windows near-equivalent to twisted.internet.inotify -# This should only be imported on Windows. - -from __future__ import print_function - -import six -import os, sys - -from eliot import ( - start_action, - Message, - log_call, -) - -from twisted.internet import reactor -from twisted.internet.threads import deferToThread - -from allmydata.util.fake_inotify import humanReadableMask, \ - IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \ - IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \ - IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \ - IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED -[humanReadableMask, \ - IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \ - IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \ - IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \ - IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED] - -from allmydata.util.assertutil import _assert, precondition -from allmydata.util.encodingutil import quote_output -from allmydata.util import log, fileutil -from allmydata.util.pollmixin import PollMixin -from ..util.eliotutil import ( - MAYBE_NOTIFY, - CALLBACK, -) - -from ctypes import WINFUNCTYPE, WinError, windll, POINTER, byref, create_string_buffer, \ - addressof, get_last_error -from ctypes.wintypes import BOOL, HANDLE, DWORD, LPCWSTR, LPVOID - -if six.PY3: - long = int - -# -FILE_LIST_DIRECTORY = 1 - -# -CreateFileW = WINFUNCTYPE( - HANDLE, LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE, - use_last_error=True -)(("CreateFileW", windll.kernel32)) - -FILE_SHARE_READ = 0x00000001 -FILE_SHARE_WRITE = 0x00000002 -FILE_SHARE_DELETE = 0x00000004 - -OPEN_EXISTING = 3 - -FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 - -# -CloseHandle = WINFUNCTYPE( - BOOL, HANDLE, - use_last_error=True -)(("CloseHandle", windll.kernel32)) - -# -ReadDirectoryChangesW = WINFUNCTYPE( - BOOL, HANDLE, LPVOID, DWORD, BOOL, DWORD, POINTER(DWORD), LPVOID, LPVOID, - use_last_error=True -)(("ReadDirectoryChangesW", windll.kernel32)) - -FILE_NOTIFY_CHANGE_FILE_NAME = 0x00000001 -FILE_NOTIFY_CHANGE_DIR_NAME = 0x00000002 -FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x00000004 -#FILE_NOTIFY_CHANGE_SIZE = 0x00000008 -FILE_NOTIFY_CHANGE_LAST_WRITE = 0x00000010 -FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x00000020 -#FILE_NOTIFY_CHANGE_CREATION = 0x00000040 -FILE_NOTIFY_CHANGE_SECURITY = 0x00000100 - -# -FILE_ACTION_ADDED = 0x00000001 -FILE_ACTION_REMOVED = 0x00000002 -FILE_ACTION_MODIFIED = 0x00000003 -FILE_ACTION_RENAMED_OLD_NAME = 0x00000004 -FILE_ACTION_RENAMED_NEW_NAME = 0x00000005 - -_action_to_string = { - FILE_ACTION_ADDED : "FILE_ACTION_ADDED", - FILE_ACTION_REMOVED : "FILE_ACTION_REMOVED", - FILE_ACTION_MODIFIED : "FILE_ACTION_MODIFIED", - FILE_ACTION_RENAMED_OLD_NAME : "FILE_ACTION_RENAMED_OLD_NAME", - FILE_ACTION_RENAMED_NEW_NAME : "FILE_ACTION_RENAMED_NEW_NAME", -} - -_action_to_inotify_mask = { - FILE_ACTION_ADDED : IN_CREATE, - FILE_ACTION_REMOVED : IN_DELETE, - FILE_ACTION_MODIFIED : IN_CHANGED, - FILE_ACTION_RENAMED_OLD_NAME : IN_MOVED_FROM, - FILE_ACTION_RENAMED_NEW_NAME : IN_MOVED_TO, -} - -INVALID_HANDLE_VALUE = 0xFFFFFFFF - -FALSE = 0 -TRUE = 1 - -class Event(object): - """ 
- * action: a FILE_ACTION_* constant (not a bit mask) - * filename: a Unicode string, giving the name relative to the watched directory - """ - def __init__(self, action, filename): - self.action = action - self.filename = filename - - def __repr__(self): - return "Event(%r, %r)" % (_action_to_string.get(self.action, self.action), self.filename) - - -class FileNotifyInformation(object): - """ - I represent a buffer containing FILE_NOTIFY_INFORMATION structures, and can - iterate over those structures, decoding them into Event objects. - """ - - def __init__(self, size=1024): - self.size = size - self.buffer = create_string_buffer(size) - address = addressof(self.buffer) - _assert(address & 3 == 0, "address 0x%X returned by create_string_buffer is not DWORD-aligned" % (address,)) - self.data = None - - def read_changes(self, hDirectory, recursive, filter): - bytes_returned = DWORD(0) - r = ReadDirectoryChangesW(hDirectory, - self.buffer, - self.size, - recursive, - filter, - byref(bytes_returned), - None, # NULL -> no overlapped I/O - None # NULL -> no completion routine - ) - if r == 0: - self.data = None - raise WinError(get_last_error()) - self.data = self.buffer.raw[:bytes_returned.value] - - def __iter__(self): - # Iterator implemented as generator: - if self.data is None: - return - pos = 0 - while True: - bytes = self._read_dword(pos+8) - s = Event(self._read_dword(pos+4), - self.data[pos+12 : pos+12+bytes].decode('utf-16-le')) - Message.log(message_type="fni", info=repr(s)) - - next_entry_offset = self._read_dword(pos) - yield s - if next_entry_offset == 0: - break - pos = pos + next_entry_offset - - def _read_dword(self, i): - # little-endian - return ( ord(self.data[i]) | - (ord(self.data[i+1]) << 8) | - (ord(self.data[i+2]) << 16) | - (ord(self.data[i+3]) << 24)) - - -def _open_directory(path_u): - hDirectory = CreateFileW(path_u, - FILE_LIST_DIRECTORY, # access rights - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, - # don't prevent other processes from accessing - None, # no security descriptor - OPEN_EXISTING, # directory must already exist - FILE_FLAG_BACKUP_SEMANTICS, # necessary to open a directory - None # no template file - ) - if hDirectory == INVALID_HANDLE_VALUE: - e = WinError(get_last_error()) - raise OSError("Opening directory %s gave WinError: %s" % (quote_output(path_u), e)) - return hDirectory - - -def simple_test(): - path_u = u"test" - filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE - recursive = TRUE - - hDirectory = _open_directory(path_u) - fni = FileNotifyInformation() - print("Waiting...") - while True: - fni.read_changes(hDirectory, recursive, filter) - print(repr(fni.data)) - for info in fni: - print(info) - -def medium_test(): - from twisted.python.filepath import FilePath - - def print_(*event): - print(event) - - notifier = INotify() - notifier.set_pending_delay(1.0) - IN_EXCL_UNLINK = long(0x04000000) - mask = ( IN_CREATE - | IN_CLOSE_WRITE - | IN_MOVED_TO - | IN_MOVED_FROM - | IN_DELETE - | IN_ONLYDIR - | IN_EXCL_UNLINK - ) - notifier.watch(FilePath(u"test"), mask, callbacks=[print_], recursive=True) - notifier.startReading() - reactor.run() - - -NOT_STARTED = "NOT_STARTED" -STARTED = "STARTED" -STOPPING = "STOPPING" -STOPPED = "STOPPED" - -class INotify(PollMixin): - def __init__(self): - self._state = NOT_STARTED - self._filter = None - self._callbacks = None - self._hDirectory = None - self._path = None - self._pending = set() - self._pending_delay = 1.0 - self._pending_call = None - 
self.recursive_includes_new_subdirectories = True - - def set_pending_delay(self, delay): - self._pending_delay = delay - - def startReading(self): - deferToThread(self._thread) - return self.poll(lambda: self._state != NOT_STARTED) - - def stopReading(self): - # FIXME race conditions - if self._state != STOPPED: - self._state = STOPPING - if self._pending_call: - self._pending_call.cancel() - self._pending_call = None - - def wait_until_stopped(self): - try: - fileutil.write(os.path.join(self._path.path, u".ignore-me"), "") - except IOError: - pass - return self.poll(lambda: self._state == STOPPED) - - def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False): - precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state) - precondition(self._filter is None, "only one watch is supported") - precondition(isinstance(autoAdd, bool), autoAdd=autoAdd) - precondition(isinstance(recursive, bool), recursive=recursive) - #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive) - - self._path = path - path_u = path.path - if not isinstance(path_u, unicode): - path_u = path_u.decode(sys.getfilesystemencoding()) - _assert(isinstance(path_u, unicode), path_u=path_u) - - self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE - - if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN): - self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS - if mask & IN_ATTRIB: - self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY - - self._recursive = TRUE if recursive else FALSE - self._callbacks = callbacks or [] - self._hDirectory = _open_directory(path_u) - - def _thread(self): - try: - _assert(self._filter is not None, "no watch set") - - # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in - # . 
- - fni = FileNotifyInformation() - - while True: - self._state = STARTED - action = start_action( - action_type=u"read-changes", - directory=self._path.path, - recursive=self._recursive, - filter=self._filter, - ) - try: - with action: - fni.read_changes(self._hDirectory, self._recursive, self._filter) - except WindowsError as e: - self._state = STOPPING - - if self._check_stop(): - return - for info in fni: - path = self._path.preauthChild(info.filename) # FilePath with Unicode path - if info.action == FILE_ACTION_MODIFIED and path.isdir(): - Message.log( - message_type=u"filtering-out", - info=repr(info), - ) - continue - else: - Message.log( - message_type=u"processing", - info=repr(info), - ) - #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED) - - @log_call( - action_type=MAYBE_NOTIFY.action_type, - include_args=[], - include_result=False, - ) - def _do_pending_calls(): - event_mask = IN_CHANGED - self._pending_call = None - for path1 in self._pending: - if self._callbacks: - for cb in self._callbacks: - try: - with CALLBACK(inotify_events=event_mask): - cb(None, path1, event_mask) - except Exception as e2: - log.err(e2) - self._pending = set() - - def _maybe_notify(path2): - if path2 not in self._pending: - self._pending.add(path2) - if self._state not in [STOPPING, STOPPED]: - _do_pending_calls() -# if self._pending_call is None and self._state not in [STOPPING, STOPPED]: -# self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls) - - reactor.callFromThread(_maybe_notify, path) - if self._check_stop(): - return - except Exception as e: - log.err(e) - self._state = STOPPED - raise - - def _check_stop(self): - if self._state == STOPPING: - hDirectory = self._hDirectory - self._callbacks = None - self._hDirectory = None - CloseHandle(hDirectory) - self._state = STOPPED - if self._pending_call: - self._pending_call.cancel() - self._pending_call = None - - return self._state == STOPPED From 6f1f30e4bf5bf1e5c825e92786541213360dabdc Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 13 Feb 2020 12:15:09 -0500 Subject: [PATCH 0013/1472] pyflakes cleanups --- src/allmydata/test/test_client.py | 2 +- src/allmydata/test/web/test_web.py | 2 -- src/allmydata/util/eliotutil.py | 2 -- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/allmydata/test/test_client.py b/src/allmydata/test/test_client.py index 85d508093..41a44d5a6 100644 --- a/src/allmydata/test/test_client.py +++ b/src/allmydata/test/test_client.py @@ -39,7 +39,7 @@ from testtools.twistedsupport import ( import allmydata import allmydata.util.log -from allmydata.node import OldConfigError, OldConfigOptionError, UnescapedHashError, _Config, create_node_dir +from allmydata.node import OldConfigError, UnescapedHashError, _Config, create_node_dir from allmydata.frontends.auth import NeedRootcapLookupScheme from allmydata.version_checks import ( get_package_versions_string, diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index 3e0967811..f84923521 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -3,7 +3,6 @@ from __future__ import print_function import os.path, re, urllib, time, cgi import json import treq -import mock from bs4 import BeautifulSoup @@ -63,7 +62,6 @@ from ..common_web import ( ) from allmydata.client import _Client, SecretHolder from .common import unknown_rwcap, unknown_rocap, unknown_immcap, FAVICON_MARKUP -from ..status import FakeStatus # create a fake uploader/downloader, and a 
couple of fake dirnodes, then # create a webserver that works against them diff --git a/src/allmydata/util/eliotutil.py b/src/allmydata/util/eliotutil.py index 16ab373e0..f6f40945d 100644 --- a/src/allmydata/util/eliotutil.py +++ b/src/allmydata/util/eliotutil.py @@ -42,8 +42,6 @@ from attr.validators import ( from eliot import ( ILogger, Message, - Field, - ActionType, FileDestination, add_destinations, remove_destination, From 5862e1bea3342067e5d59ce18b8124f91fca809f Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 17 Feb 2020 15:35:56 -0500 Subject: [PATCH 0014/1472] Use GitHub Actions for CI. Fixes: ticket:3277 --- .github/workflows/ci.yml | 82 ++++++++++++++++++++++++++++++++++++++++ newsfragments/3277.minor | 0 2 files changed, 82 insertions(+) create mode 100644 .github/workflows/ci.yml create mode 100644 newsfragments/3277.minor diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..1dcfc892e --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,82 @@ +name: CI + +on: + push: + pull_request: + release: + types: [published] + schedule: + # Daily at 3:21 + - cron: '21 3 * * *' + +jobs: + test: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + - macos-latest + - windows-latest + - ubuntu-latest + python-version: + - 2.7 + + steps: + + # Get vcpython27 on Windows + Python 2.7, to build zfec + # extension. See https://chocolatey.org/packages/vcpython27 and + # https://github.com/crazy-max/ghaction-chocolatey + - name: Install MSVC 9.0 for Python 2.7 on Windows + if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' + uses: crazy-max/ghaction-chocolatey@v1 + with: + args: install vcpython27 + + - name: Check out Tahoe-LAFS sources + uses: actions/checkout@v2 + + - name: Fetch all history for all tags and branches + run: git fetch --prune --unshallow + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python packages + run: | + pip install --upgrade codecov tox setuptools + pip list + + - name: Display tool versions + run: python misc/build_helpers/show-tool-versions.py + + # # codechecks throws type error; disable for now. + # - name: Run "tox -e codechecks" + # run: tox -e codechecks + + - name: Run "tox -e py27" + if: matrix.python-version == '2.7' + run: tox -e py27 + + - name: Run "tox -e coverage" + run: tox -e coverage + + - name: Run "tox -e pyinstaller" + run: tox -e pyinstaller + + - name: Upload eliot.log in case of failure + uses: actions/upload-artifact@v1 + if: failure() + with: + name: eliot.log + path: eliot.log + + # Set Codecov token inside Project Settings > Secrets as + # CODECOV_TOKEN. + - name: Upload coverage report + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: coverage.xml diff --git a/newsfragments/3277.minor b/newsfragments/3277.minor new file mode 100644 index 000000000..e69de29bb From afdfb48ee1775da8e6beb2d6b7bfe59766ef7493 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 22:59:04 -0500 Subject: [PATCH 0015/1472] Run codechecks, but continue on error. 
--- .github/workflows/ci.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1dcfc892e..ea311c553 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,9 +52,14 @@ jobs: - name: Display tool versions run: python misc/build_helpers/show-tool-versions.py - # # codechecks throws type error; disable for now. - # - name: Run "tox -e codechecks" - # run: tox -e codechecks + # Codechecks errors out when running Towncrier within GitHub + # Actions. See this issue: + # https://github.com/hawkowl/towncrier/issues/175. + # + # Run codechecks anyway, and continue on error, for now. + - name: Run "tox -e codechecks" + run: tox -e codechecks + continue-on-error: true - name: Run "tox -e py27" if: matrix.python-version == '2.7' From 77c6b39a700c7f76ce7e54cda5d62d3d6874dccd Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:01:29 -0500 Subject: [PATCH 0016/1472] Update a name --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ea311c553..e1ca4a679 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: # Get vcpython27 on Windows + Python 2.7, to build zfec # extension. See https://chocolatey.org/packages/vcpython27 and # https://github.com/crazy-max/ghaction-chocolatey - - name: Install MSVC 9.0 for Python 2.7 on Windows + - name: [Windows] Install MSVC 9.0 for Python 2.7 if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' uses: crazy-max/ghaction-chocolatey@v1 with: From c535b83f0662a25360aa979de050ce600befb7dc Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:02:02 -0500 Subject: [PATCH 0017/1472] Rename "test" to "coverage" --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1ca4a679..54f962cb9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,8 @@ on: - cron: '21 3 * * *' jobs: - test: + + coverage: runs-on: ${{ matrix.os }} strategy: fail-fast: false From 1a4ae0fc81f7e413bb79277b1fe7bff87bc534a4 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:02:35 -0500 Subject: [PATCH 0018/1472] Remove "tox -e py27" Running "tox -e coverage" is sufficient. --- .github/workflows/ci.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 54f962cb9..ebdb5d223 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,10 +62,6 @@ jobs: run: tox -e codechecks continue-on-error: true - - name: Run "tox -e py27" - if: matrix.python-version == '2.7' - run: tox -e py27 - - name: Run "tox -e coverage" run: tox -e coverage From 383b102b78827ac89e9638750d9112137b50cef1 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:05:03 -0500 Subject: [PATCH 0019/1472] Run integration tests Run integration tests only on macOS for now, because on Windows and Ubuntu integration tests get stuck and time out after six hours. 
--- .github/workflows/ci.yml | 62 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ebdb5d223..9f255b9d6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,3 +82,65 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} file: coverage.xml + + integration: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + - macos-latest + python-version: + - 2.7 + + steps: + + - name: [Ubuntu] Install Tor + if: matrix.os == 'ubuntu-latest' + run: sudo apt install tor + + - name: [macOS] Install Tor + if: matrix.os == 'macos-latest' + run: brew install tor + + - name: [Windows] Install Tor + if: matrix.os == 'windows-latest' + uses: crazy-max/ghaction-chocolatey@v1 + with: + args: install tor + + - name: Install MSVC 9.0 for Python 2.7 [Windows] + if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' + uses: crazy-max/ghaction-chocolatey@v1 + with: + args: install vcpython27 + + - name: Check out Tahoe-LAFS sources + uses: actions/checkout@v2 + + - name: Fetch all history for all tags and branches + run: git fetch --prune --unshallow + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python packages + run: | + pip install --upgrade tox + pip list + + - name: Display tool versions + run: python misc/build_helpers/show-tool-versions.py + + - name: Run "tox -e integration" + run: tox -e integration + + - name: Upload eliot.log in case of failure + uses: actions/upload-artifact@v1 + if: failure() + with: + name: eliot.log + path: eliot.log + From 4924cd9c0478c0406911d7f4d302487f4bcdfb2f Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:10:26 -0500 Subject: [PATCH 0020/1472] Run "tox -e pyinstaller" in a separate step --- .github/workflows/ci.yml | 47 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9f255b9d6..67e9bdfb7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,9 +65,6 @@ jobs: - name: Run "tox -e coverage" run: tox -e coverage - - name: Run "tox -e pyinstaller" - run: tox -e pyinstaller - - name: Upload eliot.log in case of failure uses: actions/upload-artifact@v1 if: failure() @@ -144,3 +141,47 @@ jobs: name: eliot.log path: eliot.log + packaging: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + - macos-latest + - windows-latest + - ubuntu-latest + python-version: + - 2.7 + + steps: + + # Get vcpython27 on Windows + Python 2.7, to build zfec + # extension. 
See https://chocolatey.org/packages/vcpython27 and + # https://github.com/crazy-max/ghaction-chocolatey + - name: [Windows] Install MSVC 9.0 for Python 2.7 + if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' + uses: crazy-max/ghaction-chocolatey@v1 + with: + args: install vcpython27 + + - name: Check out Tahoe-LAFS sources + uses: actions/checkout@v2 + + - name: Fetch all history for all tags and branches + run: git fetch --prune --unshallow + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python packages + run: | + pip install --upgrade codecov tox setuptools + pip list + + - name: Display tool versions + run: python misc/build_helpers/show-tool-versions.py + + - name: Run "tox -e pyinstaller" + run: tox -e pyinstaller From d996cc8d1129809a9409d2544f462b3620ff5ea2 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 23:17:45 -0500 Subject: [PATCH 0021/1472] Fix a syntax error Per GitHub, this is not fine: - name: [Windows] Do stuff run: ... This is fine: - name: Do stuff [Windows] run: ... --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 67e9bdfb7..95b24bb62 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: # Get vcpython27 on Windows + Python 2.7, to build zfec # extension. See https://chocolatey.org/packages/vcpython27 and # https://github.com/crazy-max/ghaction-chocolatey - - name: [Windows] Install MSVC 9.0 for Python 2.7 + - name: Install MSVC 9.0 for Python 2.7 [Windows] if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' uses: crazy-max/ghaction-chocolatey@v1 with: @@ -92,15 +92,15 @@ jobs: steps: - - name: [Ubuntu] Install Tor + - name: Install Tor [Ubuntu] if: matrix.os == 'ubuntu-latest' run: sudo apt install tor - - name: [macOS] Install Tor + - name: Install Tor [macOS] if: matrix.os == 'macos-latest' run: brew install tor - - name: [Windows] Install Tor + - name: Install Tor [Windows] if: matrix.os == 'windows-latest' uses: crazy-max/ghaction-chocolatey@v1 with: @@ -158,7 +158,7 @@ jobs: # Get vcpython27 on Windows + Python 2.7, to build zfec # extension. See https://chocolatey.org/packages/vcpython27 and # https://github.com/crazy-max/ghaction-chocolatey - - name: [Windows] Install MSVC 9.0 for Python 2.7 + - name: Install MSVC 9.0 for Python 2.7 [Windows] if: matrix.os == 'windows-latest' && matrix.python-version == '2.7' uses: crazy-max/ghaction-chocolatey@v1 with: From 5ba0ba787f67f25455cae95431eabdb19e7f24e6 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 26 Feb 2020 09:09:54 -0500 Subject: [PATCH 0022/1472] Omit codechecks --- .github/workflows/ci.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95b24bb62..6c6fe829f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,15 +53,6 @@ jobs: - name: Display tool versions run: python misc/build_helpers/show-tool-versions.py - # Codechecks errors out when running Towncrier within GitHub - # Actions. See this issue: - # https://github.com/hawkowl/towncrier/issues/175. - # - # Run codechecks anyway, and continue on error, for now. 
- - name: Run "tox -e codechecks" - run: tox -e codechecks - continue-on-error: true - - name: Run "tox -e coverage" run: tox -e coverage From 2d0cf3d3c3aeb8690abfdcec59555f513cdb3767 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 26 Feb 2020 09:11:23 -0500 Subject: [PATCH 0023/1472] Upload integration.eliot.json if integration tests fail --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c6fe829f..7aa8edba1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,8 +129,8 @@ jobs: uses: actions/upload-artifact@v1 if: failure() with: - name: eliot.log - path: eliot.log + name: integration.eliot.json + path: integration.eliot.json packaging: runs-on: ${{ matrix.os }} From 9752d6a2ff1f1dc6a78f90d65ddbe7a9e8987f38 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 26 Feb 2020 09:15:02 -0500 Subject: [PATCH 0024/1472] Don't run tests periodically While working on the PR, I never saw this actually working. --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7aa8edba1..838ccd30d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,9 +5,6 @@ on: pull_request: release: types: [published] - schedule: - # Daily at 3:21 - - cron: '21 3 * * *' jobs: From 29947aad86fb6608c5702aa94801a19e4ad19d2f Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 26 Feb 2020 09:19:37 -0500 Subject: [PATCH 0025/1472] Don't run coverage tests on Ubuntu --- .github/workflows/ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 838ccd30d..5a865a956 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,6 @@ jobs: os: - macos-latest - windows-latest - - ubuntu-latest python-version: - 2.7 From c4061fffa0651004a305887dfa670593acf6c979 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 27 Feb 2020 23:15:51 -0500 Subject: [PATCH 0026/1472] Fearlessly use codecov.io token --- .github/workflows/ci.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5a865a956..c9e8e5e3a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,12 +59,10 @@ jobs: name: eliot.log path: eliot.log - # Set Codecov token inside Project Settings > Secrets as - # CODECOV_TOKEN. - name: Upload coverage report uses: codecov/codecov-action@v1 with: - token: ${{ secrets.CODECOV_TOKEN }} + token: abf679b6-e2e6-4b33-b7b5-6cfbd41ee691 file: coverage.xml integration: From a38029f9db20346ddaa7a52fe38555be64bbd2de Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 6 Mar 2020 14:09:52 -0500 Subject: [PATCH 0027/1472] Remove `on.release` from workflow triggers Per GitHub's documentation for on.release events, `on.release` event will run our workflow any time the `release` event occurs, but it is unclear if Tahoe will need this yet. Removing for now. 
--- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c9e8e5e3a..a59581180 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,8 +3,6 @@ name: CI on: push: pull_request: - release: - types: [published] jobs: From bcfd3a5eb88f0990a7ce17fc6c6267a9fce879c3 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 6 Mar 2020 15:32:27 -0500 Subject: [PATCH 0028/1472] Pass posargs to py.test when running integration tests Running "tox -e integration" takes a while. It would be helpful to run tests from just one file. With this change, we can do that, like so: $ tox -e integration -- integration/test_web.py Or even just one test, like so: $ tox -e integration -- integration/test_web.py::test_index With this investigating failing integration tests will be a little easier, hopefully. Fixes: ticket:3285 --- newsfragments/3285.minor | 0 tox.ini | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 newsfragments/3285.minor diff --git a/newsfragments/3285.minor b/newsfragments/3285.minor new file mode 100644 index 000000000..e69de29bb diff --git a/tox.ini b/tox.ini index 7d869b6bd..44016c891 100644 --- a/tox.ini +++ b/tox.ini @@ -53,7 +53,7 @@ setenv = COVERAGE_PROCESS_START=.coveragerc commands = # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures' - py.test --coverage -v integration/ + py.test --coverage -v {posargs:integration} coverage combine coverage report From 3dd480fd82c29f5b98c6a66d21d676b8dfdfc285 Mon Sep 17 00:00:00 2001 From: meejah Date: Fri, 17 Jan 2020 01:55:21 -0700 Subject: [PATCH 0029/1472] tweak release HOWTO --- docs/how_to_make_a_tahoe-lafs_release.org | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/how_to_make_a_tahoe-lafs_release.org b/docs/how_to_make_a_tahoe-lafs_release.org index 79438c85d..44b9e3dd1 100644 --- a/docs/how_to_make_a_tahoe-lafs_release.org +++ b/docs/how_to_make_a_tahoe-lafs_release.org @@ -17,13 +17,14 @@ people are Release Maintainers: - [ ] all appveyor checks pass - [ ] all buildbot workers pass their checks -* freeze master branch [0/] +* freeze master branch [0/1] - [ ] announced the freeze of the master branch on IRC (i.e. 
non-release PRs won't be merged until after release) * sync documentation [0/7] - - [ ] NEWS.rst: summarize user-visible changes, aim for one page of text + + - [ ] NEWS.rst: (run "tox -e news") - [ ] added final release name and date to top-most item in NEWS.rst - - [ ] updated relnotes.txt + - [ ] updated relnotes.txt (change next, last versions; summarize NEWS) - [ ] updated CREDITS - [ ] updated docs/known_issues.rst - [ ] docs/INSTALL.rst only points to current tahoe-lafs-X.Y.Z.tar.gz source code file From 602c6d1811c74edc970ee30cdb1d6f73b2d060c9 Mon Sep 17 00:00:00 2001 From: meejah Date: Fri, 17 Jan 2020 01:56:02 -0700 Subject: [PATCH 0030/1472] news --- newsfragments/3275.minor | 1 + 1 file changed, 1 insertion(+) create mode 100644 newsfragments/3275.minor diff --git a/newsfragments/3275.minor b/newsfragments/3275.minor new file mode 100644 index 000000000..dc25bc1a9 --- /dev/null +++ b/newsfragments/3275.minor @@ -0,0 +1 @@ +Updates to release process/docs \ No newline at end of file From 241b5a3d364e13d9c9c03f1571bc5d889c786ffc Mon Sep 17 00:00:00 2001 From: meejah Date: Wed, 11 Mar 2020 10:52:26 -0600 Subject: [PATCH 0031/1472] use Tox to build NEWS --- tox.ini | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tox.ini b/tox.ini index 44016c891..7948e897a 100644 --- a/tox.ini +++ b/tox.ini @@ -115,6 +115,37 @@ commands = # put it back mv pyproject.toml towncrier.pyproject.toml +[testenv:news] +passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH +# see comment in [testenv] about "certifi" +whitelist_externals = mv +deps = + certifi + towncrier >= 19.2 +commands = + # With pip >= 10 the existence of pyproject.toml (which we are + # required to have to configure towncrier) triggers a "build + # isolation" mode which prevents anything from working. Avoid + # triggering that pip behavior by keeping the towncrier configuration + # somewhere else and only bringing it in when it's actually needed + # (after pip is done). + # + # Some discussion is available at + # https://github.com/pypa/pip/issues/5696 + # + # towncrier post 19.2 (unreleased as of this writing) adds a --config + # option that can be used instead of this file shuffling. + mv towncrier.pyproject.toml pyproject.toml + + # towncrier 19.2 + works with python2.7 + python -m towncrier --yes + + # put it back + mv pyproject.toml towncrier.pyproject.toml + + # commit the changes + git commit -m "update NEWS.txt for release" + [testenv:deprecations] setenv = PYTHONWARNINGS=default::DeprecationWarning From 8f983cb160cd9724e15ccc5eee30878b31cc7e71 Mon Sep 17 00:00:00 2001 From: meejah Date: Fri, 17 Jan 2020 05:18:46 -0700 Subject: [PATCH 0032/1472] bump version to 1.14.0 and update release notes --- docs/INSTALL.rst | 12 ++++++------ relnotes.txt | 36 +++++++++++++++--------------------- 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/docs/INSTALL.rst b/docs/INSTALL.rst index 5f9d72d1d..51c7f6da7 100644 --- a/docs/INSTALL.rst +++ b/docs/INSTALL.rst @@ -163,7 +163,7 @@ from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run Successfully installed ... % venv/bin/tahoe --version - tahoe-lafs: 1.13.0 + tahoe-lafs: 1.14.0 foolscap: ... % @@ -183,14 +183,14 @@ You can also install directly from the source tarball URL:: New python executable in ~/venv/bin/python2.7 Installing setuptools, pip, wheel...done. 
- % venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.13.0.tar.bz2 - Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.13.0.tar.bz2 + % venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2 + Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2 ... Installing collected packages: ... Successfully installed ... % venv/bin/tahoe --version - tahoe-lafs: 1.13.0 + tahoe-lafs: 1.14.0 ... Extras @@ -224,7 +224,7 @@ the additional libraries needed to run the unit tests:: Successfully installed ... % venv/bin/tahoe --version - tahoe-lafs: 1.13.0.post34.dev0 + tahoe-lafs: 1.14.0.post34.dev0 ... This way, you won't have to re-run the ``pip install`` step each time you @@ -273,7 +273,7 @@ result in an "all tests passed" message:: % tox GLOB sdist-make: ~/tahoe-lafs/setup.py py27 recreate: ~/tahoe-lafs/.tox/py27 - py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.13.0.post8.dev0.zip + py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.14.0.post8.dev0.zip py27 runtests: commands[0] | tahoe --version py27 runtests: commands[1] | trial --rterrors allmydata allmydata.test.test_auth diff --git a/relnotes.txt b/relnotes.txt index 0caadd8f2..a46996dfb 100644 --- a/relnotes.txt +++ b/relnotes.txt @@ -1,6 +1,6 @@ -ANNOUNCING Tahoe, the Least-Authority File Store, v1.13.0 +ANNOUNCING Tahoe, the Least-Authority File Store, v1.14.0 -The Tahoe-LAFS team is pleased to announce version 1.13.0 of +The Tahoe-LAFS team is pleased to announce version 1.14.0 of Tahoe-LAFS, an extremely reliable decentralized storage system. Get it with "pip install tahoe-lafs", or download a tarball here: @@ -18,22 +18,16 @@ unique security and fault-tolerance properties: The previous stable release of Tahoe-LAFS was v1.12.1, released on January 18, 2017. -v1.13.0 fixes several problems with the magic-folder frontend, adds ---json to more commands, adds the "tahoe status" and "tahoe -magic-folder status" commands and some internal refactoring. It is -also now possible to have multiple magic folders in a single Tahoe -client. +The v1.14.0 release: makes several Magic Folder improvements (macOS +support, better logs, fewer conflict cases); adds an Eliot streaming +logs endpoint; adds an extension point for storage customization; +makes a bunch of bug-fixes and cleanups. NixOS is a supported +platform; Fedora 29 is no longer a supported platform. Several early +parts of Python3 porting have landed. -A long-standing feature to improve the share-placement algorithm -("servers of happiness") has been updated and merged. "tahoe backup" -now reports progress. - -New clients can now be added to a grid using "magic wormhole" [14], a -SPAKE2-based method of securely connecting two computers. This adds -the "tahoe invite" command for a current grid-participant to invite a -new client and a corresponding option "tahoe create-client --join" for -the other side. These use a server at ``wormhole.tahoe-lafs.org`` -currently operated by Least Authority. +DEPRECATED: ``tahoe start``, ``tahoe stop``, ``tahoe restart`` and +``tahoe daemonize`` are all deprecated in favour of using ``tahoe +run`` (along with a suitable process manager if desired). Please see ``NEWS.rst`` for a more complete list of changes.
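The DEPRECATED paragraph above points users at ``tahoe run`` plus "a suitable process manager" — in practice systemd, supervisord, runit, or similar. As a hedged illustration of the idea only (the node directory and the exact argument handling of ``tahoe run`` are assumptions, not taken from these release notes):

    # Toy process-manager sketch for "tahoe run"; a real deployment would
    # use systemd/supervisord instead. Assumes "tahoe" is on PATH and that
    # "tahoe run" accepts the node directory as a positional argument.
    import subprocess
    import time

    NODE_DIR = "/var/lib/tahoe"  # hypothetical node directory

    while True:
        status = subprocess.call(["tahoe", "run", NODE_DIR])
        print("tahoe run exited with status %s; restarting in 5s" % (status,))
        time.sleep(5)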
@@ -159,12 +153,12 @@ May 17, 2018 San Francisco, California, USA -[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.13.0/NEWS.rst +[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.14.0/NEWS.rst [2] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst [3] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects -[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.13.0/COPYING.GPL -[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.13.0/COPYING.TGPPL.rst -[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.13.0/INSTALL.html +[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.14.0/COPYING.GPL +[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.14.0/COPYING.TGPPL.rst +[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.14.0/INSTALL.html [7] https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev [8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap [9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS From 843fca4a6a3b48cb4d4474a5c7ea8180171fdcb3 Mon Sep 17 00:00:00 2001 From: meejah Date: Wed, 11 Mar 2020 17:38:50 -0600 Subject: [PATCH 0033/1472] update NEWS.txt for release --- NEWS.rst | 109 ++++++++++++++++++++++++++++++++ newsfragments/1432.feature | 1 - newsfragments/1827.removed | 1 - newsfragments/1893.minor | 0 newsfragments/2098.removed | 1 - newsfragments/2239.removed | 1 - newsfragments/2266.minor | 0 newsfragments/2283.minor | 0 newsfragments/2392.removed | 1 - newsfragments/2479.other | 1 - newsfragments/2749.installation | 1 - newsfragments/2766.minor | 0 newsfragments/2771.other | 1 - newsfragments/2870.bugfix | 1 - newsfragments/2908.other | 1 - newsfragments/2920.other | 1 - newsfragments/2933.other | 1 - newsfragments/2935.bugfix | 1 - newsfragments/2936.bugfix | 1 - newsfragments/2941.bugfix | 1 - newsfragments/2942.feature | 1 - newsfragments/2944.bugfix | 1 - newsfragments/2950.bugfix | 1 - newsfragments/2955.feature | 1 - newsfragments/2955.removed | 1 - newsfragments/2956.other | 1 - newsfragments/2957.installation | 1 - newsfragments/2958.other | 1 - newsfragments/2959.other | 1 - newsfragments/2960.other | 1 - newsfragments/2961.other | 1 - newsfragments/2962.other | 1 - newsfragments/2965.bugfix | 1 - newsfragments/2966.other | 1 - newsfragments/2967.other | 1 - newsfragments/2968.other | 1 - newsfragments/2969.other | 1 - newsfragments/2970.other | 1 - newsfragments/2971.other | 1 - newsfragments/2972.feature | 1 - newsfragments/2973.other | 1 - newsfragments/2974.other | 1 - newsfragments/2975.feature | 1 - newsfragments/2976.bugfix | 1 - newsfragments/2977.other | 1 - newsfragments/2980.minor | 0 newsfragments/2981.other | 0 newsfragments/2982.other | 0 newsfragments/2985.minor | 0 newsfragments/2986.minor | 0 newsfragments/2987.minor | 0 newsfragments/2988.minor | 0 newsfragments/2989.minor | 0 newsfragments/2990.minor | 0 newsfragments/2991.minor | 0 newsfragments/2992.minor | 0 newsfragments/2995.minor | 0 newsfragments/2997.bugfix | 1 - newsfragments/3000.minor | 0 newsfragments/3001.other | 1 - newsfragments/3002.other | 1 - newsfragments/3003.other | 1 - newsfragments/3004.minor | 0 newsfragments/3005.minor | 0 newsfragments/3006.feature | 1 - newsfragments/3007.minor | 0 newsfragments/3008.other | 1 - newsfragments/3009.other | 1 - newsfragments/3010.other | 1 - newsfragments/3011.other | 1 - newsfragments/3012.minor | 0 newsfragments/3013.other | 1 - newsfragments/3014.other | 1 - newsfragments/3015.other | 1 - newsfragments/3016.other | 1 - 
newsfragments/3017.minor | 0 newsfragments/3019.other | 1 - newsfragments/3020.other | 1 - newsfragments/3021.minor | 0 newsfragments/3023.minor | 0 newsfragments/3024.other | 1 - newsfragments/3025.minor | 0 newsfragments/3026.minor | 0 newsfragments/3027.other | 1 - newsfragments/3028.other | 1 - newsfragments/3029.minor | 0 newsfragments/3030.other | 1 - newsfragments/3031.other | 1 - newsfragments/3036.minor | 0 newsfragments/3038.minor | 0 newsfragments/3041.feature | 1 - newsfragments/3042.other | 1 - newsfragments/3043.other | 1 - newsfragments/3048.minor | 0 newsfragments/3049.feature | 1 - newsfragments/3051.feature | 1 - newsfragments/3053.feature | 1 - newsfragments/3054.feature | 1 - newsfragments/3086.minor | 0 newsfragments/3097.minor | 0 newsfragments/3111.minor | 0 newsfragments/3118.minor | 0 newsfragments/3119.minor | 0 newsfragments/3152.other | 1 - newsfragments/3184.feature | 1 - newsfragments/3227.minor | 0 newsfragments/3228.bugfix | 1 - newsfragments/3229.minor | 0 newsfragments/3232.minor | 0 newsfragments/3233.minor | 0 newsfragments/3234.other | 1 - newsfragments/3235.feature | 1 - newsfragments/3237.minor | 1 - newsfragments/3238.minor | 0 newsfragments/3239.minor | 0 newsfragments/3240.minor | 0 newsfragments/3242.minor | 0 newsfragments/3243.minor | 0 newsfragments/3245.minor | 0 newsfragments/3246.minor | 0 newsfragments/3248.minor | 0 newsfragments/3250.minor | 0 newsfragments/3252.minor | 0 newsfragments/3255.minor | 0 newsfragments/3256.minor | 0 newsfragments/3259.minor | 0 newsfragments/3261.minor | 0 newsfragments/3262.minor | 0 newsfragments/3263.minor | 0 newsfragments/3264.minor | 0 newsfragments/3265.minor | 0 newsfragments/3266.other | 1 - newsfragments/3267.minor | 0 newsfragments/3268.minor | 0 newsfragments/3271.minor | 0 newsfragments/3272.minor | 0 newsfragments/3273.removed | 1 - newsfragments/3274.minor | 0 newsfragments/3275.minor | 1 - newsfragments/3276.minor | 0 newsfragments/3279.minor | 0 newsfragments/3281.minor | 0 newsfragments/3282.minor | 0 newsfragments/3285.minor | 0 144 files changed, 109 insertions(+), 76 deletions(-) delete mode 100644 newsfragments/1432.feature delete mode 100644 newsfragments/1827.removed delete mode 100644 newsfragments/1893.minor delete mode 100644 newsfragments/2098.removed delete mode 100644 newsfragments/2239.removed delete mode 100644 newsfragments/2266.minor delete mode 100644 newsfragments/2283.minor delete mode 100644 newsfragments/2392.removed delete mode 100644 newsfragments/2479.other delete mode 100644 newsfragments/2749.installation delete mode 100644 newsfragments/2766.minor delete mode 100644 newsfragments/2771.other delete mode 100644 newsfragments/2870.bugfix delete mode 100644 newsfragments/2908.other delete mode 100644 newsfragments/2920.other delete mode 100644 newsfragments/2933.other delete mode 100644 newsfragments/2935.bugfix delete mode 100644 newsfragments/2936.bugfix delete mode 100644 newsfragments/2941.bugfix delete mode 100644 newsfragments/2942.feature delete mode 100644 newsfragments/2944.bugfix delete mode 100644 newsfragments/2950.bugfix delete mode 100644 newsfragments/2955.feature delete mode 100644 newsfragments/2955.removed delete mode 100644 newsfragments/2956.other delete mode 100644 newsfragments/2957.installation delete mode 100644 newsfragments/2958.other delete mode 100644 newsfragments/2959.other delete mode 100644 newsfragments/2960.other delete mode 100644 newsfragments/2961.other delete mode 100644 newsfragments/2962.other delete mode 100644 
newsfragments/2965.bugfix delete mode 100644 newsfragments/2966.other delete mode 100644 newsfragments/2967.other delete mode 100644 newsfragments/2968.other delete mode 100644 newsfragments/2969.other delete mode 100644 newsfragments/2970.other delete mode 100644 newsfragments/2971.other delete mode 100644 newsfragments/2972.feature delete mode 100644 newsfragments/2973.other delete mode 100644 newsfragments/2974.other delete mode 100644 newsfragments/2975.feature delete mode 100644 newsfragments/2976.bugfix delete mode 100644 newsfragments/2977.other delete mode 100644 newsfragments/2980.minor delete mode 100644 newsfragments/2981.other delete mode 100644 newsfragments/2982.other delete mode 100644 newsfragments/2985.minor delete mode 100644 newsfragments/2986.minor delete mode 100644 newsfragments/2987.minor delete mode 100644 newsfragments/2988.minor delete mode 100644 newsfragments/2989.minor delete mode 100644 newsfragments/2990.minor delete mode 100644 newsfragments/2991.minor delete mode 100644 newsfragments/2992.minor delete mode 100644 newsfragments/2995.minor delete mode 100644 newsfragments/2997.bugfix delete mode 100644 newsfragments/3000.minor delete mode 100644 newsfragments/3001.other delete mode 100644 newsfragments/3002.other delete mode 100644 newsfragments/3003.other delete mode 100644 newsfragments/3004.minor delete mode 100644 newsfragments/3005.minor delete mode 100644 newsfragments/3006.feature delete mode 100644 newsfragments/3007.minor delete mode 100644 newsfragments/3008.other delete mode 100644 newsfragments/3009.other delete mode 100644 newsfragments/3010.other delete mode 100644 newsfragments/3011.other delete mode 100644 newsfragments/3012.minor delete mode 100644 newsfragments/3013.other delete mode 100644 newsfragments/3014.other delete mode 100644 newsfragments/3015.other delete mode 100644 newsfragments/3016.other delete mode 100644 newsfragments/3017.minor delete mode 100644 newsfragments/3019.other delete mode 100644 newsfragments/3020.other delete mode 100644 newsfragments/3021.minor delete mode 100644 newsfragments/3023.minor delete mode 100644 newsfragments/3024.other delete mode 100644 newsfragments/3025.minor delete mode 100644 newsfragments/3026.minor delete mode 100644 newsfragments/3027.other delete mode 100644 newsfragments/3028.other delete mode 100644 newsfragments/3029.minor delete mode 100644 newsfragments/3030.other delete mode 100644 newsfragments/3031.other delete mode 100644 newsfragments/3036.minor delete mode 100644 newsfragments/3038.minor delete mode 100644 newsfragments/3041.feature delete mode 100644 newsfragments/3042.other delete mode 100644 newsfragments/3043.other delete mode 100644 newsfragments/3048.minor delete mode 100644 newsfragments/3049.feature delete mode 100644 newsfragments/3051.feature delete mode 100644 newsfragments/3053.feature delete mode 100644 newsfragments/3054.feature delete mode 100644 newsfragments/3086.minor delete mode 100644 newsfragments/3097.minor delete mode 100644 newsfragments/3111.minor delete mode 100644 newsfragments/3118.minor delete mode 100644 newsfragments/3119.minor delete mode 100644 newsfragments/3152.other delete mode 100644 newsfragments/3184.feature delete mode 100644 newsfragments/3227.minor delete mode 100644 newsfragments/3228.bugfix delete mode 100644 newsfragments/3229.minor delete mode 100644 newsfragments/3232.minor delete mode 100644 newsfragments/3233.minor delete mode 100644 newsfragments/3234.other delete mode 100644 newsfragments/3235.feature delete mode 100644 
newsfragments/3237.minor delete mode 100644 newsfragments/3238.minor delete mode 100644 newsfragments/3239.minor delete mode 100644 newsfragments/3240.minor delete mode 100644 newsfragments/3242.minor delete mode 100644 newsfragments/3243.minor delete mode 100644 newsfragments/3245.minor delete mode 100644 newsfragments/3246.minor delete mode 100644 newsfragments/3248.minor delete mode 100644 newsfragments/3250.minor delete mode 100644 newsfragments/3252.minor delete mode 100644 newsfragments/3255.minor delete mode 100644 newsfragments/3256.minor delete mode 100644 newsfragments/3259.minor delete mode 100644 newsfragments/3261.minor delete mode 100644 newsfragments/3262.minor delete mode 100644 newsfragments/3263.minor delete mode 100644 newsfragments/3264.minor delete mode 100644 newsfragments/3265.minor delete mode 100644 newsfragments/3266.other delete mode 100644 newsfragments/3267.minor delete mode 100644 newsfragments/3268.minor delete mode 100644 newsfragments/3271.minor delete mode 100644 newsfragments/3272.minor delete mode 100644 newsfragments/3273.removed delete mode 100644 newsfragments/3274.minor delete mode 100644 newsfragments/3275.minor delete mode 100644 newsfragments/3276.minor delete mode 100644 newsfragments/3279.minor delete mode 100644 newsfragments/3281.minor delete mode 100644 newsfragments/3282.minor delete mode 100644 newsfragments/3285.minor diff --git a/NEWS.rst b/NEWS.rst index 7525a4285..2ac448999 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -5,6 +5,115 @@ User-Visible Changes in Tahoe-LAFS ================================== .. towncrier start line +Release 1.13.0.post1475 (2020-03-11) +'''''''''''''''''''''''''''''''''''' + +Features +-------- + +- Magic-Folders are now supported on macOS. (`#1432 `_) +- Add a "tox -e draftnews" which runs towncrier in draft mode (`#2942 `_) +- Fedora 29 is now tested as part of the project's continuous integration system. (`#2955 `_) +- The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports. (`#2972 `_) +- The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging. (`#2975 `_) +- The web API now publishes streaming Eliot logs via a token-protected WebSocket at /private/logs/v1. (`#3006 `_) +- End-to-end in-memory tests for websocket features (`#3041 `_) +- allmydata.interfaces.IFoolscapStoragePlugin has been introduced, an extension point for customizing the storage protocol. (`#3049 `_) +- Static storage server "announcements" in ``private/servers.yaml`` are now individually logged and ignored if they cannot be interpreted. (`#3051 `_) +- Storage servers can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and offer them to clients. (`#3053 `_) +- Storage clients can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and use them to negotiate with servers. (`#3054 `_) +- The [storage] configuration section now accepts a boolean *anonymous* item to enable or disable anonymous storage access. The default behavior remains unchanged. 
(`#3184 `_)
+- Enable the helper when creating a node with `tahoe create-node --helper` (`#3235 `_)
+
+
+Bug Fixes
+---------
+
+- refactor initialization code to be more async-friendly (`#2870 `_)
+- Configuration-checking code wasn't being called due to indenting (`#2935 `_)
+- refactor configuration handling out of Node into _Config (`#2936 `_)
+- "tox -e codechecks" no longer dirties the working tree. (`#2941 `_)
+- Updated the Tor release key, used by the integration tests. (`#2944 `_)
+- `tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source. (`#2950 `_)
+- Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up. (`#2965 `_)
+- The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed. (`#2976 `_)
+- The Magic-Folder frontend is now more responsive to subtree changes on Windows. (`#2997 `_)
+- remove ancient bundled jquery and d3, and the "download timeline" feature they support (`#3228 `_)
+
+
+Dependency/Installation Changes
+-------------------------------
+
+- Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied. (`#2749 `_)
+- Tahoe-LAFS now depends on Twisted 16.6 or newer. (`#2957 `_)
+
+
+Removed Features
+----------------
+
+- "tahoe rm", an old alias for "tahoe unlink", has been removed. (`#1827 `_)
+- The direct dependencies on pyutil and zbase32 have been removed. (`#2098 `_)
+- Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. (`#2239 `_)
+- The redundant "pypywin32" dependency has been removed. (`#2392 `_)
+- Fedora 27 is no longer tested as part of the project's continuous integration system. (`#2955 `_)
+- "tahoe start", "tahoe daemonize", "tahoe restart", and "tahoe stop" are now deprecated in favor of using "tahoe run", possibly with a third-party process manager. (`#3273 `_)
+
+
+Other Changes
+-------------
+
+- Tahoe-LAFS now tests for PyPy compatibility on CI. (`#2479 `_)
+- Tahoe-LAFS now requires Twisted 18.4.0 or newer. (`#2771 `_)
+- Tahoe-LAFS now uses towncrier to maintain the NEWS file. (`#2908 `_)
+- The release process document has been updated. (`#2920 `_)
+- allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions. (`#2933 `_)
+- The Tox configuration has been fixed to work around a problem on Windows CI. (`#2956 `_)
+- The PyInstaller CI job now works around a pip/pyinstaller incompatibility. (`#2958 `_)
+- Some CI jobs for integration tests have been moved from TravisCI to CircleCI. (`#2959 `_)
+- Several warnings from a new release of pyflakes have been fixed. (`#2960 `_)
+- Some Slackware 14.2 continuous integration problems have been resolved. (`#2961 `_)
+- Some macOS continuous integration failures have been fixed. (`#2962 `_)
+- The NoNetworkGrid implementation has been somewhat improved. (`#2966 `_)
+- A bug in the test suite for the create-alias command has been fixed. (`#2967 `_)
+- The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs. (`#2968 `_)
+- The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches. (`#2969 `_)
+- The integration tests now correctly document the `--keep-tempdir` option. (`#2970 `_)
+- A misuse of super() in the integration tests has been fixed. (`#2971 `_)
+- Several utilities to facilitate the use of the Eliot causal logging library have been introduced. (`#2973 `_)
+- The Windows CI configuration has been tweaked. (`#2974 `_)
+- The Magic-Folder frontend has had additional logging improvements. (`#2977 `_)
+- (`#2981 `_, `#2982 `_)
+- Added a simple syntax checker so that once a file has reached python3 compatibility, it will not regress. (`#3001 `_)
+- Converted all uses of the print statement to the print function in the ./misc/ directory. (`#3002 `_)
+- The contributor guidelines are now linked from the GitHub pull request creation page. (`#3003 `_)
+- Updated the testing code to use the print function instead of the print statement. (`#3008 `_)
+- Replaced print statement with print function for all tahoe_* scripts. (`#3009 `_)
+- Replaced all remaining instances of the print statement with the print function. (`#3010 `_)
+- Replace StringIO imports with six.moves. (`#3011 `_)
+- Updated all Python files to use PEP-3110 exception syntax for Python3 compatibility. (`#3013 `_)
+- Update raise syntax for Python3 compatibility. (`#3014 `_)
+- Updated instances of octal literals to use the format 0o123 for Python3 compatibility. (`#3015 `_)
+- allmydata.test.no_network, allmydata.test.test_system, and allmydata.test.web.test_introducer are now more reliable with respect to bound address collisions. (`#3016 `_)
+- Removed tuple unpacking from function and lambda definitions for Python3 compatibility. (`#3019 `_)
+- Updated Python2 long numeric literals for Python3 compatibility. (`#3020 `_)
+- CircleCI jobs are now faster as a result of pre-building configured Docker images for the CI jobs. (`#3024 `_)
+- Removed use of backticks for "repr" for Python3 compatibility. (`#3027 `_)
+- Updated string literal syntax for Python3 compatibility. (`#3028 `_)
+- Updated CI to enforce Python3 syntax for entire repo. (`#3030 `_)
+- Replaced pycryptopp with cryptography. (`#3031 `_)
+- All old-style classes ported to new-style. (`#3042 `_)
+- Whitelisted "/bin/mv" as a command for codechecks performed by tox. This fixes a current warning and prevents future errors (for tox 4). (`#3043 `_)
+- Progress towards Python 3 compatibility is now visible at . (`#3152 `_)
+- Collect coverage information from integration tests (`#3234 `_)
+- NixOS is now a supported Tahoe-LAFS platform.
(`#3266 `_) + + +Misc/Other +---------- + +- `#1893 `_, `#2266 `_, `#2283 `_, `#2766 `_, `#2980 `_, `#2985 `_, `#2986 `_, `#2987 `_, `#2988 `_, `#2989 `_, `#2990 `_, `#2991 `_, `#2992 `_, `#2995 `_, `#3000 `_, `#3004 `_, `#3005 `_, `#3007 `_, `#3012 `_, `#3017 `_, `#3021 `_, `#3023 `_, `#3025 `_, `#3026 `_, `#3029 `_, `#3036 `_, `#3038 `_, `#3048 `_, `#3086 `_, `#3097 `_, `#3111 `_, `#3118 `_, `#3119 `_, `#3227 `_, `#3229 `_, `#3232 `_, `#3233 `_, `#3237 `_, `#3238 `_, `#3239 `_, `#3240 `_, `#3242 `_, `#3243 `_, `#3245 `_, `#3246 `_, `#3248 `_, `#3250 `_, `#3252 `_, `#3255 `_, `#3256 `_, `#3259 `_, `#3261 `_, `#3262 `_, `#3263 `_, `#3264 `_, `#3265 `_, `#3267 `_, `#3268 `_, `#3271 `_, `#3272 `_, `#3274 `_, `#3275 `_, `#3276 `_, `#3279 `_, `#3281 `_, `#3282 `_, `#3285 `_ + + Release 1.13.0 (05-August-2018) ''''''''''''''''''''''''''''''' diff --git a/newsfragments/1432.feature b/newsfragments/1432.feature deleted file mode 100644 index 2bb9a8b01..000000000 --- a/newsfragments/1432.feature +++ /dev/null @@ -1 +0,0 @@ -Magic-Folders are now supported on macOS. diff --git a/newsfragments/1827.removed b/newsfragments/1827.removed deleted file mode 100644 index 647655157..000000000 --- a/newsfragments/1827.removed +++ /dev/null @@ -1 +0,0 @@ -"tahoe rm", an old alias for "tahoe unlink", has been removed. diff --git a/newsfragments/1893.minor b/newsfragments/1893.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2098.removed b/newsfragments/2098.removed deleted file mode 100644 index 184d512e4..000000000 --- a/newsfragments/2098.removed +++ /dev/null @@ -1 +0,0 @@ -The direct dependencies on pyutil and zbase32 have been removed. \ No newline at end of file diff --git a/newsfragments/2239.removed b/newsfragments/2239.removed deleted file mode 100644 index 2f46ffe8e..000000000 --- a/newsfragments/2239.removed +++ /dev/null @@ -1 +0,0 @@ -Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. diff --git a/newsfragments/2266.minor b/newsfragments/2266.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2283.minor b/newsfragments/2283.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2392.removed b/newsfragments/2392.removed deleted file mode 100644 index 7343f600f..000000000 --- a/newsfragments/2392.removed +++ /dev/null @@ -1 +0,0 @@ -The redundant "pypywin32" dependency has been removed. \ No newline at end of file diff --git a/newsfragments/2479.other b/newsfragments/2479.other deleted file mode 100644 index dc37daf4a..000000000 --- a/newsfragments/2479.other +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now tests for PyPy compatibility on CI. diff --git a/newsfragments/2749.installation b/newsfragments/2749.installation deleted file mode 100644 index 615d505b1..000000000 --- a/newsfragments/2749.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied. diff --git a/newsfragments/2766.minor b/newsfragments/2766.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2771.other b/newsfragments/2771.other deleted file mode 100644 index a72dced99..000000000 --- a/newsfragments/2771.other +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now requires Twisted 18.4.0 or newer. 
\ No newline at end of file diff --git a/newsfragments/2870.bugfix b/newsfragments/2870.bugfix deleted file mode 100644 index d7f1b9135..000000000 --- a/newsfragments/2870.bugfix +++ /dev/null @@ -1 +0,0 @@ -refactor initialization code to be more async-friendly \ No newline at end of file diff --git a/newsfragments/2908.other b/newsfragments/2908.other deleted file mode 100644 index ccda51784..000000000 --- a/newsfragments/2908.other +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now uses towncrier to maintain the NEWS file. diff --git a/newsfragments/2920.other b/newsfragments/2920.other deleted file mode 100644 index b52772368..000000000 --- a/newsfragments/2920.other +++ /dev/null @@ -1 +0,0 @@ -The release process document has been updated. diff --git a/newsfragments/2933.other b/newsfragments/2933.other deleted file mode 100644 index c0f0ba8b8..000000000 --- a/newsfragments/2933.other +++ /dev/null @@ -1 +0,0 @@ -allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions. diff --git a/newsfragments/2935.bugfix b/newsfragments/2935.bugfix deleted file mode 100644 index 23b985259..000000000 --- a/newsfragments/2935.bugfix +++ /dev/null @@ -1 +0,0 @@ -Configuration-checking code wasn't being called due to indenting \ No newline at end of file diff --git a/newsfragments/2936.bugfix b/newsfragments/2936.bugfix deleted file mode 100644 index 8bca69813..000000000 --- a/newsfragments/2936.bugfix +++ /dev/null @@ -1 +0,0 @@ -refactor configuration handling out of Node into _Config \ No newline at end of file diff --git a/newsfragments/2941.bugfix b/newsfragments/2941.bugfix deleted file mode 100644 index 201e7d3ac..000000000 --- a/newsfragments/2941.bugfix +++ /dev/null @@ -1 +0,0 @@ -"tox -e codechecks" no longer dirties the working tree. diff --git a/newsfragments/2942.feature b/newsfragments/2942.feature deleted file mode 100644 index 1655a0330..000000000 --- a/newsfragments/2942.feature +++ /dev/null @@ -1 +0,0 @@ -Add a "tox -e draftnews" which runs towncrier in draft mode \ No newline at end of file diff --git a/newsfragments/2944.bugfix b/newsfragments/2944.bugfix deleted file mode 100644 index eb147950b..000000000 --- a/newsfragments/2944.bugfix +++ /dev/null @@ -1 +0,0 @@ -Updated the Tor release key, used by the integration tests. \ No newline at end of file diff --git a/newsfragments/2950.bugfix b/newsfragments/2950.bugfix deleted file mode 100644 index 8df25e4ff..000000000 --- a/newsfragments/2950.bugfix +++ /dev/null @@ -1 +0,0 @@ -`tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source. diff --git a/newsfragments/2955.feature b/newsfragments/2955.feature deleted file mode 100644 index ad3aeba4d..000000000 --- a/newsfragments/2955.feature +++ /dev/null @@ -1 +0,0 @@ -Fedora 29 is now tested as part of the project's continuous integration system. \ No newline at end of file diff --git a/newsfragments/2955.removed b/newsfragments/2955.removed deleted file mode 100644 index 71c187d20..000000000 --- a/newsfragments/2955.removed +++ /dev/null @@ -1 +0,0 @@ -Fedora 27 is no longer tested as part of the project's continuous integration system. \ No newline at end of file diff --git a/newsfragments/2956.other b/newsfragments/2956.other deleted file mode 100644 index c69807477..000000000 --- a/newsfragments/2956.other +++ /dev/null @@ -1 +0,0 @@ -The Tox configuration has been fixed to work around a problem on Windows CI. 
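The next fragment (2957) records the Twisted floor moving to 16.6, while 2749 above removed the start-up assertions that used to enforce such floors — so checking the installed version is now left to the administrator. A minimal sketch, assuming only that Twisted itself is importable:

    # Print the installed Twisted version so it can be compared by hand
    # against the documented minimums (16.6 per fragment 2957, and 18.4.0
    # per fragment 2771 in the NEWS text above).
    import twisted
    print(twisted.version.base())  # e.g. "18.4.0"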
diff --git a/newsfragments/2957.installation b/newsfragments/2957.installation deleted file mode 100644 index c3d2dff9a..000000000 --- a/newsfragments/2957.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now depends on Twisted 16.6 or newer. \ No newline at end of file diff --git a/newsfragments/2958.other b/newsfragments/2958.other deleted file mode 100644 index d4a7ca4cc..000000000 --- a/newsfragments/2958.other +++ /dev/null @@ -1 +0,0 @@ -The PyInstaller CI job now works around a pip/pyinstaller incompatibility. \ No newline at end of file diff --git a/newsfragments/2959.other b/newsfragments/2959.other deleted file mode 100644 index a8da6337a..000000000 --- a/newsfragments/2959.other +++ /dev/null @@ -1 +0,0 @@ -Some CI jobs for integration tests have been moved from TravisCI to CircleCI. diff --git a/newsfragments/2960.other b/newsfragments/2960.other deleted file mode 100644 index 2ef00c922..000000000 --- a/newsfragments/2960.other +++ /dev/null @@ -1 +0,0 @@ -Several warnings from a new release of pyflakes have been fixed. diff --git a/newsfragments/2961.other b/newsfragments/2961.other deleted file mode 100644 index e299bb020..000000000 --- a/newsfragments/2961.other +++ /dev/null @@ -1 +0,0 @@ -Some Slackware 14.2 continuous integration problems have been resolved. diff --git a/newsfragments/2962.other b/newsfragments/2962.other deleted file mode 100644 index de6cce22f..000000000 --- a/newsfragments/2962.other +++ /dev/null @@ -1 +0,0 @@ -Some macOS continuous integration failures have been fixed. diff --git a/newsfragments/2965.bugfix b/newsfragments/2965.bugfix deleted file mode 100644 index 58d4582e8..000000000 --- a/newsfragments/2965.bugfix +++ /dev/null @@ -1 +0,0 @@ -Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up. diff --git a/newsfragments/2966.other b/newsfragments/2966.other deleted file mode 100644 index 92b5437f6..000000000 --- a/newsfragments/2966.other +++ /dev/null @@ -1 +0,0 @@ -The NoNetworkGrid implementation has been somewhat improved. diff --git a/newsfragments/2967.other b/newsfragments/2967.other deleted file mode 100644 index 7ee8118b3..000000000 --- a/newsfragments/2967.other +++ /dev/null @@ -1 +0,0 @@ -A bug in the test suite for the create-alias command has been fixed. diff --git a/newsfragments/2968.other b/newsfragments/2968.other deleted file mode 100644 index ca8f82d7f..000000000 --- a/newsfragments/2968.other +++ /dev/null @@ -1 +0,0 @@ -The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs. \ No newline at end of file diff --git a/newsfragments/2969.other b/newsfragments/2969.other deleted file mode 100644 index 690b5bc78..000000000 --- a/newsfragments/2969.other +++ /dev/null @@ -1 +0,0 @@ -The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches. diff --git a/newsfragments/2970.other b/newsfragments/2970.other deleted file mode 100644 index 881a1dd3b..000000000 --- a/newsfragments/2970.other +++ /dev/null @@ -1 +0,0 @@ -The integration tests now correctly document the `--keep-tempdir` option. \ No newline at end of file diff --git a/newsfragments/2971.other b/newsfragments/2971.other deleted file mode 100644 index 920ff983c..000000000 --- a/newsfragments/2971.other +++ /dev/null @@ -1 +0,0 @@ -A misuse of super() in the integration tests has been fixed. 
\ No newline at end of file diff --git a/newsfragments/2972.feature b/newsfragments/2972.feature deleted file mode 100644 index 94ed65ca3..000000000 --- a/newsfragments/2972.feature +++ /dev/null @@ -1 +0,0 @@ -The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports. diff --git a/newsfragments/2973.other b/newsfragments/2973.other deleted file mode 100644 index 4bd9b09a3..000000000 --- a/newsfragments/2973.other +++ /dev/null @@ -1 +0,0 @@ -Several utilities to facilitate the use of the Eliot causal logging library have been introduced. \ No newline at end of file diff --git a/newsfragments/2974.other b/newsfragments/2974.other deleted file mode 100644 index 80905b7c8..000000000 --- a/newsfragments/2974.other +++ /dev/null @@ -1 +0,0 @@ -The Windows CI configuration has been tweaked. \ No newline at end of file diff --git a/newsfragments/2975.feature b/newsfragments/2975.feature deleted file mode 100644 index 596a11d4e..000000000 --- a/newsfragments/2975.feature +++ /dev/null @@ -1 +0,0 @@ -The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging. diff --git a/newsfragments/2976.bugfix b/newsfragments/2976.bugfix deleted file mode 100644 index 69687630c..000000000 --- a/newsfragments/2976.bugfix +++ /dev/null @@ -1 +0,0 @@ -The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed. diff --git a/newsfragments/2977.other b/newsfragments/2977.other deleted file mode 100644 index a30aba7a6..000000000 --- a/newsfragments/2977.other +++ /dev/null @@ -1 +0,0 @@ -The Magic-Folder frontend has had additional logging improvements. 
\ No newline at end of file diff --git a/newsfragments/2980.minor b/newsfragments/2980.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2981.other b/newsfragments/2981.other deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2982.other b/newsfragments/2982.other deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2985.minor b/newsfragments/2985.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2986.minor b/newsfragments/2986.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2987.minor b/newsfragments/2987.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2988.minor b/newsfragments/2988.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2989.minor b/newsfragments/2989.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2990.minor b/newsfragments/2990.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2991.minor b/newsfragments/2991.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2992.minor b/newsfragments/2992.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2995.minor b/newsfragments/2995.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/2997.bugfix b/newsfragments/2997.bugfix deleted file mode 100644 index 5b8615575..000000000 --- a/newsfragments/2997.bugfix +++ /dev/null @@ -1 +0,0 @@ -The Magic-Folder frontend is now more responsive to subtree changes on Windows. diff --git a/newsfragments/3000.minor b/newsfragments/3000.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3001.other b/newsfragments/3001.other deleted file mode 100644 index 8607600d1..000000000 --- a/newsfragments/3001.other +++ /dev/null @@ -1 +0,0 @@ -Added a simple sytax checker so that once a file has reached python3 compatibility, it will not regress. diff --git a/newsfragments/3002.other b/newsfragments/3002.other deleted file mode 100644 index a10cad243..000000000 --- a/newsfragments/3002.other +++ /dev/null @@ -1 +0,0 @@ -Converted all uses of the print statement to the print function in the ./misc/ directory. diff --git a/newsfragments/3003.other b/newsfragments/3003.other deleted file mode 100644 index e1875f5b5..000000000 --- a/newsfragments/3003.other +++ /dev/null @@ -1 +0,0 @@ -The contributor guidelines are now linked from the GitHub pull request creation page. diff --git a/newsfragments/3004.minor b/newsfragments/3004.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3005.minor b/newsfragments/3005.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3006.feature b/newsfragments/3006.feature deleted file mode 100644 index d9ce19b54..000000000 --- a/newsfragments/3006.feature +++ /dev/null @@ -1 +0,0 @@ -The web API now publishes streaming Eliot logs via a token-protected WebSocket at /private/logs/v1. \ No newline at end of file diff --git a/newsfragments/3007.minor b/newsfragments/3007.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3008.other b/newsfragments/3008.other deleted file mode 100644 index c5947100e..000000000 --- a/newsfragments/3008.other +++ /dev/null @@ -1 +0,0 @@ -Updated the testing code to use the print function instead of the print statement. 
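A note on the pattern running through this long stretch of deletions: towncrier consumed each ``newsfragments/<ticket>.<suffix>`` file into the matching section of NEWS.rst and then removed it; empty ``.minor`` fragments contribute only a bare ticket link. The suffix-to-section mapping, read off the NEWS.rst hunk above and sketched here as plain Python for reference (towncrier's real configuration lives in ``towncrier.pyproject.toml``, not in this form):

    # Fragment suffix -> NEWS.rst section heading, as seen in this release.
    SECTIONS = {
        "feature": "Features",
        "bugfix": "Bug Fixes",
        "installation": "Dependency/Installation Changes",
        "removed": "Removed Features",
        "other": "Other Changes",
        "minor": "Misc/Other",  # empty placeholder files; ticket link only
    }

    def section_for(fragment_name):
        """Map a fragment filename like '2239.removed' to its heading."""
        _ticket, _dot, suffix = fragment_name.partition(".")
        return SECTIONS[suffix]

    assert section_for("2239.removed") == "Removed Features"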
diff --git a/newsfragments/3009.other b/newsfragments/3009.other deleted file mode 100644 index 4115bdf90..000000000 --- a/newsfragments/3009.other +++ /dev/null @@ -1 +0,0 @@ -Replaced print statement with print fuction for all tahoe_* scripts. diff --git a/newsfragments/3010.other b/newsfragments/3010.other deleted file mode 100644 index fa1c406d1..000000000 --- a/newsfragments/3010.other +++ /dev/null @@ -1 +0,0 @@ -Replaced all remaining instances of the print statement with the print function. diff --git a/newsfragments/3011.other b/newsfragments/3011.other deleted file mode 100644 index b6a553df2..000000000 --- a/newsfragments/3011.other +++ /dev/null @@ -1 +0,0 @@ -Replace StringIO imports with six.moves. diff --git a/newsfragments/3012.minor b/newsfragments/3012.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3013.other b/newsfragments/3013.other deleted file mode 100644 index fd5a34e30..000000000 --- a/newsfragments/3013.other +++ /dev/null @@ -1 +0,0 @@ -Updated all Python files to use PEP-3110 exception syntax for Python3 compatibility. diff --git a/newsfragments/3014.other b/newsfragments/3014.other deleted file mode 100644 index 7be507e14..000000000 --- a/newsfragments/3014.other +++ /dev/null @@ -1 +0,0 @@ -Update raise syntax for Python3 compatibility. diff --git a/newsfragments/3015.other b/newsfragments/3015.other deleted file mode 100644 index 68841f923..000000000 --- a/newsfragments/3015.other +++ /dev/null @@ -1 +0,0 @@ -Updated instances of octal literals to use the format 0o123 for Python3 compatibility. diff --git a/newsfragments/3016.other b/newsfragments/3016.other deleted file mode 100644 index 0af735c73..000000000 --- a/newsfragments/3016.other +++ /dev/null @@ -1 +0,0 @@ -allmydata.test.no_network, allmydata.test.test_system, and allmydata.test.web.test_introducer are now more reliable with respect to bound address collisions. diff --git a/newsfragments/3017.minor b/newsfragments/3017.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3019.other b/newsfragments/3019.other deleted file mode 100644 index 76b07a5dd..000000000 --- a/newsfragments/3019.other +++ /dev/null @@ -1 +0,0 @@ -Removed tuple unpacking from function and lambda definitions for Python3 compatibility. diff --git a/newsfragments/3020.other b/newsfragments/3020.other deleted file mode 100644 index c66702d59..000000000 --- a/newsfragments/3020.other +++ /dev/null @@ -1 +0,0 @@ -Updated Python2 long numeric literals for Python3 compatibility. diff --git a/newsfragments/3021.minor b/newsfragments/3021.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3023.minor b/newsfragments/3023.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3024.other b/newsfragments/3024.other deleted file mode 100644 index a3ec47ebb..000000000 --- a/newsfragments/3024.other +++ /dev/null @@ -1 +0,0 @@ -CircleCI jobs are now faster as a result of pre-building configured Docker images for the CI jobs. diff --git a/newsfragments/3025.minor b/newsfragments/3025.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3026.minor b/newsfragments/3026.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3027.other b/newsfragments/3027.other deleted file mode 100644 index 8d548608c..000000000 --- a/newsfragments/3027.other +++ /dev/null @@ -1 +0,0 @@ -Removed used of backticks for "repr" for Python3 compatibility. 
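Several fragments in this run (3013, 3014, 3015, 3020, and 3027 just above) describe mechanical Python 3 compatibility rewrites. For readers who don't remember the Python 2-only spellings, a small before/after sampler — illustrative code, not taken from the Tahoe source:

    # Python 3-compatible spellings of the idioms the fragments describe.
    # The Python 2-only forms, shown in comments, are syntax errors on py3.

    print("hello")   # was: print "hello"          (print statement, #3010)
    mode = 0o755     # was: 0755                   (octal literal, #3015)
    big = 2 ** 64    # was: 2L ** 64               (long literal, #3020)

    try:
        raise ValueError("boom")   # was: raise ValueError, "boom"  (#3014)
    except ValueError as e:        # was: except ValueError, e:     (#3013)
        print(repr(e))             # was: print `e`                 (#3027)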
diff --git a/newsfragments/3028.other b/newsfragments/3028.other deleted file mode 100644 index 4381305b2..000000000 --- a/newsfragments/3028.other +++ /dev/null @@ -1 +0,0 @@ -Updated string literal syntax for Python3 compatibility. diff --git a/newsfragments/3029.minor b/newsfragments/3029.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3030.other b/newsfragments/3030.other deleted file mode 100644 index 7c7b1f0b0..000000000 --- a/newsfragments/3030.other +++ /dev/null @@ -1 +0,0 @@ -Updated CI to enforce Python3 syntax for entire repo. diff --git a/newsfragments/3031.other b/newsfragments/3031.other deleted file mode 100644 index e2e24ee59..000000000 --- a/newsfragments/3031.other +++ /dev/null @@ -1 +0,0 @@ -Replaced pycryptopp with cryptography. diff --git a/newsfragments/3036.minor b/newsfragments/3036.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3038.minor b/newsfragments/3038.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3041.feature b/newsfragments/3041.feature deleted file mode 100644 index b78695126..000000000 --- a/newsfragments/3041.feature +++ /dev/null @@ -1 +0,0 @@ -End-to-end in-memory tests for websocket features \ No newline at end of file diff --git a/newsfragments/3042.other b/newsfragments/3042.other deleted file mode 100644 index 5759b9145..000000000 --- a/newsfragments/3042.other +++ /dev/null @@ -1 +0,0 @@ -All old-style classes ported to new-style. \ No newline at end of file diff --git a/newsfragments/3043.other b/newsfragments/3043.other deleted file mode 100644 index edc6bb77e..000000000 --- a/newsfragments/3043.other +++ /dev/null @@ -1 +0,0 @@ -Whitelisted "/bin/mv" as command for codechecks performed by tox. This fixes a current warning and prevents future errors (for tox 4). diff --git a/newsfragments/3048.minor b/newsfragments/3048.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3049.feature b/newsfragments/3049.feature deleted file mode 100644 index 67a4068aa..000000000 --- a/newsfragments/3049.feature +++ /dev/null @@ -1 +0,0 @@ -allmydata.interfaces.IFoolscapStoragePlugin has been introduced, an extension point for customizing the storage protocol. diff --git a/newsfragments/3051.feature b/newsfragments/3051.feature deleted file mode 100644 index 8f4f6f377..000000000 --- a/newsfragments/3051.feature +++ /dev/null @@ -1 +0,0 @@ -Static storage server "announcements" in ``private/servers.yaml`` are now individually logged and ignored if they cannot be interpreted. diff --git a/newsfragments/3053.feature b/newsfragments/3053.feature deleted file mode 100644 index 8882aecb0..000000000 --- a/newsfragments/3053.feature +++ /dev/null @@ -1 +0,0 @@ -Storage servers can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and offer them to clients. \ No newline at end of file diff --git a/newsfragments/3054.feature b/newsfragments/3054.feature deleted file mode 100644 index 2193bffbb..000000000 --- a/newsfragments/3054.feature +++ /dev/null @@ -1 +0,0 @@ -Storage clients can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and use them to negotiate with servers. 
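Fragment 3042 above ("All old-style classes ported to new-style") refers to a Python 2 distinction: a class only gets new-style behavior (a proper ``type``, working descriptors and ``super()``) if something in its hierarchy inherits from ``object``. A minimal illustration, not Tahoe code:

    # On Python 2 the first class is old-style and the second new-style;
    # on Python 3 every class is new-style, so the port is a no-op there.
    class Old:             # py2: old-style; super() and descriptors fail
        pass

    class New(object):     # py2: new-style; this is what the port added
        pass

    print(type(Old), type(New))  # py2: <type 'classobj'> vs <type 'type'>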
diff --git a/newsfragments/3086.minor b/newsfragments/3086.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3097.minor b/newsfragments/3097.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3111.minor b/newsfragments/3111.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3118.minor b/newsfragments/3118.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3119.minor b/newsfragments/3119.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3152.other b/newsfragments/3152.other deleted file mode 100644 index 31ec507ef..000000000 --- a/newsfragments/3152.other +++ /dev/null @@ -1 +0,0 @@ -Progress towards Python 3 compatibility is now visible at . diff --git a/newsfragments/3184.feature b/newsfragments/3184.feature deleted file mode 100644 index 92767a4f2..000000000 --- a/newsfragments/3184.feature +++ /dev/null @@ -1 +0,0 @@ -The [storage] configuration section now accepts a boolean *anonymous* item to enable or disable anonymous storage access. The default behavior remains unchanged. diff --git a/newsfragments/3227.minor b/newsfragments/3227.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3228.bugfix b/newsfragments/3228.bugfix deleted file mode 100644 index da808d23a..000000000 --- a/newsfragments/3228.bugfix +++ /dev/null @@ -1 +0,0 @@ -remove ancient bundled jquery and d3, and the "dowload timeline" feature they support \ No newline at end of file diff --git a/newsfragments/3229.minor b/newsfragments/3229.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3232.minor b/newsfragments/3232.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3233.minor b/newsfragments/3233.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3234.other b/newsfragments/3234.other deleted file mode 100644 index 35dd44a1a..000000000 --- a/newsfragments/3234.other +++ /dev/null @@ -1 +0,0 @@ -Collect coverage information from integration tests diff --git a/newsfragments/3235.feature b/newsfragments/3235.feature deleted file mode 100644 index 57908d4b4..000000000 --- a/newsfragments/3235.feature +++ /dev/null @@ -1 +0,0 @@ -Enable the helper when creating a node with `tahoe create-node --helper` diff --git a/newsfragments/3237.minor b/newsfragments/3237.minor deleted file mode 100644 index 5f0aeb202..000000000 --- a/newsfragments/3237.minor +++ /dev/null @@ -1 +0,0 @@ -Wait for integration-test clients to be ready using status-API \ No newline at end of file diff --git a/newsfragments/3238.minor b/newsfragments/3238.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3239.minor b/newsfragments/3239.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3240.minor b/newsfragments/3240.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3242.minor b/newsfragments/3242.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3243.minor b/newsfragments/3243.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3245.minor b/newsfragments/3245.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3246.minor b/newsfragments/3246.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3248.minor 
b/newsfragments/3248.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3250.minor b/newsfragments/3250.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3252.minor b/newsfragments/3252.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3255.minor b/newsfragments/3255.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3256.minor b/newsfragments/3256.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3259.minor b/newsfragments/3259.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3261.minor b/newsfragments/3261.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3262.minor b/newsfragments/3262.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3263.minor b/newsfragments/3263.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3264.minor b/newsfragments/3264.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3265.minor b/newsfragments/3265.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3266.other b/newsfragments/3266.other deleted file mode 100644 index 656356e63..000000000 --- a/newsfragments/3266.other +++ /dev/null @@ -1 +0,0 @@ -NixOS is now a supported Tahoe-LAFS platform. diff --git a/newsfragments/3267.minor b/newsfragments/3267.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3268.minor b/newsfragments/3268.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3271.minor b/newsfragments/3271.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3272.minor b/newsfragments/3272.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3273.removed b/newsfragments/3273.removed deleted file mode 100644 index 103da5587..000000000 --- a/newsfragments/3273.removed +++ /dev/null @@ -1 +0,0 @@ -"tahoe start", "tahoe daemonize", "tahoe restart", and "tahoe stop" are now deprecated in favor of using "tahoe run", possibly with a third-party process manager. 
diff --git a/newsfragments/3274.minor b/newsfragments/3274.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3275.minor b/newsfragments/3275.minor deleted file mode 100644 index dc25bc1a9..000000000 --- a/newsfragments/3275.minor +++ /dev/null @@ -1 +0,0 @@ -Updates to release process/docs \ No newline at end of file diff --git a/newsfragments/3276.minor b/newsfragments/3276.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3279.minor b/newsfragments/3279.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3281.minor b/newsfragments/3281.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3282.minor b/newsfragments/3282.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3285.minor b/newsfragments/3285.minor deleted file mode 100644 index e69de29bb..000000000 From eff5df164be449f30b77e050987d3ba978c62a32 Mon Sep 17 00:00:00 2001 From: meejah Date: Wed, 11 Mar 2020 18:41:32 -0600 Subject: [PATCH 0034/1472] update title --- NEWS.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NEWS.rst b/NEWS.rst index 2ac448999..2ca67a1f6 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -5,8 +5,8 @@ User-Visible Changes in Tahoe-LAFS ================================== .. towncrier start line -Release 1.13.0.post1475 (2020-03-11) -'''''''''''''''''''''''''''''''''''' +Release 1.14.0 (2020-03-11) +''''''''''''''''''''''''''' Features -------- From 37af15c24536ad0d290ceb641fbc4af6538cefd6 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:09:25 -0400 Subject: [PATCH 0035/1472] Bump CentOS from 7 to 8 on CI --- .circleci/config.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 45e42bca4..41e4ea7fa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,7 +21,7 @@ workflows: requires: - "fedora-29" - - "centos-7" + - "centos-8" - "slackware-14.2" @@ -67,7 +67,7 @@ workflows: - "build-image-ubuntu-18.04" - "build-image-fedora-28" - "build-image-fedora-29" - - "build-image-centos-7" + - "build-image-centos-8" - "build-image-slackware-14.2" - "build-image-pypy-2.7-7.1.1-jessie" @@ -269,9 +269,9 @@ jobs: user: "nobody" - centos-7: &RHEL_DERIV + centos-8: &RHEL_DERIV docker: - - image: "tahoelafsci/centos:7" + - image: "tahoelafsci/centos:8" user: "nobody" environment: *UTF_8_ENVIRONMENT @@ -481,12 +481,12 @@ jobs: TAG: "18.04" - build-image-centos-7: + build-image-centos-8: <<: *BUILD_IMAGE environment: DISTRO: "centos" - TAG: "7" + TAG: "8" build-image-fedora-28: From 757dd61eb5c63e06f0add5ff560a9f141ebc1706 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:11:36 -0400 Subject: [PATCH 0036/1472] news fragment --- newsfragments/3296.installation | 1 + 1 file changed, 1 insertion(+) create mode 100644 newsfragments/3296.installation diff --git a/newsfragments/3296.installation b/newsfragments/3296.installation new file mode 100644 index 000000000..78cf83f60 --- /dev/null +++ b/newsfragments/3296.installation @@ -0,0 +1 @@ +Tahoe-LAFS now supports CentOS 8 and no longer supports CentOS 7. 
\ No newline at end of file From 3c1bffdc9ed0ffc3812da86c94d8970493849f6d Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:36:54 -0400 Subject: [PATCH 0037/1472] Don't install Twisted trunk@HEAD since it doesn't support Python 2 --- tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/tox.ini b/tox.ini index 7948e897a..3134a0d9f 100644 --- a/tox.ini +++ b/tox.ini @@ -158,7 +158,6 @@ setenv = deps = # Take the base deps as well! {[testenv]deps} - git+https://github.com/twisted/twisted git+https://github.com/warner/foolscap commands = flogtool --version From 03dfb10863fd6e1635f4e170e4781734bd2db977 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:37:24 -0400 Subject: [PATCH 0038/1472] news fragment --- newsfragments/3297.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3297.minor diff --git a/newsfragments/3297.minor b/newsfragments/3297.minor new file mode 100644 index 000000000..e69de29bb From 16d452b68271d4f5563756e0cf2be51b35e8c8e5 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 11:01:21 -0400 Subject: [PATCH 0039/1472] Skip another Eliot test --- nix/eliot.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nix/eliot.nix b/nix/eliot.nix index 04344ff79..df41b3a61 100644 --- a/nix/eliot.nix +++ b/nix/eliot.nix @@ -17,7 +17,8 @@ buildPythonPackage rec { # Fails intermittently. substituteInPlace eliot/tests/test_validation.py \ - --replace "def test_omitLoggerFromActionType" "def xtest_omitLoggerFromActionType" + --replace "def test_omitLoggerFromActionType" "def xtest_omitLoggerFromActionType" \ + --replace "def test_logCallsDefaultLoggerWrite" "def xtest_logCallsDefaultLoggerWrite" ''; checkInputs = [ testtools pytest hypothesis ]; From 333ca2aa0af8a9f40ee970bb8aaec94cc3ed894e Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 11:01:33 -0400 Subject: [PATCH 0040/1472] news fragment --- newsfragments/3298.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3298.minor diff --git a/newsfragments/3298.minor b/newsfragments/3298.minor new file mode 100644 index 000000000..e69de29bb From 02ca2ad219096814664eee769497f889db323e02 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:38:52 -0400 Subject: [PATCH 0041/1472] This package has been renamed --- .circleci/Dockerfile.centos | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/Dockerfile.centos b/.circleci/Dockerfile.centos index aeb9eb914..9fb6b7c2b 100644 --- a/.circleci/Dockerfile.centos +++ b/.circleci/Dockerfile.centos @@ -12,7 +12,7 @@ RUN yum install --assumeyes \ sudo \ make automake gcc gcc-c++ \ python \ - python-devel \ + python2-devel \ libffi-devel \ openssl-devel \ libyaml-devel \ From 74f46b66868ad96af10dba4caf3cd2829a9b876f Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 10:57:28 -0400 Subject: [PATCH 0042/1472] this one renamed too --- .circleci/Dockerfile.centos | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/Dockerfile.centos b/.circleci/Dockerfile.centos index 9fb6b7c2b..146414560 100644 --- a/.circleci/Dockerfile.centos +++ b/.circleci/Dockerfile.centos @@ -11,7 +11,7 @@ RUN yum install --assumeyes \ git \ sudo \ make automake gcc gcc-c++ \ - python \ + python2 \ python2-devel \ libffi-devel \ openssl-devel \ From 5ccc241990595186588526cde463e00f44acf8ab Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: 
Mon, 13 Apr 2020 10:57:37 -0400 Subject: [PATCH 0043/1472] no more libyaml-devel, maybe it was merged into libyaml --- .circleci/Dockerfile.centos | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/Dockerfile.centos b/.circleci/Dockerfile.centos index 146414560..febb61545 100644 --- a/.circleci/Dockerfile.centos +++ b/.circleci/Dockerfile.centos @@ -15,7 +15,7 @@ RUN yum install --assumeyes \ python2-devel \ libffi-devel \ openssl-devel \ - libyaml-devel \ + libyaml \ /usr/bin/virtualenv \ net-tools From 25edce62576b14bcc14e2d5f66aca23462c55a7c Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 12:13:45 -0400 Subject: [PATCH 0044/1472] Don't create the magic-folder directories for each node --- integration/conftest.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/integration/conftest.py b/integration/conftest.py index a85bc15f6..5395d7c5f 100644 --- a/integration/conftest.py +++ b/integration/conftest.py @@ -332,11 +332,6 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer, @pytest.fixture(scope='session') @log_call(action_type=u"integration:alice", include_args=[], include_result=False) def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request): - try: - mkdir(join(temp_dir, 'magic-alice')) - except OSError: - pass - process = pytest_twisted.blockon( _create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice", @@ -351,11 +346,6 @@ def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, requ @pytest.fixture(scope='session') @log_call(action_type=u"integration:bob", include_args=[], include_result=False) def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request): - try: - mkdir(join(temp_dir, 'magic-bob')) - except OSError: - pass - process = pytest_twisted.blockon( _create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob", From e336d977dd4c0cd71ecf817c66b4609aeead908e Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 12:19:41 -0400 Subject: [PATCH 0045/1472] Filter push trigger down to master branch --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a59581180..5d41a78b4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,6 +2,8 @@ name: CI on: push: + branches: + - "master" pull_request: jobs: From 2c6b402e0adfe042a1195d07c1ea77e209a926ca Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Apr 2020 12:20:03 -0400 Subject: [PATCH 0046/1472] news fragment --- newsfragments/3300.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3300.minor diff --git a/newsfragments/3300.minor b/newsfragments/3300.minor new file mode 100644 index 000000000..e69de29bb From c73953d94aa391a4170691c8e85fa8a123b24221 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 14 Apr 2020 08:40:48 -0400 Subject: [PATCH 0047/1472] Capture and report Foolscap Tub Listener creation stack on error --- src/allmydata/test/__init__.py | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index 804d37935..af55be353 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -52,6 +52,44 @@ def _configure_hypothesis(): settings.load_profile(profile_name) _configure_hypothesis() +def logging_for_pb_listener(): + """ 
+ Make Foolscap listen error reports include Listener creation stack + information. + """ + from traceback import extract_stack, format_list + from foolscap.pb import Listener + from twisted.python.log import err + from twisted.application import service + + original__init__ = Listener.__init__ + def _listener__init__(self, *a, **kw): + original__init__(self, *a, **kw) + # Capture the stack here, where Listener is instantiated. This is + # likely to explain what code is responsible for this Listener, useful + # information to have when the Listener eventually fails to listen. + self._creation_stack = extract_stack() + + # Override the Foolscap implementation with one that has an errback + def _listener_startService(self): + service.Service.startService(self) + d = self._ep.listen(self) + def _listening(lp): + self._lp = lp + d.addCallbacks( + _listening, + # Make sure that this listen failure is reported promptly and with + # the creation stack. + err, + errbackArgs=( + "Listener created at {}".format( + "".join(format_list(self._creation_stack)), + ), + ), + ) + Listener.__init__ = _listener__init__ + Listener.startService = _listener_startService +logging_for_pb_listener() import sys if sys.platform == "win32": From 47bef6038f8213fde691a4b9e8ee7861d9747fd5 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 14 Apr 2020 08:41:10 -0400 Subject: [PATCH 0048/1472] news fragment --- newsfragments/3302.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3302.minor diff --git a/newsfragments/3302.minor b/newsfragments/3302.minor new file mode 100644 index 000000000..e69de29bb From f89ad002f87666b148e31d2b0654160bacfddcf9 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 14 Apr 2020 16:14:07 -0400 Subject: [PATCH 0049/1472] Use 2.7-buster Docker image for pypy Fix failing PyPy CI job. 
Fixes: ticket:3299 --- .circleci/Dockerfile.pypy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/Dockerfile.pypy b/.circleci/Dockerfile.pypy index fd32c1f0d..471611ecc 100644 --- a/.circleci/Dockerfile.pypy +++ b/.circleci/Dockerfile.pypy @@ -1,4 +1,4 @@ -FROM pypy:2.7-7.1.1-jessie +FROM pypy:2.7-buster ENV WHEELHOUSE_PATH /tmp/wheelhouse ENV VIRTUALENV_PATH /tmp/venv From 6e6cdb353e902db6ed5e6c491a31b42a6061ae06 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 14 Apr 2020 16:29:17 -0400 Subject: [PATCH 0050/1472] Add newsfragment --- newsfragments/3299.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3299.minor diff --git a/newsfragments/3299.minor b/newsfragments/3299.minor new file mode 100644 index 000000000..e69de29bb From 4fc6840c109216c35b56dbbe8f0d07ea6a9fa026 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 14 Apr 2020 17:35:44 -0400 Subject: [PATCH 0051/1472] Build Docker images for pypy 2.7-buster on CircleCI --- .circleci/config.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 41e4ea7fa..61ed12a5d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -27,8 +27,8 @@ workflows: - "nixos-19.09" - # Test against PyPy 2.7/7.1.1 - - "pypy2.7-7.1" + # Test against PyPy 2.7 + - "pypy2.7-buster" # Other assorted tasks and configurations - "lint" @@ -69,7 +69,7 @@ workflows: - "build-image-fedora-29" - "build-image-centos-8" - "build-image-slackware-14.2" - - "build-image-pypy-2.7-7.1.1-jessie" + - "build-image-pypy-2.7-buster" jobs: @@ -198,10 +198,10 @@ jobs: user: "nobody" - pypy2.7-7.1: + pypy2.7-buster: <<: *DEBIAN docker: - - image: "tahoelafsci/pypy:2.7-7.1.1-jessie" + - image: "tahoelafsci/pypy:2.7-buster" user: "nobody" environment: @@ -513,9 +513,9 @@ jobs: TAG: "14.2" - build-image-pypy-2.7-7.1.1-jessie: + build-image-pypy-2.7-buster: <<: *BUILD_IMAGE environment: DISTRO: "pypy" - TAG: "2.7-7.1.1-jessie" + TAG: "2.7-buster" From 72ca4e8c53191aabe6599ac72c113106552f1b8f Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 15 Apr 2020 08:54:50 -0400 Subject: [PATCH 0052/1472] move the imports --- src/allmydata/test/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index af55be353..fb62e438f 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -1,3 +1,8 @@ +from traceback import extract_stack, format_list +from foolscap.pb import Listener +from twisted.python.log import err +from twisted.application import service + from foolscap.logging.incident import IncidentQualifier class NonQualifier(IncidentQualifier, object): @@ -57,11 +62,6 @@ def logging_for_pb_listener(): Make Foolscap listen error reports include Listener creation stack information. 
""" - from traceback import extract_stack, format_list - from foolscap.pb import Listener - from twisted.python.log import err - from twisted.application import service - original__init__ = Listener.__init__ def _listener__init__(self, *a, **kw): original__init__(self, *a, **kw) From d4d8ce4e13d6f5b9103c18c1da722fe26f0f0bde Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 15 Apr 2020 08:55:00 -0400 Subject: [PATCH 0053/1472] missing front matter --- src/allmydata/test/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index fb62e438f..60d9b87d6 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -1,3 +1,18 @@ +# Tahoe-LAFS -- secure, distributed storage grid +# +# Copyright © 2020 The Tahoe-LAFS Software Foundation +# +# This file is part of Tahoe-LAFS. +# +# See the docs/about.rst file for licensing information. + +""" +Some setup that should apply across the entire test suite. + +Rather than defining interesting APIs for other code to use, this just causes +some side-effects which make things better when the test suite runs. +""" + from traceback import extract_stack, format_list from foolscap.pb import Listener from twisted.python.log import err From 71aa17ab0bbb3371f43c205aeda8092867c300b2 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 15 Apr 2020 09:07:23 -0400 Subject: [PATCH 0054/1472] I assume it's utf-8 --- src/allmydata/test/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index 60d9b87d6..abbde919f 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Tahoe-LAFS -- secure, distributed storage grid # # Copyright © 2020 The Tahoe-LAFS Software Foundation From 30d0953bcf1585fb91f7df1b9742d4f8f35eaa18 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 16 Apr 2020 08:24:15 -0400 Subject: [PATCH 0055/1472] Disable the Eliot test suite --- nix/eliot.nix | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/nix/eliot.nix b/nix/eliot.nix index df41b3a61..c5975e990 100644 --- a/nix/eliot.nix +++ b/nix/eliot.nix @@ -12,15 +12,14 @@ buildPythonPackage rec { postPatch = '' substituteInPlace setup.py \ --replace "boltons >= 19.0.1" boltons - # depends on eliot.prettyprint._main which we don't have here. - rm eliot/tests/test_prettyprint.py - - # Fails intermittently. - substituteInPlace eliot/tests/test_validation.py \ - --replace "def test_omitLoggerFromActionType" "def xtest_omitLoggerFromActionType" \ - --replace "def test_logCallsDefaultLoggerWrite" "def xtest_logCallsDefaultLoggerWrite" ''; + # A seemingly random subset of the test suite fails intermittently. After + # Tahoe-LAFS is ported to Python 3 we can update to a newer Eliot and, if + # the test suite continues to fail, maybe it will be more likely that we can + # have upstream fix it for us. 
+ doCheck = false; + checkInputs = [ testtools pytest hypothesis ]; propagatedBuildInputs = [ zope_interface pyrsistent boltons ]; From d308577397a1635ad49e22f16f7d0a760ae9453b Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Thu, 16 Apr 2020 08:24:31 -0400 Subject: [PATCH 0056/1472] Fixes: ticket:3303 --- newsfragments/3303.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3303.minor diff --git a/newsfragments/3303.minor b/newsfragments/3303.minor new file mode 100644 index 000000000..e69de29bb From d142ccb159c70da4b1c1e4159f955f06b5deb7c2 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 4 Feb 2020 06:46:08 -0500 Subject: [PATCH 0057/1472] Use twisted.web.template in web/storage.py Related to ticket:3247. Nevow usage has been removed, and generated page looks the same as its former self, but tests are failing because test_storage.py assumes that we're using nevow. --- src/allmydata/web/storage.py | 258 ++++++++++++++----------- src/allmydata/web/storage_status.xhtml | 60 +++--- 2 files changed, 182 insertions(+), 136 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 79c0a8c38..3000fb70d 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -1,10 +1,10 @@ import time, json -from nevow import rend, tags as T +from twisted.python.filepath import FilePath +from twisted.web.template import tags as T, renderer, Element, renderElement, XMLFile from allmydata.web.common import ( - getxmlfile, abbreviate_time, - MultiFormatPage, + MultiFormatResource ) from allmydata.util.abbreviate import abbreviate_space from allmydata.util import time_format, idlib @@ -16,91 +16,100 @@ def remove_prefix(s, prefix): return s[len(prefix):] -class StorageStatus(MultiFormatPage): - docFactory = getxmlfile("storage_status.xhtml") - # the default 'data' argument is the StorageServer instance +class StorageStatusElement(Element): + loader = XMLFile(FilePath(__file__).sibling("storage_status.xhtml")) - def __init__(self, storage, nickname=""): - rend.Page.__init__(self, storage) + def __init__(self, storage, nickname): + super(StorageStatusElement, self).__init__() self.storage = storage - self.nickname = nickname + self.nick = nickname - def render_JSON(self, req): - req.setHeader("content-type", "text/plain") - d = {"stats": self.storage.get_stats(), - "bucket-counter": self.storage.bucket_counter.get_state(), - "lease-checker": self.storage.lease_checker.get_state(), - "lease-checker-progress": self.storage.lease_checker.get_progress(), - } - return json.dumps(d, indent=1) + "\n" + @renderer + def nickname(self, req, tag): + return self.nick - def data_nickname(self, ctx, storage): - return self.nickname - def data_nodeid(self, ctx, storage): + @renderer + def nodeid(self, req, tag): return idlib.nodeid_b2a(self.storage.my_nodeid) - def render_storage_running(self, ctx, storage): - if storage: - return ctx.tag - else: - return T.h1["No Storage Server Running"] + def get_stat(self, key): + return self.storage.get_stats().get(key) - def render_bool(self, ctx, data): - return {True: "Yes", False: "No"}[bool(data)] + def str(self, tag, val): + if val is None: + return tag("?") + return tag(str(val)) - def render_abbrev_space(self, ctx, size): - if size is None: - return "?" - return abbreviate_space(size) + def abbr(self, tag, val): + if val is None: + return tag("?") + return tag(abbreviate_space(val)) - def render_space(self, ctx, size): - if size is None: - return "?" 
- return "%d" % size + @renderer + def disk_total(self, req, tag): + return self.str(tag, self.get_stat("storage_server.disk_total")) - def data_stats(self, ctx, data): - # FYI: 'data' appears to be self, rather than the StorageServer - # object in self.original that gets passed to render_* methods. I - # still don't understand Nevow. + @renderer + def disk_total_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.disk_total")) - # Nevow has nevow.accessors.DictionaryContainer: Any data= directive - # that appears in a context in which the current data is a dictionary - # will be looked up as keys in that dictionary. So if data_stats() - # returns a dictionary, then we can use something like this: - # - #
-        #  <ul n:data="stats">
-        #   <li>disk_total: <span n:render="abbrev" n:data="disk_total" /></li>
-        #  </ul>
    + @renderer + def disk_used(self, req, tag): + return self.str(tag, self.get_stat("storage_server.disk_used")) - # to use get_stats()["storage_server.disk_total"] . However, - # DictionaryContainer does a raw d[] instead of d.get(), so any - # missing keys will cause an error, even if the renderer can tolerate - # None values. To overcome this, we either need a dict-like object - # that always returns None for unknown keys, or we must pre-populate - # our dict with those missing keys, or we should get rid of data_ - # methods that return dicts (or find some way to override Nevow's - # handling of dictionaries). + @renderer + def disk_used_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.disk_used")) - d = dict([ (remove_prefix(k, "storage_server."), v) - for k,v in self.storage.get_stats().items() ]) - d.setdefault("disk_total", None) - d.setdefault("disk_used", None) - d.setdefault("disk_free_for_root", None) - d.setdefault("disk_free_for_nonroot", None) - d.setdefault("reserved_space", None) - d.setdefault("disk_avail", None) - return d + @renderer + def disk_free_for_root(self, req, tag): + return self.str(tag, self.get_stat("storage_server.disk_free_for_root")) - def data_last_complete_bucket_count(self, ctx, data): + @renderer + def disk_free_for_root_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.disk_free_for_root")) + + @renderer + def disk_free_for_nonroot(self, req, tag): + return self.str(tag, self.get_stat("storage_server.disk_free_for_nonroot")) + + @renderer + def disk_free_for_nonroot_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.disk_free_for_nonroot")) + + @renderer + def reserved_space(self, req, tag): + return self.str(tag, self.get_stat("storage_server.reserved_space")) + + @renderer + def reserved_space_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.reserved_space")) + + @renderer + def disk_avail(self, req, tag): + return self.str(tag, self.get_stat("storage_server.disk_avail")) + + @renderer + def disk_avail_abbrev(self, req, tag): + return self.abbr(tag, self.get_stat("storage_server.disk_avail")) + + @renderer + def accepting_immutable_shares(self, req, tag): + accepting = self.get_stat("storage_server.accepting_immutable_shares") + return {True: "Yes", False: "No"}[bool(accepting)] + + @renderer + def last_complete_bucket_count(self, req, tag): s = self.storage.bucket_counter.get_state() count = s.get("last-complete-bucket-count") if count is None: return "Not computed yet" - return count + return str(count) - def render_count_crawler_status(self, ctx, storage): + @renderer + def count_crawler_status(self, req, tag): p = self.storage.bucket_counter.get_progress() - return ctx.tag[self.format_crawler_progress(p)] + return self.format_crawler_progress(p) def format_crawler_progress(self, p): cycletime = p["estimated-time-per-cycle"] @@ -127,55 +136,51 @@ class StorageStatus(MultiFormatPage): return ["Next crawl in %s" % abbreviate_time(soon), cycletime_s] - def render_lease_expiration_enabled(self, ctx, data): + @renderer + def storage_running(self, req, tag): + if self.storage: + return tag + return tag("No Storage Server Running") + + @renderer + def lease_expiration_enabled(self, req, tag): lc = self.storage.lease_checker if lc.expiration_enabled: - return ctx.tag["Enabled: expired leases will be removed"] + return tag("Enabled: expired leases will be removed") else: - return ctx.tag["Disabled: scan-only mode, no leases will be removed"] 
+ return tag("Disabled: scan-only mode, no leases will be removed") - def render_lease_expiration_mode(self, ctx, data): + @renderer + def lease_expiration_mode(self, req, tag): lc = self.storage.lease_checker if lc.mode == "age": if lc.override_lease_duration is None: - ctx.tag["Leases will expire naturally, probably 31 days after " - "creation or renewal."] + tag("Leases will expire naturally, probably 31 days after " + "creation or renewal.") else: - ctx.tag["Leases created or last renewed more than %s ago " - "will be considered expired." - % abbreviate_time(lc.override_lease_duration)] + tag("Leases created or last renewed more than %s ago " + "will be considered expired." + % abbreviate_time(lc.override_lease_duration)) else: assert lc.mode == "cutoff-date" localizedutcdate = time.strftime("%d-%b-%Y", time.gmtime(lc.cutoff_date)) isoutcdate = time_format.iso_utc_date(lc.cutoff_date) - ctx.tag["Leases created or last renewed before %s (%s) UTC " - "will be considered expired." % (isoutcdate, localizedutcdate, )] + tag("Leases created or last renewed before %s (%s) UTC " + "will be considered expired." + % (isoutcdate, localizedutcdate, )) if len(lc.mode) > 2: - ctx.tag[" The following sharetypes will be expired: ", - " ".join(sorted(lc.sharetypes_to_expire)), "."] - return ctx.tag + tag(" The following sharetypes will be expired: ", + " ".join(sorted(lc.sharetypes_to_expire)), ".") + return tag - def format_recovered(self, sr, a): - def maybe(d): - if d is None: - return "?" - return "%d" % d - return "%s shares, %s buckets (%s mutable / %s immutable), %s (%s / %s)" % \ - (maybe(sr["%s-shares" % a]), - maybe(sr["%s-buckets" % a]), - maybe(sr["%s-buckets-mutable" % a]), - maybe(sr["%s-buckets-immutable" % a]), - abbreviate_space(sr["%s-diskbytes" % a]), - abbreviate_space(sr["%s-diskbytes-mutable" % a]), - abbreviate_space(sr["%s-diskbytes-immutable" % a]), - ) - - def render_lease_current_cycle_progress(self, ctx, data): + @renderer + def lease_current_cycle_progress(self, req, tag): lc = self.storage.lease_checker p = lc.get_progress() - return ctx.tag[self.format_crawler_progress(p)] + return tag(self.format_crawler_progress(p)) - def render_lease_current_cycle_results(self, ctx, data): + @renderer + def lease_current_cycle_results(self, req, tag): lc = self.storage.lease_checker p = lc.get_progress() if not p["cycle-in-progress"]: @@ -229,10 +234,10 @@ class StorageStatus(MultiFormatPage): T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share for corrupt_share in so_far["corrupt-shares"] ] ]]]) + return tag("Current cycle:", p) - return ctx.tag["Current cycle:", p] - - def render_lease_last_cycle_results(self, ctx, data): + @renderer + def lease_last_cycle_results(self, req, tag): lc = self.storage.lease_checker h = lc.get_state()["history"] if not h: @@ -240,15 +245,15 @@ class StorageStatus(MultiFormatPage): last = h[max(h.keys())] start, end = last["cycle-start-finish-times"] - ctx.tag["Last complete cycle (which took %s and finished %s ago)" - " recovered: " % (abbreviate_time(end-start), - abbreviate_time(time.time() - end)), - self.format_recovered(last["space-recovered"], "actual") - ] + tag("Last complete cycle (which took %s and finished %s ago)" + " recovered: " % (abbreviate_time(end-start), + abbreviate_time(time.time() - end)), + self.format_recovered(last["space-recovered"], "actual")) p = T.ul() + def add(*pieces): - p[T.li[pieces]] + p(T.li(pieces)) saw = self.format_recovered(last["space-recovered"], "examined") add("and saw a total of ", saw) @@ -264,4 +269,37 @@ class 
StorageStatus(MultiFormatPage): for corrupt_share in last["corrupt-shares"] ] ]]]) - return ctx.tag[p] + return tag(p) + + def format_recovered(self, sr, a): + def maybe(d): + if d is None: + return "?" + return "%d" % d + return "%s shares, %s buckets (%s mutable / %s immutable), %s (%s / %s)" % \ + (maybe(sr["%s-shares" % a]), + maybe(sr["%s-buckets" % a]), + maybe(sr["%s-buckets-mutable" % a]), + maybe(sr["%s-buckets-immutable" % a]), + abbreviate_space(sr["%s-diskbytes" % a]), + abbreviate_space(sr["%s-diskbytes-mutable" % a]), + abbreviate_space(sr["%s-diskbytes-immutable" % a]), + ) + +class StorageStatus(MultiFormatResource): + def __init__(self, storage, nickname=""): + super(StorageStatus, self).__init__() + self.storage = storage + self.nickname = nickname + + def render_HTML(self, req): + return renderElement(req, StorageStatusElement(self.storage, self.nickname)) + + def render_JSON(self, req): + req.setHeader("content-type", "text/plain") + d = {"stats": self.storage.get_stats(), + "bucket-counter": self.storage.bucket_counter.get_state(), + "lease-checker": self.storage.lease_checker.get_state(), + "lease-checker-progress": self.storage.lease_checker.get_progress(), + } + return json.dumps(d, indent=1) + "\n" diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index d97daf9af..cfd7a860c 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -1,4 +1,4 @@ - + Tahoe-LAFS - Storage Server Status @@ -7,19 +7,19 @@ -
[The storage_status.xhtml hunks of this patch were garbled in extraction: every markup tag was stripped, leaving only bare labels and +/- markers. Recoverable content: the hunks replace the Nevow template namespace and its n:render/n:data attributes with the twisted.web.template namespace (xmlns:t) and t:render attributes throughout the page: the disk-space table (Total disk space, Disk space used, Disk space free (root), Disk space free (non-root), Reserved space, Space Available to Tahoe, each row rendered once raw and once abbreviated), the server summary list (Server Nickname, Server Nodeid, Accepting new shares, Total buckets), and the Lease Expiration Crawler section.]
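The template pattern that patch 0057 adopts deserves a compact illustration. What follows is a minimal sketch, not Tahoe-LAFS code: the template string and the StatsElement/disk_used names are invented. An Element is bound to markup through its loader, and methods decorated with @renderer are looked up by name from the t:render attributes in that markup.

    # Minimal twisted.web.template sketch of the pattern replacing Nevow.
    # All names here are hypothetical.
    from twisted.web.template import Element, XMLString, renderer

    TEMPLATE = XMLString(
        '<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.5">'
        'Disk used: <span t:render="disk_used" />'
        '</div>'
    )

    class StatsElement(Element):
        loader = TEMPLATE

        def __init__(self, stats):
            super(StatsElement, self).__init__()
            self._stats = stats  # e.g. {"storage_server.disk_used": 12345}

        @renderer
        def disk_used(self, request, tag):
            # Fill the <span>, falling back to "?" when the stat is
            # missing, mirroring render_space() in the patch above.
            val = self._stats.get("storage_server.disk_used")
            return tag("?" if val is None else "%d" % val)

Inside a resource, renderElement(request, StatsElement(stats)) flattens the element into the HTTP request, which is what the new StorageStatus.render_HTML does with StorageStatusElement.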
    From b29652e0f0d27aafd52771b3f7b57f511fcec076 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 4 Feb 2020 22:59:45 -0500 Subject: [PATCH 0058/1472] Add `StorageStatus::renderSynchronously` Related to ticket:3247 test_storage.py wants a `StorageStatus::renderSynchronously()` method and a `StorageStatus::renderHTTP()` method. Let us begin with the goofy first-cut. Both these methods are not only wrong, but they will also not please the test suite. However error messages produced in CI can be shared, and that way I can hopefully get unstuck. --- src/allmydata/web/storage.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 3000fb70d..8f36cf44d 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -1,7 +1,8 @@ import time, json from twisted.python.filepath import FilePath -from twisted.web.template import tags as T, renderer, Element, renderElement, XMLFile +from twisted.web.template import tags as T, \ + renderer, Element, renderElement, XMLFile from allmydata.web.common import ( abbreviate_time, MultiFormatResource @@ -303,3 +304,14 @@ class StorageStatus(MultiFormatResource): "lease-checker-progress": self.storage.lease_checker.get_progress(), } return json.dumps(d, indent=1) + "\n" + + def renderSynchronously(self): + # to appease the test suite. + elem = StorageStatusElement(self.storage, self.nickname) + result = [] + flattenString(None, elem).addCallback(result.append) + return result + + def renderHTTP(self, ctx=None): + # to appease the test suite. + self.renderSynchronously() From d3790a4d4227f31f06a35042448a1a119fa4658e Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 4 Feb 2020 23:16:48 -0500 Subject: [PATCH 0059/1472] Add missing `flattenString` import --- src/allmydata/web/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 8f36cf44d..a912d79f6 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -2,7 +2,7 @@ import time, json from twisted.python.filepath import FilePath from twisted.web.template import tags as T, \ - renderer, Element, renderElement, XMLFile + renderer, Element, renderElement, XMLFile, flattenString from allmydata.web.common import ( abbreviate_time, MultiFormatResource From c019c7e9556e31c599997ce6463fe679668c3a73 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 5 Feb 2020 22:05:33 -0500 Subject: [PATCH 0060/1472] Second version of renderSynchronously --- src/allmydata/web/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index a912d79f6..4ad5a2baf 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -310,7 +310,7 @@ class StorageStatus(MultiFormatResource): elem = StorageStatusElement(self.storage, self.nickname) result = [] flattenString(None, elem).addCallback(result.append) - return result + return result[0] def renderHTTP(self, ctx=None): # to appease the test suite. 
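Patches 0058 through 0060 lean on a subtlety of flattenString: it returns a Deferred, but when no renderer is asynchronous that Deferred has already fired by the time addCallback runs, so a callback that appends to a list makes the result readable immediately. A sketch of the trick as a standalone helper (hypothetical code, not the project's API):

    from twisted.web.template import Element, XMLString, flattenString

    def render_synchronously(element):
        # flattenString(request, root) returns a Deferred that fires
        # with the rendered bytes; with only synchronous renderers it
        # has already fired before this function returns.
        results = []
        flattenString(None, element).addBoth(results.append)
        if not results:
            raise RuntimeError("element did not render synchronously")
        return results[0]  # bytes on success, a Failure if flattening failed

    class Hello(Element):
        loader = XMLString("<p>hello</p>")

    render_synchronously(Hello())  # returns '<p>hello</p>'

Note the failure modes the patch's two-liner glosses over: a renderer that returns an unfired Deferred leaves the list empty, and a flattening error lands a Failure in result[0] instead of a string.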
From 4e81a3a0a2906a2e2c3774a126afa750b7f9bf3c Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 5 Feb 2020 22:09:16 -0500 Subject: [PATCH 0061/1472] Check storage server status before using it --- src/allmydata/web/storage.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 4ad5a2baf..9a99286bf 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -31,9 +31,13 @@ class StorageStatusElement(Element): @renderer def nodeid(self, req, tag): + if not self.storage: + return tag("No storage server running.") return idlib.nodeid_b2a(self.storage.my_nodeid) def get_stat(self, key): + if not self.storage: + return None return self.storage.get_stats().get(key) def str(self, tag, val): @@ -101,6 +105,8 @@ class StorageStatusElement(Element): @renderer def last_complete_bucket_count(self, req, tag): + if not self.storage: + return tag("No storage server running.") s = self.storage.bucket_counter.get_state() count = s.get("last-complete-bucket-count") if count is None: @@ -109,6 +115,8 @@ class StorageStatusElement(Element): @renderer def count_crawler_status(self, req, tag): + if not self.storage: + return tag("No storage server running.") p = self.storage.bucket_counter.get_progress() return self.format_crawler_progress(p) @@ -145,6 +153,8 @@ class StorageStatusElement(Element): @renderer def lease_expiration_enabled(self, req, tag): + if not self.storage: + return tag("No storage server running.") lc = self.storage.lease_checker if lc.expiration_enabled: return tag("Enabled: expired leases will be removed") @@ -153,6 +163,8 @@ class StorageStatusElement(Element): @renderer def lease_expiration_mode(self, req, tag): + if not self.storage: + return tag("No storage server running.") lc = self.storage.lease_checker if lc.mode == "age": if lc.override_lease_duration is None: @@ -176,12 +188,16 @@ class StorageStatusElement(Element): @renderer def lease_current_cycle_progress(self, req, tag): + if not self.storage: + return tag("No storage server running.") lc = self.storage.lease_checker p = lc.get_progress() return tag(self.format_crawler_progress(p)) @renderer def lease_current_cycle_results(self, req, tag): + if not self.storage: + return tag("No storage server running.") lc = self.storage.lease_checker p = lc.get_progress() if not p["cycle-in-progress"]: @@ -239,6 +255,8 @@ class StorageStatusElement(Element): @renderer def lease_last_cycle_results(self, req, tag): + if not self.storage: + return tag("No storage server running.") lc = self.storage.lease_checker h = lc.get_state()["history"] if not h: From c88c97aad540b928129944bdcd349df6c7df72de Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 5 Feb 2020 23:09:39 -0500 Subject: [PATCH 0062/1472] Use right syntax for twisted.web.template tags --- src/allmydata/web/storage.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 9a99286bf..1e206d8cf 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -212,7 +212,7 @@ class StorageStatusElement(Element): p = T.ul() def add(*pieces): - p[T.li[pieces]] + p(T.li(pieces)) def maybe(d): if d is None: @@ -248,9 +248,9 @@ class StorageStatusElement(Element): if so_far["corrupt-shares"]: add("Corrupt shares:", - T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share + T.ul( (T.li( ["SI %s shnum %d" % corrupt_share for corrupt_share in so_far["corrupt-shares"] ] - ]]]) + 
)))) return tag("Current cycle:", p) @renderer @@ -284,9 +284,9 @@ class StorageStatusElement(Element): if last["corrupt-shares"]: add("Corrupt shares:", - T.ul[ [T.li[ ["SI %s shnum %d" % corrupt_share + T.ul( (T.li( ["SI %s shnum %d" % corrupt_share for corrupt_share in last["corrupt-shares"] ] - ]]]) + )))) return tag(p) From c7a63f957d4a476092f3bcff70e57dacc9d47c60 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 05:17:35 -0500 Subject: [PATCH 0063/1472] Refactor so that test_util pass --- src/allmydata/test/test_storage.py | 5 +++-- src/allmydata/web/storage.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 9f3aee9b8..85f4dd2bd 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -30,7 +30,8 @@ from allmydata.interfaces import BadWriteEnablerError from allmydata.test.common import LoggingServiceParent, ShouldFailMixin from allmydata.test.common_web import WebRenderingMixin from allmydata.test.no_network import NoNetworkServer -from allmydata.web.storage import StorageStatus, remove_prefix +from allmydata.web.storage import StorageStatus, StorageStatusElement, \ + remove_prefix from allmydata.storage_client import ( _StorageServer, ) @@ -4208,7 +4209,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s) def test_util(self): - w = StorageStatus(None) + w = StorageStatusElement(None, None) self.failUnlessEqual(w.render_space(None, None), "?") self.failUnlessEqual(w.render_space(None, 10e6), "10000000") self.failUnlessEqual(w.render_abbrev_space(None, None), "?") diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 1e206d8cf..6b033bc5a 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -50,6 +50,16 @@ class StorageStatusElement(Element): return tag("?") return tag(abbreviate_space(val)) + def render_abbrev_space(self, ctx, size): + if size is None: + return "?" + return abbreviate_space(size) + + def render_space(self, ctx, size): + if size is None: + return "?" 
+ return "%d" % size + @renderer def disk_total(self, req, tag): return self.str(tag, self.get_stat("storage_server.disk_total")) From 0993e610468afe97876c9e4af4367ac9a6a7bde4 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 05:20:45 -0500 Subject: [PATCH 0064/1472] Drop unused `ctx` argument from render_space methods --- src/allmydata/test/test_storage.py | 8 ++++---- src/allmydata/web/storage.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 85f4dd2bd..6f4c455f2 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -4210,9 +4210,9 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): def test_util(self): w = StorageStatusElement(None, None) - self.failUnlessEqual(w.render_space(None, None), "?") - self.failUnlessEqual(w.render_space(None, 10e6), "10000000") - self.failUnlessEqual(w.render_abbrev_space(None, None), "?") - self.failUnlessEqual(w.render_abbrev_space(None, 10e6), "10.00 MB") + self.failUnlessEqual(w.render_space(None), "?") + self.failUnlessEqual(w.render_space(10e6), "10000000") + self.failUnlessEqual(w.render_abbrev_space(None), "?") + self.failUnlessEqual(w.render_abbrev_space(10e6), "10.00 MB") self.failUnlessEqual(remove_prefix("foo.bar", "foo."), "bar") self.failUnlessEqual(remove_prefix("foo.bar", "baz."), None) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 6b033bc5a..88f099b24 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -50,12 +50,12 @@ class StorageStatusElement(Element): return tag("?") return tag(abbreviate_space(val)) - def render_abbrev_space(self, ctx, size): + def render_abbrev_space(self, size): if size is None: return "?" return abbreviate_space(size) - def render_space(self, ctx, size): + def render_space(self, size): if size is None: return "?" return "%d" % size From d46df30bd059633d68dee3eb5d42ba5e375d0d5c Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 05:49:11 -0500 Subject: [PATCH 0065/1472] Use render_space methods to render space --- src/allmydata/web/storage.py | 46 +++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 88f099b24..496766b92 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -40,16 +40,6 @@ class StorageStatusElement(Element): return None return self.storage.get_stats().get(key) - def str(self, tag, val): - if val is None: - return tag("?") - return tag(str(val)) - - def abbr(self, tag, val): - if val is None: - return tag("?") - return tag(abbreviate_space(val)) - def render_abbrev_space(self, size): if size is None: return "?" 
@@ -62,51 +52,63 @@ class StorageStatusElement(Element): @renderer def disk_total(self, req, tag): - return self.str(tag, self.get_stat("storage_server.disk_total")) + val = self.get_stat("storage_server.disk_total") + return tag(self.render_space(val)) @renderer def disk_total_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.disk_total")) + val = self.get_stat("storage_server.disk_total") + return tag(self.render_abbrev_space(val)) @renderer def disk_used(self, req, tag): - return self.str(tag, self.get_stat("storage_server.disk_used")) + val = self.get_stat("storage_server.disk_used") + return tag(self.render_space(val)) @renderer def disk_used_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.disk_used")) + val = self.get_stat("storage_server.disk_used") + return tag(self.render_abbrev_space(val)) @renderer def disk_free_for_root(self, req, tag): - return self.str(tag, self.get_stat("storage_server.disk_free_for_root")) + val = self.get_stat("storage_server.disk_free_for_root") + return tag(self.render_space(val)) @renderer def disk_free_for_root_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.disk_free_for_root")) + val = self.get_stat("storage_server.disk_free_for_root") + return tag(self.render_abbrev_space(val)) @renderer def disk_free_for_nonroot(self, req, tag): - return self.str(tag, self.get_stat("storage_server.disk_free_for_nonroot")) + val = self.get_stat("storage_server.disk_free_for_nonroot") + return tag(self.render_space(val)) @renderer def disk_free_for_nonroot_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.disk_free_for_nonroot")) + val = self.get_stat("storage_server.disk_free_for_nonroot") + return tag(self.render_abbrev_space(val)) @renderer def reserved_space(self, req, tag): - return self.str(tag, self.get_stat("storage_server.reserved_space")) + val = self.get_stat("storage_server.reserved_space") + return tag(self.render_space(val)) @renderer def reserved_space_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.reserved_space")) + val = self.get_stat("storage_server.reserved_space") + return tag(self.render_abbrev_space(val)) @renderer def disk_avail(self, req, tag): - return self.str(tag, self.get_stat("storage_server.disk_avail")) + val = self.get_stat("storage_server.disk_avail") + return tag(self.render_space(val)) @renderer def disk_avail_abbrev(self, req, tag): - return self.abbr(tag, self.get_stat("storage_server.disk_avail")) + val = self.get_stat("storage_server.disk_avail") + return tag(self.render_abbrev_space(val)) @renderer def accepting_immutable_shares(self, req, tag): From b14f36082c148119cc9814c56a02a4155447032a Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 06:22:25 -0500 Subject: [PATCH 0066/1472] Use within table cells --- src/allmydata/web/storage_status.xhtml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index cfd7a860c..6e0e26008 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -13,13 +13,13 @@ - - + + - - + + - - + + - - + + - - + + - - + +
[These storage_status.xhtml hunks were likewise garbled in extraction. Recoverable content: the same disk-space table rows (Total disk space, Disk space used, Disk space free (root), Disk space free (non-root), Reserved space, Space Available to Tahoe) are reworked so that each cell's value is rendered through the cell's own t:render attribute; the tag named in the commit title was stripped.]
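Patch 0067 just below, together with patch 0061 earlier, settles the renderers into a defensive shape: a node can run without a storage server, so each renderer checks self.storage before touching it instead of raising AttributeError. A standalone sketch of that guard (invented names, not the real class):

    from twisted.web.template import Element, XMLString, renderer, tags

    class StatusSketch(Element):
        loader = XMLString(
            '<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.5">'
            '<t:transparent t:render="running" />'
            '</div>'
        )

        def __init__(self, storage=None):
            super(StatusSketch, self).__init__()
            self._storage = storage

        @renderer
        def running(self, request, tag):
            if self._storage is None:
                # Same idea as the patched storage_running renderer:
                # emit a heading instead of the normal status markup.
                return tags.h1("No Storage Server Running")
            return tag("storage server is running")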
    From 2df2ae92d56722ee9d810a678d9b74fe197cb1ab Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 07:34:55 -0500 Subject: [PATCH 0067/1472] Fix test_storage.WebStatus.test_no_server failure --- src/allmydata/web/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 496766b92..c2e517dce 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -161,7 +161,7 @@ class StorageStatusElement(Element): def storage_running(self, req, tag): if self.storage: return tag - return tag("No Storage Server Running") + return T.h1("No Storage Server Running") @renderer def lease_expiration_enabled(self, req, tag): From b1c78244abeab83e0061e7586b26d4b44911a85d Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 08:19:29 -0500 Subject: [PATCH 0068/1472] Use parentheses in import statement --- src/allmydata/web/storage.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index c2e517dce..d45541457 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -1,8 +1,14 @@ import time, json from twisted.python.filepath import FilePath -from twisted.web.template import tags as T, \ - renderer, Element, renderElement, XMLFile, flattenString +from twisted.web.template import ( + Element, + XMLFile, + tags as T, + renderer, + renderElement, + flattenString +) from allmydata.web.common import ( abbreviate_time, MultiFormatResource From d3ff578640f85181fffd787b5209e7847033d8e9 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 09:55:33 -0500 Subject: [PATCH 0069/1472] Use parentheses in test suite's import statement --- src/allmydata/test/test_storage.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 6f4c455f2..6956fc757 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -30,8 +30,11 @@ from allmydata.interfaces import BadWriteEnablerError from allmydata.test.common import LoggingServiceParent, ShouldFailMixin from allmydata.test.common_web import WebRenderingMixin from allmydata.test.no_network import NoNetworkServer -from allmydata.web.storage import StorageStatus, StorageStatusElement, \ +from allmydata.web.storage import ( + StorageStatus, + StorageStatusElement, remove_prefix +) from allmydata.storage_client import ( _StorageServer, ) From 227d06fe64533ba45b1319b7f498e0df3492f670 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 11 Feb 2020 17:02:14 -0500 Subject: [PATCH 0070/1472] Add docstrings to StorageStatusElement --- src/allmydata/web/storage.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index d45541457..df8354a38 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -24,9 +24,15 @@ def remove_prefix(s, prefix): class StorageStatusElement(Element): + """Class to render a storage status page.""" + loader = XMLFile(FilePath(__file__).sibling("storage_status.xhtml")) - def __init__(self, storage, nickname): + def __init__(self, storage, nickname=""): + """ + :param _StorageServer storage: data about storage. + :param string nickname: friendly name for storage. 
+ """ super(StorageStatusElement, self).__init__() self.storage = storage self.nick = nickname From f22417e51b4fadbba09cf2ad882605c5aecaef20 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 12 Feb 2020 12:14:24 -0500 Subject: [PATCH 0071/1472] Rename function for clarity --- src/allmydata/web/storage.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index df8354a38..a5d65fa85 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -47,7 +47,7 @@ class StorageStatusElement(Element): return tag("No storage server running.") return idlib.nodeid_b2a(self.storage.my_nodeid) - def get_stat(self, key): + def _get_storage_stat(self, key): if not self.storage: return None return self.storage.get_stats().get(key) @@ -64,67 +64,67 @@ class StorageStatusElement(Element): @renderer def disk_total(self, req, tag): - val = self.get_stat("storage_server.disk_total") + val = self._get_storage_stat("storage_server.disk_total") return tag(self.render_space(val)) @renderer def disk_total_abbrev(self, req, tag): - val = self.get_stat("storage_server.disk_total") + val = self._get_storage_stat("storage_server.disk_total") return tag(self.render_abbrev_space(val)) @renderer def disk_used(self, req, tag): - val = self.get_stat("storage_server.disk_used") + val = self._get_storage_stat("storage_server.disk_used") return tag(self.render_space(val)) @renderer def disk_used_abbrev(self, req, tag): - val = self.get_stat("storage_server.disk_used") + val = self._get_storage_stat("storage_server.disk_used") return tag(self.render_abbrev_space(val)) @renderer def disk_free_for_root(self, req, tag): - val = self.get_stat("storage_server.disk_free_for_root") + val = self._get_storage_stat("storage_server.disk_free_for_root") return tag(self.render_space(val)) @renderer def disk_free_for_root_abbrev(self, req, tag): - val = self.get_stat("storage_server.disk_free_for_root") + val = self._get_storage_stat("storage_server.disk_free_for_root") return tag(self.render_abbrev_space(val)) @renderer def disk_free_for_nonroot(self, req, tag): - val = self.get_stat("storage_server.disk_free_for_nonroot") + val = self._get_storage_stat("storage_server.disk_free_for_nonroot") return tag(self.render_space(val)) @renderer def disk_free_for_nonroot_abbrev(self, req, tag): - val = self.get_stat("storage_server.disk_free_for_nonroot") + val = self._get_storage_stat("storage_server.disk_free_for_nonroot") return tag(self.render_abbrev_space(val)) @renderer def reserved_space(self, req, tag): - val = self.get_stat("storage_server.reserved_space") + val = self._get_storage_stat("storage_server.reserved_space") return tag(self.render_space(val)) @renderer def reserved_space_abbrev(self, req, tag): - val = self.get_stat("storage_server.reserved_space") + val = self._get_storage_stat("storage_server.reserved_space") return tag(self.render_abbrev_space(val)) @renderer def disk_avail(self, req, tag): - val = self.get_stat("storage_server.disk_avail") + val = self._get_storage_stat("storage_server.disk_avail") return tag(self.render_space(val)) @renderer def disk_avail_abbrev(self, req, tag): - val = self.get_stat("storage_server.disk_avail") + val = self._get_storage_stat("storage_server.disk_avail") return tag(self.render_abbrev_space(val)) @renderer def accepting_immutable_shares(self, req, tag): - accepting = self.get_stat("storage_server.accepting_immutable_shares") + accepting = 
self._get_storage_stat("storage_server.accepting_immutable_shares") return {True: "Yes", False: "No"}[bool(accepting)] @renderer From 6e9a4e30d733c2a08d801df03f1107d9177ec76e Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Wed, 12 Feb 2020 12:15:26 -0500 Subject: [PATCH 0072/1472] Add a docstring --- src/allmydata/web/storage.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index a5d65fa85..263fa9344 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -48,6 +48,30 @@ class StorageStatusElement(Element): return idlib.nodeid_b2a(self.storage.my_nodeid) def _get_storage_stat(self, key): + """Get storage server statistics. + + Storage Server keeps a dict that contains various usage and + latency statistics. The dict looks like this: + + { + 'storage_server.accepting_immutable_shares': 1, + 'storage_server.allocated': 0, + 'storage_server.disk_avail': 106539192320, + 'storage_server.disk_free_for_nonroot': 106539192320, + 'storage_server.disk_free_for_root': 154415284224, + 'storage_server.disk_total': 941088460800, + 'storage_server.disk_used': 786673176576, + 'storage_server.latencies.add-lease.01_0_percentile': None, + 'storage_server.latencies.add-lease.10_0_percentile': None, + ... + } + + ``StorageServer.get_stats()`` returns the above dict. Storage + status page uses a subset of the items in the dict, concerning + disk usage. + + :param str key: storage server statistic we want to know. + """ if not self.storage: return None return self.storage.get_stats().get(key) From 9b51bdf7faca3314e5dff2de22b8e035b9114fec Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 12:25:20 -0500 Subject: [PATCH 0073/1472] Return result from renderHTTP Bogus renderHTTP, but test failures are now down to three. We just need to handle requests for JSON now. --- src/allmydata/web/storage.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 263fa9344..1656df70b 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -378,6 +378,18 @@ class StorageStatus(MultiFormatResource): flattenString(None, elem).addCallback(result.append) return result[0] + # to appease the test suite def renderHTTP(self, ctx=None): - # to appease the test suite. - self.renderSynchronously() + """Send HTML or JSON formatted data, based on request. + + This function contains a bit of nevow-ism, but since this is + only called from the test suite, the nevow-ism should go away + as we update things. + + :param _nevow.context.WovenContext ctx: context is passed on + from the test suite. We get a request out of this + context, and use the request to render a result. 
+ + """ + from nevow.inevow import IRequest + return self.render(IRequest(ctx)) From 7a053ddeff85d32376c12582ca35a825e708704a Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 15:29:42 -0500 Subject: [PATCH 0074/1472] Use explicit `None`-check on self.storage --- src/allmydata/web/storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 1656df70b..76a09fda3 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -43,7 +43,7 @@ class StorageStatusElement(Element): @renderer def nodeid(self, req, tag): - if not self.storage: + if self.storage is None: return tag("No storage server running.") return idlib.nodeid_b2a(self.storage.my_nodeid) From e2fc1fc07ea56cd6b4805864d1d275170e0c261e Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 16:49:15 -0500 Subject: [PATCH 0075/1472] Move `renderSynchronously` to test suite --- src/allmydata/test/test_storage.py | 36 +++++++++++++++++++----------- src/allmydata/web/storage.py | 10 +-------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 6956fc757..f37401cc2 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -4,6 +4,7 @@ from twisted.trial import unittest from twisted.internet import defer from twisted.application import service +from twisted.web.template import flattenString from foolscap.api import fireEventually import itertools from allmydata import interfaces @@ -2963,6 +2964,15 @@ def remove_tags(s): s = re.sub(r'\s+', ' ', s) return s +def renderSynchronously(ss): + """ + :param _StorageStatus ss: a StorageStatus instance. + """ + elem = StorageStatusElement(ss.storage, ss.nickname) + result = [] + flattenString(None, elem).addCallback(result.append) + return result[0] + class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): BucketCountingCrawler.finished_prefix(self, cycle, prefix) @@ -2999,7 +3009,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin): w = StorageStatus(ss) # this sample is before the crawler has started doing anything - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Accepting new shares: Yes", s) @@ -3022,7 +3032,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(state["last-complete-prefix"], ss.bucket_counter.prefixes[0]) ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible - html = w.renderSynchronously() + html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(" Current crawl ", s) self.failUnlessIn(" (next work in ", s) @@ -3034,7 +3044,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin): d.addCallback(lambda ignored: self.poll(_watch)) def _check2(ignored): ss.bucket_counter.cpu_slice = orig_cpu_slice - html = w.renderSynchronously() + html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn("Total buckets: 0 (the number of", s) self.failUnless("Next crawl in 59 minutes" in s or "Next crawl in 60 minutes" in s, s) @@ -3096,20 +3106,20 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin): def _check_1(ignored): # no ETA is available yet - html = w.renderSynchronously() + html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn("complete (next work", s) def _check_2(ignored): # one prefix has finished, so an ETA based upon that elapsed time # should be available. - html = w.renderSynchronously() + html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn("complete (ETA ", s) def _check_3(ignored): # two prefixes have finished - html = w.renderSynchronously() + html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn("complete (ETA ", s) d.callback("done") @@ -4064,7 +4074,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): def test_no_server(self): w = StorageStatus(None) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>No Storage Server Running</h1>
    ", html) def test_status(self): @@ -4110,7 +4120,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, "\x00" * 20) ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Accepting new shares: Yes", s) @@ -4130,7 +4140,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, "\x00" * 20) ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Accepting new shares: No", s) @@ -4166,7 +4176,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) @@ -4184,7 +4194,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, "\x00" * 20, readonly_storage=True) ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Accepting new shares: No", s) @@ -4195,7 +4205,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6) ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s) @@ -4206,7 +4216,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6) ss.setServiceParent(self.s) w = StorageStatus(ss) - html = w.renderSynchronously() + html = renderSynchronously(w) self.failUnlessIn("

<h1>Storage Server Status</h1>
    ", html) s = remove_tags(html) self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 76a09fda3..e9c2f6bb5 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -6,8 +6,7 @@ from twisted.web.template import ( XMLFile, tags as T, renderer, - renderElement, - flattenString + renderElement ) from allmydata.web.common import ( abbreviate_time, @@ -371,13 +370,6 @@ class StorageStatus(MultiFormatResource): } return json.dumps(d, indent=1) + "\n" - def renderSynchronously(self): - # to appease the test suite. - elem = StorageStatusElement(self.storage, self.nickname) - result = [] - flattenString(None, elem).addCallback(result.append) - return result[0] - # to appease the test suite def renderHTTP(self, ctx=None): """Send HTML or JSON formatted data, based on request. From c061f6830e66ff7770bc0af43eb0442fbdf0f441 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 17:00:54 -0500 Subject: [PATCH 0076/1472] Use `successResultOf` in `renderSynchronously` Get rid of [].append trick when dealing with the deferred. --- src/allmydata/test/test_storage.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index f37401cc2..0a176bd60 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -2969,9 +2969,8 @@ def renderSynchronously(ss): :param _StorageStatus ss: a StorageStatus instance. """ elem = StorageStatusElement(ss.storage, ss.nickname) - result = [] - flattenString(None, elem).addCallback(result.append) - return result[0] + deferred = flattenString(None, elem) + return unittest.TestCase().successResultOf(deferred) class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): From aab940f65fb95815c9998b79876adeda5a0fb5f8 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 17:18:37 -0500 Subject: [PATCH 0077/1472] Remove redundant `None`-checks on `self.storage` When no storage is up, `storage_running()` renderer will return a big honking `no storage server running` message, and no further renderers will be invoked. Therefore the extra defense is probably not required. (I tested this hypothesis. The extra defense is not required, unless there's something I have not seen.) --- src/allmydata/web/storage.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index e9c2f6bb5..cbee2780f 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -42,8 +42,6 @@ class StorageStatusElement(Element): @renderer def nodeid(self, req, tag): - if self.storage is None: - return tag("No storage server running.") return idlib.nodeid_b2a(self.storage.my_nodeid) def _get_storage_stat(self, key): @@ -71,8 +69,6 @@ class StorageStatusElement(Element): :param str key: storage server statistic we want to know. 
""" - if not self.storage: - return None return self.storage.get_stats().get(key) def render_abbrev_space(self, size): @@ -152,8 +148,6 @@ class StorageStatusElement(Element): @renderer def last_complete_bucket_count(self, req, tag): - if not self.storage: - return tag("No storage server running.") s = self.storage.bucket_counter.get_state() count = s.get("last-complete-bucket-count") if count is None: @@ -162,8 +156,6 @@ class StorageStatusElement(Element): @renderer def count_crawler_status(self, req, tag): - if not self.storage: - return tag("No storage server running.") p = self.storage.bucket_counter.get_progress() return self.format_crawler_progress(p) @@ -200,8 +192,6 @@ class StorageStatusElement(Element): @renderer def lease_expiration_enabled(self, req, tag): - if not self.storage: - return tag("No storage server running.") lc = self.storage.lease_checker if lc.expiration_enabled: return tag("Enabled: expired leases will be removed") @@ -210,8 +200,6 @@ class StorageStatusElement(Element): @renderer def lease_expiration_mode(self, req, tag): - if not self.storage: - return tag("No storage server running.") lc = self.storage.lease_checker if lc.mode == "age": if lc.override_lease_duration is None: @@ -235,16 +223,12 @@ class StorageStatusElement(Element): @renderer def lease_current_cycle_progress(self, req, tag): - if not self.storage: - return tag("No storage server running.") lc = self.storage.lease_checker p = lc.get_progress() return tag(self.format_crawler_progress(p)) @renderer def lease_current_cycle_results(self, req, tag): - if not self.storage: - return tag("No storage server running.") lc = self.storage.lease_checker p = lc.get_progress() if not p["cycle-in-progress"]: @@ -302,8 +286,6 @@ class StorageStatusElement(Element): @renderer def lease_last_cycle_results(self, req, tag): - if not self.storage: - return tag("No storage server running.") lc = self.storage.lease_checker h = lc.get_state()["history"] if not h: From 0382b1ec63cfdcebf5f76f03ee939edbd0901d35 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 17:23:35 -0500 Subject: [PATCH 0078/1472] Use Unicode strings to render space --- src/allmydata/web/storage.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index cbee2780f..607eeff79 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -73,13 +73,13 @@ class StorageStatusElement(Element): def render_abbrev_space(self, size): if size is None: - return "?" + return u"?" return abbreviate_space(size) def render_space(self, size): if size is None: - return "?" - return "%d" % size + return u"?" + return u"%d" % size @renderer def disk_total(self, req, tag): From 36a486426e970d86fb2dea42ab9ffba0dfc6238c Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 13 Feb 2020 22:02:30 -0500 Subject: [PATCH 0079/1472] Mark `format_recovered` as static method --- src/allmydata/web/storage.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 607eeff79..d670c4231 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -319,7 +319,8 @@ class StorageStatusElement(Element): return tag(p) - def format_recovered(self, sr, a): + @staticmethod + def format_recovered(sr, a): def maybe(d): if d is None: return "?" 
From 9bb7812148b7aa17cfa641f080ab8e3eb2bce642 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 14 Feb 2020 07:25:49 -0500 Subject: [PATCH 0080/1472] Add news fragment --- newsfragments/3247.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3247.minor diff --git a/newsfragments/3247.minor b/newsfragments/3247.minor new file mode 100644 index 000000000..e69de29bb From 7625d959bc2735816710af01e80073b102312e17 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 14 Feb 2020 08:26:44 -0500 Subject: [PATCH 0081/1472] Use to render node nickname and id CI did not like the old way, but it passed in my system. Odd. --- src/allmydata/web/storage_status.xhtml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index 6e0e26008..8f74c478c 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -57,12 +57,12 @@
    • Server Nickname: - +
    • Server Nodeid: - +
    • Accepting new shares: From ff019e5b12d3210046e269519a1e474f1d99ceb3 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 14 Feb 2020 10:10:48 -0500 Subject: [PATCH 0082/1472] Use BeautifulSoup to check favicon in storage page --- src/allmydata/test/web/test_web.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index f84923521..19753d3b7 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -963,8 +963,9 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi def test_storage(self): d = self.GET("/storage") def _check(res): - self.failUnlessIn('Storage Server Status', res) - self.failUnlessIn(FAVICON_MARKUP, res) + soup = BeautifulSoup(res, 'html5lib') + assert_soup_has_text(self, soup, 'Storage Server Status') + assert_soup_has_favicon(self, soup) res_u = res.decode('utf-8') self.failUnlessIn(u'
    • Server Nickname: fake_nickname \u263A
    • ', res_u) d.addCallback(_check) From 0cbe2871fd80ffd186d54848995c6b6c91ee5ca1 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Fri, 14 Feb 2020 11:42:42 -0500 Subject: [PATCH 0083/1472] Give the suite the precise string it wants --- src/allmydata/web/storage_status.xhtml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index 8f74c478c..d052fbabd 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -55,16 +55,8 @@
        -
      • Server Nickname: - - - -
      • -
      • Server Nodeid: - - - -
      • +
      • Server Nickname:
      • +
      • Server Nodeid:
      • Accepting new shares:
      • Total buckets: From 6c3256517a0fa8cd1b8c00267d0f80325f868a70 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 17 Feb 2020 12:52:48 -0500 Subject: [PATCH 0084/1472] Use slots to render storage stats table --- src/allmydata/web/storage.py | 80 ++++++++------------------ src/allmydata/web/storage_status.xhtml | 26 ++++----- 2 files changed, 36 insertions(+), 70 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index d670c4231..d26f4a5dd 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -82,64 +82,30 @@ class StorageStatusElement(Element): return u"%d" % size @renderer - def disk_total(self, req, tag): - val = self._get_storage_stat("storage_server.disk_total") - return tag(self.render_space(val)) + def storage_stats(self, req, tag): + # Render storage status table that appears near the top of the page. + total = self._get_storage_stat("storage_server.disk_total") + used = self._get_storage_stat("storage_server.disk_used") + free_root = self._get_storage_stat("storage_server.disk_free_for_root") + free_nonroot = self._get_storage_stat("storage_server.disk_free_for_nonroot") + reserved = self._get_storage_stat("storage_server.reserved_space") + available = self._get_storage_stat("storage_server.disk_avail") - @renderer - def disk_total_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.disk_total") - return tag(self.render_abbrev_space(val)) - - @renderer - def disk_used(self, req, tag): - val = self._get_storage_stat("storage_server.disk_used") - return tag(self.render_space(val)) - - @renderer - def disk_used_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.disk_used") - return tag(self.render_abbrev_space(val)) - - @renderer - def disk_free_for_root(self, req, tag): - val = self._get_storage_stat("storage_server.disk_free_for_root") - return tag(self.render_space(val)) - - @renderer - def disk_free_for_root_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.disk_free_for_root") - return tag(self.render_abbrev_space(val)) - - @renderer - def disk_free_for_nonroot(self, req, tag): - val = self._get_storage_stat("storage_server.disk_free_for_nonroot") - return tag(self.render_space(val)) - - @renderer - def disk_free_for_nonroot_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.disk_free_for_nonroot") - return tag(self.render_abbrev_space(val)) - - @renderer - def reserved_space(self, req, tag): - val = self._get_storage_stat("storage_server.reserved_space") - return tag(self.render_space(val)) - - @renderer - def reserved_space_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.reserved_space") - return tag(self.render_abbrev_space(val)) - - @renderer - def disk_avail(self, req, tag): - val = self._get_storage_stat("storage_server.disk_avail") - return tag(self.render_space(val)) - - @renderer - def disk_avail_abbrev(self, req, tag): - val = self._get_storage_stat("storage_server.disk_avail") - return tag(self.render_abbrev_space(val)) + tag.fillSlots( + disk_total = self.render_space(total), + disk_total_abbrev = self.render_abbrev_space(total), + disk_used = self.render_space(used), + disk_used_abbrev = self.render_abbrev_space(used), + disk_free_for_root = self.render_space(free_root), + disk_free_for_root_abbrev = self.render_abbrev_space(free_root), + disk_free_for_nonroot = self.render_space(free_nonroot), + disk_free_for_nonroot_abbrev = self.render_abbrev_space(free_nonroot), + reserved_space = 
self.render_space(reserved), + reserved_space_abbrev = self.render_abbrev_space(reserved), + disk_avail = self.render_space(available), + disk_avail_abbrev = self.render_abbrev_space(available) + ) + return tag @renderer def accepting_immutable_shares(self, req, tag): diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index d052fbabd..bbf3d2c8a 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -11,15 +11,15 @@

        Storage Server Status

        - +
        - - + + - - + + - - + + - - + + - - + + - - + +
        Total disk space:()()
        Disk space used:- ()- ()
        @@ -28,18 +28,18 @@
        Disk space free (root):()() [see 1]
        Disk space free (non-root):()() [see 2]
        Reserved space:- ()- ()
        @@ -48,8 +48,8 @@
        Space Available to Tahoe:()()
        From 554c477cea5a3c777794923d406137aa1be46f18 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 25 Feb 2020 08:52:45 -0500 Subject: [PATCH 0085/1472] Prefix member variable with "_" --- src/allmydata/test/test_storage.py | 2 +- src/allmydata/web/storage.py | 40 +++++++++++++++--------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 0a176bd60..56d281688 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -2968,7 +2968,7 @@ def renderSynchronously(ss): """ :param _StorageStatus ss: a StorageStatus instance. """ - elem = StorageStatusElement(ss.storage, ss.nickname) + elem = StorageStatusElement(ss._storage, ss._nickname) deferred = flattenString(None, elem) return unittest.TestCase().successResultOf(deferred) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index d26f4a5dd..9b3daa0cb 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -33,16 +33,16 @@ class StorageStatusElement(Element): :param string nickname: friendly name for storage. """ super(StorageStatusElement, self).__init__() - self.storage = storage - self.nick = nickname + self._storage = storage + self._nickname = nickname @renderer def nickname(self, req, tag): - return self.nick + return self._nickname @renderer def nodeid(self, req, tag): - return idlib.nodeid_b2a(self.storage.my_nodeid) + return idlib.nodeid_b2a(self._storage.my_nodeid) def _get_storage_stat(self, key): """Get storage server statistics. @@ -69,7 +69,7 @@ class StorageStatusElement(Element): :param str key: storage server statistic we want to know. """ - return self.storage.get_stats().get(key) + return self._storage.get_stats().get(key) def render_abbrev_space(self, size): if size is None: @@ -114,7 +114,7 @@ class StorageStatusElement(Element): @renderer def last_complete_bucket_count(self, req, tag): - s = self.storage.bucket_counter.get_state() + s = self._storage.bucket_counter.get_state() count = s.get("last-complete-bucket-count") if count is None: return "Not computed yet" @@ -122,7 +122,7 @@ class StorageStatusElement(Element): @renderer def count_crawler_status(self, req, tag): - p = self.storage.bucket_counter.get_progress() + p = self._storage.bucket_counter.get_progress() return self.format_crawler_progress(p) def format_crawler_progress(self, p): @@ -152,13 +152,13 @@ class StorageStatusElement(Element): @renderer def storage_running(self, req, tag): - if self.storage: + if self._storage: return tag return T.h1("No Storage Server Running") @renderer def lease_expiration_enabled(self, req, tag): - lc = self.storage.lease_checker + lc = self._storage.lease_checker if lc.expiration_enabled: return tag("Enabled: expired leases will be removed") else: @@ -166,7 +166,7 @@ class StorageStatusElement(Element): @renderer def lease_expiration_mode(self, req, tag): - lc = self.storage.lease_checker + lc = self._storage.lease_checker if lc.mode == "age": if lc.override_lease_duration is None: tag("Leases will expire naturally, probably 31 days after " @@ -189,13 +189,13 @@ class StorageStatusElement(Element): @renderer def lease_current_cycle_progress(self, req, tag): - lc = self.storage.lease_checker + lc = self._storage.lease_checker p = lc.get_progress() return tag(self.format_crawler_progress(p)) @renderer def lease_current_cycle_results(self, req, tag): - lc = self.storage.lease_checker + lc = self._storage.lease_checker p = lc.get_progress() if 
not p["cycle-in-progress"]: return "" @@ -252,7 +252,7 @@ class StorageStatusElement(Element): @renderer def lease_last_cycle_results(self, req, tag): - lc = self.storage.lease_checker + lc = self._storage.lease_checker h = lc.get_state()["history"] if not h: return "" @@ -304,18 +304,18 @@ class StorageStatusElement(Element): class StorageStatus(MultiFormatResource): def __init__(self, storage, nickname=""): super(StorageStatus, self).__init__() - self.storage = storage - self.nickname = nickname + self._storage = storage + self._nickname = nickname def render_HTML(self, req): - return renderElement(req, StorageStatusElement(self.storage, self.nickname)) + return renderElement(req, StorageStatusElement(self._storage, self._nickname)) def render_JSON(self, req): req.setHeader("content-type", "text/plain") - d = {"stats": self.storage.get_stats(), - "bucket-counter": self.storage.bucket_counter.get_state(), - "lease-checker": self.storage.lease_checker.get_state(), - "lease-checker-progress": self.storage.lease_checker.get_progress(), + d = {"stats": self._storage.get_stats(), + "bucket-counter": self._storage.bucket_counter.get_state(), + "lease-checker": self._storage.lease_checker.get_state(), + "lease-checker-progress": self._storage.lease_checker.get_progress(), } return json.dumps(d, indent=1) + "\n" From 110734daf065e7b2706e7d92b2361c21954c11eb Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 5 Mar 2020 15:45:18 -0500 Subject: [PATCH 0086/1472] Use a helper to exercise render() in storage test cases --- src/allmydata/test/test_storage.py | 50 ++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 56d281688..86b23cd48 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -30,6 +30,7 @@ from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ from allmydata.interfaces import BadWriteEnablerError from allmydata.test.common import LoggingServiceParent, ShouldFailMixin from allmydata.test.common_web import WebRenderingMixin +from nevow.testutil import FakeRequest from allmydata.test.no_network import NoNetworkServer from allmydata.web.storage import ( StorageStatus, @@ -2972,6 +2973,29 @@ def renderSynchronously(ss): deferred = flattenString(None, elem) return unittest.TestCase().successResultOf(deferred) +def renderDeferred(resource, **kwargs): + """ + Use this to exercise an overridden MultiFormatResource.render(), + usually for output=json or render_GET. It returns a Deferred. + + :param _MultiFormatResource resource: an HTTP resource to be rendered. + + """ + # We should be using twisted.web's DummyRequest here instead of + # nevow's FakeRequest, but right now it is a bit of a problem: see + # web/common.py. MultiFormatResource.render() makes a get_arg() + # call, which does a IRequest(ctx_or_req). IRequest can handle + # FakeRequest, but it can't handle DummyRequest. 
+ req = FakeRequest(**kwargs) + req.fields = None + d = defer.maybeDeferred(resource.render, req) + def _done(res): + if isinstance(res, str): + return res + req.v + return req.v + d.addCallback(_done) + return d + class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): BucketCountingCrawler.finished_prefix(self, cycle, prefix) @@ -3291,7 +3315,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failIfEqual(sr2["configured-diskbytes"], None) self.failIfEqual(sr2["original-sharebytes"], None) d.addCallback(_after_first_bucket) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) self.failUnlessIn("So far, this cycle has examined " @@ -3366,7 +3390,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnlessEqual(count_leases(mutable_si_2), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("recovered: 0 shares, 0 buckets " @@ -3466,7 +3490,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): d2.addCallback(_after_first_bucket) return d2 d.addCallback(_after_first_bucket) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) # the first bucket encountered gets deleted, and its prefix @@ -3525,7 +3549,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnless(rec["configured-diskbytes"] >= 0, rec["configured-diskbytes"]) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("Expiration Enabled: expired leases will be removed", s) @@ -3610,7 +3634,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): d2.addCallback(_after_first_bucket) return d2 d.addCallback(_after_first_bucket) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) # the first bucket encountered gets deleted, and its prefix @@ -3671,7 +3695,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnless(rec["configured-diskbytes"] >= 0, rec["configured-diskbytes"]) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("Expiration Enabled:" @@ -3733,7 +3757,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("The following sharetypes will be expired: immutable.", s) @@ -3790,7 +3814,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): self.failUnlessEqual(count_shares(mutable_si_2), 0) 
self.failUnlessEqual(count_shares(mutable_si_3), 0) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render1(webstatus)) + d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("The following sharetypes will be expired: mutable.", s) @@ -4021,7 +4045,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): # it also turns all tuples into lists self.failUnlessEqual(corrupt_shares, [[first_b32, 0]]) d.addCallback(_check_json) - d.addCallback(lambda ign: self.render1(w)) + d.addCallback(lambda ign: renderDeferred(w)) def _check_html(html): s = remove_tags(html) self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s) @@ -4046,7 +4070,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): corrupt_shares = last["corrupt-shares"] self.failUnlessEqual(corrupt_shares, [[first_b32, 0]]) d.addCallback(_check_json_history) - d.addCallback(lambda ign: self.render1(w)) + d.addCallback(lambda ign: renderDeferred(w)) def _check_html_history(html): s = remove_tags(html) self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s) @@ -4060,7 +4084,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): return d def render_json(self, page): - d = self.render1(page, args={"t": ["json"]}) + d = renderDeferred(page, args={"t": ["json"]}) return d class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): @@ -4083,7 +4107,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): ss = StorageServer(basedir, nodeid) ss.setServiceParent(self.s) w = StorageStatus(ss, "nickname") - d = self.render1(w) + d = renderDeferred(w) def _check_html(html): self.failUnlessIn("

<h1>Storage Server Status</h1>
        ", html) s = remove_tags(html) @@ -4104,7 +4128,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): return d def render_json(self, page): - d = self.render1(page, args={"t": ["json"]}) + d = renderDeferred(page, args={"t": ["json"]}) return d def test_status_no_disk_stats(self): From f1fe3a75884c2073310727a5f4c4127bfb928645 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 5 Mar 2020 15:46:09 -0500 Subject: [PATCH 0087/1472] Get rid of WebRenderingMixin in storage test --- src/allmydata/test/test_storage.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 86b23cd48..cff8c3e22 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -29,7 +29,6 @@ from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ SHARE_HASH_CHAIN_SIZE from allmydata.interfaces import BadWriteEnablerError from allmydata.test.common import LoggingServiceParent, ShouldFailMixin -from allmydata.test.common_web import WebRenderingMixin from nevow.testutil import FakeRequest from allmydata.test.no_network import NoNetworkServer from allmydata.web.storage import ( @@ -3185,7 +3184,7 @@ class InstrumentedStorageServer(StorageServer): class No_ST_BLOCKS_StorageServer(StorageServer): LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler -class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): +class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() @@ -4087,7 +4086,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): d = renderDeferred(page, args={"t": ["json"]}) return d -class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin): +class WebStatus(unittest.TestCase, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() From bae32179bf467e8802d235e5bcd4c1c18411a5c9 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 5 Mar 2020 15:57:12 -0500 Subject: [PATCH 0088/1472] Remove StorageStatus.renderHTTP This was added to please the test suite. Pleased to remove it! --- src/allmydata/web/storage.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 9b3daa0cb..63ac47282 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -318,19 +318,3 @@ class StorageStatus(MultiFormatResource): "lease-checker-progress": self._storage.lease_checker.get_progress(), } return json.dumps(d, indent=1) + "\n" - - # to appease the test suite - def renderHTTP(self, ctx=None): - """Send HTML or JSON formatted data, based on request. - - This function contains a bit of nevow-ism, but since this is - only called from the test suite, the nevow-ism should go away - as we update things. - - :param _nevow.context.WovenContext ctx: context is passed on - from the test suite. We get a request out of this - context, and use the request to render a result. - - """ - from nevow.inevow import IRequest - return self.render(IRequest(ctx)) From 9c7357bc61dd74e4b15ae0da2d28a5275acd6b5e Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 30 Mar 2020 16:15:44 -0400 Subject: [PATCH 0089/1472] Remove an extraneous directive `t:data` is not really a Twisted template directive. Added my mistake, removing now. 
--- src/allmydata/web/storage_status.xhtml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/web/storage_status.xhtml b/src/allmydata/web/storage_status.xhtml index bbf3d2c8a..354f9f177 100644 --- a/src/allmydata/web/storage_status.xhtml +++ b/src/allmydata/web/storage_status.xhtml @@ -57,7 +57,7 @@
        • Server Nickname:
        • Server Nodeid:
        • -
        • Accepting new shares: +
        • Accepting new shares:
        • Total buckets: From 8c92187d9263fa8d092c62c1305200191e850ec5 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 19:00:55 -0400 Subject: [PATCH 0090/1472] Avoid using nevow FakeRequest in storage test. Use twisted.web.server.Request instead, with a DummyChannel. There's still one line of inevitable nevow now, because of code in web/common.py; but that should be easily replaceable once we switch that over. --- src/allmydata/test/test_storage.py | 47 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index cff8c3e22..40b9b52e9 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -5,6 +5,16 @@ from twisted.trial import unittest from twisted.internet import defer from twisted.application import service from twisted.web.template import flattenString + +# We need to use `nevow.inevow.IRequest` for now for compatibility +# with the code in web/common.py. Once nevow bits are gone from +# web/common.py, we can use `twisted.web.iweb.IRequest` here. +from nevow.inevow import IRequest + +from twisted.web.server import Request +from twisted.web.test.test_web import DummyChannel +from zope.interface import implements + from foolscap.api import fireEventually import itertools from allmydata import interfaces @@ -29,7 +39,6 @@ from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ SHARE_HASH_CHAIN_SIZE from allmydata.interfaces import BadWriteEnablerError from allmydata.test.common import LoggingServiceParent, ShouldFailMixin -from nevow.testutil import FakeRequest from allmydata.test.no_network import NoNetworkServer from allmydata.web.storage import ( StorageStatus, @@ -2972,28 +2981,20 @@ def renderSynchronously(ss): deferred = flattenString(None, elem) return unittest.TestCase().successResultOf(deferred) -def renderDeferred(resource, **kwargs): - """ - Use this to exercise an overridden MultiFormatResource.render(), - usually for output=json or render_GET. It returns a Deferred. +def renderDeferred(ss): + elem = StorageStatusElement(ss._storage, ss._nickname) + return flattenString(None, elem) - :param _MultiFormatResource resource: an HTTP resource to be rendered. +class JSONRequest(Request): + implements(IRequest) - """ - # We should be using twisted.web's DummyRequest here instead of - # nevow's FakeRequest, but right now it is a bit of a problem: see - # web/common.py. MultiFormatResource.render() makes a get_arg() - # call, which does a IRequest(ctx_or_req). IRequest can handle - # FakeRequest, but it can't handle DummyRequest. 
- req = FakeRequest(**kwargs) - req.fields = None - d = defer.maybeDeferred(resource.render, req) - def _done(res): - if isinstance(res, str): - return res + req.v - return req.v - d.addCallback(_done) - return d + def __init__(self, **kwargs): + Request.__init__(self, DummyChannel(), **kwargs) + self.args = {"t": ["json"]} + self.fields = {} + +def renderJSON(resource): + return resource.render(JSONRequest()) class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): @@ -4083,7 +4084,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): return d def render_json(self, page): - d = renderDeferred(page, args={"t": ["json"]}) + d = renderJSON(page) return d class WebStatus(unittest.TestCase, pollmixin.PollMixin): @@ -4127,7 +4128,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin): return d def render_json(self, page): - d = renderDeferred(page, args={"t": ["json"]}) + d = renderJSON(page) return d def test_status_no_disk_stats(self): From 3e7dea7dda3bee3dfb2713297c10f3fac1b2698f Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 19:01:22 -0400 Subject: [PATCH 0091/1472] Wrap renderer results in tags --- src/allmydata/web/storage.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index 63ac47282..ba6609456 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -38,11 +38,11 @@ class StorageStatusElement(Element): @renderer def nickname(self, req, tag): - return self._nickname + return tag(self._nickname) @renderer def nodeid(self, req, tag): - return idlib.nodeid_b2a(self._storage.my_nodeid) + return tag(idlib.nodeid_b2a(self._storage.my_nodeid)) def _get_storage_stat(self, key): """Get storage server statistics. 
@@ -110,20 +110,20 @@ class StorageStatusElement(Element): @renderer def accepting_immutable_shares(self, req, tag): accepting = self._get_storage_stat("storage_server.accepting_immutable_shares") - return {True: "Yes", False: "No"}[bool(accepting)] + return tag({True: "Yes", False: "No"}[bool(accepting)]) @renderer def last_complete_bucket_count(self, req, tag): s = self._storage.bucket_counter.get_state() count = s.get("last-complete-bucket-count") if count is None: - return "Not computed yet" - return str(count) + return tag("Not computed yet") + return tag(str(count)) @renderer def count_crawler_status(self, req, tag): p = self._storage.bucket_counter.get_progress() - return self.format_crawler_progress(p) + return tag(self.format_crawler_progress(p)) def format_crawler_progress(self, p): cycletime = p["estimated-time-per-cycle"] From 8b7ef33b3d3b7caf6a91f6f803111288d2bb5ed4 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 19:05:17 -0400 Subject: [PATCH 0092/1472] Remove redundant render_json() method --- src/allmydata/test/test_storage.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 40b9b52e9..e20a89da6 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -3399,7 +3399,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): "(2 mutable / 2 immutable),", s) self.failUnlessIn("but expiration was not enabled", s) d.addCallback(_check_html) - d.addCallback(lambda ign: self.render_json(webstatus)) + d.addCallback(lambda ign: renderJSON(webstatus)) def _check_json(raw): data = json.loads(raw) self.failUnlessIn("lease-checker", data) @@ -4036,7 +4036,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(so_far["corrupt-shares"], [(first_b32, 0)]) d.addCallback(_after_first_bucket) - d.addCallback(lambda ign: self.render_json(w)) + d.addCallback(lambda ign: renderJSON(w)) def _check_json(raw): data = json.loads(raw) # grr. json turns all dict keys into strings. 
@@ -4063,7 +4063,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(rec["examined-shares"], 3) self.failUnlessEqual(last["corrupt-shares"], [(first_b32, 0)]) d.addCallback(_after_first_cycle) - d.addCallback(lambda ign: self.render_json(w)) + d.addCallback(lambda ign: renderJSON(w)) def _check_json_history(raw): data = json.loads(raw) last = data["lease-checker"]["history"]["0"] @@ -4083,9 +4083,6 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): d.addBoth(_cleanup) return d - def render_json(self, page): - d = renderJSON(page) - return d class WebStatus(unittest.TestCase, pollmixin.PollMixin): @@ -4116,7 +4113,7 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin): self.failUnlessIn("Accepting new shares: Yes", s) self.failUnlessIn("Reserved space: - 0 B (0)", s) d.addCallback(_check_html) - d.addCallback(lambda ign: self.render_json(w)) + d.addCallback(lambda ign: renderJSON(w)) def _check_json(raw): data = json.loads(raw) s = data["stats"] @@ -4127,9 +4124,6 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin): d.addCallback(_check_json) return d - def render_json(self, page): - d = renderJSON(page) - return d def test_status_no_disk_stats(self): def call_get_disk_stats(whichdir, reserved_space=0): From 72b8f720802f91d5aef25643b326d1ba98b54cc2 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 19:13:08 -0400 Subject: [PATCH 0093/1472] Add docstrings to storage test helpers --- src/allmydata/test/test_storage.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index e20a89da6..b69713df5 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -2975,6 +2975,8 @@ def remove_tags(s): def renderSynchronously(ss): """ + Return fully rendered HTML document. + :param _StorageStatus ss: a StorageStatus instance. """ elem = StorageStatusElement(ss._storage, ss._nickname) @@ -2982,10 +2984,20 @@ def renderSynchronously(ss): return unittest.TestCase().successResultOf(deferred) def renderDeferred(ss): + """ + Return a `Deferred` HTML renderer. + + :param _StorageStatus ss: a StorageStatus instance. + """ elem = StorageStatusElement(ss._storage, ss._nickname) return flattenString(None, elem) class JSONRequest(Request): + """ + A Request with t=json argument added to it. + + This is useful to invoke a Resouce.render_JSON() method. + """ implements(IRequest) def __init__(self, **kwargs): @@ -2994,6 +3006,11 @@ class JSONRequest(Request): self.fields = {} def renderJSON(resource): + """Exercise resouce.render_JSON() + + :param _MultiFormatResource resouce: A `twisted.web.resouce.Resource` + that contains a render_JSON() method. + """ return resource.render(JSONRequest()) class MyBucketCountingCrawler(BucketCountingCrawler): From b2b706198042fa39ddc4959d3804647340c8bb24 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 19:18:15 -0400 Subject: [PATCH 0094/1472] Refactor storage test helpers Rewrite `renderSynchronously()` to use `renderDeferred()` --- src/allmydata/test/test_storage.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index b69713df5..f50c3b352 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -2979,9 +2979,7 @@ def renderSynchronously(ss): :param _StorageStatus ss: a StorageStatus instance. 
""" - elem = StorageStatusElement(ss._storage, ss._nickname) - deferred = flattenString(None, elem) - return unittest.TestCase().successResultOf(deferred) + return unittest.TestCase().successResultOf(renderDeferred(ss)) def renderDeferred(ss): """ From 82cd5a87fe9582dd1de8e021b8b8a8459d3ba499 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 6 Apr 2020 21:48:49 -0400 Subject: [PATCH 0095/1472] Use DummyRequest in storage tests Using twisted.web.server.Request causes test_new_style_classes to fail like so: Traceback (most recent call last): Failure: testtools.testresult.real._StringException: Traceback (most recent call last): File ".tox/coverage/lib/python2.7/site-packages/allmydata/test/test_python2_regressions.py", line 69, in test_new_style_classes "Expected to find no classic classes.", File ".tox/coverage/lib/python2.7/site-packages/testtools/testcase.py", line 502, in assertThat raise mismatch_error testtools.matchers._impl.MismatchError: !=: reference = set([]) actual = set([]) : Expected to find no classic classes. Seems that `DummyRequest` is an acceptable new style class. --- src/allmydata/test/test_storage.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index f50c3b352..a0816a6ce 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -11,8 +11,8 @@ from twisted.web.template import flattenString # web/common.py, we can use `twisted.web.iweb.IRequest` here. from nevow.inevow import IRequest -from twisted.web.server import Request -from twisted.web.test.test_web import DummyChannel +# from twisted.web.server import Request +from twisted.web.test.test_web import DummyRequest from zope.interface import implements from foolscap.api import fireEventually @@ -2990,7 +2990,7 @@ def renderDeferred(ss): elem = StorageStatusElement(ss._storage, ss._nickname) return flattenString(None, elem) -class JSONRequest(Request): +class JSONRequest(DummyRequest): """ A Request with t=json argument added to it. @@ -2999,7 +2999,7 @@ class JSONRequest(Request): implements(IRequest) def __init__(self, **kwargs): - Request.__init__(self, DummyChannel(), **kwargs) + DummyRequest.__init__(self, b"/", **kwargs) self.args = {"t": ["json"]} self.fields = {} From a14cee5cce616a5490f64f1aa71ba9106c3e3270 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 9 Apr 2020 15:09:58 -0400 Subject: [PATCH 0096/1472] Remove nevow from allmydata.web.root.IncidentReporter This change calls for an explanation: - `RenderMixin` doesn't seem to be adding anything here, so it is gone. - The web browser was unhappy without a charset in the response (Firefox 74 was anyway), so `content-type` header also gets a `charset=UTF-8`. - Returning a Unicode string made nevow appserver unhappy, so it is just a `str`. 
The precise error message was: exceptions.TypeError: ('Could not adapt', u'An incident report has been saved to logs/incidents/ in the node directory.', ) Fixes: ticket:3294 --- newsfragments/3294.minor | 0 src/allmydata/web/root.py | 13 +++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 newsfragments/3294.minor diff --git a/newsfragments/3294.minor b/newsfragments/3294.minor new file mode 100644 index 000000000..e69de29bb diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 8726cb00f..95c80994e 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -28,7 +28,7 @@ from allmydata.web.common import ( WebError, get_arg, MultiFormatPage, - RenderMixin, + MultiFormatResource, get_format, get_mutable_type, render_time_delta, @@ -169,14 +169,15 @@ class FileHandler(rend.Page): raise WebError("/file must be followed by a file-cap and a name", http.NOT_FOUND) -class IncidentReporter(RenderMixin, rend.Page): - def render_POST(self, ctx): - req = IRequest(ctx) +class IncidentReporter(MultiFormatResource): + """Handler for /report_incident POST request""" + + def render(self, req): log.msg(format="User reports incident through web page: %(details)s", details=get_arg(req, "details", ""), level=log.WEIRD, umid="LkD9Pw") - req.setHeader("content-type", "text/plain") - return "An incident report has been saved to logs/incidents/ in the node directory." + req.setHeader("content-type", "text/plain; charset=UTF-8") + return b"An incident report has been saved to logs/incidents/ in the node directory." SPACE = u"\u00A0"*2 From 497a832ad920e47ef71608a953ba950430719ce5 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 9 Apr 2020 15:54:33 -0400 Subject: [PATCH 0097/1472] Use Twisted < 20.0.0 Twisted 20.3.0 has dropped Python 2.7 support, so we need to stick to the prior release as long as we use Python 2.7. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index caa37f16e..17ae09295 100644 --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ install_requires = [ # `pip install tahoe-lafs[sftp]` would not install requirements # specified by Twisted[conch]. Since this would be the *whole point* of # an sftp extra in Tahoe-LAFS, there is no point in having one. - "Twisted[tls,conch] >= 18.4.0", + "Twisted[tls,conch] >= 18.4.0, < 20.0.0", # We need Nevow >= 0.11.1 which can be installed using pip. "Nevow >= 0.11.1", From 15131a9f71da04c448b9fc707556a77ce18f836d Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Tue, 14 Apr 2020 16:10:20 -0400 Subject: [PATCH 0098/1472] Handle just POST requests in IncidentReporter --- src/allmydata/web/root.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 95c80994e..06d02efbc 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -173,6 +173,9 @@ class IncidentReporter(MultiFormatResource): """Handler for /report_incident POST request""" def render(self, req): + if req.method != "POST": + raise WebError("/report_incident can only be used with POST") + log.msg(format="User reports incident through web page: %(details)s", details=get_arg(req, "details", ""), level=log.WEIRD, umid="LkD9Pw") From 73938ad446bff329864fd82f03e7d3b5ebf503ff Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 16 Apr 2020 13:30:08 -0400 Subject: [PATCH 0099/1472] Drop upper bound on Twisted version CI broke build because CI was broke; the new Twisted release wasn't the problem. 
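For anyone unfamiliar with the specifier involved here, a quick illustrative check with `pkg_resources` (not part of the patch) shows how the extras and the version bounds combine:

```python
from pkg_resources import Requirement

req = Requirement.parse("Twisted[tls,conch] >= 18.4.0, < 20.0.0")
print(req.project_name)    # Twisted
print(sorted(req.extras))  # ['conch', 'tls']
print("19.10.0" in req)    # True: satisfies both bounds
print("20.3.0" in req)     # False only while the upper bound is present
```

Dropping the `, < 20.0.0` leaves the extras and the lower bound intact while allowing newer Twisted releases again.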
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 17ae09295..caa37f16e 100644 --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ install_requires = [ # `pip install tahoe-lafs[sftp]` would not install requirements # specified by Twisted[conch]. Since this would be the *whole point* of # an sftp extra in Tahoe-LAFS, there is no point in having one. - "Twisted[tls,conch] >= 18.4.0, < 20.0.0", + "Twisted[tls,conch] >= 18.4.0", # We need Nevow >= 0.11.1 which can be installed using pip. "Nevow >= 0.11.1", From fd4d2709a57ed66006f948c2957dba24410ce013 Mon Sep 17 00:00:00 2001 From: meejah Date: Thu, 22 Aug 2019 17:10:02 -0600 Subject: [PATCH 0100/1472] search-replace "n:" -> "t:" --- src/allmydata/web/directory.xhtml | 38 +++++++++++++++---------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/allmydata/web/directory.xhtml b/src/allmydata/web/directory.xhtml index ee94d5814..ffeff1de7 100644 --- a/src/allmydata/web/directory.xhtml +++ b/src/allmydata/web/directory.xhtml @@ -2,7 +2,7 @@ - + @@ -30,19 +30,19 @@
          -

          +

          -
          - - +
          +
          + @@ -51,25 +51,25 @@ - - - - - - - - + + + + + + + + - +
          Type Filename Size
          This directory is empty.
          This directory is empty.
          @@ -124,7 +122,7 @@
          @@ -138,22 +136,32 @@

          -
          img/connected-.png
          -
          +
          + + img/connected-.png + + +
          +

          -
          img/connected-.png
          -
          +
          + + img/connected-.png + + +
          +

          -
          +

          Services

          -
          +
          @@ -161,61 +169,89 @@

          - Connected to - of known storage servers + Connected to + of known storage servers

          - +
          - + - + - + - - - + + + - +

          Nickname

          Connection

          Last RX

          Last RX

          Version

          Available

          -
          img/connected-.png
          -
          -
          +
          + + img/connected-.png + + +
          +
          +
          - - + + - - + + + + + + +
          You are not presently connected to any servers.
          You are not presently connected to any servers.
          -

          Connected to of introducers

          +

          Connected to of introducers

          - +
          - + - + - + + - - +

          Connection

          Last RX

          Last RX

          -
          img/connected-.png
          - -
          +
          + + img/connected-.png + + +
          + + + + +
          + + + + +
          +
          + + + +
          No introducers are configured.
          No introducers are configured.
          @@ -224,9 +260,9 @@
          From f34cdb81adf7ec4665c7a6fec1b9ca0af2d7cf49 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 27 Apr 2020 16:42:03 -0400 Subject: [PATCH 0217/1472] Create RootElement --- src/allmydata/web/root.py | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index f9f361a9c..3cf9e9db3 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -16,6 +16,15 @@ from nevow.inevow import IRequest from nevow.static import File as nevow_File # TODO: merge with static.File? from nevow.util import resource_filename +from twisted.python.filepath import FilePath +from twisted.web.template import ( + Element, + XMLFile, + renderer, + renderElement, + tags +) + import allmydata # to display import path from allmydata.version_checks import get_package_versions_string from allmydata.util import log @@ -193,10 +202,9 @@ class IncidentReporter(MultiFormatResource): SPACE = u"\u00A0"*2 -class Root(MultiFormatPage): +class Root(MultiFormatResource): addSlash = True - docFactory = getxmlfile("welcome.xhtml") _connectedalts = { "not-configured": "Not Configured", @@ -205,7 +213,7 @@ class Root(MultiFormatPage): } def __init__(self, client, clock=None, now_fn=None): - rend.Page.__init__(self, client) + super(Root, self).__init__() self.client = client self.now_fn = now_fn @@ -261,13 +269,8 @@ class Root(MultiFormatPage): def data_import_path(self, ctx, data): return str(allmydata) - def render_my_nodeid(self, ctx, data): - tubid_s = "TubID: "+self.client.get_long_tubid() - return T.td(title=tubid_s)[self.client.get_long_nodeid()] - - def data_my_nickname(self, ctx, data): - return self.client.nickname - + def render_HTML(self, req): + return renderElement(req, RootElement(self.client)) def render_JSON(self, req): req.setHeader("content-type", "application/json; charset=utf-8") @@ -307,6 +310,22 @@ class Root(MultiFormatPage): return description +class RootElement(Element): + + loader = XMLFile(FilePath(__file__).sibling("welcome.xhtml")) + + def __init__(self, client): + super(RootElement, self).__init__() + self._client = client + + @renderer + def my_nodeid(self, req, tag): + tubid_s = "TubID: "+self._client.get_long_tubid() + return tags.td(self._client.get_long_nodeid(), title=tubid_s) + + @renderer + def my_nickname(self, req, tag): + return tag(self._client.nickname) def render_services(self, ctx, data): ul = T.ul() From 110528f5693c9aa80163c931fc54516091cf5a54 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Mon, 27 Apr 2020 16:42:26 -0400 Subject: [PATCH 0218/1472] Handle path "/" when rendering root path --- src/allmydata/web/root.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 3cf9e9db3..1803582b1 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -257,7 +257,9 @@ class Root(MultiFormatResource): except KeyError: storage_server = None return storage.StorageStatus(storage_server, self.client.nickname) - + if not path: + # Render "/" path. + return self # FIXME: This code is duplicated in root.py and introweb.py. 
From 65b6daf990a5b13cf4b58574dc3edcaf65702765 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 16:43:47 -0400
Subject: [PATCH 0219/1472] Rewrite incident button using twisted tags

---
 src/allmydata/web/root.py | 24 ++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 1803582b1..1bcafde56 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -567,14 +567,18 @@ class RootElement(Element):
             ]]
         return T.div[form]
 
-    def render_incident_button(self, ctx, data):
+    @renderer
+    def incident_button(self, req, tag):
         # this button triggers a foolscap-logging "incident"
-        form = T.form(action="report_incident", method="post",
-                      enctype="multipart/form-data")[
-            T.fieldset[
-                T.input(type="hidden", name="t", value="report-incident"),
-                "What went wrong?"+SPACE,
-                T.input(type="text", name="details"), SPACE,
-                T.input(type="submit", value=u"Save \u00BB"),
-                ]]
-        return T.div[form]
+        form = tags.form(
+            tags.fieldset(
+                tags.input(type="hidden", name="t", value="report-incident"),
+                "What went wrong?"+SPACE,
+                tags.input(type="text", name="details"), SPACE,
+                tags.input(type="submit", value=u"Save \u00BB"),
+            ),
+            action="report_incident",
+            method="post",
+            enctype="multipart/form-data"
+        )
+        return tags.div(form)

From c86d4341c87e25545960ac8418d93e1b2128220b Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 16:52:54 -0400
Subject: [PATCH 0220/1472] Render "connected to introducers" status

---
 src/allmydata/web/root.py | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 1bcafde56..45f17a9ae 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -206,12 +206,6 @@ class Root(MultiFormatResource):
 
     addSlash = True
 
-    _connectedalts = {
-        "not-configured": "Not Configured",
-        "yes": "Connected",
-        "no": "Disconnected",
-    }
-
     def __init__(self, client, clock=None, now_fn=None):
         super(Root, self).__init__()
         self.client = client
@@ -320,6 +314,12 @@ class RootElement(Element):
         super(RootElement, self).__init__()
         self._client = client
 
+    _connectedalts = {
+        "not-configured": "Not Configured",
+        "yes": "Connected",
+        "no": "Disconnected",
+    }
+
     @renderer
     def my_nodeid(self, req, tag):
         tubid_s = "TubID: "+self._client.get_long_tubid()
@@ -329,6 +329,21 @@ class RootElement(Element):
     def my_nickname(self, req, tag):
         return tag(self._client.nickname)
 
+    def _connected_introducers(self):
+        return len([1 for cs in self._client.introducer_connection_statuses()
+                    if cs.connected])
+
+    @renderer
+    def connected_to_at_least_one_introducer(self, req, tag):
+        if self._connected_introducers():
+            return "yes"
+        return "no"
+
+    @renderer
+    def connected_to_at_least_one_introducer_alt(self, req, tag):
+        state = self.connected_to_at_least_one_introducer(req, tag)
+        return self._connectedalts.get(state)
+
     def render_services(self, ctx, data):
         ul = T.ul()
         try:
@@ -366,18 +381,6 @@ class RootElement(Element):
     def data_total_introducers(self, ctx, data):
         return len(self.client.introducer_connection_statuses())
 
-    def data_connected_introducers(self, ctx, data):
-        return len([1 for cs in self.client.introducer_connection_statuses()
-                    if cs.connected])
-
-    def data_connected_to_at_least_one_introducer(self, ctx, data):
-        if self.data_connected_introducers(ctx, data):
-            return "yes"
-        return "no"
-
-    def data_connected_to_at_least_one_introducer_alt(self, ctx, data):
-        return self._connectedalts[self.data_connected_to_at_least_one_introducer(ctx, data)]
-
     # In case we configure multiple introducers
     def data_introducers(self, ctx, data):
         return self.client.introducer_connection_statuses()
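[Note: the incident_button rewrite in patch 0219 shows the mechanical
translation from nevow's stan syntax, where children go in square brackets
after the attributes, to twisted.web.template's tags, where children are
positional arguments and attributes are keyword arguments. A side-by-side
sketch, using a hypothetical two-field form rather than Tahoe code.]

    from twisted.web.template import tags

    # nevow stan, for comparison:
    #     T.form(action="x", method="post")[
    #         T.fieldset[T.input(type="text", name="details")]]
    #
    # twisted.web.template equivalent:
    form = tags.form(
        tags.fieldset(
            tags.input(type="text", name="details"),
            tags.input(type="submit", value=u"Save"),
        ),
        action="x",
        method="post",
    )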
From a4f86181f9e2d5f0ca77a5b31d3e7cb4209505f6 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 16:55:10 -0400
Subject: [PATCH 0221/1472] Render introducer description

---
 src/allmydata/web/root.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 45f17a9ae..d717662fa 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -369,14 +369,15 @@ class RootElement(Element):
 
         return ctx.tag[ul]
 
-    def data_introducer_description(self, ctx, data):
-        connected_count = self.data_connected_introducers( ctx, data )
+    @renderer
+    def introducer_description(self, req, tag):
+        connected_count = self._connected_introducers()
         if connected_count == 0:
-            return "No introducers connected"
+            return tag("No introducers connected")
         elif connected_count == 1:
-            return "1 introducer connected"
+            return tag("1 introducer connected")
         else:
-            return "%s introducers connected" % (connected_count,)
+            return tag("%s introducers connected" % (connected_count,))
 
     def data_total_introducers(self, ctx, data):
         return len(self.client.introducer_connection_statuses())

From 622d15627860f7dc20fe41e7abce32984906a2ee Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:01:37 -0400
Subject: [PATCH 0222/1472] Render "connected to helpers" status

---
 src/allmydata/web/root.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index d717662fa..817a81ded 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -449,9 +449,10 @@ class RootElement(Element):
             return "Helper not connected"
         return "Helper"
 
-    def data_connected_to_helper(self, ctx, data):
+    @renderer
+    def connected_to_helper(self, req, tag):
         try:
-            uploader = self.client.getServiceNamed("uploader")
+            uploader = self._client.getServiceNamed("uploader")
         except KeyError:
             return "no" # we don't even have an Uploader
         furl, connected = uploader.get_helper_info()
@@ -462,8 +463,10 @@ class RootElement(Element):
             return "yes"
         return "no"
 
-    def data_connected_to_helper_alt(self, ctx, data):
-        return self._connectedalts[self.data_connected_to_helper(ctx, data)]
+    @renderer
+    def connected_to_helper_alt(self, req, tag):
+        state = self.connected_to_helper(req, tag)
+        return self._connectedalts.get(state)

From 332bd75fcc0531da8a60d29ac199122d69d95cd1 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:03:52 -0400
Subject: [PATCH 0223/1472] Render helper description

---
 src/allmydata/web/root.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 817a81ded..d18fff172 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -444,10 +444,11 @@ class RootElement(Element):
         (prefix, _, swissnum) = furl.rpartition("/")
         return "%s/[censored]" % (prefix,)
 
-    def data_helper_description(self, ctx, data):
-        if self.data_connected_to_helper(ctx, data) == "no":
-            return "Helper not connected"
-        return "Helper"
+    @renderer
+    def helper_description(self, req, tag):
+        if self.connected_to_helper(req, tag) == "no":
+            return tag("Helper not connected")
+        return tag("Helper")
 
     @renderer
     def connected_to_helper(self, req, tag):
From 8b4c7296d7405f6b796da7f95f652a3d039eeb02 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:07:09 -0400
Subject: [PATCH 0224/1472] Move helper state to a separate function

---
 src/allmydata/web/root.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index d18fff172..26e365f06 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -444,14 +444,7 @@ class RootElement(Element):
         (prefix, _, swissnum) = furl.rpartition("/")
         return "%s/[censored]" % (prefix,)
 
-    @renderer
-    def helper_description(self, req, tag):
-        if self.connected_to_helper(req, tag) == "no":
-            return tag("Helper not connected")
-        return tag("Helper")
-
-    @renderer
-    def connected_to_helper(self, req, tag):
+    def _connected_to_helper(self):
         try:
             uploader = self._client.getServiceNamed("uploader")
         except KeyError:
@@ -464,10 +457,19 @@ class RootElement(Element):
             return "yes"
         return "no"
 
+    @renderer
+    def helper_description(self, req, tag):
+        if self._connected_to_helper() == "no":
+            return tag("Helper not connected")
+        return tag("Helper")
+
+    @renderer
+    def connected_to_helper(self, req, tag):
+        return tag(self._connected_to_helper())
+
     @renderer
     def connected_to_helper_alt(self, req, tag):
-        state = self.connected_to_helper(req, tag)
-        return self._connectedalts.get(state)
+        return tag(self._connectedalts.get(self._connected_to_helper()))

From 39d616a971c199380eddd9b9a19b7e3907be765e Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:11:36 -0400
Subject: [PATCH 0225/1472] Render helper furl prefix

---
 src/allmydata/web/root.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 26e365f06..8c9a2362d 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -432,17 +432,18 @@ class RootElement(Element):
             self._render_connection_status(ctx, cs)
         return ctx.tag
 
-    def data_helper_furl_prefix(self, ctx, data):
+    @renderer
+    def helper_furl_prefix(self, req, tag):
         try:
-            uploader = self.client.getServiceNamed("uploader")
+            uploader = self._client.getServiceNamed("uploader")
         except KeyError:
-            return None
+            return tag("None")
         furl, connected = uploader.get_helper_info()
         if not furl:
-            return None
+            return tag("None")
         # trim off the secret swissnum
         (prefix, _, swissnum) = furl.rpartition("/")
-        return "%s/[censored]" % (prefix,)
+        return tag("%s/[censored]" % (prefix,))
 
     def _connected_to_helper(self):
         try:

From 1713076a9ddd382e4ae7462049cfe392d80c641c Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:19:19 -0400
Subject: [PATCH 0226/1472] Render services

---
 src/allmydata/web/root.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 8c9a2362d..e3b5de271 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -344,10 +344,11 @@ class RootElement(Element):
         state = self.connected_to_at_least_one_introducer(req, tag)
         return self._connectedalts.get(state)
 
-    def render_services(self, ctx, data):
-        ul = T.ul()
+    @renderer
+    def services(self, req, tag):
+        ul = tags.ul()
         try:
-            ss = self.client.getServiceNamed("storage")
+            ss = self._client.getServiceNamed("storage")
             stats = ss.get_stats()
             if stats["storage_server.accepting_immutable_shares"]:
                 msg = "accepting new shares"
@@ -356,18 +357,18 @@ class RootElement(Element):
             available = stats.get("storage_server.disk_avail")
             if available is not None:
                 msg += ", %s available" % abbreviate_size(available)
-            ul[T.li[T.a(href="storage")["Storage Server"], ": ", msg]]
+            ul(tags.li(tags.a("Storage Server", ": ", msg, href="storage")))
         except KeyError:
-            ul[T.li["Not running storage server"]]
+            ul(tags.li("Not running storage server"))
 
-        if self.client.helper:
-            stats = self.client.helper.get_stats()
+        if self._client.helper:
+            stats = self._client.helper.get_stats()
             active_uploads = stats["chk_upload_helper.active_uploads"]
-            ul[T.li["Helper: %d active uploads" % (active_uploads,)]]
+            ul(tags.li("Helper: %d active uploads" % (active_uploads,)))
         else:
-            ul[T.li["Not running helper"]]
+            ul(tags.li("Not running helper"))
 
-        return ctx.tag[ul]
+        return tag(ul)
 
     @renderer
     def introducer_description(self, req, tag):
From dcea7ea4769b8e2990ebf5bf253560341cf0717d Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 17:22:27 -0400
Subject: [PATCH 0227/1472] Render connected/known storage server status

---
 src/allmydata/web/root.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index e3b5de271..f431638f4 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -473,13 +473,15 @@ class RootElement(Element):
     def connected_to_helper_alt(self, req, tag):
         return tag(self._connectedalts.get(self._connected_to_helper()))
 
-    def data_known_storage_servers(self, ctx, data):
-        sb = self.client.get_storage_broker()
-        return len(sb.get_all_serverids())
+    @renderer
+    def known_storage_servers(self, req, tag):
+        sb = self._client.get_storage_broker()
+        return tag(str(len(sb.get_all_serverids())))
 
-    def data_connected_storage_servers(self, ctx, data):
-        sb = self.client.get_storage_broker()
-        return len(sb.get_connected_servers())
+    @renderer
+    def connected_storage_servers(self, req, tag):
+        sb = self._client.get_storage_broker()
+        return tag(str(len(sb.get_connected_servers())))
 
     def data_services(self, ctx, data):
         sb = self.client.get_storage_broker()

From 3c4056a9476cfba3425d141a267bdf07f86faab2 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 27 Apr 2020 10:49:44 -0400
Subject: [PATCH 0228/1472] Replace nevow.static.File with
 twisted.web.static.File

---
 src/allmydata/web/root.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index f431638f4..a131596ae 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -13,7 +13,7 @@ from hyperlink import DecodedURL, URL
 
 from nevow import rend, tags as T
 from nevow.inevow import IRequest
-from nevow.static import File as nevow_File # TODO: merge with static.File?
+from twisted.web import static
 from nevow.util import resource_filename
 
 from twisted.python.filepath import FilePath
@@ -225,7 +225,7 @@ class Root(MultiFormatResource):
         self.putChild("statistics", status.Statistics(client.stats_provider))
         static_dir = resource_filename("allmydata.web", "static")
         for filen in os.listdir(static_dir):
-            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))
+            self.putChild(filen, static.File(os.path.join(static_dir, filen)))
 
         self.putChild("report_incident", IncidentReporter())
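[Note: patch 0228 swaps nevow's static file resource for
twisted.web.static.File, which serves a filesystem path as a child resource.
A small sketch of the putChild() loop it feeds, assuming a hypothetical
directory path; Tahoe's Python 2-era code passes the child name as str,
while newer Twisted expects bytes, hence the encode() here.]

    import os

    from twisted.web import static
    from twisted.web.resource import Resource

    def add_static_children(root, static_dir):
        # each file under static_dir becomes its own child resource
        for filen in os.listdir(static_dir):
            root.putChild(filen.encode("utf-8"),
                          static.File(os.path.join(static_dir, filen)))

    root = Resource()
    add_static_children(root, "/tmp/static")  # hypothetical path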
From 75d41843d7a203420b487a2e9a99a3eb83676d9f Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 04:59:46 -0400
Subject: [PATCH 0229/1472] Use transparent nodes

---
 src/allmydata/web/welcome.xhtml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
index bb6329bc7..a8aa0973d 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml
@@ -138,8 +138,8 @@
 [hunk body garbled in extraction: the markup around an
  "img/connected-.png" status icon is converted to a transparent node;
  the tags were stripped and cannot be recovered.]
@@ -149,8 +149,8 @@
 [hunk body garbled in extraction: the same change applied to a second
  "img/connected-.png" status icon.]
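[Note: the "transparent nodes" of patch 0229 refer to the template element
t:transparent, which renders only its children and emits no wrapper tag of
its own -- useful when a renderer must substitute a whole <img> rather than
decorate an existing element. A sketch with illustrative names, not the
actual welcome.xhtml markup.]

    from twisted.web.template import Element, XMLString, renderer, tags

    TEMPLATE = """
    <div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
      <t:transparent t:render="status_icon" />
    </div>
    """

    class StatusIcon(Element):
        loader = XMLString(TEMPLATE)

        def __init__(self, connected):
            super(StatusIcon, self).__init__()
            self._connected = connected

        @renderer
        def status_icon(self, request, tag):
            # only the <img> appears in the output; the transparent
            # node itself leaves no trace in the rendered HTML
            state = "yes" if self._connected else "no"
            alt = "Connected" if self._connected else "Disconnected"
            return tag(tags.img(src="img/connected-%s.png" % (state,),
                                alt=alt))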
From 907e8ccf6b0cee0a50c4422906eacced9e17251c Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:13:41 -0400
Subject: [PATCH 0230/1472] Use services renderer method in root template

---
 src/allmydata/web/welcome.xhtml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
index a8aa0973d..8e25a2da2 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml
@@ -173,7 +173,7 @@ of known storage servers
 [hunk body garbled in extraction: one template element is pointed at
  the new "services" renderer; the markup was stripped and cannot be
  recovered.]
From 164218cf57855c8e4bb96f334ae3a4ac076ed731 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:14:35 -0400
Subject: [PATCH 0231/1472] Update service table renderer

---
 src/allmydata/web/root.py | 84 ++++++++++++++++++++++++---------------
 1 file changed, 52 insertions(+), 32 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index a131596ae..9c016ac14 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -387,47 +387,48 @@ class RootElement(Element):
     def data_introducers(self, ctx, data):
         return self.client.introducer_connection_statuses()
 
-    def _render_connection_status(self, ctx, cs):
+    def _render_connection_status(self, tag, cs):
         connected = "yes" if cs.connected else "no"
-        ctx.fillSlots("service_connection_status", connected)
-        ctx.fillSlots("service_connection_status_alt",
+        tag.fillSlots("service_connection_status", connected)
+        tag.fillSlots("service_connection_status_alt",
                       self._connectedalts[connected])
 
         since = cs.last_connection_time
-        ctx.fillSlots("service_connection_status_rel_time",
+        tag.fillSlots("service_connection_status_rel_time",
                       render_time_delta(since, self.now_fn())
                       if since is not None
                       else "N/A")
-        ctx.fillSlots("service_connection_status_abs_time",
+        tag.fillSlots("service_connection_status_abs_time",
                       render_time_attr(since)
                       if since is not None
                       else "N/A")
 
         last_received_data_time = cs.last_received_time
-        ctx.fillSlots("last_received_data_abs_time",
+        tag.fillSlots("last_received_data_abs_time",
                       render_time_attr(last_received_data_time)
                       if last_received_data_time is not None
                       else "N/A")
-        ctx.fillSlots("last_received_data_rel_time",
-                      render_time_delta(last_received_data_time, self.now_fn())
+        tag.fillSlots("last_received_data_rel_time",
+                      render_time_delta(last_received_data_time,
+                                        self.now_fn())
                       if last_received_data_time is not None
                       else "N/A")
 
         others = cs.non_connected_statuses
         if cs.connected:
-            ctx.fillSlots("summary", cs.summary)
+            tag.fillSlots("summary", cs.summary)
             if others:
                 details = "\n".join(["* %s: %s\n" % (which, others[which])
                                      for which in sorted(others)])
-                ctx.fillSlots("details", "Other hints:\n" + details)
+                tag.fillSlots("details", "Other hints:\n" + details)
             else:
-                ctx.fillSlots("details", "(no other hints)")
+                tag.fillSlots("details", "(no other hints)")
         else:
-            details = T.ul()
+            details = tags.ul()
             for which in sorted(others):
-                details[T.li["%s: %s" % (which, others[which])]]
-            ctx.fillSlots("summary", [cs.summary, details])
-            ctx.fillSlots("details", "")
+                details[tags.li("%s: %s" % (which, others[which]))]
+            tag.fillSlots("summary", [cs.summary, details])
+            tag.fillSlots("details", "")
 
     def render_introducers_row(self, ctx, cs):
         self._render_connection_status(ctx, cs)
@@ -483,28 +484,47 @@ class RootElement(Element):
         sb = self._client.get_storage_broker()
         return tag(str(len(sb.get_connected_servers())))
 
-    def data_services(self, ctx, data):
-        sb = self.client.get_storage_broker()
+    def _services(self):
+        sb = self._client.get_storage_broker()
         return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())
 
-    def render_service_row(self, ctx, server):
-        cs = server.get_connection_status()
-        self._render_connection_status(ctx, cs)
+    @renderer
+    def service_row(self, req, tag):
+        servers = self._services()
 
-        ctx.fillSlots("peerid", server.get_longname())
-        ctx.fillSlots("nickname", server.get_nickname())
+        # FIXME: handle empty list of servers in a better manner.
+        if not servers:
+            tag.fillSlots(peerid="",
+                          nickname="",
+                          service_connection_status="",
+                          service_connection_status_alt="",
+                          details="",
+                          summary="",
+                          service_connection_status_abs_time="",
+                          service_connection_status_rel_time="",
+                          last_received_data_abs_time="",
+                          last_received_data_rel_time="",
+                          version="",
+                          available_space="")
 
-        announcement = server.get_announcement()
-        version = announcement.get("my-version", "")
-        available_space = server.get_available_space()
-        if available_space is None:
-            available_space = "N/A"
-        else:
-            available_space = abbreviate_size(available_space)
-        ctx.fillSlots("version", version)
-        ctx.fillSlots("available_space", available_space)
+        for server in servers:
+            cs = server.get_connection_status()
+            self._render_connection_status(tag, cs)
 
-        return ctx.tag
+            tag.fillSlots("peerid", server.get_longname())
+            tag.fillSlots("nickname", server.get_nickname())
+
+            announcement = server.get_announcement()
+            version = announcement.get("my-version", "")
+            available_space = server.get_available_space()
+            if available_space is None:
+                available_space = "N/A"
+            else:
+                available_space = abbreviate_size(available_space)
+            tag.fillSlots("version", version)
+            tag.fillSlots("available_space", available_space)
+
+        return tag
 
     def render_download_form(self, ctx, data):
         # this is a form where users can download files by URI

From 2f53b2a67dc74dfa2338d434dc27c91fe24ac6d4 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:17:22 -0400
Subject: [PATCH 0232/1472] Add connected introducers renderer

---
 src/allmydata/web/root.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 9c016ac14..ca86dcd8f 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -333,6 +333,10 @@ class RootElement(Element):
         return len([1 for cs in self._client.introducer_connection_statuses()
                     if cs.connected])
 
+    @renderer
+    def connected_introducers(self, req, tag):
+        return tag(str(self._connected_introducers()))
+
     @renderer
     def connected_to_at_least_one_introducer(self, req, tag):
         if self._connected_introducers():

From 89980a039cc536bba9583151482432a8b6fe55af Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:19:47 -0400
Subject: [PATCH 0233/1472] Render total introducers

---
 src/allmydata/web/root.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index ca86dcd8f..f29a39204 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -384,8 +384,9 @@ class RootElement(Element):
         else:
             return tag("%s introducers connected" % (connected_count,))
 
-    def data_total_introducers(self, ctx, data):
-        return len(self.client.introducer_connection_statuses())
+    @renderer
+    def total_introducers(self, req, tag):
+        return tag(str(self.client.introducer_connection_statuses()))
 
     # In case we configure multiple introducers
     def data_introducers(self, ctx, data):
         return self.client.introducer_connection_statuses()
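[Note: service_row in patch 0231 fills one tag's slots repeatedly inside a
loop; the SlotsSequenceElement work further below (patch 0255) moves to the
usual template idiom for repetition, where a renderer yields one clone of
the pattern tag per item. A minimal sketch of that idiom, with illustrative
names and data.]

    from twisted.web.template import Element, XMLString, renderer

    TEMPLATE = """
    <table xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
      <tr t:render="rows">
        <td><t:slot name="peerid" /></td>
        <td><t:slot name="nickname" /></td>
      </tr>
    </table>
    """

    class ServerTable(Element):
        loader = XMLString(TEMPLATE)

        def __init__(self, servers):
            super(ServerTable, self).__init__()
            self._servers = servers  # list of slot-name -> value dicts

        @renderer
        def rows(self, request, tag):
            # one cloned <tr> per server; cloning keeps each row's
            # slot data independent of the others
            for server in self._servers:
                yield tag.clone().fillSlots(**server)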
From a646c7decdd4e25d970ea3ea1295ccedc8734fcd Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:42:07 -0400
Subject: [PATCH 0234/1472] Fix typo

---
 src/allmydata/web/root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index f29a39204..bb822d063 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -386,7 +386,7 @@ class RootElement(Element):
 
     @renderer
     def total_introducers(self, req, tag):
-        return tag(str(self.client.introducer_connection_statuses()))
+        return tag(str(self._client.introducer_connection_statuses()))
 
     # In case we configure multiple introducers
     def data_introducers(self, ctx, data):

From 8d4949f96bce5cd2168c66171622f2b12019d764 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:42:57 -0400
Subject: [PATCH 0235/1472] Render total introducers correctly

---
 src/allmydata/web/root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index bb822d063..342a8085f 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -386,7 +386,7 @@ class RootElement(Element):
 
     @renderer
     def total_introducers(self, req, tag):
-        return tag(str(self._client.introducer_connection_statuses()))
+        return tag(str(len(self._client.introducer_connection_statuses())))
 
     # In case we configure multiple introducers
     def data_introducers(self, ctx, data):

From 7ba4b945a4b29ec8e7663fe4126c3fc2ab506d59 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:43:41 -0400
Subject: [PATCH 0236/1472] Use introducers renderer method in root template

---
 src/allmydata/web/welcome.xhtml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
index 8e25a2da2..4a88b1ea0 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml
@@ -218,7 +218,7 @@
 [hunk body garbled in extraction: near the "Connected to ... of ...
  introducers" heading and the "Nickname" column, one element is pointed
  at the new introducers renderer; the markup was stripped and cannot be
  recovered.]
From 44f16e77cf20bd8c4e197dc5da3cd0fdb80b322b Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:47:36 -0400
Subject: [PATCH 0237/1472] Add introducers renderer

---
 src/allmydata/web/root.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 342a8085f..4144f3ea3 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -389,8 +389,15 @@ class RootElement(Element):
         return tag(str(len(self._client.introducer_connection_statuses())))
 
     # In case we configure multiple introducers
-    def data_introducers(self, ctx, data):
-        return self.client.introducer_connection_statuses()
+    @renderer
+    def introducers(self, req, tag):
+        ix = self._get_introducers()
+        if not ix:
+            return tag("No introducers")
+        return tag
+
+    def _get_introducers(self):
+        return self._client.introducer_connection_statuses()
 
     def _render_connection_status(self, tag, cs):
         connected = "yes" if cs.connected else "no"

From 5155befedc6b3909b0257b3381a66d0fea50dfbb Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:48:28 -0400
Subject: [PATCH 0238/1472] Rewrite total introducers

---
 src/allmydata/web/root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 4144f3ea3..a2aaa020f 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -386,7 +386,7 @@ class RootElement(Element):
 
     @renderer
     def total_introducers(self, req, tag):
-        return tag(str(len(self._client.introducer_connection_statuses())))
+        return tag(str(len(self._get_introducers())))
 
     # In case we configure multiple introducers
     @renderer

From 5d84a25f213af0327b6d5dea548a0354e9e48256 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:50:03 -0400
Subject: [PATCH 0239/1472] Add introducers row renderer

---
 src/allmydata/web/root.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index a2aaa020f..bf0535000 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -442,9 +442,11 @@ class RootElement(Element):
             tag.fillSlots("summary", [cs.summary, details])
             tag.fillSlots("details", "")
 
-    def render_introducers_row(self, ctx, cs):
-        self._render_connection_status(ctx, cs)
-        return ctx.tag
+    @renderer
+    def introducers_row(self, req, tag):
+        for cs in self._get_introducers():
+            self._render_connection_status(tag, cs)
+        return tag
 
     @renderer
     def helper_furl_prefix(self, req, tag):
From ed6970b168d5855fe27b16a875b74f8644d28be3 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:52:57 -0400
Subject: [PATCH 0240/1472] Correct fill slots usage in connection status

---
 src/allmydata/web/root.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index bf0535000..e9e6fcc2b 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -401,26 +401,26 @@ class RootElement(Element):
 
     def _render_connection_status(self, tag, cs):
         connected = "yes" if cs.connected else "no"
-        tag.fillSlots("service_connection_status", connected)
-        tag.fillSlots("service_connection_status_alt",
+        tag.fillSlots(service_connection_status=connected)
+        tag.fillSlots(service_connection_status_alt=
                       self._connectedalts[connected])
 
         since = cs.last_connection_time
-        tag.fillSlots("service_connection_status_rel_time",
+        tag.fillSlots(service_connection_status_rel_time=
                       render_time_delta(since, self.now_fn())
                       if since is not None
                       else "N/A")
-        tag.fillSlots("service_connection_status_abs_time",
+        tag.fillSlots(service_connection_status_abs_time=
                       render_time_attr(since)
                       if since is not None
                       else "N/A")
 
         last_received_data_time = cs.last_received_time
-        tag.fillSlots("last_received_data_abs_time",
+        tag.fillSlots(last_received_data_abs_time=
                       render_time_attr(last_received_data_time)
                       if last_received_data_time is not None
                       else "N/A")
-        tag.fillSlots("last_received_data_rel_time",
+        tag.fillSlots(last_received_data_rel_time=
                       render_time_delta(last_received_data_time,
                                         self.now_fn())
                       if last_received_data_time is not None
@@ -428,19 +428,19 @@ class RootElement(Element):
 
         others = cs.non_connected_statuses
         if cs.connected:
-            tag.fillSlots("summary", cs.summary)
+            tag.fillSlots(summary=cs.summary)
             if others:
                 details = "\n".join(["* %s: %s\n" % (which, others[which])
                                      for which in sorted(others)])
-                tag.fillSlots("details", "Other hints:\n" + details)
+                tag.fillSlots(details="Other hints:\n" + details)
             else:
-                tag.fillSlots("details", "(no other hints)")
+                tag.fillSlots(details="(no other hints)")
         else:
             details = tags.ul()
             for which in sorted(others):
-                details[tags.li("%s: %s" % (which, others[which]))]
+                details(tags.li("%s: %s" % (which, others[which])))
-            tag.fillSlots("summary", [cs.summary, details])
-            tag.fillSlots("details", "")
+            tag.fillSlots(summary=[cs.summary, details])
+            tag.fillSlots(details="")

From d064f6b2b70c3129c4d1e31c6f2a6f5273560dca Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:55:10 -0400
Subject: [PATCH 0241/1472] Add "rendered at" renderer

---
 src/allmydata/web/root.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index e9e6fcc2b..1a09f2074 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -255,10 +255,6 @@ class Root(MultiFormatResource):
             # Render "/" path.
             return self
 
-    # FIXME: This code is duplicated in root.py and introweb.py.
-    def data_rendered_at(self, ctx, data):
-        return render_time(time.time())
-
     def data_version(self, ctx, data):
         return get_package_versions_string()
 
@@ -630,3 +626,7 @@ class RootElement(Element):
             enctype="multipart/form-data"
         )
         return tags.div(form)
+
+    @renderer
+    def rendered_at(self, req, tag):
+        return render_time(time.time())
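[Note: the "correct fill slots usage" of patch 0240 is the switch from
nevow's two-argument fillSlots("name", value) call to the keyword form that
twisted.web.template's Tag.fillSlots expects. In short, under the twisted
API:]

    from twisted.web.template import Tag

    tag = Tag("tr")
    # nevow style -- tag.fillSlots("summary", value) -- no longer applies;
    # Tag.fillSlots takes keyword arguments, and several slots can be
    # filled in a single call:
    tag.fillSlots(summary=u"Connected to 10.0.0.1:3456",
                  details=u"(no other hints)")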
From 32e0c779b6168cdbe7510fff9b0dcd764ef9b9fb Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:56:27 -0400
Subject: [PATCH 0242/1472] Add version renderer

---
 src/allmydata/web/root.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 1a09f2074..e61ac932e 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -255,9 +255,6 @@ class Root(MultiFormatResource):
             # Render "/" path.
             return self
 
-    def data_version(self, ctx, data):
-        return get_package_versions_string()
-
     def data_import_path(self, ctx, data):
         return str(allmydata)
 
@@ -630,3 +627,8 @@ class RootElement(Element):
     @renderer
     def rendered_at(self, req, tag):
         return render_time(time.time())
+
+    @renderer
+    def version(self, req, tag):
+        return get_package_versions_string()
+

From 8ce1512501af95808177c1b3a0af266cb63da19d Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 07:57:15 -0400
Subject: [PATCH 0243/1472] Render import path

---
 src/allmydata/web/root.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index e61ac932e..f7432ff9b 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -255,9 +255,6 @@ class Root(MultiFormatResource):
             # Render "/" path.
             return self
 
-    def data_import_path(self, ctx, data):
-        return str(allmydata)
-
     def render_HTML(self, req):
         return renderElement(req, RootElement(self.client))
 
@@ -632,3 +629,6 @@ class RootElement(Element):
     def version(self, req, tag):
         return get_package_versions_string()
 
+    @renderer
+    def import_path(self, req, tag):
+        return str(allmydata)

From a885f14c43efed7870aff1bf7fe4e2fb0ac8f6f3 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 11:47:18 -0400
Subject: [PATCH 0244/1472] Pass now_fn to RootElement

---
 src/allmydata/web/root.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index f7432ff9b..44f57853f 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -256,7 +256,7 @@ class Root(MultiFormatResource):
         return self
 
     def render_HTML(self, req):
-        return renderElement(req, RootElement(self.client))
+        return renderElement(req, RootElement(self.client, self.now_fn))
 
     def render_JSON(self, req):
         req.setHeader("content-type", "application/json; charset=utf-8")
@@ -300,9 +300,10 @@ class RootElement(Element):
 
     loader = XMLFile(FilePath(__file__).sibling("welcome.xhtml"))
 
-    def __init__(self, client):
+    def __init__(self, client, now_fn):
         super(RootElement, self).__init__()
         self._client = client
+        self._now_fn = now_fn
 
     _connectedalts = {
         "not-configured": "Not Configured",
@@ -397,7 +398,7 @@ class RootElement(Element):
 
         since = cs.last_connection_time
         tag.fillSlots(service_connection_status_rel_time=
-                      render_time_delta(since, self.now_fn())
+                      render_time_delta(since, self._now_fn())
                       if since is not None
                       else "N/A")
         tag.fillSlots(service_connection_status_abs_time=
@@ -412,7 +413,7 @@ class RootElement(Element):
                       else "N/A")
         tag.fillSlots(last_received_data_rel_time=
                       render_time_delta(last_received_data_time,
-                                        self.now_fn())
+                                        self._now_fn())
                       if last_received_data_time is not None
                       else "N/A")
From 98322effe6547880e7937c992bf8314494199fe3 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 19:07:57 -0400
Subject: [PATCH 0245/1472] Update test to exercise services renderer

---
 src/allmydata/test/web/test_root.py | 36 ++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/src/allmydata/test/web/test_root.py b/src/allmydata/test/web/test_root.py
index 9b543b9f3..ba345ae07 100644
--- a/src/allmydata/test/web/test_root.py
+++ b/src/allmydata/test/web/test_root.py
@@ -1,13 +1,19 @@
 from mock import Mock
 
 from twisted.trial import unittest
+from twisted.web.template import Tag
 from twisted.web.test.requesthelper import DummyRequest
+from twisted.application import service
 
-from ...storage_client import NativeStorageServer
-from ...web.root import Root
+from ...storage_client import (
+    NativeStorageServer,
+    StorageFarmBroker,
+)
+from ...web.root import Root, RootElement
 from ...util.connection_status import ConnectionStatus
 from allmydata.web.root import URIHandler
 from allmydata.web.common import WebError
+from allmydata.client import _Client
 
 from hypothesis import given
 from hypothesis.strategies import text
@@ -94,9 +100,23 @@ class RenderServiceRow(unittest.TestCase):
         cs = ConnectionStatus(False, "summary", {}, 0, 0)
         s.get_connection_status = lambda: cs
 
-        r = FakeRoot()
-        ctx = FakeContext()
-        res = r.render_service_row(ctx, s)
-        self.assertIdentical(res, ctx)
-        self.assertEqual(ctx.slots["version"], "")
-        self.assertEqual(ctx.slots["nickname"], "")
+        class FakeClient(_Client):
+            def __init__(self):
+                service.MultiService.__init__(self)
+                self.storage_broker = StorageFarmBroker(
+                    permute_peers=True,
+                    tub_maker=None,
+                    node_config=EMPTY_CLIENT_CONFIG,
+                )
+                self.addService(s)
+
+        client = FakeClient()
+        root = RootElement(client, None)
+        req = DummyRequest(b"")
+        tag = Tag("")
+
+        res = root.service_row(req, tag)
+
+        self.assertIdentical(res, tag)
+        self.assertEqual(tag.slotData.get("version"), "")
+        self.assertEqual(tag.slotData.get("nickname"), "")

From e5733b6c908f089fe6e2d68777433bc9e984ea8e Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Tue, 28 Apr 2020 19:09:00 -0400
Subject: [PATCH 0246/1472] Remove unused test code

---
 src/allmydata/test/web/test_root.py | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/src/allmydata/test/web/test_root.py b/src/allmydata/test/web/test_root.py
index ba345ae07..7eae44268 100644
--- a/src/allmydata/test/web/test_root.py
+++ b/src/allmydata/test/web/test_root.py
@@ -9,7 +9,7 @@ from ...storage_client import (
     NativeStorageServer,
     StorageFarmBroker,
 )
-from ...web.root import Root, RootElement
+from ...web.root import RootElement
 from ...util.connection_status import ConnectionStatus
 from allmydata.web.root import URIHandler
 from allmydata.web.common import WebError
@@ -23,21 +23,6 @@ from ..common import (
     EMPTY_CLIENT_CONFIG,
 )
 
-class FakeRoot(Root):
-    def __init__(self):
-        pass
-    def now_fn(self):
-        return 0
-
-
-class FakeContext(object):
-    def __init__(self):
-        self.slots = {}
-        self.tag = self
-    def fillSlots(self, slotname, contents):
-        self.slots[slotname] = contents
-
-
 class RenderSlashUri(unittest.TestCase):
     """
     Ensure that URIs starting with /uri?uri= only accept valid
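[Note: patch 0245 exercises a renderer the same way the template system
invokes it -- call it with a request and a Tag, then inspect the Tag's
slotData. A condensed, self-contained sketch of the technique; the _Example
element is illustrative, not Tahoe's test code.]

    from twisted.trial import unittest
    from twisted.web.template import Element, Tag, renderer
    from twisted.web.test.requesthelper import DummyRequest

    class _Example(Element):
        # no loader is needed when the renderer is called directly
        @renderer
        def service_row(self, request, tag):
            return tag.fillSlots(version=u"", nickname=u"fake-nick")

    class RendererSlotTest(unittest.TestCase):
        def test_service_row_fills_slots(self):
            elem = _Example()
            tag = Tag("")
            res = elem.service_row(DummyRequest(b""), tag)
            # fillSlots returns the tag itself, and records the
            # slot values in tag.slotData
            self.assertIdentical(res, tag)
            self.assertEqual(tag.slotData.get("nickname"), u"fake-nick")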
tag.fillSlots("available_space", available_space) + tag.fillSlots(version=version, + available_space=available_space) return tag From 6aebab28a95ac67901599e16dcbc0d16518ee9e1 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 30 Apr 2020 10:09:15 -0400 Subject: [PATCH 0248/1472] Use BeautifulSoup to check rendered document --- src/allmydata/test/web/test_web.py | 88 +++++++++++++++++++++++------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index be3c38ffb..5d5cb51f0 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -53,6 +53,8 @@ from .common import ( assert_soup_has_favicon, assert_soup_has_text, assert_soup_has_tag_with_attributes, + assert_soup_has_tag_with_content, + assert_soup_has_tag_with_attributes_and_content, ) from allmydata.interfaces import IMutableFileNode, SDMF_VERSION, MDMF_VERSION @@ -832,10 +834,16 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_introducer_not_connected_unguessable) def _check_introducer_not_connected_unguessable(res): - html = res.replace('\n', ' ') - self.failIfIn('pb://someIntroducer/secret', html) - self.failUnless(re.search('[ ]*
          No introducers connected
          ', html), res) - + soup = BeautifulSoup(res, 'html5lib') + self.failIfIn('pb://someIntroducer/secret', res) + assert_soup_has_tag_with_attributes( + self, soup, u"img", + {u"alt": u"Disconnected", u"src": u"img/connected-no.png"} + ) + assert_soup_has_tag_with_content( + self, soup, u"div", + u"No introducers connected" + ) d.addCallback(_check_introducer_not_connected_unguessable) # introducer connected, unguessable furl @@ -845,10 +853,21 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_introducer_connected_unguessable) def _check_introducer_connected_unguessable(res): - html = res.replace('\n', ' ') - self.failUnlessIn('
          summary
          ', html) - self.failIfIn('pb://someIntroducer/secret', html) - self.failUnless(re.search('[ ]*
          1 introducer connected
          ', html), res) + soup = BeautifulSoup(res, 'html5lib') + assert_soup_has_tag_with_attributes_and_content( + self, soup, u"div", + u"summary", + { u"class": u"connection-status", u"title": u"(no other hints)" } + ) + self.failIfIn('pb://someIntroducer/secret', res) + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"alt": u"Connected", u"src": u"img/connected-yes.png" } + ) + assert_soup_has_tag_with_content( + self, soup, u"div", + u"1 introducer connected" + ) d.addCallback(_check_introducer_connected_unguessable) # introducer connected, guessable furl @@ -858,9 +877,21 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_introducer_connected_guessable) def _check_introducer_connected_guessable(res): + soup = BeautifulSoup(res, 'html5lib') html = res.replace('\n', ' ') - self.failUnlessIn('
          summary
          ', html) - self.failUnless(re.search('[ ]*
          1 introducer connected
          ', html), res) + assert_soup_has_tag_with_attributes_and_content( + self, soup, u"div", + u"summary", + { u"class": u"connection-status", u"title": u"(no other hints)" } + ) + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"src": u"img/connected-yes.png", u"alt": u"Connected" } + ) + assert_soup_has_tag_with_content( + self, soup, u"div", + u"1 introducer connected" + ) d.addCallback(_check_introducer_connected_guessable) return d @@ -873,8 +904,11 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_no_helper) def _check_no_helper(res): - html = res.replace('\n', ' ') - self.failUnless(re.search('', html), res) + soup = BeautifulSoup(res, 'html5lib') + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"src": u"img/connected-not-configured.png", u"alt": u"Not Configured" } + ) d.addCallback(_check_no_helper) # enable helper, not connected @@ -884,10 +918,17 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_helper_not_connected) def _check_helper_not_connected(res): - html = res.replace('\n', ' ') - self.failUnlessIn('
          pb://someHelper/[censored]
          ', html) - self.failIfIn('pb://someHelper/secret', html) - self.failUnless(re.search('', html), res) + soup = BeautifulSoup(res, 'html5lib') + assert_soup_has_tag_with_attributes_and_content( + self, soup, u"div", + u"pb://someHelper/[censored]", + { u"class": u"furl" } + ) + self.failIfIn('pb://someHelper/secret', res) + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"src": u"img/connected-no.png", u"alt": u"Disconnected" } + ) d.addCallback(_check_helper_not_connected) # enable helper, connected @@ -897,10 +938,17 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi return self.GET("/") d.addCallback(_set_helper_connected) def _check_helper_connected(res): - html = res.replace('\n', ' ') - self.failUnlessIn('
          pb://someHelper/[censored]
          ', html) - self.failIfIn('pb://someHelper/secret', html) - self.failUnless(re.search('', html), res) + soup = BeautifulSoup(res, 'html5lib') + assert_soup_has_tag_with_attributes_and_content( + self, soup, u"div", + u"pb://someHelper/[censored]", + { u"class": u"furl" } + ) + self.failIfIn('pb://someHelper/secret', res) + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"src": u"img/connected-yes.png", "alt": u"Connected" } + ) d.addCallback(_check_helper_connected) return d From 88b7784f3929bc4091f22a432bdbd7f4590a1195 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 30 Apr 2020 15:09:10 -0400 Subject: [PATCH 0249/1472] Use BeautifulSoup in test.test_system.SystemTest.test_filesystem --- src/allmydata/test/test_system.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py index 2b7660705..3f8a363d2 100644 --- a/src/allmydata/test/test_system.py +++ b/src/allmydata/test/test_system.py @@ -3,6 +3,8 @@ from __future__ import print_function import os, re, sys, time, json from functools import partial +from bs4 import BeautifulSoup + from twisted.internet import reactor from twisted.trial import unittest from twisted.internet import defer @@ -38,6 +40,9 @@ from .common import ( SameProcessStreamEndpointAssigner, ) from .common_web import do_http, Error +from .web.common import ( + assert_soup_has_tag_with_attributes +) # TODO: move this to common or common_util from allmydata.test.test_runner import RunBinTahoeMixin @@ -1771,8 +1776,11 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # get the welcome page from the node that uses the helper too d.addCallback(lambda res: do_http("get", self.helper_webish_url)) def _got_welcome_helper(page): - html = page.replace('\n', ' ') - self.failUnless(re.search('', html), page) + soup = BeautifulSoup(page, 'html5lib') + assert_soup_has_tag_with_attributes( + self, soup, u"img", + { u"alt": u"Connected", u"src": u"img/connected-yes.png" } + ) self.failUnlessIn("Not running helper", page) d.addCallback(_got_welcome_helper) From cce6f83054d10b555ed5a5612641d09c98ea72c9 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 30 Apr 2020 18:47:13 -0400 Subject: [PATCH 0250/1472] Remove unused renderers --- src/allmydata/web/root.py | 75 --------------------------------------- 1 file changed, 75 deletions(-) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 0f18b141c..ae40845ab 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -531,81 +531,6 @@ class RootElement(Element): return tag - def render_download_form(self, ctx, data): - # this is a form where users can download files by URI - form = T.form(action="uri", method="get", - enctype="multipart/form-data")[ - T.fieldset[ - T.legend(class_="freeform-form-label")["Download a file"], - T.div["Tahoe-URI to download:"+SPACE, - T.input(type="text", name="uri")], - T.div["Filename to download as:"+SPACE, - T.input(type="text", name="filename")], - T.input(type="submit", value="Download!"), - ]] - return T.div[form] - - def render_view_form(self, ctx, data): - # this is a form where users can download files by URI, or jump to a - # named directory - form = T.form(action="uri", method="get", - enctype="multipart/form-data")[ - T.fieldset[ - T.legend(class_="freeform-form-label")["View a file or directory"], - "Tahoe-URI to view:"+SPACE, - T.input(type="text", name="uri"), SPACE*2, - T.input(type="submit", 
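[Note: patches 0248 and 0249 replace brittle regexes over flattened HTML
with structured queries on a parsed tree. The assert_soup_* helpers live in
allmydata.test.web.common; their core logic amounts to something like this
sketch -- a simplified assumption about their behavior, not the actual
helper bodies.]

    from bs4 import BeautifulSoup

    def soup_has_tag_with_attributes(soup, name, attrs):
        # any <name> tag carrying all of the given attributes?
        return any(all(tag.get(k) == v for k, v in attrs.items())
                   for tag in soup.find_all(name))

    def soup_has_tag_with_content(soup, name, content):
        # any <name> tag whose text contains the given string?
        return any(content in tag.get_text() for tag in soup.find_all(name))

    html = '<div><img src="img/connected-yes.png" alt="Connected"/></div>'
    soup = BeautifulSoup(html, "html5lib")
    assert soup_has_tag_with_attributes(
        soup, "img", {"src": "img/connected-yes.png", "alt": "Connected"})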
value="View!"), - ]] - return T.div[form] - - def render_upload_form(self, ctx, data): - # This is a form where users can upload unlinked files. - # Users can choose immutable, SDMF, or MDMF from a radio button. - - upload_chk = T.input(type='radio', name='format', - value='chk', id='upload-chk', - checked='checked') - upload_sdmf = T.input(type='radio', name='format', - value='sdmf', id='upload-sdmf') - upload_mdmf = T.input(type='radio', name='format', - value='mdmf', id='upload-mdmf') - - form = T.form(action="uri", method="post", - enctype="multipart/form-data")[ - T.fieldset[ - T.legend(class_="freeform-form-label")["Upload a file"], - T.div["Choose a file:"+SPACE, - T.input(type="file", name="file", class_="freeform-input-file")], - T.input(type="hidden", name="t", value="upload"), - T.div[upload_chk, T.label(for_="upload-chk") [" Immutable"], SPACE, - upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"], SPACE, - upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"], SPACE*2, - T.input(type="submit", value="Upload!")], - ]] - return T.div[form] - - def render_mkdir_form(self, ctx, data): - # This is a form where users can create new directories. - # Users can choose SDMF or MDMF from a radio button. - - mkdir_sdmf = T.input(type='radio', name='format', - value='sdmf', id='mkdir-sdmf', - checked='checked') - mkdir_mdmf = T.input(type='radio', name='format', - value='mdmf', id='mkdir-mdmf') - - form = T.form(action="uri", method="post", - enctype="multipart/form-data")[ - T.fieldset[ - T.legend(class_="freeform-form-label")["Create a directory"], - mkdir_sdmf, T.label(for_='mkdir-sdmf')[" SDMF"], SPACE, - mkdir_mdmf, T.label(for_='mkdir-mdmf')[" MDMF (experimental)"], SPACE*2, - T.input(type="hidden", name="t", value="mkdir"), - T.input(type="hidden", name="redirect_to_result", value="true"), - T.input(type="submit", value="Create a directory"), - ]] - return T.div[form] - @renderer def incident_button(self, req, tag): # this button triggers a foolscap-logging "incident" From cfe4b2925ee075c6eb03d1eba341f9a5402fab71 Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 30 Apr 2020 18:48:06 -0400 Subject: [PATCH 0251/1472] Remove an unused method and imports --- src/allmydata/web/root.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index ae40845ab..ffef16db0 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -11,8 +11,6 @@ from twisted.web.util import redirectTo from hyperlink import DecodedURL, URL -from nevow import rend, tags as T -from nevow.inevow import IRequest from twisted.web import static from nevow.util import resource_filename @@ -229,14 +227,6 @@ class Root(MultiFormatResource): self.putChild("report_incident", IncidentReporter()) - # until we get rid of nevow.Page in favour of twisted.web.resource - # we can't use getChild() -- but we CAN use childFactory or - # override locatechild - def childFactory(self, ctx, name): - request = IRequest(ctx) - return self.getChild(name, request) - - def getChild(self, path, request): if path == "helper_status": # the Helper isn't attached until after the Tub starts, so this child From e9796eb3086731fd61b4d24ec81339a8cea3e36e Mon Sep 17 00:00:00 2001 From: Sajith Sasidharan Date: Thu, 30 Apr 2020 18:49:05 -0400 Subject: [PATCH 0252/1472] Match against root path first --- src/allmydata/web/root.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 
From e9796eb3086731fd61b4d24ec81339a8cea3e36e Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Thu, 30 Apr 2020 18:49:05 -0400
Subject: [PATCH 0252/1472] Match against root path first

---
 src/allmydata/web/root.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index ffef16db0..d44231c34 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -228,6 +228,9 @@ class Root(MultiFormatResource):
         self.putChild("report_incident", IncidentReporter())
 
     def getChild(self, path, request):
+        if not path:
+            # Render "/" path.
+            return self
         if path == "helper_status":
             # the Helper isn't attached until after the Tub starts, so this child
             # needs to created on each request
@@ -241,9 +244,6 @@ class Root(MultiFormatResource):
             except KeyError:
                 storage_server = None
             return storage.StorageStatus(storage_server, self.client.nickname)
-        if not path:
-            # Render "/" path.
-            return self

From 053e61422170e34862327face2c6ff304c1b3115 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Thu, 30 Apr 2020 18:54:08 -0400
Subject: [PATCH 0253/1472] Import resource_filename from setuptools

Drop the nevow one

---
 src/allmydata/web/root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index d44231c34..4680457ef 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -12,7 +12,7 @@ from twisted.web.util import redirectTo
 from hyperlink import DecodedURL, URL
 
 from twisted.web import static
-from nevow.util import resource_filename
+from pkg_resources import resource_filename
 
 from twisted.python.filepath import FilePath
 from twisted.web.template import (

From 0d8eda5e765478e89edd57b2207ed0ad0e23cefc Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Thu, 30 Apr 2020 18:56:03 -0400
Subject: [PATCH 0254/1472] Rearrange imports

---
 src/allmydata/web/root.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 4680457ef..30b50c8ff 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -3,17 +3,14 @@ import time
 import json
 import urllib
 
+from hyperlink import DecodedURL, URL
+from pkg_resources import resource_filename
 from twisted.web import (
     http,
     resource,
+    static,
 )
 from twisted.web.util import redirectTo
-
-from hyperlink import DecodedURL, URL
-
-from twisted.web import static
-from pkg_resources import resource_filename
-
 from twisted.python.filepath import FilePath
 from twisted.web.template import (
     Element,
@@ -27,7 +24,12 @@ import allmydata # to display import path
 from allmydata.version_checks import get_package_versions_string
 from allmydata.util import log
 from allmydata.interfaces import IFileNode
-from allmydata.web import filenode, directory, unlinked, status
+from allmydata.web import (
+    filenode,
+    directory,
+    unlinked,
+    status,
+)
 from allmydata.web import storage
 from allmydata.web.common import (
     abbreviate_size,
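[Note: patch 0252's point is subtle -- in a twisted.web getChild(), the
empty path segment produced by a request for "/" has to be matched before
the named children, or the method can fall through without returning a
resource. A self-contained sketch of the ordering; StatusPage is a
hypothetical stand-in for children like Tahoe's helper_status.]

    from twisted.web.resource import Resource

    class StatusPage(Resource):
        isLeaf = True
        def render_GET(self, request):
            return b"status"

    class Root(Resource):
        def getChild(self, path, request):
            if not path:
                # a request for "/" arrives as the empty segment;
                # handle it first and render this resource itself
                return self
            if path == b"status":
                # constructed per request, like the helper_status child
                return StatusPage()
            return Resource.getChild(self, path, request)

        def render_GET(self, request):
            return b"welcome"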
RootElement(Element): sb = self._client.get_storage_broker() return tag(str(len(sb.get_connected_servers()))) + @renderer + def services_table(self, req, tag): + rows = [ self._describe_server(server) for server in self._services() ] + return SlotsSequenceElement(tag, rows) + def _services(self): sb = self._client.get_storage_broker() return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid()) - @renderer - def service_row(self, req, tag): - servers = self._services() + def _describe_server(self, server): + peerid = server.get_longname() + nickname = server.get_nickname() + version = server.get_announcement().get("my-version", "") - # FIXME: handle empty list of servers in a better manner. - if not servers: - tag.fillSlots(peerid="", - nickname="", - service_connection_status="", - service_connection_status_alt="", - details="", - summary="", - service_connection_status_abs_time="", - service_connection_status_rel_time="", - last_received_data_abs_time="", - last_received_data_rel_time="", - version="", - available_space="") + space = server.get_available_space() + if space is not None: + available_space = abbreviate_size(space) + else: + available_space = "N/A" - for server in servers: - cs = server.get_connection_status() - self._render_connection_status(tag, cs) + cs = server.get_connection_status() + others = cs.non_connected_statuses - tag.fillSlots(peerid=server.get_longname(), - nickname=server.get_nickname()) - - announcement = server.get_announcement() - version = announcement.get("my-version", "") - available_space = server.get_available_space() - if available_space is None: - available_space = "N/A" + if cs.connected: + summary = cs.summary + if others: + hints = "\n".join(["* %s: %s\n" % (which, others[which]) + for which in sorted(others)]) + details = "Other hints:\n" + hints else: - available_space = abbreviate_size(available_space) - tag.fillSlots(version=version, - available_space=available_space) + details = "(no other hints)" + else: + details = tags.ul() + for which in sorted(others): + details(tags.li("%s: %s" % (which, others[which]))) + summary = [cs.summary, details] - return tag + connected = "yes" if cs.connected else "no" + connected_alt = self._connectedalts[connected] + + since = cs.last_connection_time + + if since is not None: + service_connection_status_rel_time = render_time_delta(since, self._now_fn()) + service_connection_status_abs_time = render_time_attr(since) + else: + service_connection_status_rel_time = "N/A" + service_connection_status_abs_time = "N/A" + + last_received_data_time = cs.last_received_time + + if last_received_data_time is not None: + last_received_data_abs_time = render_time_attr(last_received_data_time) + last_received_data_rel_time = render_time_delta(last_received_data_time, self._now_fn()) + else: + last_received_data_abs_time = "N/A" + last_received_data_rel_time = "N/A" + + return { + "peerid": peerid, + "nickname": nickname, + "version": version, + "available_space": available_space, + "summary": summary, + "details": details, + "service_connection_status": connected, + "service_connection_status_alt": connected_alt, + "service_connection_status_abs_time": service_connection_status_abs_time, + "service_connection_status_rel_time": service_connection_status_rel_time, + "last_received_data_abs_time": last_received_data_abs_time, + "last_received_data_rel_time": last_received_data_rel_time, + } @renderer def incident_button(self, req, tag): diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml 
index 4a88b1ea0..c7080c53a 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml

[The welcome.xhtml hunks of this patch (@@ -173,9 +173,11 @@,
@@ -183,7 +185,8 @@, @@ -194,7 +197,7 @@ and @@ -203,18 +206,28 @@) were
garbled when this series was archived: the markup was stripped, leaving
only the visible text -- the "of known storage servers" heading, the
column headings ("Nickname", "Connection", "Last RX", "Available"), the
empty-table message ("You are not presently connected to any servers."),
and the "Connected to ... of ... introducers" heading. The hunks rewrite
the services table for the services_table renderer added above: the
per-row slots move into a row tag rendered by SlotsSequenceElement's
"item" pattern, and the no-servers message moves into its "empty"
pattern.]
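
A note for readers following this refactor: SlotsSequenceElement, defined
in allmydata.web.common, pairs a renderer that returns a sequence of dicts
with a template tag carrying an "item" row pattern and an "empty" fallback
pattern. The sketch below shows that shape in miniature; it is illustrative
only -- SequenceSketch and the sample rows are invented for this note, not
code from the tree.

    from twisted.web.template import Element, TagLoader, renderer, slot, tags

    class SequenceSketch(Element):
        """Clone the "item" tag once per row dict; show "empty" for no rows."""

        def __init__(self, tag, seq):
            Element.__init__(self, loader=TagLoader(tag))
            self.seq = seq

        @renderer
        def item(self, request, tag):
            # Each dict's keys fill the matching slot names in the row tag.
            for row in self.seq:
                yield tag.clone().fillSlots(**row)

        @renderer
        def empty(self, request, tag):
            # Rendered only when there are no rows at all.
            return tag if not self.seq else u""

    # Usage mirrors the services_table renderer above: hand the table tag
    # and the row dicts to the sequence element and return it.
    table = tags.table(
        tags.tr(tags.td(slot("nickname")), render="item"),
        tags.tr(tags.td("You are not presently connected to any servers."),
                render="empty"),
    )
    element = SequenceSketch(table, [{"nickname": "node1"}, {"nickname": "node2"}])
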
From 029c99a241fa30e0a90e2de9a0b856251c1b7ea2 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 16:12:52 -0400
Subject: [PATCH 0256/1472] Render introducers table using SlotsSequenceElement

---
 src/allmydata/web/root.py       | 28 ++++++++++++++++++----------
 src/allmydata/web/welcome.xhtml | 14 +++++++++++---
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index e61926525..005695a63 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -426,12 +426,6 @@ class RootElement(Element):
             tag.fillSlots(summary=[cs.summary, details])
             tag.fillSlots(details="")

-    @renderer
-    def introducers_row(self, req, tag):
-        for cs in self._get_introducers():
-            self._render_connection_status(tag, cs)
-        return tag
-
     @renderer
     def helper_furl_prefix(self, req, tag):
         try:
@@ -487,6 +481,12 @@ class RootElement(Element):
         rows = [ self._describe_server(server) for server in self._services() ]
         return SlotsSequenceElement(tag, rows)

+    @renderer
+    def introducers_table(self, req, tag):
+        rows = [ self._describe_connection_status(cs)
+                 for cs in self._get_introducers() ]
+        return SlotsSequenceElement(tag, rows)
+
     def _services(self):
         sb = self._client.get_storage_broker()
         return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())
@@ -502,7 +502,19 @@ class RootElement(Element):
         else:
             available_space = "N/A"

+        srvstat = {
+            "peerid": peerid,
+            "nickname": nickname,
+            "version": version,
+            "available_space": available_space,
+        }
+
         cs = server.get_connection_status()
+        constat = self._describe_connection_status(cs)
+
+        return dict(srvstat.items() + constat.items())
+
+    def _describe_connection_status(self, cs):
         others = cs.non_connected_statuses

         if cs.connected:
@@ -541,10 +553,6 @@ class RootElement(Element):
             last_received_data_rel_time = "N/A"

         return {
-            "peerid": peerid,
-            "nickname": nickname,
-            "version": version,
-            "available_space": available_space,
             "summary": summary,
             "details": details,
             "service_connection_status": connected,
diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
index c7080c53a..bdfa53972 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml

[The welcome.xhtml hunks of this patch (@@ -231,14 +231,17 @@,
@@ -257,6 +260,7 @@ and @@ -264,7 +268,11 @@) were garbled in the same
way: only the visible text survives -- the "Connected to ... of ...
introducers" heading, the "Connection" and "Last RX" column headings,
and the empty-table message ("No introducers are configured."). The
hunks give the introducers table the same structure: an "item" row
pattern rendered by the introducers_table renderer and an "empty"
pattern carrying the no-introducers message.]
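
A note on the dict merge this patch introduces: _describe_server combines
the server and connection dicts with dict(srvstat.items() + constat.items()),
which works only on Python 2, where items() returns concatenable lists;
patch 0266/1472 later in this series coerces both sides to lists so the
same line also works with Python 3's dict views. A standalone sketch of
the difference, with invented values:

    srvstat = {"peerid": "v0-abcdef", "nickname": "node1"}
    constat = {"summary": "Connected", "details": ""}

    # Python 2 only -- items() returns lists, so "+" concatenates:
    #     combined = dict(srvstat.items() + constat.items())
    # Python 2 and 3 -- items() may be a view, so build lists explicitly:
    combined = dict(list(srvstat.items()) + list(constat.items()))

    assert combined == {"peerid": "v0-abcdef", "nickname": "node1",
                        "summary": "Connected", "details": ""}
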
From df188c75df312145fa9606bde16a194a5ad6d075 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 16:50:30 -0400
Subject: [PATCH 0257/1472] Wrap footer elements in tags

---
 src/allmydata/web/root.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 005695a63..2724984b1 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -581,12 +581,12 @@ class RootElement(Element):

     @renderer
     def rendered_at(self, req, tag):
-        return render_time(time.time())
+        return tag(render_time(time.time()))

     @renderer
     def version(self, req, tag):
-        return get_package_versions_string()
+        return tag(get_package_versions_string())

     @renderer
     def import_path(self, req, tag):
-        return str(allmydata)
+        return tag(str(allmydata))

From ade7f613b84d1d3e429d46d636a3e1a52a9e45c0 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 17:07:08 -0400
Subject: [PATCH 0258/1472] Remove unused renderer

---
 src/allmydata/web/root.py | 43 ---------------------------------------
 1 file changed, 43 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 2724984b1..b2185c5f0 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -383,49 +383,6 @@ class RootElement(Element):
     def _get_introducers(self):
         return self._client.introducer_connection_statuses()

-    def _render_connection_status(self, tag, cs):
-        connected = "yes" if cs.connected else "no"
-        tag.fillSlots(service_connection_status=connected)
-        tag.fillSlots(service_connection_status_alt=
-                      self._connectedalts[connected])
-
-        since = cs.last_connection_time
-        tag.fillSlots(service_connection_status_rel_time=
-                      render_time_delta(since, self._now_fn())
-                      if since is not None
-                      else "N/A")
-        tag.fillSlots(service_connection_status_abs_time=
-                      render_time_attr(since)
-                      if since is not None
-                      else "N/A")
-
-        last_received_data_time = cs.last_received_time
-        tag.fillSlots(last_received_data_abs_time=
-                      render_time_attr(last_received_data_time)
-                      if last_received_data_time is not None
-                      else "N/A")
-        tag.fillSlots(last_received_data_rel_time=
-                      render_time_delta(last_received_data_time,
-                                        self._now_fn())
-                      if last_received_data_time is not None
-                      else "N/A")
-
-        others = cs.non_connected_statuses
-        if cs.connected:
-            tag.fillSlots(summary=cs.summary)
-            if others:
-                details = "\n".join(["* %s: %s\n" % (which, others[which])
-                                     for which in sorted(others)])
-                tag.fillSlots(details="Other hints:\n" + details)
-            else:
-                tag.fillSlots(details="(no other hints)")
-        else:
-            details = tags.ul()
-            for which in sorted(others):
-                details(tags.li("%s: %s" % (which, others[which])))
-            tag.fillSlots(summary=[cs.summary, details])
-            tag.fillSlots(details="")
-
     @renderer
     def helper_furl_prefix(self, req, tag):
         try:

From 7b4d81fe283b720885cf566f515698be1f4fc651 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 17:38:24 -0400
Subject: [PATCH 0259/1472] Rename root.Root variables

---
 src/allmydata/web/root.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index b2185c5f0..9bf88a611 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -209,8 +209,8 @@ class Root(MultiFormatResource):

     def __init__(self, client, clock=None, now_fn=None):
         super(Root, self).__init__()
-        self.client = client
-        self.now_fn = now_fn
+        self._client = client
+        self._now_fn = now_fn

         self.putChild("uri", URIHandler(client))
         self.putChild("cap", URIHandler(client))
@@ -237,24 +237,24 @@ class Root(MultiFormatResource):
         if path == "helper_status":
             # the Helper isn't attached until after the Tub starts, so this child
             # needs to created on each request
-            return status.HelperStatus(self.client.helper)
+            return status.HelperStatus(self._client.helper)
         if path == "storage":
             # Storage isn't initialized until after the web hierarchy is
             # constructed so this child needs to be created later than
             # `__init__`.
             try:
-                storage_server = self.client.getServiceNamed("storage")
+                storage_server = self._client.getServiceNamed("storage")
             except KeyError:
                 storage_server = None
-            return storage.StorageStatus(storage_server, self.client.nickname)
+            return storage.StorageStatus(storage_server, self._client.nickname)

     def render_HTML(self, req):
-        return renderElement(req, RootElement(self.client, self.now_fn))
+        return renderElement(req, RootElement(self._client, self._now_fn))

     def render_JSON(self, req):
         req.setHeader("content-type", "application/json; charset=utf-8")
-        intro_summaries = [s.summary for s in self.client.introducer_connection_statuses()]
-        sb = self.client.get_storage_broker()
+        intro_summaries = [s.summary for s in self._client.introducer_connection_statuses()]
+        sb = self._client.get_storage_broker()
         servers = self._describe_known_servers(sb)
         result = {
             "introducers": {

From 80a26e26b1b65f2cabc3404457f25789bd9344b4 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 17:41:51 -0400
Subject: [PATCH 0260/1472] Refactor methods that describe server/connection

---
 src/allmydata/web/root.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 9bf88a611..be102cce1 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -435,7 +435,8 @@ class RootElement(Element):

     @renderer
     def services_table(self, req, tag):
-        rows = [ self._describe_server(server) for server in self._services() ]
+        rows = [ self._describe_server_and_connection(server)
+                 for server in self._services() ]
         return SlotsSequenceElement(tag, rows)

     @renderer
@@ -449,6 +450,7 @@ class RootElement(Element):
         sb = self._client.get_storage_broker()
         return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

     def _describe_server(self, server):
+        """Return a dict containing server stats."""
         peerid = server.get_longname()
         nickname = server.get_nickname()
         version = server.get_announcement().get("my-version", "")
@@ -459,19 +461,22 @@ class RootElement(Element):
         else:
             available_space = "N/A"

-        srvstat = {
+        return {
             "peerid": peerid,
             "nickname": nickname,
             "version": version,
             "available_space": available_space,
         }

+    def _describe_server_and_connection(self, server):
+        """Return a dict containing both server and connection stats."""
+        srvstat = self._describe_server(server)
         cs = server.get_connection_status()
         constat = self._describe_connection_status(cs)
-
         return dict(srvstat.items() + constat.items())

     def _describe_connection_status(self, cs):
+        """Return a dict containing some connection stats."""
         others = cs.non_connected_statuses

         if cs.connected:

From df9b160a3cf62b77d5302206ce519d91c87c1a94 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Fri, 1 May 2020 17:42:46 -0400
Subject: [PATCH 0261/1472] Mark describe server method as static

---
 src/allmydata/web/root.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index be102cce1..b4eea1cc9 100644
--- a/src/allmydata/web/root.py
+++
b/src/allmydata/web/root.py
@@ -449,7 +449,8 @@ class RootElement(Element):
         sb = self._client.get_storage_broker()
         return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

-    def _describe_server(self, server):
+    @staticmethod
+    def _describe_server(server):
         """Return a dict containing server stats."""
         peerid = server.get_longname()
         nickname = server.get_nickname()

From 58a06cb57d070f5e3e468d6ebf76f5d2232dba34 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 09:45:36 -0400
Subject: [PATCH 0262/1472] Update test that exercises empty nick and version

---
 src/allmydata/test/web/test_root.py | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/src/allmydata/test/web/test_root.py b/src/allmydata/test/web/test_root.py
index 7eae44268..5a888587c 100644
--- a/src/allmydata/test/web/test_root.py
+++ b/src/allmydata/test/web/test_root.py
@@ -1,5 +1,7 @@
 from mock import Mock

+import time
+
 from twisted.trial import unittest
 from twisted.web.template import Tag
 from twisted.web.test.requesthelper import DummyRequest
@@ -81,9 +83,8 @@
         ann = {"anonymous-storage-FURL": "pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x",
                "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3",
               }
-        s = NativeStorageServer("server_id", ann, None, {}, EMPTY_CLIENT_CONFIG)
-        cs = ConnectionStatus(False, "summary", {}, 0, 0)
-        s.get_connection_status = lambda: cs
+        srv = NativeStorageServer("server_id", ann, None, {}, EMPTY_CLIENT_CONFIG)
+        srv.get_connection_status = lambda: ConnectionStatus(False, "summary", {}, 0, 0)

         class FakeClient(_Client):
             def __init__(self):
@@ -93,15 +94,17 @@
                     tub_maker=None,
                     node_config=EMPTY_CLIENT_CONFIG,
                 )
-                self.addService(s)
+                self.storage_broker.test_add_server("test-srv", srv)

-        client = FakeClient()
-        root = RootElement(client, None)
+        root = RootElement(FakeClient(), time.time)
         req = DummyRequest(b"")
-        tag = Tag("")
+        tag = Tag(b"")

-        res = root.service_row(req, tag)
+        # Pick all items from services table.
+        items = root.services_table(req, tag).item(req, tag)

-        self.assertIdentical(res, tag)
-        self.assertEqual(tag.slotData.get("version"), "")
-        self.assertEqual(tag.slotData.get("nickname"), "")
+        # Coerce `items` to list and pick the first item from it.
+        item = list(items)[0]
+
+        self.assertEqual(item.slotData.get("version"), "")
+        self.assertEqual(item.slotData.get("nickname"), "")

From bf0f8a736a49820f8acbb62d55b9d81a8cc97bc2 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 11:48:41 -0400
Subject: [PATCH 0263/1472] Remove unused imports

---
 src/allmydata/web/root.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index b4eea1cc9..605b130e5 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -33,10 +33,8 @@ from allmydata.web import (
 from allmydata.web import storage
 from allmydata.web.common import (
     abbreviate_size,
-    getxmlfile,
     WebError,
     get_arg,
-    MultiFormatPage,
     MultiFormatResource,
     SlotsSequenceElement,
     get_format,

From 88b369a0c2b491801f9e85c2aa3548c5aac2e8c0 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 11:48:52 -0400
Subject: [PATCH 0264/1472] Remove unused variables

---
 src/allmydata/test/web/test_web.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py
index 5d5cb51f0..5be0b2f7b 100644
--- a/src/allmydata/test/web/test_web.py
+++ b/src/allmydata/test/web/test_web.py
@@ -878,7 +878,6 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
         d.addCallback(_set_introducer_connected_guessable)
         def _check_introducer_connected_guessable(res):
             soup = BeautifulSoup(res, 'html5lib')
-            html = res.replace('\n', ' ')
             assert_soup_has_tag_with_attributes_and_content(
                 self, soup, u"div", u"summary",

From df886726559d8a3b847731af083460af54c51bed Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 11:52:07 -0400
Subject: [PATCH 0265/1472] Add newsfragment

---
 newsfragments/3305.minor | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 newsfragments/3305.minor

diff --git a/newsfragments/3305.minor b/newsfragments/3305.minor
new file mode 100644
index 000000000..e69de29bb

From 96449bb99b61e6ca655a0bbdaf45282dab6bd7ee Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 14:05:33 -0400
Subject: [PATCH 0266/1472] Ensure lists before conversion to dict

---
 src/allmydata/web/root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index 605b130e5..ddb05a72e 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -472,7 +472,7 @@ class RootElement(Element):
         srvstat = self._describe_server(server)
         cs = server.get_connection_status()
         constat = self._describe_connection_status(cs)
-        return dict(srvstat.items() + constat.items())
+        return dict(list(srvstat.items()) + list(constat.items()))

     def _describe_connection_status(self, cs):
         """Return a dict containing some connection stats."""

From 4ebd6250e3c10cc0167796e87ed0a28a1eb83976 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 14:20:05 -0400
Subject: [PATCH 0267/1472] Document root.Root

---
 src/allmydata/web/root.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py
index ddb05a72e..f09a500a7 100644
--- a/src/allmydata/web/root.py
+++ b/src/allmydata/web/root.py
@@ -206,6 +206,14 @@ class Root(MultiFormatResource):
     addSlash = True

     def __init__(self, client, clock=None, now_fn=None):
+        """
+        Render root page ("/") of the URI.
+
+        :client allmydata.client._Client: a stats provider.
+        :clock: unused here.
+        :now_fn: a function that returns current time.
+
+        """
         super(Root, self).__init__()
         self._client = client
         self._now_fn = now_fn

From 7d063995f5b9eb79a1c9d5b484b5c4d232a87a65 Mon Sep 17 00:00:00 2001
From: Sajith Sasidharan
Date: Mon, 4 May 2020 15:35:21 -0400
Subject: [PATCH 0268/1472] Update copyright years

---
 src/allmydata/web/welcome.xhtml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/web/welcome.xhtml b/src/allmydata/web/welcome.xhtml
index bdfa53972..70c62e265 100644
--- a/src/allmydata/web/welcome.xhtml
+++ b/src/allmydata/web/welcome.xhtml
@@ -280,7 +280,7 @@
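
A closing note on patch 0257/1472 ("Wrap footer elements in tags"): a
twisted.web.template renderer's return value replaces the tag it was
invoked on, so returning a bare string drops the template element and its
attributes, while returning tag(...) keeps the element and fills in its
children. A self-contained sketch of the distinction; FooterSketch and the
version string are invented for illustration:

    from twisted.web.template import Element, XMLString, flattenString, renderer

    class FooterSketch(Element):
        loader = XMLString(
            '<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1" '
            'class="version" t:render="version" />'
        )

        @renderer
        def version(self, request, tag):
            # tag(...) preserves the <div class="version"> wrapper; a bare
            # string returned here would replace the whole <div>.
            return tag("tahoe-lafs: X.Y.Z")

    # flattenString(None, FooterSketch()) fires with
    # b'<div class="version">tahoe-lafs: X.Y.Z</div>'.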