diff --git a/newsfragments/3579.minor b/newsfragments/3579.minor new file mode 100644 index 000000000..e69de29bb diff --git a/src/allmydata/frontends/sftpd.py b/src/allmydata/frontends/sftpd.py index a67e859c6..bc7196de6 100644 --- a/src/allmydata/frontends/sftpd.py +++ b/src/allmydata/frontends/sftpd.py @@ -1,5 +1,17 @@ +""" +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + import six -import heapq, traceback, array, stat, struct +import heapq, traceback, stat, struct from stat import S_IFREG, S_IFDIR from time import time, strftime, localtime @@ -44,6 +56,17 @@ from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \ if six.PY3: long = int + +def createSFTPError(errorCode, errorMessage): + """ + SFTPError that can accept both Unicode and bytes. + + Twisted expects _native_ strings for the SFTPError message, but we often do + Unicode by default even on Python 2. + """ + return SFTPError(errorCode, six.ensure_str(errorMessage)) + + def eventually_callback(d): return lambda res: eventually(d.callback, res) @@ -52,9 +75,9 @@ def eventually_errback(d): def _utf8(x): - if isinstance(x, unicode): - return x.encode('utf-8') if isinstance(x, str): + return x.encode('utf-8') + if isinstance(x, bytes): return x return repr(x) @@ -63,7 +86,7 @@ def _to_sftp_time(t): """SFTP times are unsigned 32-bit integers representing UTC seconds (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC. A Tahoe time is the corresponding float.""" - return long(t) & long(0xFFFFFFFF) + return int(t) & int(0xFFFFFFFF) def _convert_error(res, request): @@ -72,7 +95,7 @@ def _convert_error(res, request): if not isinstance(res, Failure): logged_res = res - if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),) + if isinstance(res, (bytes, str)): logged_res = "<data of length %r>" % (len(res),) logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL) return res @@ -91,10 +114,10 @@ def _convert_error(res, request): raise err if err.check(NoSuchChildError): childname = _utf8(err.value.args[0]) - raise SFTPError(FX_NO_SUCH_FILE, childname) + raise createSFTPError(FX_NO_SUCH_FILE, childname) if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError): msg = _utf8(err.value.args[0]) - raise SFTPError(FX_PERMISSION_DENIED, msg) + raise createSFTPError(FX_PERMISSION_DENIED, msg) if err.check(ExistingChildError): # Versions of SFTP after v3 (which is what twisted.conch implements) # define a specific error code for this case: FX_FILE_ALREADY_EXISTS. @@ -103,16 +126,16 @@ # to translate the error to the equivalent of POSIX EEXIST, which is # necessary for some picky programs (such as gedit). msg = _utf8(err.value.args[0]) - raise SFTPError(FX_FAILURE, msg) + raise createSFTPError(FX_FAILURE, msg) if err.check(NotImplementedError): - raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value)) + raise createSFTPError(FX_OP_UNSUPPORTED, _utf8(err.value)) if err.check(EOFError): - raise SFTPError(FX_EOF, "end of file reached") + raise createSFTPError(FX_EOF, "end of file reached") if err.check(defer.FirstError): _convert_error(err.value.subFailure, request) # We assume that the error message is not anonymity-sensitive.
- raise SFTPError(FX_FAILURE, _utf8(err.value)) + raise createSFTPError(FX_FAILURE, _utf8(err.value)) def _repr_flags(flags): @@ -145,7 +168,7 @@ def _lsLine(name, attrs): # Since we now depend on Twisted v10.1, consider calling Twisted's version. mode = st_mode - perms = array.array('c', '-'*10) + perms = ["-"] * 10 ft = stat.S_IFMT(mode) if stat.S_ISDIR(ft): perms[0] = 'd' elif stat.S_ISREG(ft): perms[0] = '-' @@ -164,7 +187,7 @@ def _lsLine(name, attrs): if mode&stat.S_IXOTH: perms[9] = 'x' # suid/sgid never set - l = perms.tostring() + l = "".join(perms) l += str(st_nlink).rjust(5) + ' ' un = str(st_uid) l += un.ljust(9) @@ -181,6 +204,7 @@ def _lsLine(name, attrs): l += strftime("%b %d %Y ", localtime(st_mtime)) else: l += strftime("%b %d %H:%M ", localtime(st_mtime)) + l = l.encode("utf-8") l += name return l @@ -222,7 +246,7 @@ def _populate_attrs(childnode, metadata, size=None): if childnode and size is None: size = childnode.get_size() if size is not None: - _assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size) + _assert(isinstance(size, int) and not isinstance(size, bool), size=size) attrs['size'] = size perms = S_IFREG | 0o666 @@ -254,7 +278,7 @@ def _attrs_to_metadata(attrs): for key in attrs: if key == "mtime" or key == "ctime" or key == "createtime": - metadata[key] = long(attrs[key]) + metadata[key] = int(attrs[key]) elif key.startswith("ext_"): metadata[key] = str(attrs[key]) @@ -266,7 +290,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None): - precondition(isinstance(childname, (unicode, type(None))), childname=childname) + precondition(isinstance(childname, (str, type(None))), childname=childname) if childname is None: filenode_or_parent = filenode @@ -274,7 +298,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None): if filenode_or_parent: rw_uri = filenode_or_parent.get_write_uri() if rw_uri and childname: - return rw_uri + "/" + childname.encode('utf-8') + return rw_uri + b"/" + childname.encode('utf-8') else: return rw_uri @@ -326,7 +350,7 @ class OverwriteableFileConsumer(PrefixingLogMixin): if size < self.current_size or size < self.downloaded: self.f.truncate(size) if size > self.current_size: - self.overwrite(self.current_size, "\x00" * (size - self.current_size)) + self.overwrite(self.current_size, b"\x00" * (size - self.current_size)) self.current_size = size # make the invariant self.download_size <= self.current_size be true again @@ -334,7 +358,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): self.download_size = size if self.downloaded >= self.download_size: - self.download_done("size changed") + self.download_done(b"size changed") def registerProducer(self, p, streaming): if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY) @@ -409,21 +433,21 @@ class OverwriteableFileConsumer(PrefixingLogMixin): milestone = end while len(self.milestones) > 0: - (next, d) = self.milestones[0] - if next > milestone: + (next_, d) = self.milestones[0] + if next_ > milestone: return - if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY) + if noisy: self.log("MILESTONE %r %r" % (next_, d), level=NOISY) heapq.heappop(self.milestones) - eventually_callback(d)("reached") + eventually_callback(d)(b"reached") if milestone >= self.download_size: - self.download_done("reached download size") + self.download_done(b"reached download size") def overwrite(self, offset, data): if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
if self.is_closed: self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD) - raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") + raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") if offset > self.current_size: # Normally writing at an offset beyond the current end-of-file @@ -434,7 +458,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): # the gap between the current EOF and the offset. self.f.seek(self.current_size) - self.f.write("\x00" * (offset - self.current_size)) + self.f.write(b"\x00" * (offset - self.current_size)) start = self.current_size else: self.f.seek(offset) @@ -454,7 +478,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY) if self.is_closed: self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD) - raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") # Note that the overwrite method is synchronous. When a write request is processed # (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will @@ -508,7 +532,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): return d def download_done(self, res): - _assert(isinstance(res, (str, Failure)), res=res) + _assert(isinstance(res, (bytes, Failure)), res=res) # Only the first call to download_done counts, but we log subsequent calls # (multiple calls are normal). if self.done_status is not None: @@ -525,8 +549,8 @@ class OverwriteableFileConsumer(PrefixingLogMixin): eventually_callback(self.done)(None) while len(self.milestones) > 0: - (next, d) = self.milestones[0] - if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY) + (next_, d) = self.milestones[0] + if noisy: self.log("MILESTONE FINISH %r %r %r" % (next_, d, res), level=NOISY) heapq.heappop(self.milestones) # The callback means that the milestone has been reached if # it is ever going to be. Note that the file may have been @@ -540,7 +564,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): self.f.close() except Exception as e: self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD) - self.download_done("closed") + self.download_done(b"closed") return self.done_status def unregisterProducer(self): @@ -564,7 +588,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath) if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY) - precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode), + precondition(isinstance(userpath, bytes) and IFileNode.providedBy(filenode), userpath=userpath, filenode=filenode) self.filenode = filenode self.metadata = metadata @@ -576,7 +600,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() @@ -593,7 +617,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): # i.e. we respond with an EOF error iff offset is already at EOF. 
if offset >= len(data): - eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file"))) + eventually_errback(d)(Failure(createSFTPError(FX_EOF, "read at or past end of file"))) else: eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data) return data @@ -604,7 +628,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): def writeChunk(self, offset, data): self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL) - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) def close(self): @@ -618,7 +642,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) d = defer.execute(_populate_attrs, self.filenode, self.metadata) @@ -627,7 +651,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): def setAttrs(self, attrs): self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL) - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) @@ -648,7 +672,7 @@ class GeneralSFTPFile(PrefixingLogMixin): if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" % (userpath, flags, _repr_flags(flags), close_notify), level=NOISY) - precondition(isinstance(userpath, str), userpath=userpath) + precondition(isinstance(userpath, bytes), userpath=userpath) self.userpath = userpath self.flags = flags self.close_notify = close_notify @@ -667,11 +691,11 @@ class GeneralSFTPFile(PrefixingLogMixin): # not be set before then. self.consumer = None - def open(self, parent=None, childname=None, filenode=None, metadata=None): + def open(self, parent=None, childname=None, filenode=None, metadata=None): # noqa: F811 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" % (parent, childname, filenode, metadata), level=OPERATIONAL) - precondition(isinstance(childname, (unicode, type(None))), childname=childname) + precondition(isinstance(childname, (str, type(None))), childname=childname) precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode) precondition(not self.closed, sftpfile=self) @@ -688,7 +712,7 @@ class GeneralSFTPFile(PrefixingLogMixin): if (self.flags & FXF_TRUNC) or not filenode: # We're either truncating or creating the file, so we don't need the old contents. self.consumer = OverwriteableFileConsumer(0, tempfile_maker) - self.consumer.download_done("download not needed") + self.consumer.download_done(b"download not needed") else: self.async_.addCallback(lambda ignored: filenode.get_best_readable_version()) @@ -702,7 +726,7 @@ class GeneralSFTPFile(PrefixingLogMixin): d = version.read(self.consumer, 0, None) def _finished(res): if not isinstance(res, Failure): - res = "download finished" + res = b"download finished" self.consumer.download_done(res) d.addBoth(_finished) # It is correct to drop d here.
@@ -722,7 +746,7 @@ class GeneralSFTPFile(PrefixingLogMixin): def rename(self, new_userpath, new_parent, new_childname): self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL) - precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode), + precondition(isinstance(new_userpath, bytes) and isinstance(new_childname, str), new_userpath=new_userpath, new_childname=new_childname) self.userpath = new_userpath self.parent = new_parent @@ -750,11 +774,11 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if not (self.flags & FXF_READ): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() @@ -772,11 +796,11 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL) if not (self.flags & FXF_WRITE): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") return defer.execute(_closed) self.has_changed = True @@ -892,7 +916,7 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) # Optimization for read-only handles, when we already know the metadata.
@@ -916,16 +940,16 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if not (self.flags & FXF_WRITE): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle") return defer.execute(_closed) size = attrs.get("size", None) - if size is not None and (not isinstance(size, (int, long)) or size < 0): - def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer") + if size is not None and (not isinstance(size, int) or size < 0): + def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer") return defer.execute(_bad) d = defer.Deferred() @@ -1011,7 +1035,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def logout(self): self.log(".logout()", level=OPERATIONAL) - for files in self._heisenfiles.itervalues(): + for files in self._heisenfiles.values(): for f in files: f.abandon() @@ -1038,7 +1062,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry) self.log(request, level=OPERATIONAL) - precondition(isinstance(userpath, str), userpath=userpath) + precondition(isinstance(userpath, bytes), userpath=userpath) # First we synchronously mark all heisenfiles matching the userpath or direntry # as abandoned, and remove them from the two heisenfile dicts. Then we .sync() @@ -1087,8 +1111,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite)) self.log(request, level=OPERATIONAL) - precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and - isinstance(to_userpath, str) and isinstance(to_childname, unicode)), + precondition((isinstance(from_userpath, bytes) and isinstance(from_childname, str) and + isinstance(to_userpath, bytes) and isinstance(to_childname, str)), from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname) if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY) @@ -1117,7 +1141,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # does not mean that they were not committed; it is used to determine whether # a NoSuchChildError from the rename attempt should be suppressed). If overwrite # is False and there were already heisenfiles at the destination userpath or - # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED). + # direntry, we return a Deferred that fails with createSFTPError(FX_PERMISSION_DENIED). 
from_direntry = _direntry_for(from_parent, from_childname) to_direntry = _direntry_for(to_parent, to_childname) @@ -1126,7 +1150,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY) if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles): - def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + def _existing(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) if noisy: self.log("existing", level=NOISY) return defer.execute(_existing) @@ -1160,7 +1184,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs) self.log(request, level=OPERATIONAL) - _assert(isinstance(userpath, str) and isinstance(direntry, str), + _assert(isinstance(userpath, bytes) and isinstance(direntry, bytes), userpath=userpath, direntry=direntry) files = [] @@ -1193,7 +1217,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore) self.log(request, level=OPERATIONAL) - _assert(isinstance(userpath, str) and isinstance(direntry, (str, type(None))), + _assert(isinstance(userpath, bytes) and isinstance(direntry, (bytes, type(None))), userpath=userpath, direntry=direntry) files = [] @@ -1218,7 +1242,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _remove_heisenfile(self, userpath, parent, childname, file_to_remove): if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY) - _assert(isinstance(userpath, str) and isinstance(childname, (unicode, type(None))), + _assert(isinstance(userpath, bytes) and isinstance(childname, (str, type(None))), userpath=userpath, childname=childname) direntry = _direntry_for(parent, childname) @@ -1245,8 +1269,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata), level=NOISY) - _assert((isinstance(userpath, str) and isinstance(childname, (unicode, type(None))) and - (metadata is None or 'no-write' in metadata)), + _assert((isinstance(userpath, bytes) and isinstance(childname, (str, type(None))) and + (metadata is None or 'no-write' in metadata)), userpath=userpath, childname=childname, metadata=metadata) writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0 @@ -1279,17 +1303,17 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if not (flags & (FXF_READ | FXF_WRITE)): def _bad_readwrite(): - raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set") + raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set") return defer.execute(_bad_readwrite) if (flags & FXF_EXCL) and not (flags & FXF_CREAT): def _bad_exclcreat(): - raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT") + raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT") return defer.execute(_bad_exclcreat) path = self._path_from_string(pathstring) if not path: - def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty") + def _emptypath(): raise createSFTPError(FX_NO_SUCH_FILE, "path cannot be empty") return defer.execute(_emptypath) # The 
combination of flags is potentially valid. @@ -1348,20 +1372,20 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _got_root(root_and_path): (root, path) = root_and_path if root.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap (or child of an unknown object). " "Upgrading the gateway to a later Tahoe-LAFS version may help") if not path: # case 1 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY) if not IFileNode.providedBy(root): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory cap") if (flags & FXF_WRITE) and root.is_readonly(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot write to a non-writeable filecap without a parent directory") if flags & FXF_EXCL: - raise SFTPError(FX_FAILURE, + raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when it already exists") # The file does not need to be added to all_heisenfiles, because it is not @@ -1388,7 +1412,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _got_parent(parent): if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY) if parent.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a child of an unknown object. " "Upgrading the gateway to a later Tahoe-LAFS version may help") @@ -1403,13 +1427,13 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # which is consistent with what might happen on a POSIX filesystem. if parent_readonly: - raise SFTPError(FX_FAILURE, + raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when the parent directory is read-only") # 'overwrite=False' ensures failure if the link already exists. # FIXME: should use a single call to set_uri and return (child, metadata) (#1035) - zero_length_lit = "URI:LIT:" + zero_length_lit = b"URI:LIT:" if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" % (parent, zero_length_lit, childname), level=NOISY) d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, @@ -1435,14 +1459,14 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata) if filenode.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap. 
Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IFileNode.providedBy(filenode): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory as if it were a file") if (flags & FXF_WRITE) and metadata['no-write']: - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a non-writeable file for writing") return self._make_file(file, userpath, flags, parent=parent, childname=childname, @@ -1452,10 +1476,10 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): f.trap(NoSuchChildError) if not (flags & FXF_CREAT): - raise SFTPError(FX_NO_SUCH_FILE, + raise createSFTPError(FX_NO_SUCH_FILE, "the file does not exist, and was not opened with the creation (CREAT) flag") if parent_readonly: - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a file when the parent directory is read-only") return self._make_file(file, userpath, flags, parent=parent, childname=childname) @@ -1494,9 +1518,9 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (to_parent, to_childname) = to_pair if from_childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI") if to_childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI") # # "It is an error if there already exists a file with the name specified @@ -1511,7 +1535,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): d2.addCallback(lambda ign: to_parent.get(to_childname)) def _expect_fail(res): if not isinstance(res, Failure): - raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) # It is OK if we fail for errors other than NoSuchChildError, since that probably # indicates some problem accessing the destination directory. @@ -1536,7 +1560,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)): return None if not overwrite and err.check(ExistingChildError): - raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) return err d3.addBoth(_check) @@ -1554,7 +1578,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): path = self._path_from_string(pathstring) metadata = _attrs_to_metadata(attrs) if 'no-write' in metadata: - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only") return defer.execute(_denied) d = self._get_root(path) @@ -1566,7 +1590,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _get_or_create_directories(self, node, path, metadata): if not IDirectoryNode.providedBy(node): # TODO: provide the name of the blocking file in the error message. 
- def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there " + def _blocked(): raise createSFTPError(FX_FAILURE, "cannot create directory because there " "is a file in the way") # close enough return defer.execute(_blocked) @@ -1604,7 +1628,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _got_parent(parent_and_childname): (parent, childname) = parent_and_childname if childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI") direntry = _direntry_for(parent, childname) d2 = defer.succeed(False) @@ -1635,18 +1659,18 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): d.addCallback(_got_parent_or_node) def _list(dirnode): if dirnode.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot list an unknown cap as a directory. Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IDirectoryNode.providedBy(dirnode): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot list a file as if it were a directory") d2 = dirnode.list() def _render(children): parent_readonly = dirnode.is_readonly() results = [] - for filename, (child, metadata) in children.iteritems(): + for filename, (child, metadata) in list(children.items()): # The file size may be cached or absent. metadata['no-write'] = _no_write(parent_readonly, child, metadata) attrs = _populate_attrs(child, metadata) @@ -1726,7 +1750,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if "size" in attrs: # this would require us to download and re-upload the truncated/extended # file contents - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported") return defer.execute(_unsupported) path = self._path_from_string(pathstring) @@ -1743,7 +1767,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if childname is None: if updated_heisenfiles: return None - raise SFTPError(FX_NO_SUCH_FILE, userpath) + raise createSFTPError(FX_NO_SUCH_FILE, userpath) else: desired_metadata = _attrs_to_metadata(attrs) if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY) @@ -1766,7 +1790,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def readLink(self, pathstring): self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL) - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "readLink") return defer.execute(_unsupported) def makeLink(self, linkPathstring, targetPathstring): @@ -1775,7 +1799,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # If this is implemented, note the reversal of arguments described in point 7 of # . 
- def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "makeLink") return defer.execute(_unsupported) def extendedRequest(self, extensionName, extensionData): @@ -1784,8 +1808,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # We implement the three main OpenSSH SFTP extensions; see # - if extensionName == 'posix-rename@openssh.com': - def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request") + if extensionName == b'posix-rename@openssh.com': + def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request") if 4 > len(extensionData): return defer.execute(_bad) (fromPathLen,) = struct.unpack('>L', extensionData[0:4]) @@ -1802,11 +1826,11 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK. def _succeeded(ign): - raise SFTPError(FX_OK, "request succeeded") + raise createSFTPError(FX_OK, "request succeeded") d.addCallback(_succeeded) return d - if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com': + if extensionName == b'statvfs@openssh.com' or extensionName == b'fstatvfs@openssh.com': # f_bsize and f_frsize should be the same to avoid a bug in 'df' return defer.succeed(struct.pack('>11Q', 1024, # uint64 f_bsize /* file system block size */ @@ -1822,7 +1846,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): 65535, # uint64 f_namemax /* maximum filename length */ )) - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" % + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" % (extensionName, len(extensionData))) return defer.execute(_unsupported) @@ -1837,29 +1861,29 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _path_from_string(self, pathstring): if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY) - _assert(isinstance(pathstring, str), pathstring=pathstring) + _assert(isinstance(pathstring, bytes), pathstring=pathstring) # The home directory is the root directory. - pathstring = pathstring.strip("/") - if pathstring == "" or pathstring == ".": + pathstring = pathstring.strip(b"/") + if pathstring == b"" or pathstring == b".": path_utf8 = [] else: - path_utf8 = pathstring.split("/") + path_utf8 = pathstring.split(b"/") # # "Servers SHOULD interpret a path name component ".." as referring to # the parent directory, and "." as referring to the current directory." path = [] for p_utf8 in path_utf8: - if p_utf8 == "..": + if p_utf8 == b"..": # ignore excess ..
components at the root if len(path) > 0: path = path[:-1] - elif p_utf8 != ".": + elif p_utf8 != b".": try: p = p_utf8.decode('utf-8', 'strict') except UnicodeError: - raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8") + raise createSFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8") path.append(p) if noisy: self.log(" PATH %r" % (path,), level=NOISY) @@ -1978,9 +2002,9 @@ class SFTPServer(service.MultiService): def __init__(self, client, accountfile, accounturl, sftp_portstr, pubkey_file, privkey_file): - precondition(isinstance(accountfile, (unicode, type(None))), accountfile) - precondition(isinstance(pubkey_file, unicode), pubkey_file) - precondition(isinstance(privkey_file, unicode), privkey_file) + precondition(isinstance(accountfile, (str, type(None))), accountfile) + precondition(isinstance(pubkey_file, str), pubkey_file) + precondition(isinstance(privkey_file, str), privkey_file) service.MultiService.__init__(self) r = Dispatcher(client) diff --git a/src/allmydata/test/test_sftp.py b/src/allmydata/test/test_sftp.py index 1ff0363e8..2214e4e5b 100644 --- a/src/allmydata/test/test_sftp.py +++ b/src/allmydata/test/test_sftp.py @@ -1,4 +1,14 @@ +""" +Ported to Python 3. +""" from __future__ import print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import re, struct, traceback, time, calendar from stat import S_IFREG, S_IFDIR @@ -73,7 +83,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas return d def _set_up_tree(self): - u = publish.MutableData("mutable file contents") + u = publish.MutableData(b"mutable file contents") d = self.client.create_mutable_file(u) d.addCallback(lambda node: self.root.set_node(u"mutable", node)) def _created_mutable(n): @@ -89,33 +99,33 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.readonly_uri = n.get_uri() d.addCallback(_created_readonly) - gross = upload.Data("0123456789" * 101, None) + gross = upload.Data(b"0123456789" * 101, None) d.addCallback(lambda ign: self.root.add_file(u"gro\u00DF", gross)) def _created_gross(n): self.gross = n self.gross_uri = n.get_uri() d.addCallback(_created_gross) - small = upload.Data("0123456789", None) + small = upload.Data(b"0123456789", None) d.addCallback(lambda ign: self.root.add_file(u"small", small)) def _created_small(n): self.small = n self.small_uri = n.get_uri() d.addCallback(_created_small) - small2 = upload.Data("Small enough for a LIT too", None) + small2 = upload.Data(b"Small enough for a LIT too", None) d.addCallback(lambda ign: self.root.add_file(u"small2", small2)) def _created_small2(n): self.small2 = n self.small2_uri = n.get_uri() d.addCallback(_created_small2) - empty_litdir_uri = "URI:DIR2-LIT:" + empty_litdir_uri = b"URI:DIR2-LIT:" # contains one child which is itself also LIT: - tiny_litdir_uri = "URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" + tiny_litdir_uri = b"URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" - unknown_uri = "x-tahoe-crazy://I_am_from_the_future." + unknown_uri = b"x-tahoe-crazy://I_am_from_the_future." 
d.addCallback(lambda ign: self.root._create_and_validate_node(None, empty_litdir_uri, name=u"empty_lit_dir")) def _created_empty_lit_dir(n): @@ -151,55 +161,55 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas version = self.handler.gotVersion(3, {}) self.failUnless(isinstance(version, dict)) - self.failUnlessReallyEqual(self.handler._path_from_string(""), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("."), []) - self.failUnlessReallyEqual(self.handler._path_from_string("//"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/."), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/./"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("foo"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar//"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar//"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/./bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("./foo/./bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b""), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"."), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"//"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/."), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/./"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar//"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar//"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/./bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"./foo/./bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"../bar"), [u"bar"]) + 
self.failUnlessReallyEqual(self.handler._path_from_string(b"/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler.realPath(""), "/") - self.failUnlessReallyEqual(self.handler.realPath("/"), "/") - self.failUnlessReallyEqual(self.handler.realPath("."), "/") - self.failUnlessReallyEqual(self.handler.realPath("//"), "/") - self.failUnlessReallyEqual(self.handler.realPath("/."), "/") - self.failUnlessReallyEqual(self.handler.realPath("/./"), "/") - self.failUnlessReallyEqual(self.handler.realPath("foo"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("/foo"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("foo/"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("/foo/"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("foo/bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/bar//"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/bar//"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/./bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("./foo/./bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("/../bar"), "/bar") + self.failUnlessReallyEqual(self.handler.realPath(b""), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"."), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"//"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/."), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/./"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"foo"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar//"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar//"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/./bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"./foo/./bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/../bar"), b"/bar") d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_path_from_string invalid UTF-8", - self.handler._path_from_string, "\xFF")) + self.handler._path_from_string, b"\xFF")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "realPath invalid UTF-8", - self.handler.realPath, "\xFF")) + self.handler.realPath, b"\xFF")) return d @@ -240,10 +250,10 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "readLink link", - self.handler.readLink, "link")) 
+ self.handler.readLink, b"link")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "makeLink link file", - self.handler.makeLink, "link", "file")) + self.handler.makeLink, b"link", b"file")) return d @@ -274,64 +284,64 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory small", - self.handler.openDirectory, "small")) + self.handler.openDirectory, b"small")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory unknown", - self.handler.openDirectory, "unknown")) + self.handler.openDirectory, b"unknown")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir", - self.handler.openDirectory, "nodir")) + self.handler.openDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir/nodir", - self.handler.openDirectory, "nodir/nodir")) + self.handler.openDirectory, b"nodir/nodir")) gross = u"gro\u00DF".encode("utf-8") expected_root = [ - ('empty_lit_dir', r'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}), - (gross, r'-rw-rw-rw- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0o666, 'size': 1010}), + (b'empty_lit_dir', br'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}), + (gross, br'-rw-rw-rw- .* 1010 .* '+gross+b'$', {'permissions': S_IFREG | 0o666, 'size': 1010}), # The fall of the Berlin wall may have been on 9th or 10th November 1989 depending on the gateway's timezone. #('loop', r'drwxrwxrwx .* 0 Nov (09|10) 1989 loop$', {'permissions': S_IFDIR | 0777}), - ('loop', r'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}), - ('mutable', r'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}), - ('readonly', r'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}), - ('small', r'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}), - ('small2', r'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}), - ('tiny_lit_dir', r'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}), - ('unknown', r'\?--------- .* 0 .* unknown$', {'permissions': 0}), + (b'loop', br'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}), + (b'mutable', br'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}), + (b'readonly', br'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}), + (b'small', br'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}), + (b'small2', br'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}), + (b'tiny_lit_dir', br'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}), + (b'unknown', br'\?--------- .* 0 .* unknown$', {'permissions': 0}), ] - d.addCallback(lambda ign: self.handler.openDirectory("")) + d.addCallback(lambda ign: self.handler.openDirectory(b"")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: self.handler.openDirectory("loop")) + d.addCallback(lambda ign: self.handler.openDirectory(b"loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: self.handler.openDirectory("loop/loop")) + d.addCallback(lambda ign: self.handler.openDirectory(b"loop/loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: 
self.handler.openDirectory("empty_lit_dir")) + d.addCallback(lambda ign: self.handler.openDirectory(b"empty_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, [])) # The UTC epoch may either be in Jan 1 1970 or Dec 31 1969 depending on the gateway's timezone. expected_tiny_lit = [ - ('short', r'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}), + (b'short', br'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}), ] - d.addCallback(lambda ign: self.handler.openDirectory("tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.openDirectory(b"tiny_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, expected_tiny_lit)) - d.addCallback(lambda ign: self.handler.getAttrs("small", True)) + d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) - d.addCallback(lambda ign: self.handler.setAttrs("small", {})) + d.addCallback(lambda ign: self.handler.setAttrs(b"small", {})) d.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) - d.addCallback(lambda ign: self.handler.getAttrs("small", True)) + d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size", - self.handler.setAttrs, "small", {'size': 0})) + self.handler.setAttrs, b"small", {'size': 0})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) @@ -343,53 +353,53 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small 0 bad", - self.handler.openFile, "small", 0, {})) + self.handler.openFile, b"small", 0, {})) # attempting to open a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nofile READ nosuch", - self.handler.openFile, "nofile", sftp.FXF_READ, {})) + self.handler.openFile, b"nofile", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nodir/file READ nosuch", - self.handler.openFile, "nodir/file", sftp.FXF_READ, {})) + self.handler.openFile, b"nodir/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown READ denied", - self.handler.openFile, "unknown", sftp.FXF_READ, {})) + self.handler.openFile, b"unknown", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/file READ denied", - self.handler.openFile, "unknown/file", sftp.FXF_READ, {})) + self.handler.openFile, b"unknown/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir READ denied", - self.handler.openFile, "tiny_lit_dir", sftp.FXF_READ, {})) + self.handler.openFile, b"tiny_lit_dir", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown uri READ denied", - self.handler.openFile, "uri/"+self.unknown_uri, sftp.FXF_READ, {})) + self.handler.openFile, b"uri/"+self.unknown_uri, sftp.FXF_READ, {})) d.addCallback(lambda ign: 
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir uri READ denied", - self.handler.openFile, "uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {})) + self.handler.openFile, b"uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {})) # FIXME: should be FX_NO_SUCH_FILE? d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile noexist uri READ denied", - self.handler.openFile, "uri/URI:noexist", sftp.FXF_READ, {})) + self.handler.openFile, b"uri/URI:noexist", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile invalid UTF-8 uri READ denied", - self.handler.openFile, "uri/URI:\xFF", sftp.FXF_READ, {})) + self.handler.openFile, b"uri/URI:\xFF", sftp.FXF_READ, {})) # reading an existing file should succeed - d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _read_small(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(8, 4)) # read that starts before EOF is OK - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", @@ -404,12 +414,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: rf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) - d2.addCallback(lambda ign: self.handler.getAttrs("small", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"small", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", - rf.writeChunk, 0, "a")) + rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) @@ -432,16 +442,16 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ, {})) def _read_gross(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(1008, 4)) # read that starts before 
EOF is OK - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", @@ -461,7 +471,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", - rf.writeChunk, 0, "a")) + rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) @@ -480,37 +490,37 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_read_gross) # reading an existing small file via uri/ should succeed - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.small_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.small_uri, sftp.FXF_READ, {})) def _read_small_uri(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_small_uri) # repeat for a large file - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_gross_uri(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_gross_uri) # repeat for a mutable file - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.mutable_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.mutable_uri, sftp.FXF_READ, {})) def _read_mutable_uri(rf): d2 = rf.readChunk(0, 100) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable file contents")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable file contents")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_mutable_uri) # repeat for a file within a directory referenced by URI - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.tiny_lit_dir_uri+"/short", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.tiny_lit_dir_uri+b"/short", sftp.FXF_READ, {})) def _read_short(rf): d2 = rf.readChunk(0, 100) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "The end.")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"The end.")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_short) @@ -518,7 +528,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # check that failed downloads cause failed reads. Note that this # trashes the grid (by deleting all shares), so this must be at the # end of the test function. 
- d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) @@ -539,10 +549,10 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # The check at the end of openFile_read tested this for large files, # but it trashed the grid in the process, so this needs to be a # separate test. - small = upload.Data("0123456789"*10, None) + small = upload.Data(b"0123456789"*10, None) d = self._set_up("openFile_read_error") d.addCallback(lambda ign: self.root.add_file(u"small", small)) - d.addCallback(lambda n: self.handler.openFile("/uri/"+n.get_uri(), sftp.FXF_READ, {})) + d.addCallback(lambda n: self.handler.openFile(b"/uri/"+n.get_uri(), sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) @@ -566,106 +576,106 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # '' is an invalid filename d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile '' WRITE|CREAT|TRUNC nosuch", - self.handler.openFile, "", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) + self.handler.openFile, b"", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # TRUNC is not valid without CREAT if the file does not already exist d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile newfile WRITE|TRUNC nosuch", - self.handler.openFile, "newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) + self.handler.openFile, b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) # EXCL is not valid without CREAT d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small WRITE|EXCL bad", - self.handler.openFile, "small", sftp.FXF_WRITE | sftp.FXF_EXCL, {})) + self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_EXCL, {})) # cannot write to an existing directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir WRITE denied", - self.handler.openFile, "tiny_lit_dir", sftp.FXF_WRITE, {})) + self.handler.openFile, b"tiny_lit_dir", sftp.FXF_WRITE, {})) # cannot write to an existing unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown WRITE denied", - self.handler.openFile, "unknown", sftp.FXF_WRITE, {})) + self.handler.openFile, b"unknown", sftp.FXF_WRITE, {})) # cannot create a child of an unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/newfile WRITE|CREAT denied", - self.handler.openFile, "unknown/newfile", + self.handler.openFile, b"unknown/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a new file in an immutable directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/newfile WRITE|CREAT|TRUNC denied", - self.handler.openFile, "tiny_lit_dir/newfile", + self.handler.openFile, b"tiny_lit_dir/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # cannot write to an existing immutable file in an immutable directory (with or without CREAT and EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE denied", - self.handler.openFile, "tiny_lit_dir/short", sftp.FXF_WRITE, {})) + 
self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE|CREAT denied", - self.handler.openFile, "tiny_lit_dir/short", + self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a mutable file via a readonly cap (by path or uri) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly WRITE denied", - self.handler.openFile, "readonly", sftp.FXF_WRITE, {})) + self.handler.openFile, b"readonly", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly uri WRITE denied", - self.handler.openFile, "uri/"+self.readonly_uri, sftp.FXF_WRITE, {})) + self.handler.openFile, b"uri/"+self.readonly_uri, sftp.FXF_WRITE, {})) # cannot create a file with the EXCL flag if it already exists d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile small WRITE|CREAT|EXCL failure", - self.handler.openFile, "small", + self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable WRITE|CREAT|EXCL failure", - self.handler.openFile, "mutable", + self.handler.openFile, b"mutable", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable uri WRITE|CREAT|EXCL failure", - self.handler.openFile, "uri/"+self.mutable_uri, + self.handler.openFile, b"uri/"+self.mutable_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile tiny_lit_dir/short WRITE|CREAT|EXCL failure", - self.handler.openFile, "tiny_lit_dir/short", + self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # cannot write to an immutable file if we don't have its parent (with or without CREAT, TRUNC, or EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE denied", - self.handler.openFile, "uri/"+self.small_uri, sftp.FXF_WRITE, {})) + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|TRUNC denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|EXCL denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # test creating a new file with truncation and extension d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) def _write(wf): - d2 = wf.writeChunk(0, "0123456789") + d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda 
res: self.failUnlessReallyEqual(res, None)) - d2.addCallback(lambda ign: wf.writeChunk(8, "0123")) - d2.addCallback(lambda ign: wf.writeChunk(13, "abc")) + d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) + d2.addCallback(lambda ign: wf.writeChunk(13, b"abc")) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) - d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) d2.addCallback(lambda ign: wf.setAttrs({})) @@ -685,7 +695,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) - d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) d2.addCallback(lambda ign: @@ -696,7 +706,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "writeChunk on closed file bad", - wf.writeChunk, 0, "a")) + wf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs on closed file bad", wf.setAttrs, {'size': 0})) @@ -706,77 +716,77 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123\x00a\x00\x00\x00")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123\x00a\x00\x00\x00")) # test APPEND flag, and also replacing an existing file ("newfile" created by the previous test) d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_APPEND, {})) def _write_append(wf): - d2 = wf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: wf.writeChunk(8, "0123")) + d2 = wf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) - d2.addCallback(lambda ign: wf.writeChunk(0, "z")) + d2.addCallback(lambda ign: wf.writeChunk(0, b"z")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_append) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123\x00\x00\x00z")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123\x00\x00\x00z")) # test WRITE | TRUNC without CREAT, when the file already exists # This is invalid according to section 6.3 of the SFTP spec, but required for interoperability, # since POSIX does allow O_WRONLY | O_TRUNC. 
d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) def _write_trunc(wf): - d2 = wf.writeChunk(0, "01234") + d2 = wf.writeChunk(0, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) # test WRITE | TRUNC with permissions: 0 d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0})) d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) d.addCallback(lambda ign: self.root.get_metadata_for(u"newfile")) d.addCallback(lambda metadata: self.failIf(metadata.get('no-write', False), metadata)) # test EXCL flag d.addCallback(lambda ign: - self.handler.openFile("excl", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"excl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_EXCL, {})) def _write_excl(wf): d2 = self.root.get(u"excl") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) - d2.addCallback(lambda ign: wf.writeChunk(0, "0123456789")) + d2.addCallback(lambda ign: wf.writeChunk(0, b"0123456789")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl) d.addCallback(lambda ign: self.root.get(u"excl")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # test that writing a zero-length file with EXCL only updates the directory once d.addCallback(lambda ign: - self.handler.openFile("zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_excl_zerolength(wf): d2 = self.root.get(u"zerolength") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) # FIXME: no API to get the best version number exists (fix as part of #993) """ @@ -793,84 +803,84 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write_excl_zerolength) d.addCallback(lambda ign: self.root.get(u"zerolength")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) # test WRITE | CREAT | EXCL | APPEND d.addCallback(lambda ign: - self.handler.openFile("exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL | sftp.FXF_APPEND, {})) def _write_excl_append(wf): d2 = self.root.get(u"exclappend") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: 
self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) - d2.addCallback(lambda ign: wf.writeChunk(10, "0123456789")) - d2.addCallback(lambda ign: wf.writeChunk(5, "01234")) + d2.addCallback(lambda ign: wf.writeChunk(10, b"0123456789")) + d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl_append) d.addCallback(lambda ign: self.root.get(u"exclappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # test WRITE | CREAT | APPEND when the file does not already exist d.addCallback(lambda ign: - self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_new(wf): - d2 = wf.writeChunk(10, "0123456789") - d2.addCallback(lambda ign: wf.writeChunk(5, "01234")) + d2 = wf.writeChunk(10, b"0123456789") + d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_new) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # ... and when it does exist d.addCallback(lambda ign: - self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_existing(wf): - d2 = wf.writeChunk(5, "01234") + d2 = wf.writeChunk(5, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_existing) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123401234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123401234")) # test WRITE | CREAT without TRUNC, when the file does not already exist d.addCallback(lambda ign: - self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) + self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_new(wf): - d2 = wf.writeChunk(0, "0123456789") + d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_new) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # ... 
and when it does exist d.addCallback(lambda ign: - self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) + self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_existing(wf): - d2 = wf.writeChunk(0, "abcde") + d2 = wf.writeChunk(0, b"abcde") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_existing) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcde56789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcde56789")) d.addCallback(lambda ign: self.root.set_node(u"mutable2", self.mutable)) # test writing to a mutable file d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable(wf): - d2 = wf.writeChunk(8, "new!") + d2 = wf.writeChunk(8, b"new!") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_mutable) @@ -881,30 +891,30 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.failUnlessReallyEqual(node.get_uri(), self.mutable_uri) return node.download_best_version() d.addCallback(_check_same_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with permissions, which should be ignored d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {'permissions': 0})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {'permissions': 0})) d.addCallback(_write_mutable) d.addCallback(lambda ign: self.root.get(u"mutable")) d.addCallback(_check_same_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with a setAttrs call that diminishes the parent link to read-only, first by path d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable_setattr(wf): - d2 = wf.writeChunk(8, "read-only link from parent") + d2 = wf.writeChunk(8, b"read-only link from parent") - d2.addCallback(lambda ign: self.handler.setAttrs("mutable", {'permissions': 0o444})) + d2.addCallback(lambda ign: self.handler.setAttrs(b"mutable", {'permissions': 0o444})) d2.addCallback(lambda ign: self.root.get(u"mutable")) d2.addCallback(lambda node: self.failUnless(node.is_readonly())) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) - d2.addCallback(lambda ign: self.handler.getAttrs("mutable", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) d2.addCallback(lambda ign: wf.close()) @@ -918,13 +928,13 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.failUnlessReallyEqual(node.get_storage_index(), self.mutable.get_storage_index()) return node.download_best_version() d.addCallback(_check_readonly_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable read-only link from parent")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable read-only link from parent")) # ... 
and then by handle d.addCallback(lambda ign: - self.handler.openFile("mutable2", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable2", sftp.FXF_WRITE, {})) def _write_mutable2_setattr(wf): - d2 = wf.writeChunk(7, "2") + d2 = wf.writeChunk(7, b"2") d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0o444, 'size': 8})) @@ -934,7 +944,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) - d2.addCallback(lambda ign: self.handler.getAttrs("mutable2", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable2", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) d2.addCallback(lambda ign: wf.close()) @@ -942,55 +952,55 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write_mutable2_setattr) d.addCallback(lambda ign: self.root.get(u"mutable2")) d.addCallback(_check_readonly_file) # from above - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable2")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable2")) # test READ | WRITE without CREAT or TRUNC d.addCallback(lambda ign: - self.handler.openFile("small", sftp.FXF_READ | sftp.FXF_WRITE, {})) + self.handler.openFile(b"small", sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write(rwf): - d2 = rwf.writeChunk(8, "0123") + d2 = rwf.writeChunk(8, b"0123") # test immediate read starting after the old end-of-file d2.addCallback(lambda ign: rwf.readChunk(11, 1)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "3")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"3")) d2.addCallback(lambda ign: rwf.readChunk(0, 100)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_read_write) d.addCallback(lambda ign: self.root.get(u"small")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) # test WRITE and rename while still open d.addCallback(lambda ign: - self.handler.openFile("small", sftp.FXF_WRITE, {})) + self.handler.openFile(b"small", sftp.FXF_WRITE, {})) def _write_rename(wf): - d2 = wf.writeChunk(0, "abcd") - d2.addCallback(lambda ign: self.handler.renameFile("small", "renamed")) - d2.addCallback(lambda ign: wf.writeChunk(4, "efgh")) + d2 = wf.writeChunk(0, b"abcd") + d2.addCallback(lambda ign: self.handler.renameFile(b"small", b"renamed")) + d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename) d.addCallback(lambda ign: self.root.get(u"renamed")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh0123")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh0123")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename small while open", "small", self.root.get, u"small")) # test WRITE | CREAT | EXCL and rename while still open d.addCallback(lambda ign: - self.handler.openFile("newexcl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) + self.handler.openFile(b"newexcl", 
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_creat_excl_rename(wf): - d2 = wf.writeChunk(0, "abcd") - d2.addCallback(lambda ign: self.handler.renameFile("newexcl", "renamedexcl")) - d2.addCallback(lambda ign: wf.writeChunk(4, "efgh")) + d2 = wf.writeChunk(0, b"abcd") + d2.addCallback(lambda ign: self.handler.renameFile(b"newexcl", b"renamedexcl")) + d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_excl_rename) d.addCallback(lambda ign: self.root.get(u"renamedexcl")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename newexcl while open", "newexcl", self.root.get, u"newexcl")) @@ -999,21 +1009,21 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas def _open_and_rename_race(ign): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) - d2 = self.handler.openFile("new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) + d2 = self.handler.openFile(b"new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile - d3 = self.handler.renameFile("new", "new2") + d3 = self.handler.renameFile(b"new", b"new2") d3.addErrback(lambda err: self.fail("renameFile failed: %r" % (err,))) return d2 d.addCallback(_open_and_rename_race) def _write_rename_race(wf): - d2 = wf.writeChunk(0, "abcd") + d2 = wf.writeChunk(0, b"abcd") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename_race) d.addCallback(lambda ign: self.root.get(u"new2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcd")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcd")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename new while open", "new", self.root.get, u"new")) @@ -1024,7 +1034,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas gross = u"gro\u00DF".encode("utf-8") d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write_broken(rwf): - d2 = rwf.writeChunk(0, "abcdefghij") + d2 = rwf.writeChunk(0, b"abcdefghij") d2.addCallback(lambda ign: self.g.nuke_from_orbit()) # reading should fail (reliably if we read past the written chunk) @@ -1048,57 +1058,57 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", - self.handler.removeFile, "nofile")) + self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", - self.handler.removeFile, "nofile")) + self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nodir/file", - self.handler.removeFile, "nodir/file")) + self.handler.removeFile, b"nodir/file")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removefile ''", - self.handler.removeFile, "")) + self.handler.removeFile, b"")) # removing a directory should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeFile tiny_lit_dir", - self.handler.removeFile, "tiny_lit_dir")) + 
self.handler.removeFile, b"tiny_lit_dir")) # removing a file should succeed d.addCallback(lambda ign: self.root.get(u"gro\u00DF")) d.addCallback(lambda ign: self.handler.removeFile(u"gro\u00DF".encode('utf-8'))) d.addCallback(lambda ign: - self.shouldFail(NoSuchChildError, "removeFile gross", "gro\\xdf", + self.shouldFail(NoSuchChildError, "removeFile gross", "gro", self.root.get, u"gro\u00DF")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) - d.addCallback(lambda ign: self.handler.removeFile("unknown")) + d.addCallback(lambda ign: self.handler.removeFile(b"unknown")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile unknown", "unknown", self.root.get, u"unknown")) # removing a link to an open file should not prevent it from being read - d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _remove_and_read_small(rf): - d2 = self.handler.removeFile("small") + d2 = self.handler.removeFile(b"small") d2.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile small", "small", self.root.get, u"small")) d2.addCallback(lambda ign: rf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_remove_and_read_small) # removing a link to a created file should prevent it from being created - d.addCallback(lambda ign: self.handler.openFile("tempfile", sftp.FXF_READ | sftp.FXF_WRITE | + d.addCallback(lambda ign: self.handler.openFile(b"tempfile", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_remove(rwf): - d2 = rwf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: self.handler.removeFile("tempfile")) + d2 = rwf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_remove) @@ -1107,14 +1117,14 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.root.get, u"tempfile")) # ... 
even if the link is renamed while open - d.addCallback(lambda ign: self.handler.openFile("tempfile2", sftp.FXF_READ | sftp.FXF_WRITE | + d.addCallback(lambda ign: self.handler.openFile(b"tempfile2", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_rename_remove(rwf): - d2 = rwf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: self.handler.renameFile("tempfile2", "tempfile3")) - d2.addCallback(lambda ign: self.handler.removeFile("tempfile3")) + d2 = rwf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: self.handler.renameFile(b"tempfile2", b"tempfile3")) + d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile3")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_rename_remove) @@ -1135,13 +1145,13 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir", - self.handler.removeDirectory, "nodir")) + self.handler.removeDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir/nodir", - self.handler.removeDirectory, "nodir/nodir")) + self.handler.removeDirectory, b"nodir/nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory ''", - self.handler.removeDirectory, "")) + self.handler.removeDirectory, b"")) # removing a file should fail d.addCallback(lambda ign: @@ -1150,14 +1160,14 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # removing a directory should succeed d.addCallback(lambda ign: self.root.get(u"tiny_lit_dir")) - d.addCallback(lambda ign: self.handler.removeDirectory("tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.removeDirectory(b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeDirectory tiny_lit_dir", "tiny_lit_dir", self.root.get, u"tiny_lit_dir")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) - d.addCallback(lambda ign: self.handler.removeDirectory("unknown")) + d.addCallback(lambda ign: self.handler.removeDirectory(b"unknown")) d.addCallback(lambda err: self.shouldFail(NoSuchChildError, "removeDirectory unknown", "unknown", self.root.get, u"unknown")) @@ -1173,58 +1183,58 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile nofile newfile", - self.handler.renameFile, "nofile", "newfile")) + self.handler.renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile '' newfile", - self.handler.renameFile, "", "newfile")) + self.handler.renameFile, b"", b"newfile")) # renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small nodir/small", - self.handler.renameFile, "small", "nodir/small")) + self.handler.renameFile, b"small", b"nodir/small")) # renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small invalid", - self.handler.renameFile, "small", "\xFF")) + 
self.handler.renameFile, b"small", b"\xFF")) # renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small from uri", - self.handler.renameFile, "uri/"+self.small_uri, "new")) + self.handler.renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small to uri", - self.handler.renameFile, "small", "uri/fake_uri")) + self.handler.renameFile, b"small", b"uri/fake_uri")) # renaming a file onto an existing file, directory or unknown should fail # The SFTP spec isn't clear about what error should be returned, but sshfs depends on # it being FX_PERMISSION_DENIED. d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small small2", - self.handler.renameFile, "small", "small2")) + self.handler.renameFile, b"small", b"small2")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small tiny_lit_dir", - self.handler.renameFile, "small", "tiny_lit_dir")) + self.handler.renameFile, b"small", b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small unknown", - self.handler.renameFile, "small", "unknown")) + self.handler.renameFile, b"small", b"unknown")) # renaming a file onto a heisenfile should fail, even if the open hasn't completed def _rename_onto_heisenfile_race(wf): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) - d2 = self.handler.openFile("heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) + d2 = self.handler.openFile(b"heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile d3 = self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small heisenfile", - self.handler.renameFile, "small", "heisenfile") + self.handler.renameFile, b"small", b"heisenfile") d2.addCallback(lambda wf: wf.close()) return deferredutil.gatherResults([d2, d3]) d.addCallback(_rename_onto_heisenfile_race) # renaming a file to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("small", "new_small")) + d.addCallback(lambda ign: self.handler.renameFile(b"small", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) @@ -1235,12 +1245,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # renaming a directory to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("tiny_lit_dir", "new_tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # renaming an unknown to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("unknown", "new_unknown")) + d.addCallback(lambda ign: self.handler.renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) @@ -1253,7 +1263,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas extData = (struct.pack('>L', 
len(fromPathstring)) + fromPathstring + struct.pack('>L', len(toPathstring)) + toPathstring) - d2 = self.handler.extendedRequest('posix-rename@openssh.com', extData) + d2 = self.handler.extendedRequest(b'posix-rename@openssh.com', extData) def _check(res): res.trap(sftp.SFTPError) if res.value.code == sftp.FX_OK: @@ -1273,44 +1283,44 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # POSIX-renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix nofile newfile", - _renameFile, "nofile", "newfile")) + _renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix '' newfile", - _renameFile, "", "newfile")) + _renameFile, b"", b"newfile")) # POSIX-renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small nodir/small", - _renameFile, "small", "nodir/small")) + _renameFile, b"small", b"nodir/small")) # POSIX-renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small invalid", - _renameFile, "small", "\xFF")) + _renameFile, b"small", b"\xFF")) # POSIX-renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small from uri", - _renameFile, "uri/"+self.small_uri, "new")) + _renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small to uri", - _renameFile, "small", "uri/fake_uri")) + _renameFile, b"small", b"uri/fake_uri")) # POSIX-renaming a file onto an existing file, directory or unknown should succeed - d.addCallback(lambda ign: _renameFile("small", "small2")) + d.addCallback(lambda ign: _renameFile(b"small", b"small2")) d.addCallback(lambda ign: self.root.get(u"small2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) - d.addCallback(lambda ign: _renameFile("small2", "loop2")) + d.addCallback(lambda ign: _renameFile(b"small2", b"loop2")) d.addCallback(lambda ign: self.root.get(u"loop2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) - d.addCallback(lambda ign: _renameFile("loop2", "unknown2")) + d.addCallback(lambda ign: _renameFile(b"loop2", b"unknown2")) d.addCallback(lambda ign: self.root.get(u"unknown2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) # POSIX-renaming a file to a correct new path should succeed - d.addCallback(lambda ign: _renameFile("unknown2", "new_small")) + d.addCallback(lambda ign: _renameFile(b"unknown2", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) @@ -1321,12 +1331,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # POSIX-renaming a directory to a correct path should succeed - d.addCallback(lambda ign: _renameFile("tiny_lit_dir", "new_tiny_lit_dir")) + d.addCallback(lambda ign: _renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # 
POSIX-renaming an unknown to a correct path should succeed - d.addCallback(lambda ign: _renameFile("unknown", "new_unknown")) + d.addCallback(lambda ign: _renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) @@ -1339,7 +1349,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self._set_up_tree()) # making a directory at a correct path should succeed - d.addCallback(lambda ign: self.handler.makeDirectory("newdir", {'ext_foo': 'bar', 'ctime': 42})) + d.addCallback(lambda ign: self.handler.makeDirectory(b"newdir", {'ext_foo': 'bar', 'ctime': 42})) d.addCallback(lambda ign: self.root.get_child_and_metadata(u"newdir")) def _got(child_and_metadata): @@ -1355,7 +1365,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_got) # making intermediate directories should also succeed - d.addCallback(lambda ign: self.handler.makeDirectory("newparent/newchild", {})) + d.addCallback(lambda ign: self.handler.makeDirectory(b"newparent/newchild", {})) d.addCallback(lambda ign: self.root.get(u"newparent")) def _got_newparent(newparent): @@ -1371,17 +1381,17 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "makeDirectory invalid UTF-8", - self.handler.makeDirectory, "\xFF", {})) + self.handler.makeDirectory, b"\xFF", {})) # should fail because there is an existing file "small" d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "makeDirectory small", - self.handler.makeDirectory, "small", {})) + self.handler.makeDirectory, b"small", {})) # directories cannot be created read-only via SFTP d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "makeDirectory newdir2 permissions:0444 denied", - self.handler.makeDirectory, "newdir2", + self.handler.makeDirectory, b"newdir2", {'permissions': 0o444})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) @@ -1461,24 +1471,24 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas def test_extendedRequest(self): d = self._set_up("extendedRequest") - d.addCallback(lambda ign: self.handler.extendedRequest("statvfs@openssh.com", "/")) + d.addCallback(lambda ign: self.handler.extendedRequest(b"statvfs@openssh.com", b"/")) def _check(res): - self.failUnless(isinstance(res, str)) + self.failUnless(isinstance(res, bytes)) self.failUnlessEqual(len(res), 8*11) d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "extendedRequest foo bar", - self.handler.extendedRequest, "foo", "bar")) + self.handler.extendedRequest, b"foo", b"bar")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 1", - self.handler.extendedRequest, 'posix-rename@openssh.com', '')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 2", - self.handler.extendedRequest, 'posix-rename@openssh.com', '\x00\x00\x00\x01')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest 
posix-rename@openssh.com invalid 3", - self.handler.extendedRequest, 'posix-rename@openssh.com', '\x00\x00\x00\x01_\x00\x00\x00\x01')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01_\x00\x00\x00\x01')) return d diff --git a/src/allmydata/util/_python3.py b/src/allmydata/util/_python3.py index 42e2ec5fb..38d0f4d7e 100644 --- a/src/allmydata/util/_python3.py +++ b/src/allmydata/util/_python3.py @@ -37,6 +37,7 @@ PORTED_MODULES = [ "allmydata.crypto.util", "allmydata.deep_stats", "allmydata.dirnode", + "allmydata.frontends.sftpd", "allmydata.hashtree", "allmydata.immutable.checker", "allmydata.immutable.downloader", @@ -170,6 +171,7 @@ PORTED_TEST_MODULES = [ "allmydata.test.test_pipeline", "allmydata.test.test_python3", "allmydata.test.test_repairer", + "allmydata.test.test_sftp", "allmydata.test.test_spans", "allmydata.test.test_statistics", "allmydata.test.test_stats",
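For reference, the convention the ported tests follow is that every path handed to handler methods such as openFile(), removeFile() and renameFile() is a byte string, with URI fragments joined using b"/" and Unicode names (e.g. u"gro\u00DF") encoded as UTF-8. A minimal standalone sketch of that convention follows; the sftp_path helper is hypothetical, exists only for illustration, and is not part of this change:

    def sftp_path(*segments):
        """Join path segments into the bytes form the tests pass to the SFTP handler."""
        encoded = []
        for seg in segments:
            if isinstance(seg, bytes):
                encoded.append(seg)
            else:
                # text segments are encoded the same way the tests encode them
                encoded.append(seg.encode("utf-8"))
        return b"/".join(encoded)

    if __name__ == "__main__":
        # analogous to b"uri/" + self.small_uri or b"tiny_lit_dir/short" in the tests
        assert sftp_path(u"uri", b"URI:EXAMPLE") == b"uri/URI:EXAMPLE"
        assert sftp_path(u"tiny_lit_dir", u"short") == b"tiny_lit_dir/short"
        print("bytes-path examples pass")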
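The _renameFile helper in this diff drives the posix-rename@openssh.com extended request by packing the two paths as length-prefixed byte strings (a big-endian uint32 length followed by the raw bytes, for each path). A standalone sketch of that packing, with the expected byte layout spelled out:

    import struct

    def posix_rename_data(from_path, to_path):
        # mirrors extData in the test: '>L' length prefix, then the path bytes, twice
        return (struct.pack('>L', len(from_path)) + from_path +
                struct.pack('>L', len(to_path)) + to_path)

    if __name__ == "__main__":
        payload = posix_rename_data(b"small", b"new_small")
        assert payload == b"\x00\x00\x00\x05small\x00\x00\x00\x09new_small"
        print(repr(payload))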
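The statvfs@openssh.com check in test_extendedRequest only asserts that the reply is 8*11 bytes long. Per the OpenSSH extension that reply is eleven big-endian uint64 counters; a sketch of unpacking it is below, where the field names follow the OpenSSH protocol description and are an assumption rather than something this diff asserts:

    import struct

    STATVFS_FIELDS = ("f_bsize", "f_frsize", "f_blocks", "f_bfree", "f_bavail",
                      "f_files", "f_ffree", "f_favail", "f_fsid", "f_flag", "f_namemax")

    def parse_statvfs_reply(res):
        # the test only guarantees the length; the names above are illustrative
        assert len(res) == 8 * 11
        return dict(zip(STATVFS_FIELDS, struct.unpack(">11Q", res)))

    if __name__ == "__main__":
        print(parse_statvfs_reply(b"\x00" * 88))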