Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
Rename did_upload_file to did_upload_version, and is_new_file_time to is_new_file.
Make them take a PathInfo argument rather than (mtime, ctime, size). Also treat the size of a nonexistent file as None (SQL NULL) rather than 0.

Signed-off-by: Daira Hopwood <daira@jacaranda.org>
This commit is contained in:
parent fb629a185e
commit 937431693d
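
For reference, the PathInfo value that the renamed methods now accept bundles the fields the old signatures passed separately. The field names below are taken from the test changes in this diff (fileutil.PathInfo and fileutil.get_pathinfo); the helper shown is only an illustrative sketch, not the project's actual fileutil implementation, and it assumes a missing path is reported with size=None (the SQL NULL mentioned in the commit message).

import os
from collections import namedtuple

# Field names as used by fileutil.PathInfo in the test diff below.
PathInfo = namedtuple("PathInfo", "isdir isfile islink exists size mtime ctime")

def get_pathinfo_sketch(path):
    # Illustrative stand-in for fileutil.get_pathinfo: a nonexistent file
    # gets size=None (stored as SQL NULL) rather than 0.
    # (mtime/ctime of None for a missing path is an assumption of this sketch.)
    if not os.path.lexists(path):
        return PathInfo(isdir=False, isfile=False, islink=False,
                        exists=False, size=None, mtime=None, ctime=None)
    s = os.lstat(path)
    return PathInfo(isdir=os.path.isdir(path), isfile=os.path.isfile(path),
                    islink=os.path.islink(path), exists=True,
                    size=s.st_size, mtime=s.st_mtime, ctime=s.st_ctime)

# New call shapes introduced by this commit:
#   db.did_upload_version(filecap, relpath_u, version, pathinfo)
#   db.is_new_file(pathinfo, relpath_u)
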
@@ -396,8 +396,8 @@ class MagicFolderDB(BackupDB):
         else:
             return row[0]
 
-    def did_upload_file(self, filecap, relpath_u, version, mtime, ctime, size):
-        #print "_did_upload_file(%r, %r, %r, %r, %r, %r)" % (filecap, relpath_u, version, mtime, ctime, size)
+    def did_upload_version(self, filecap, relpath_u, version, pathinfo):
+        #print "did_upload_version(%r, %r, %r, %r)" % (filecap, relpath_u, version, pathinfo)
         now = time.time()
         fileid = self.get_or_allocate_fileid_for_cap(filecap)
         try:
@@ -410,34 +410,26 @@ class MagicFolderDB(BackupDB):
                                 (now, now, fileid))
         try:
             self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?)",
-                                (relpath_u, size, mtime, ctime, fileid, version))
+                                (relpath_u, pathinfo.size, pathinfo.mtime, pathinfo.ctime, fileid, version))
         except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
             self.cursor.execute("UPDATE local_files"
                                 " SET size=?, mtime=?, ctime=?, fileid=?, version=?"
                                 " WHERE path=?",
-                                (size, mtime, ctime, fileid, version, relpath_u))
+                                (pathinfo.size, pathinfo.mtime, pathinfo.ctime, fileid, version, relpath_u))
         self.connection.commit()
 
-    def is_new_file_time(self, path, relpath_u):
+    def is_new_file(self, pathinfo, relpath_u):
         """
         Returns true if the file's current pathinfo (size, mtime, and ctime) has
         changed from the pathinfo previously stored in the db.
         """
-        path = abspath_expanduser_unicode(path)
-        s = os.stat(path)
-        size = s[stat.ST_SIZE]
-        ctime = s[stat.ST_CTIME]
-        mtime = s[stat.ST_MTIME]
+        #print "is_new_file(%r, %r)" % (pathinfo, relpath_u)
         c = self.cursor
-        c.execute("SELECT size,mtime,ctime,fileid"
+        c.execute("SELECT size, mtime, ctime"
                   " FROM local_files"
                   " WHERE path=?",
                   (relpath_u,))
         row = self.cursor.fetchone()
         if not row:
             return True
-        (last_size,last_mtime,last_ctime,last_fileid) = row
-        if (size, ctime, mtime) == (int(last_size), int(last_ctime), int(last_mtime)):
-            return False
-        else:
-            return True
+        return (pathinfo.size, pathinfo.mtime, pathinfo.ctime) != row
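
The rewritten is_new_file compares the caller-supplied (size, mtime, ctime) against the SELECTed row directly, instead of re-stat'ing the path and casting the stored values to int. A minimal standalone sketch of that comparison, using plain sqlite3; the column types in this schema are assumptions for illustration, not the project's actual local_files schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE local_files (path TEXT PRIMARY KEY, size INTEGER,"
             " mtime NUMBER, ctime NUMBER, fileid INTEGER, version INTEGER)")
conn.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?)",
             (u"myFile1", 1, 123, 456, 1, 0))

row = conn.execute("SELECT size, mtime, ctime FROM local_files WHERE path=?",
                   (u"myFile1",)).fetchone()
# sqlite3 returns a plain tuple, so is_new_file reduces to a tuple comparison:
print((1, 123, 456) != row)   # False -> pathinfo unchanged, not a new version
print((2, 123, 456) != row)   # True  -> size changed, treated as a new version
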
@@ -303,12 +303,9 @@ class Uploader(QueueMixin):
                 d2.addCallback(set_deleted)
                 def add_db_entry(filenode):
                     filecap = filenode.get_uri()
-                    size = 0
-                    now = time.time()
-                    ctime = now
-                    mtime = now
-                    self._db.did_upload_file(filecap, relpath_u, current_version, int(mtime), int(ctime), size)
+                    self._db.did_upload_version(filecap, relpath_u, current_version, pathinfo)
                     self._count('files_uploaded')
                 # FIXME consider whether it's correct to retrieve the filenode again.
                 d2.addCallback(lambda x: self._get_filenode(encoded_name_u))
                 d2.addCallback(add_db_entry)
+
@@ -335,7 +332,7 @@ class Uploader(QueueMixin):
                 version = self._db.get_local_file_version(relpath_u)
                 if version is None:
                     version = 0
-                elif self._db.is_new_file_time(unicode_from_filepath(fp), relpath_u):
+                elif self._db.is_new_file(pathinfo, relpath_u):
                     version += 1
                 else:
                     return None
@@ -344,9 +341,7 @@ class Uploader(QueueMixin):
                 d2 = self._upload_dirnode.add_file(encoded_name_u, uploadable, metadata={"version":version}, overwrite=True)
                 def add_db_entry(filenode):
                     filecap = filenode.get_uri()
-                    # XXX maybe just pass pathinfo
-                    self._db.did_upload_file(filecap, relpath_u, version,
-                                             pathinfo.mtime, pathinfo.ctime, pathinfo.size)
+                    self._db.did_upload_version(filecap, relpath_u, version, pathinfo)
                     self._count('files_uploaded')
                 d2.addCallback(add_db_entry)
                 return d2
@@ -543,11 +538,11 @@ class Downloader(QueueMixin):
         d2.addCallback(lambda result: self._write_downloaded_file(abspath_u, result, is_conflict=False))
         def do_update_db(written_abspath_u):
             filecap = file_node.get_uri()
-            pathinfo = get_pathinfo(written_abspath_u)
-            if not pathinfo.exists:
+            written_pathinfo = get_pathinfo(written_abspath_u)
+            if not written_pathinfo.exists:
                 raise Exception("downloaded file %s disappeared" % quote_local_unicode_path(written_abspath_u))
-            self._db.did_upload_file(filecap, relpath_u, metadata['version'],
-                                     pathinfo.mtime, pathinfo.ctime, pathinfo.size)
+
+            self._db.did_upload_version(filecap, relpath_u, metadata['version'], written_pathinfo)
         d2.addCallback(do_update_db)
         # XXX handle failure here with addErrback...
         self._count('objects_downloaded')
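
The downloader records the PathInfo of the file it just wrote under the downloaded version, presumably so that a later uploader scan sees the unmodified file as unchanged (is_new_file returns False) and does not re-upload it. A hypothetical sketch of that flow, reusing get_pathinfo_sketch from the sketch above; `db` stands in for a MagicFolderDB instance and is not real project code:

def record_download_sketch(db, filecap, relpath_u, abspath_u, version):
    # Stat the file we just wrote; bail out if it vanished, mirroring the diff.
    written_pathinfo = get_pathinfo_sketch(abspath_u)
    if not written_pathinfo.exists:
        raise Exception("downloaded file %s disappeared" % abspath_u)
    db.did_upload_version(filecap, relpath_u, version, written_pathinfo)
    # A later scan of the same, unmodified file should then report:
    #   db.is_new_file(get_pathinfo_sketch(abspath_u), relpath_u)  ->  False
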
@@ -1,5 +1,5 @@
 
-import os, sys, stat
+import os, sys
 
 from twisted.trial import unittest
 from twisted.internet import defer
@@ -64,28 +64,31 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
         fileutil.make_dirs(self.basedir)
         db = self._createdb()
 
-        path = abspath_expanduser_unicode(u"myFile1", base=self.basedir)
-        db.did_upload_file('URI:LIT:1', path, 1, 0, 0, 33)
+        relpath1 = u"myFile1"
+        pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
+                                     exists=True, size=1, mtime=123, ctime=456)
+        db.did_upload_version('URI:LIT:1', relpath1, 0, pathinfo)
 
         c = db.cursor
-        c.execute("SELECT size,mtime,ctime,fileid"
+        c.execute("SELECT size, mtime, ctime"
                   " FROM local_files"
                   " WHERE path=?",
-                  (path,))
-        row = db.cursor.fetchone()
-        self.failIfEqual(row, None)
+                  (relpath1,))
+        row = c.fetchone()
+        self.failUnlessEqual(row, (pathinfo.size, pathinfo.mtime, pathinfo.ctime))
 
-        # Second test uses db.check_file instead of SQL query directly
+        # Second test uses db.is_new_file instead of SQL query directly
         # to confirm the previous upload entry in the db.
-        path = abspath_expanduser_unicode(u"myFile2", base=self.basedir)
-        fileutil.write(path, "meow\n")
-        s = os.stat(path)
-        size = s[stat.ST_SIZE]
-        ctime = s[stat.ST_CTIME]
-        mtime = s[stat.ST_MTIME]
-        db.did_upload_file('URI:LIT:2', path, 1, mtime, ctime, size)
-        r = db.check_file(path)
-        self.failUnless(r.was_uploaded())
+        relpath2 = u"myFile2"
+        path2 = os.path.join(self.basedir, relpath2)
+        fileutil.write(path2, "meow\n")
+        pathinfo = fileutil.get_pathinfo(path2)
+        db.did_upload_version('URI:LIT:2', relpath2, 0, pathinfo)
+        self.failUnlessFalse(db.is_new_file(pathinfo, relpath2))
+
+        different_pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
+                                               exists=True, size=0, mtime=pathinfo.mtime, ctime=pathinfo.ctime)
+        self.failUnlessTrue(db.is_new_file(different_pathinfo, relpath2))
 
     def test_magicfolder_start_service(self):
         self.set_up_grid()