Don't clean up database on every open call

Jeremy Lakeman 2012-12-28 13:17:04 +10:30
parent c8fbef9016
commit 5b48a85f7d
2 changed files with 10 additions and 14 deletions


@@ -221,13 +221,17 @@ int rhizome_opendb()
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "CREATE INDEX IF NOT EXISTS bundlesizeindex ON manifests (filesize);");
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "CREATE INDEX IF NOT EXISTS IDX_MANIFESTS_HASH ON MANIFESTS(filehash);");
-  /* Clean out half-finished entries from the database */
+  // We can't delete a file that is being transferred in another process at this very moment...
+  // FIXME, reinstate with a check for insert time
+  /* Clean out half-finished entries from the database
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "DELETE FROM MANIFESTS WHERE filehash IS NULL;");
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "DELETE FROM FILES WHERE NOT EXISTS( SELECT 1 FROM MANIFESTS WHERE MANIFESTS.filehash = FILES.id);");
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "DELETE FROM FILEBLOBS WHERE NOT EXISTS( SELECT 1 FROM FILES WHERE FILEBLOBS.id = FILES.id);");
   sqlite_exec_void_loglevel(LOG_LEVEL_WARN, "DELETE FROM MANIFESTS WHERE filehash != '' AND NOT EXISTS( SELECT 1 FROM FILES WHERE MANIFESTS.filehash = FILES.id);");
   sqlite_exec_void("DELETE FROM FILES WHERE datavalid=0;");
+  */
   RETURN(0);
 }
 int rhizome_close_db()
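
The FIXME states the condition for reinstating the cleanup: a half-finished row may only be removed once it is old enough that no other process can still be filling it in. One possible shape for that check, offered purely as a sketch (the inserttime column and the gettime_ms() timestamp helper are assumptions of this sketch, not part of this commit):

    /* Hypothetical: stamp each row with an insert time at creation, then
       delete only half-finished rows older than a generous grace period,
       leaving a transfer still running in another process alone. */
    #define STALE_INSERT_MS (60 * 60 * 1000)  /* one hour, arbitrary */
    sqlite_exec_void_loglevel(LOG_LEVEL_WARN,
        "DELETE FROM FILES WHERE datavalid=0 AND inserttime < %lld;",
        (long long)(gettime_ms() - STALE_INSERT_MS));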
@@ -960,11 +964,6 @@ int64_t rhizome_database_create_blob_for(const char *hashhex,int64_t fileLength,
   if (sqlite_exec_void_retry(&retry, "BEGIN TRANSACTION;") != SQLITE_OK)
     return WHY("Failed to begin transaction");
-  /* Okay, so there are no records that match, but we should delete any half-baked record (with datavalid=0) so that the insert below doesn't fail.
-     Don't worry about the return result, since it might not delete any records. */
-  sqlite_exec_void_retry(&retry, "DELETE FROM FILES WHERE id='%s' AND datavalid=0;",hashhex);
-  sqlite_exec_void_retry(&retry, "DELETE FROM FILEBLOBS WHERE id='%s';",hashhex);
   /* INSERT INTO FILES(id as text, data blob, length integer, highestpriority integer).
      BUT, we have to do this incrementally so that we can handle blobs larger than available memory.
      This is possible using:
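
The comment is cut off at the hunk boundary, but the technique it introduces is SQLite's incremental blob I/O: insert a row whose blob column is a zeroblob() placeholder of the final length, then stream data into it with sqlite3_blob_open() and sqlite3_blob_write(). A self-contained sketch of that pattern (the table and column names follow the schema above but are illustrative, not the project's exact code):

    #include <sqlite3.h>
    #include <stdio.h>

    /* Stream a file into FILEBLOBS.data at the given rowid one buffer at a
       time, so the whole blob never has to fit in memory. Assumes the row
       was created with something like:
       INSERT INTO FILEBLOBS(id, data) VALUES('<hash>', zeroblob(<length>)); */
    static int blob_write_incremental(sqlite3 *db, sqlite3_int64 rowid, FILE *in)
    {
      sqlite3_blob *blob;
      if (sqlite3_blob_open(db, "main", "FILEBLOBS", "data", rowid, 1 /*read-write*/, &blob) != SQLITE_OK)
        return -1;
      char buf[8192];
      int offset = 0;
      size_t n;
      while ((n = fread(buf, 1, sizeof buf, in)) > 0) {
        if (sqlite3_blob_write(blob, buf, (int)n, offset) != SQLITE_OK) {
          sqlite3_blob_close(blob);
          return -1;
        }
        offset += (int)n;
      }
      return sqlite3_blob_close(blob) == SQLITE_OK ? 0 : -1;
    }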
@@ -1007,9 +1006,10 @@ insert_row_fail:
   ret = sqlite_exec_void_retry(&retry, "COMMIT;");
   if (ret!=SQLITE_OK){
-    WHYF("Failed to commit transaction");
-    return -1;
+    sqlite_exec_void_retry(&retry, "ROLLBACK;");
+    return WHYF("Failed to commit transaction");
   }
+  DEBUGF("Got rowid %lld for %s", rowid, hashhex);
   return rowid;
 }
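
The error path above also gains a real fix: if COMMIT fails (typically SQLITE_BUSY while another connection holds a read lock), the transaction is still open, so the code now rolls it back explicitly instead of returning with the transaction pending. The general shape, using the helper names from the diff (a sketch only, since the surrounding retry-state declaration is not shown here):

    /* Commit, and on failure release the still-open transaction before
       reporting the error; otherwise every later statement on this
       connection would silently run inside it. WHY() logs and returns -1. */
    if (sqlite_exec_void_retry(&retry, "COMMIT;") != SQLITE_OK) {
      sqlite_exec_void_retry(&retry, "ROLLBACK;");
      return WHY("Failed to commit transaction");
    }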


@@ -19,11 +19,6 @@ int rhizome_open_write(struct rhizome_write *write, char *expectedFileHash, int6
   if (expectedFileHash){
     if (rhizome_exists(expectedFileHash))
       return 1;
-    /* Okay, so there are no records that match, but we should delete any half-baked record (with datavalid=0) so that the insert below doesn't fail.
-       Don't worry about the return result, since it might not delete any records. */
-    sqlite_exec_void("DELETE FROM FILEBLOBS WHERE id='%s';",expectedFileHash);
-    sqlite_exec_void("DELETE FROM FILES WHERE id='%s';",expectedFileHash);
     strlcpy(write->id, expectedFileHash, SHA512_DIGEST_STRING_LENGTH);
     write->id_known=1;
   }else{
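
With the pre-delete gone, the early-return path rests entirely on rhizome_exists(). A hypothetical shape for such an existence probe (the exact query and the sqlite_exec_int64() helper usage are assumptions of this sketch, not this commit's code):

    /* Hypothetical: count only rows whose payload is complete, so a
       half-written file is not mistaken for an already-stored one. */
    long long count = 0;
    sqlite_exec_int64(&count,
        "SELECT COUNT(*) FROM FILES WHERE id='%s' AND datavalid=1;", hash);
    return count > 0;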
@@ -87,6 +82,7 @@ int rhizome_open_write(struct rhizome_write *write, char *expectedFileHash, int6
   /* Get rowid for inserted row, so that we can modify the blob */
   write->blob_rowid = sqlite3_last_insert_rowid(rhizome_db);
+  DEBUGF("Got rowid %lld for %s", write->blob_rowid, write->id);
   write->file_length = file_length;
   write->file_offset = 0;
   SHA512_Init(&write->sha512_context);
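
SHA512_Init() starts the running digest that pairs with the incremental blob writes: each chunk is hashed as it is stored, and the final digest can be checked against the expected file hash. A sketch of that lifecycle, assuming the sha2.h-style API implied by SHA512_DIGEST_STRING_LENGTH above (the Update/End calls are assumptions of the sketch):

    /* Hash lifecycle around the incremental writes: init once, update per
       chunk, then finish with a hex digest comparable to write->id. */
    SHA512_Init(&write->sha512_context);
    /* ... for each chunk appended to the blob: */
    SHA512_Update(&write->sha512_context, buffer, buffer_length);
    /* ... when the write completes: */
    char hash_out[SHA512_DIGEST_STRING_LENGTH];
    SHA512_End(&write->sha512_context, hash_out);  /* hex digest string */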