Merge branch 'rhizomestress' into 'master'

This commit is contained in:
Andrew Bettison 2012-10-18 17:57:10 +10:30
commit 839de7557c
14 changed files with 689 additions and 495 deletions

View File

@ -117,6 +117,8 @@ typedef struct rhizome_manifest {
int fileHighestPriority;
/* Absolute path of the file associated with the manifest */
char *dataFileName;
/* If set, unlink(2) the associated file when freeing the manifest */
int dataFileUnlinkOnFree;
/* Whether the payload is encrypted or not */
int payloadEncryption;
@ -281,7 +283,7 @@ int rhizome_find_duplicate(const rhizome_manifest *m, rhizome_manifest **found,
int rhizome_manifest_to_bar(rhizome_manifest *m,unsigned char *bar);
long long rhizome_bar_version(unsigned char *bar);
unsigned long long rhizome_bar_bidprefix_ll(unsigned char *bar);
int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peerip, int *manifest_kept);
int rhizome_queue_manifest_import(rhizome_manifest *m, const struct sockaddr_in *peerip, int *manifest_kept);
int rhizome_list_manifests(const char *service, const char *sender_sid, const char *recipient_sid, int limit, int offset);
int rhizome_retrieve_manifest(const char *manifestid, rhizome_manifest **mp);
int rhizome_retrieve_file(const char *fileid, const char *filepath,
@ -539,11 +541,8 @@ extern unsigned char favicon_bytes[];
extern int favicon_len;
int rhizome_import_from_files(const char *manifestpath,const char *filepath);
int rhizome_fetch_request_manifest_by_prefix(struct sockaddr_in *peerip,
unsigned char *prefix,
int prefix_length,
int importP);
extern int rhizome_file_fetch_queue_count;
int rhizome_fetch_request_manifest_by_prefix(const struct sockaddr_in *peerip, const unsigned char *prefix, size_t prefix_length);
int rhizome_count_queued_imports();
struct http_response_parts {
int code;

View File

@ -525,8 +525,12 @@ void _rhizome_manifest_free(struct __sourceloc __whence, rhizome_manifest *m)
m->signatories[i]=NULL;
}
if (m->dataFileName) free(m->dataFileName);
if (m->dataFileName) {
if (m->dataFileUnlinkOnFree && unlink(m->dataFileName) == -1)
WARNF_perror("unlink(%s)", alloca_str_toprint(m->dataFileName));
free(m->dataFileName);
m->dataFileName = NULL;
}
manifest_free[mid]=1;
manifest_free_whence[mid]=__whence;

View File

@ -876,14 +876,13 @@ void rhizome_direct_http_dispatch(rhizome_direct_sync_request *r)
Then as noted above, we can use that to pull the file down using
existing routines.
*/
if (!rhizome_fetch_request_manifest_by_prefix
(&addr,&actionlist[i+1],RHIZOME_BAR_PREFIX_BYTES,
1 /* import, getting file if needed */))
if (!rhizome_fetch_request_manifest_by_prefix(&addr,&actionlist[i+1],RHIZOME_BAR_PREFIX_BYTES))
{
/* Fetching the manifest, and then using it to see if we want to
fetch the file for import is all handled asynchronously, so just
wait for it to finish. */
while(rhizome_file_fetch_queue_count) fd_poll();
while (rhizome_count_queued_imports())
fd_poll();
}
} else if (type==1&&r->pushP) {

View File

@ -31,34 +31,57 @@ extern int sigIoFlag;
typedef struct rhizome_file_fetch_record {
struct sched_ent alarm;
rhizome_manifest *manifest;
char fileid[RHIZOME_FILEHASH_STRLEN + 1];
FILE *file;
char filename[1024];
char request[1024];
int request_len;
int request_ofs;
long long file_len;
long long file_ofs;
int state;
#define RHIZOME_FETCH_FREE 0
#define RHIZOME_FETCH_CONNECTING 1
#define RHIZOME_FETCH_SENDINGHTTPREQUEST 2
#define RHIZOME_FETCH_RXHTTPHEADERS 3
#define RHIZOME_FETCH_RXFILE 4
struct sockaddr_in peer;
} rhizome_file_fetch_record;
struct profile_total fetch_stats;
/* List of queued transfers */
#define MAX_QUEUED_FILES 4
int rhizome_file_fetch_queue_count=0;
rhizome_file_fetch_record file_fetch_queue[MAX_QUEUED_FILES];
#define QUEUED (0)
#define QSAMEPAYLOAD (1)
#define QDATED (2)
#define QBUSY (3)
#define QSAME (4)
#define QOLDER (5)
#define QNEWER (6)
#define QIMPORTED (7)
/* Count the number of import slots currently in use.
 *
 * Returns the number of entries in file_fetch_queue[] whose state is not
 * RHIZOME_FETCH_FREE, ie, the number of fetches currently queued or in
 * progress.  Returns 0 when the queue is idle.
 *
 * Fix: give the definition a proper C prototype with (void) instead of an
 * old-style empty parameter list.
 */
int rhizome_count_queued_imports(void)
{
  int count = 0;
  int i;
  for (i = 0; i < MAX_QUEUED_FILES; ++i)
    if (file_fetch_queue[i].state != RHIZOME_FETCH_FREE)
      ++count;
  return count;
}
static rhizome_file_fetch_record *rhizome_find_import_slot()
{
// Find a free fetch queue slot.
int i;
for (i = 0; i < MAX_QUEUED_FILES; ++i)
if (file_fetch_queue[i].state == RHIZOME_FETCH_FREE)
return &file_fetch_queue[i];
return NULL;
}
struct profile_total fetch_stats;
/*
Queue a manifest for importing.
@ -524,12 +547,11 @@ int rhizome_suggest_queue_manifest_import(rhizome_manifest *m, struct sockaddr_i
void rhizome_enqueue_suggestions(struct sched_ent *alarm)
{
int i;
for(i=0;i<candidate_count;i++)
{
if (rhizome_file_fetch_queue_count>=MAX_QUEUED_FILES)
break;
for (i = 0; i < candidate_count; ++i) {
int manifest_kept = 0;
rhizome_queue_manifest_import(candidates[i].manifest,&candidates[i].peer, &manifest_kept);
int result = rhizome_queue_manifest_import(candidates[i].manifest, &candidates[i].peer, &manifest_kept);
if (result == QBUSY)
break;
if (!manifest_kept) {
rhizome_manifest_free(candidates[i].manifest);
candidates[i].manifest = NULL;
@ -538,8 +560,7 @@ void rhizome_enqueue_suggestions(struct sched_ent *alarm)
if (i) {
/* now shuffle up */
int bytes=(candidate_count-i)*sizeof(rhizome_candidates);
if (0) DEBUGF("Moving slot %d to slot 0 (%d bytes = %d slots)",
i,bytes,bytes/sizeof(rhizome_candidates));
if (0) DEBUGF("Moving slot %d to slot 0 (%d bytes = %d slots)", i,bytes,bytes/sizeof(rhizome_candidates));
bcopy(&candidates[i],&candidates[0],bytes);
candidate_count-=i;
}
@ -551,14 +572,94 @@ void rhizome_enqueue_suggestions(struct sched_ent *alarm)
return;
}
int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peerip, int *manifest_kept)
/* Open the destination file and start the asynchronous HTTP transfer described
 * by the fetch record q.  The caller must have already filled in q->peer,
 * q->request / q->request_len and q->filename.
 *
 * On success: registers q->alarm with the event loop (socket watch plus an
 * inactivity timeout), marks the slot RHIZOME_FETCH_CONNECTING, and returns 0.
 * On failure: closes the socket, closes and unlinks any partially created
 * output file, and returns -1.  NOTE(review): the slot's state is only set on
 * the success path, so on failure the slot remains FREE for reuse -- the
 * caller is expected to clear q->filename itself.
 */
static int rhizome_queue_import(rhizome_file_fetch_record *q)
{
int sock = -1;
FILE *file = NULL;
/* TODO Don't forget to implement resume */
/* TODO We should stream file straight into the database */
if (create_rhizome_import_dir() == -1)
goto bail;
if ((file = fopen(q->filename, "w")) == NULL) {
WHYF_perror("fopen(`%s`, \"w\")", q->filename);
goto bail;
}
if (q->peer.sin_family == AF_INET) {
/* Transfer via HTTP over IPv4 */
if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
WHY_perror("socket");
goto bail;
}
if (set_nonblock(sock) == -1)
goto bail;
/* Render the peer address for logging only; fall back to "*" on failure. */
char buf[INET_ADDRSTRLEN];
if (inet_ntop(AF_INET, &q->peer.sin_addr, buf, sizeof buf) == NULL) {
buf[0] = '*';
buf[1] = '\0';
}
/* Non-blocking connect: EINPROGRESS is the normal asynchronous outcome and
 * is not an error; completion is detected later by the poll loop. */
if (connect(sock, (struct sockaddr*)&q->peer, sizeof q->peer) == -1) {
if (errno == EINPROGRESS) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("connect() returned EINPROGRESS");
} else {
WHYF_perror("connect(%d, %s:%u)", sock, buf, ntohs(q->peer.sin_port));
goto bail;
}
}
INFOF("RHIZOME HTTP REQUEST family=%u addr=%s port=%u %s",
q->peer.sin_family, buf, ntohs(q->peer.sin_port), alloca_str_toprint(q->request)
);
/* Commit the record: from here on the slot is in use. */
q->alarm.poll.fd = sock;
q->request_ofs=0;
q->state=RHIZOME_FETCH_CONNECTING;
q->file = file;
q->file_len=-1;
q->file_ofs=0;
/* Watch for activity on the socket */
q->alarm.function=rhizome_fetch_poll;
fetch_stats.name="rhizome_fetch_poll";
q->alarm.stats=&fetch_stats;
q->alarm.poll.events=POLLIN|POLLOUT;
watch(&q->alarm);
/* And schedule a timeout alarm */
q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
schedule(&q->alarm);
return 0;
} else {
/* TODO: Fetch via overlay */
WHY("Rhizome fetching via overlay not implemented");
}
/* Common failure path: release the socket and remove the partial file. */
bail:
if (sock != -1)
close(sock);
if (file != NULL) {
fclose(file);
unlink(q->filename);
}
return -1;
}
/* Returns QUEUED (0) if the fetch was queued.
* Returns QSAMEPAYLOAD if a fetch of the same payload (file ID) is already queued.
* Returns QDATED if the fetch was not queued because a newer version of the same bundle is already
* present.
* Returns QBUSY if the fetch was not queued because the queue is full.
* Returns QSAME if the same fetch is already queued.
* Returns QOLDER if a fetch of an older version of the same bundle is already queued.
* Returns QNEWER if a fetch of a newer version of the same bundle is already queued.
* Returns QIMPORTED if a fetch was not queued because the payload is nil or already held, so the
* import was performed instead.
* Returns -1 on error.
*/
int rhizome_queue_manifest_import(rhizome_manifest *m, const struct sockaddr_in *peerip, int *manifest_kept)
{
*manifest_kept = 0;
const char *bid = alloca_tohex_bid(m->cryptoSignPublic);
long long filesize = rhizome_manifest_get_ll(m, "filesize");
/* Do the quick rejection tests first, before the more expensive once,
/* Do the quick rejection tests first, before the more expensive ones,
like querying the database for manifests.
We probably need a cache of recently rejected manifestid:versionid
@ -572,33 +673,56 @@ int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peeri
the cache slot number to implicitly store the first bits.
*/
if (1||debug & DEBUG_RHIZOME_RX)
DEBUGF("Fetching manifest bid=%s version=%lld size=%lld:", bid, m->version, filesize);
if (1||(debug & DEBUG_RHIZOME_RX))
DEBUGF("Fetching bundle bid=%s version=%lld size=%lld:", bid, m->version, filesize);
if (rhizome_manifest_version_cache_lookup(m)) {
/* We already have this version or newer */
// If the payload is empty, no need to fetch, so import now.
if (filesize == 0) {
if (debug & DEBUG_RHIZOME_RX)
DEBUG(" already have that version or newer");
return 1;
DEBUGF(" manifest fetch not started -- nil payload, so importing instead");
if (rhizome_bundle_import(m, m->ttl-1) == -1)
return WHY("bundle import failed");
return QIMPORTED;
}
// Ensure there is a slot available before doing more expensive checks.
rhizome_file_fetch_record *q = rhizome_find_import_slot();
if (q == NULL) {
if (debug & DEBUG_RHIZOME_RX)
DEBUG(" fetch not started - all slots full");
return QBUSY;
}
// If we already have this version or newer, do not fetch.
if (rhizome_manifest_version_cache_lookup(m)) {
if (debug & DEBUG_RHIZOME_RX)
DEBUG(" fetch not started -- already have that version or newer");
return QDATED;
}
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" is new");
/* Don't queue if queue slots already full */
if (rhizome_file_fetch_queue_count >= MAX_QUEUED_FILES) {
if (debug & DEBUG_RHIZOME_RX)
DEBUG(" all fetch queue slots full");
return 2;
}
/* Don't queue if already queued */
/* Don't fetch if already in progress. If a fetch of an older version is already in progress,
* then this logic will let it run to completion before the fetch of the newer version is queued.
* This avoids the problem of indefinite postponement of fetching if new versions are constantly
* being published faster than we can fetch them.
*/
int i;
for (i = 0; i < rhizome_file_fetch_queue_count; ++i) {
if (file_fetch_queue[i].manifest) {
if (memcmp(m->cryptoSignPublic, file_fetch_queue[i].manifest->cryptoSignPublic, RHIZOME_MANIFEST_ID_BYTES) == 0) {
for (i = 0; i < MAX_QUEUED_FILES; ++i) {
rhizome_manifest *qm = file_fetch_queue[i].manifest;
if (qm && memcmp(m->cryptoSignPublic, qm->cryptoSignPublic, RHIZOME_MANIFEST_ID_BYTES) == 0) {
if (qm->version < m->version) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" manifest fetch already queued");
return 3;
DEBUGF(" fetch already in progress -- older version");
return QOLDER;
} else if (qm->version > m->version) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" fetch already in progress -- newer version");
return QNEWER;
} else {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" fetch already in progress -- same version");
return QSAME;
}
}
}
@ -610,129 +734,77 @@ int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peeri
str_toupper_inplace(m->fileHexHash);
m->fileHashedP = 1;
if (filesize > 0 && m->fileHexHash[0]) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" Getting ready to fetch filehash=%s for bid=%s", m->fileHexHash, bid);
// If the payload is already available, no need to fetch, so import now.
long long gotfile = 0;
if (sqlite_exec_int64(&gotfile, "SELECT COUNT(*) FROM FILES WHERE ID='%s' and datavalid=1;", m->fileHexHash) != 1)
return WHY("select failed");
if (gotfile == 0) {
/* We need to get the file, unless already queued */
int i;
for (i = 0; i < rhizome_file_fetch_queue_count; ++i) {
if (strcasecmp(m->fileHexHash, file_fetch_queue[i].fileid) == 0) {
if (gotfile) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Payload fetch already queued, slot %d filehash=%s", m->fileHexHash);
return 0;
DEBUGF(" fetch not started - payload already present, so importing instead");
if (rhizome_bundle_import(m, m->ttl-1) == -1)
return WHY("bundle import failed");
return QIMPORTED;
}
// Fetch the file, unless already queued.
for (i = 0; i < MAX_QUEUED_FILES; ++i) {
if (file_fetch_queue[i].manifest && strcasecmp(m->fileHexHash, file_fetch_queue[i].manifest->fileHexHash) == 0) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF(" fetch already in progress, slot=%d filehash=%s", i, m->fileHexHash);
return QSAMEPAYLOAD;
}
}
if (peerip) {
/* Transfer via HTTP over IPv4 */
int sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == -1)
return WHY_perror("socket");
if (set_nonblock(sock) == -1) {
close(sock);
// Queue the fetch.
//dump("peerip", peerip, sizeof *peerip);
q->peer = *peerip;
strbuf r = strbuf_local(q->request, sizeof q->request);
strbuf_sprintf(r, "GET /rhizome/file/%s HTTP/1.0\r\n\r\n", m->fileHexHash);
if (strbuf_overrun(r))
return WHY("request overrun");
q->request_len = strbuf_len(r);
if (!FORM_RHIZOME_IMPORT_PATH(q->filename, "payload.%s", bid))
return -1;
}
dump("copying peerip",peerip,sizeof(*peerip));
struct sockaddr_in addr = *peerip;
addr.sin_family = AF_INET;
char buf[INET_ADDRSTRLEN];
if (inet_ntop(AF_INET, &addr.sin_addr, buf, sizeof buf) == NULL) {
buf[0] = '*';
buf[1] = '\0';
}
INFOF("RHIZOME HTTP REQUEST, CONNECT family=%u port=%u addr=%s", addr.sin_family, ntohs(addr.sin_port), buf);
if (connect(sock, (struct sockaddr*)&addr, sizeof addr) == -1) {
if (errno == EINPROGRESS) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("connect() returned EINPROGRESS");
} else {
WHY_perror("connect");
WHY("Failed to open socket to peer's rhizome web server");
close(sock);
return -1;
}
}
rhizome_file_fetch_record *q=&file_fetch_queue[rhizome_file_fetch_queue_count];
m->dataFileName = strdup(q->filename);
m->dataFileUnlinkOnFree = 0;
q->manifest = m;
if (rhizome_queue_import(q) == -1) {
q->filename[0] = '\0';
return -1;
}
*manifest_kept = 1;
bcopy(&addr,&q->peer,sizeof(q->peer));
q->alarm.poll.fd=sock;
strncpy(q->fileid, m->fileHexHash, RHIZOME_FILEHASH_STRLEN + 1);
q->request_len = snprintf(q->request, sizeof q->request, "GET /rhizome/file/%s HTTP/1.0\r\n\r\n", q->fileid);
q->request_ofs=0;
q->state=RHIZOME_FETCH_CONNECTING;
q->file_len=-1;
q->file_ofs=0;
/* XXX Don't forget to implement resume */
/* XXX We should stream file straight into the database */
const char *id = rhizome_manifest_get(q->manifest, "id", NULL, 0);
if (id == NULL) {
close(sock);
return WHY("Manifest missing ID");
}
if (create_rhizome_import_dir() == -1)
return -1;
char filename[1024];
if (!FORM_RHIZOME_IMPORT_PATH(filename, "file.%s", id)) {
close(sock);
return -1;
}
q->manifest->dataFileName = strdup(filename);
if ((q->file = fopen(q->manifest->dataFileName, "w")) == NULL) {
WHY_perror("fopen");
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Could not open '%s' to write received file", q->manifest->dataFileName);
close(sock);
return -1;
DEBUGF(" started fetch into %s, slot=%d filehash=%s", q->manifest->dataFileName, q - file_fetch_queue, m->fileHexHash);
return QUEUED;
}
INFOF("RHIZOME HTTP REQUEST, GET \"/rhizome/file/%s\"", q->fileid);
/* Watch for activity on the socket */
q->alarm.function=rhizome_fetch_poll;
fetch_stats.name="rhizome_fetch_poll";
q->alarm.stats=&fetch_stats;
q->alarm.poll.events=POLLIN|POLLOUT;
watch(&q->alarm);
/* And schedule a timeout alarm */
q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
schedule(&q->alarm);
rhizome_file_fetch_queue_count++;
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Queued file for fetching into %s (%d in queue)",
q->manifest->dataFileName, rhizome_file_fetch_queue_count);
return 0;
} else {
/* TODO: fetch via overlay */
return WHY("Rhizome fetching via overlay not implemented");
}
} else {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("We already have the file for this manifest; importing from manifest alone.");
rhizome_bundle_import(m, m->ttl-1);
}
}
return 0;
}
int rhizome_fetch_close(rhizome_file_fetch_record *q){
/* Free ephemeral data */
if (q->file)
fclose(q->file);
q->file=NULL;
if (q->manifest)
rhizome_manifest_free(q->manifest);
/* Queue an HTTP fetch of any manifest whose ID starts with the given binary
 * prefix, from the given IPv4 peer.
 *
 * peerip        address of the peer's rhizome HTTP server (must not be NULL)
 * prefix        binary manifest-ID prefix
 * prefix_length number of bytes of prefix to use
 *
 * Returns QUEUED (0) if the fetch was started, QBUSY if all fetch slots are
 * in use, -1 on error.  NOTE(review): on the early failure returns the slot
 * found below was never claimed (its state stays RHIZOME_FETCH_FREE), so no
 * slot is leaked even though q->peer/q->request were written.
 */
int rhizome_fetch_request_manifest_by_prefix(const struct sockaddr_in *peerip, const unsigned char *prefix, size_t prefix_length)
{
assert(peerip);
rhizome_file_fetch_record *q = rhizome_find_import_slot();
if (q == NULL)
return QBUSY;
q->peer = *peerip;
/* No manifest yet -- this fetch retrieves the manifest itself. */
q->manifest = NULL;
/* Build the HTTP request in-place in the record's request buffer. */
strbuf r = strbuf_local(q->request, sizeof q->request);
strbuf_sprintf(r, "GET /rhizome/manifestbyprefix/%s HTTP/1.0\r\n\r\n", alloca_tohex(prefix, prefix_length));
if (strbuf_overrun(r))
return WHY("request overrun");
q->request_len = strbuf_len(r);
if (!FORM_RHIZOME_IMPORT_PATH(q->filename, "manifest.%s", alloca_tohex(prefix, prefix_length)))
return -1;
if (rhizome_queue_import(q) == -1) {
/* Clear the filename so the slot carries no stale path. */
q->filename[0] = '\0';
return -1;
}
return QUEUED;
}
int rhizome_fetch_close(rhizome_file_fetch_record *q)
{
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Release Rhizome fetch slot=%d", q - file_fetch_queue);
assert(q->state != RHIZOME_FETCH_FREE);
/* close socket and stop watching it */
unwatch(&q->alarm);
@ -740,18 +812,28 @@ int rhizome_fetch_close(rhizome_file_fetch_record *q){
close(q->alarm.poll.fd);
q->alarm.poll.fd = -1;
/* Reduce count of open connections */
if (rhizome_file_fetch_queue_count>0)
rhizome_file_fetch_queue_count--;
else
WHY("rhizome_file_fetch_queue_count is already zero, is a fetch record being double freed?");
/* Free ephemeral data */
if (q->file)
fclose(q->file);
q->file = NULL;
if (q->manifest)
rhizome_manifest_free(q->manifest);
q->manifest = NULL;
if (q->filename[0])
unlink(q->filename);
q->filename[0] = '\0';
// Release the import queue slot.
q->state = RHIZOME_FETCH_FREE;
// Fill the slot with the next suggestion.
rhizome_enqueue_suggestions(NULL);
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Released rhizome fetch slot (%d used)", rhizome_file_fetch_queue_count);
return 0;
}
void rhizome_fetch_write(rhizome_file_fetch_record *q){
void rhizome_fetch_write(rhizome_file_fetch_record *q)
{
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("write_nonblock(%d, %s)", q->alarm.poll.fd, alloca_toprint(-1, &q->request[q->request_ofs], q->request_len-q->request_ofs));
int bytes = write_nonblock(q->alarm.poll.fd, &q->request[q->request_ofs], q->request_len-q->request_ofs);
@ -777,34 +859,29 @@ void rhizome_fetch_write(rhizome_file_fetch_record *q){
}
}
void rhizome_write_content(rhizome_file_fetch_record *q, char *buffer, int bytes){
void rhizome_write_content(rhizome_file_fetch_record *q, char *buffer, int bytes)
{
if (bytes>(q->file_len-q->file_ofs))
bytes=q->file_len-q->file_ofs;
if (fwrite(buffer,bytes,1,q->file)!=1)
{
if (fwrite(buffer,bytes,1,q->file) != 1) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Failed to write %d bytes to file @ offset %d", bytes, q->file_ofs);
rhizome_fetch_close(q);
return;
}
q->file_ofs+=bytes;
if (q->file_ofs>=q->file_len)
{
if (q->file_ofs>=q->file_len) {
/* got all of file */
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Received all of file via rhizome -- now to import it");
fclose(q->file);
q->file = NULL;
if (q->manifest) {
// Were fetching payload, now we have it.
rhizome_import_received_bundle(q->manifest);
rhizome_manifest_free(q->manifest);
q->manifest = NULL;
} else {
/* This was to fetch the manifest, so now fetch the file if needed */
DEBUGF("Received a manifest in response to supplying a manifest prefix.");
DEBUGF("XXX Not implemented scheduling the fetching of the file itself.");
/* Read the manifest and add it to suggestion queue, then immediately
call schedule queued items. */
rhizome_manifest *m = rhizome_new_manifest();
@ -813,18 +890,17 @@ void rhizome_write_content(rhizome_file_fetch_record *q, char *buffer, int bytes
DEBUGF("Couldn't read manifest from %s",q->filename);
rhizome_manifest_free(m);
} else {
DEBUGF("All looks good for importing manifest %p",m);
DEBUGF("All looks good for importing manifest id=%s", alloca_tohex_bid(m->cryptoSignPublic));
dump("q->peer",&q->peer,sizeof(q->peer));
rhizome_suggest_queue_manifest_import(m, &q->peer);
rhizome_enqueue_suggestions(NULL);
//rhizome_enqueue_suggestions(NULL); XXX Now called in rhizome_fetch_close()
}
}
}
rhizome_fetch_close(q);
return;
}
// reset timeout due to activity
// reset inactivity timeout
unschedule(&q->alarm);
q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
q->alarm.deadline = q->alarm.alarm+RHIZOME_IDLE_TIMEOUT;
@ -993,90 +1069,3 @@ int unpack_http_response(char *response, struct http_response_parts *parts)
parts->content_start = p;
return 0;
}
int rhizome_fetch_request_manifest_by_prefix(struct sockaddr_in *peerip,
unsigned char *prefix,
int prefix_length,
int importP)
{
assert(peerip);
/* Transfer via HTTP over IPv4 */
int sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == -1)
return WHY_perror("socket");
if (set_nonblock(sock) == -1) {
close(sock);
return -1;
}
struct sockaddr_in addr = *peerip;
addr.sin_family = AF_INET;
INFOF("RHIZOME HTTP REQUEST, CONNECT family=%u port=%u addr=%u.%u.%u.%u",
addr.sin_family, ntohs(addr.sin_port),
((unsigned char*)&addr.sin_addr.s_addr)[0],
((unsigned char*)&addr.sin_addr.s_addr)[1],
((unsigned char*)&addr.sin_addr.s_addr)[2],
((unsigned char*)&addr.sin_addr.s_addr)[3]
);
if (connect(sock, (struct sockaddr*)&addr, sizeof addr) == -1) {
if (errno == EINPROGRESS) {
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("connect() returned EINPROGRESS");
} else {
WHY_perror("connect");
WHY("Failed to open socket to peer's rhizome web server");
close(sock);
return -1;
}
}
rhizome_file_fetch_record *q=&file_fetch_queue[rhizome_file_fetch_queue_count];
q->manifest = NULL;
q->alarm.poll.fd=sock;
bzero(q->fileid, sizeof(q->fileid));
q->peer=*peerip;
q->request_len = snprintf(q->request, sizeof q->request, "GET /rhizome/manifestbyprefix/%s HTTP/1.0\r\n\r\n", alloca_tohex(prefix,prefix_length));
q->request_ofs=0;
q->state=RHIZOME_FETCH_CONNECTING;
q->file_len=-1;
q->file_ofs=0;
if (create_rhizome_import_dir() == -1)
return -1;
char filename[1024];
if (!FORM_RHIZOME_IMPORT_PATH(filename, "file.%s",
alloca_tohex(prefix,prefix_length))) {
close(sock);
return -1;
}
if (strlen(q->filename)<sizeof(q->filename))
strcpy(q->filename,filename);
else q->filename[0]=0;
if ((q->file = fopen(filename, "w")) == NULL) {
WHY_perror("fopen");
// if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Could not open '%s' to write received file", filename);
close(sock);
return -1;
}
INFOF("RHIZOME HTTP REQUEST, GET \"/rhizome/manifestbyprefix/%s\"",
alloca_tohex(prefix,prefix_length));
/* Watch for activity on the socket */
q->alarm.function=rhizome_fetch_poll;
fetch_stats.name="rhizome_fetch_poll";
q->alarm.stats=&fetch_stats;
q->alarm.poll.events=POLLIN|POLLOUT;
watch(&q->alarm);
/* And schedule a timeout alarm */
q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
schedule(&q->alarm);
rhizome_file_fetch_queue_count++;
if (debug & DEBUG_RHIZOME_RX)
DEBUGF("Queued file for fetching into %s (%d in queue)",
filename, rhizome_file_fetch_queue_count);
return 0;
}

View File

@ -209,6 +209,23 @@ foreach_instance() {
return $ret
}
# Composition function:
# - invoke a command once in every instance that contains a server pidfile
# - takes the same options as foreach_instance()
foreach_instance_with_pidfile() {
# Preserve the caller's current instance context while enumerating.
push_instance
local -a instances=()
# Scan every instance directory; keep only those whose server pidfile exists
# and is valid.
if pushd "${servald_instances_dir?:}" >/dev/null; then
for name in *; do
set_instance "+$name"
get_servald_server_pidfile && instances+=("+$name")
done
popd >/dev/null
fi
pop_instance
# Delegate to foreach_instance with the filtered instance list plus the
# caller's command and options.
foreach_instance "${instances[@]}" "$@"
}
# Utility function for setting up servald JNI fixtures:
# - check that libservald.so is present
# - set LD_LIBRARY_PATH so that libservald.so can be found
@ -307,13 +324,6 @@ stop_servald_server() {
assert --message="servald process still running" [ "$apid" -ne "$servald_pid" ]
done
fi
# Append the server log file to the test log.
[ -s "$instance_servald_log" ] && tfw_cat "$instance_servald_log"
# Append a core dump backtrace to the test log.
if [ -s "$instance_dir/core" ]; then
tfw_core_backtrace "$servald" "$instance_dir/core"
rm -f "$instance_dir/core"
fi
# Check there is at least one fewer servald processes running.
for bpid in ${before_pids[*]}; do
local isgone=true
@ -330,6 +340,18 @@ stop_servald_server() {
pop_instance
}
# Utility function:
# - cat a servald server log file and core dump information into the test log
report_servald_server() {
   # Copy the server's log into the test log when it is non-empty.
   if [ -s "$instance_servald_log" ]; then
      tfw_cat "$instance_servald_log"
   fi
   # If the server dumped core, log a backtrace and discard the core file.
   if [ -s "$instance_dir/core" ]; then
      tfw_core_backtrace "$servald" "$instance_dir/core"
      rm -f "$instance_dir/core"
   fi
}
# Utility function:
# - test whether the pidfile for a given server instance exists and is valid
# - if it exists and is valid, set named variable to PID (and second named
@ -372,18 +394,36 @@ assert_servald_server_pidfile() {
assert get_servald_server_pidfile "$@"
}
# Assertion function:
# - assert that the given instance's server has the given status ('running' or 'stopped')
assert_servald_server_status() {
# Switch to the instance named by the first +X argument, if given.
push_instance
set_instance_fromarg "$1" && shift
# Exactly one remaining argument: the expected status string.
[ $# -eq 1 ] || error "invalid arguments"
# Query the server and extract the "status" key from its stdout.
executeOk_servald status
local status
extract_stdout_keyvalue status status '.*'
assert --message="instance +$instance_name servald server status is '$1'" [ "$status" = "$1" ]
pop_instance
}
# Assertion function:
# - asserts that all servald instances with a pidfile have a server in a given
# state
assert_status_all_servald_servers() {
# Run the per-instance status assertion in every instance that has a valid
# server pidfile, forwarding the expected-status argument(s).
foreach_instance_with_pidfile assert_servald_server_status "$@"
}
# Utility function for tearing down servald fixtures:
# - stop all servald server process instances in an orderly fashion
stop_all_servald_servers() {
push_instance
if pushd "${servald_instances_dir?:}" >/dev/null; then
for name in *; do
set_instance "+$name"
get_servald_server_pidfile && stop_servald_server
done
popd >/dev/null
fi
pop_instance
foreach_instance_with_pidfile stop_servald_server
}
# Utility function for tearing down servald fixtures:
# - log a report of the execution of all servald server process instances
report_all_servald_servers() {
# Report every possible instance +A..+Z, not just those with pidfiles, so
# logs from servers that crashed (and lost their pidfile) are still captured.
foreach_instance +{A..Z} report_servald_server
}
# Utility function for tearing down servald fixtures:
@ -458,7 +498,7 @@ get_servald_pids() {
assert_no_servald_processes() {
local pids
get_servald_pids pids
assert --message="$servald_basename process(es) running: $pids" [ -z "$pids" ]
assert --message="no $servald_basename process(es) running" [ -z "$pids" ]
return 0
}

View File

@ -45,25 +45,31 @@ assert_manifest_complete() {
fi
}
# Assertion function:
# - assert that the output of a "servald rhizome list" command exactly describes the given files
assert_rhizome_list() {
assertStdoutLineCount --stderr '==' $(($# + 2))
assertStdoutIs --stderr --line=1 -e '12\n'
assertStdoutIs --stderr --line=2 -e 'service:id:version:date:.inserttime:.author:.fromhere:filesize:filehash:sender:recipient:name\n'
local filename
local re__author="\($rexp_sid\)\{0,1\}"
local re__fromhere
local exactly=true
local re__inserttime="$rexp_date"
local re__fromhere='[01]'
local re__author="\($rexp_sid\)\{0,1\}"
local files=0
for filename; do
re__fromhere=1
case "$filename" in
*@*) re__author="${filename##*@}"; filename="${filename%@*}";;
esac
case "$filename" in
*!) re__fromhere=0; filename="${filename%!}";;
esac
--fromhere=*) re__fromhere="${filename#*=}";;
--author=*) re__author="${filename#*=}";;
--and-others) exactly=false;;
--*) error "unsupported option: $filename";;
*)
unpack_manifest_for_grep "$filename"
assertStdoutGrep --stderr --matches=1 "^$re_service:$re_manifestid:$re_version:$re_date:$re__inserttime:$re__author:$re__fromhere:$re_filesize:$re_filehash:$re_sender:$re_recipient:$re_name\$"
let files+=1
;;
esac
done
$exactly && assertStdoutLineCount --stderr '==' $(($files + 2))
}
assert_stdout_add_file() {
@ -244,3 +250,96 @@ get_rhizome_server_port() {
fi
return 0
}
# Predicate function:
# - return true if the file bundles identified by args BID[:VERSION] has been
# received by all the given instances args +I
# - does this by examining the server log files of the respective instances
# for tell-tale INFO messages
bundle_received_by() {
# rexps accumulates the log regexps for the current group of BID[:VERSION]
# arguments; restart=true means the next BID argument begins a new group.
local -a rexps
local restart=true
local arg bid version rexp
for arg; do
case "$arg" in
# BID with optional :VERSION suffix.  NOTE(review): this pattern uses
# extglob syntax (+(...), ?(...)) -- presumably the framework enables
# `shopt -s extglob`; confirm before reuse elsewhere.
+([0-9A-F])?(:+([0-9])))
$restart && rexps=()
restart=false
bid="${arg%%:*}"
matches_rexp "$rexp_manifestid" "$bid" || error "invalid bundle ID: $bid"
if [ "$bid" = "$arg" ]; then
rexps+=("RHIZOME ADD MANIFEST service=file bid=$bid")
else
version="${arg#*:}"
rexps+=("RHIZOME ADD MANIFEST service=file bid=$bid version=$version")
fi
;;
# +I instance letter: check this instance's server log for every
# accumulated regexp; fail (return 1) on the first miss.
+[A-Z])
local logvar="LOG${arg#+}"
for rexp in "${rexps[@]}"; do
grep "$rexp" "${!logvar}" || return 1
done
restart=true
;;
# --stderr: check the replayed stderr of the last command instead of a log.
--stderr)
for rexp in "${rexps[@]}"; do
replayStderr | grep "$rexp" || return 1
done
restart=true
;;
*)
error "invalid argument: $arg"
return 1
;;
esac
done
return 0
}
# Utility function:
# - populate BID, VERSION, FILESIZE and (for non-empty payloads) FILEHASH
#   from the given manifest file
extract_manifest_vars() {
local manifest="${1?}"
extract_manifest_id BID "$manifest"
extract_manifest_version VERSION "$manifest"
extract_manifest_filesize FILESIZE "$manifest"
# An empty payload has no filehash field, so leave FILEHASH empty.
FILEHASH=
if [ "$FILESIZE" != '0' ]; then
extract_manifest_filehash FILEHASH "$manifest"
fi
}
# Utility function:
# - create the named file if missing, add it to rhizome as the current
#   instance's SID, assert it appears in the bundle list, and export its
#   manifest variables (BID, VERSION, FILESIZE, FILEHASH)
rhizome_add_file() {
local name="$1"
# Create a small file with predictable content if it does not already exist.
[ -e "$name" ] || echo "File $name" >"$name"
# Indirect lookup of this instance's SID, eg SIDA for instance +A.
local sidvar="SID$instance_name"
executeOk_servald rhizome add file "${!sidvar}" '' "$name" "$name.manifest"
executeOk_servald rhizome list ''
assert_rhizome_list --fromhere=1 --author="${!sidvar}" "$name" --and-others
extract_manifest_vars "$name.manifest"
}
# Utility function:
# - publish a new version of an existing bundle: reuse the original manifest
#   (minus the per-version fields), add the new payload file, assert it lists,
#   and export its manifest variables
rhizome_update_file() {
local orig_name="$1"
local new_name="$2"
# BUGFIX: use double quotes so $new_name is expanded, matching the file
# content written by rhizome_add_file (was single-quoted, writing the
# literal string 'File $new_name').
[ -e "$new_name" ] || echo "File $new_name" >"$new_name"
local sidvar="SID$instance_name"
# Start from the original manifest so the bundle ID is preserved.
[ "$new_name" != "$orig_name" ] && cp "$orig_name.manifest" "$new_name.manifest"
# Strip the fields that must be recomputed for the new version.
$SED -i -e '/^date=/d;/^filehash=/d;/^filesize=/d;/^version=/d;/^name=/d' "$new_name.manifest"
executeOk_servald rhizome add file "${!sidvar}" '' "$new_name" "$new_name.manifest"
executeOk_servald rhizome list ''
assert_rhizome_list --fromhere=1 "$new_name"
extract_manifest_vars "$new_name.manifest"
}
# Assertion function:
# - for each named file, extract its payload from rhizome by filehash and
#   assert it is byte-identical to the local copy
# - empty files are skipped (they have no payload to extract)
assert_rhizome_received() {
[ $# -ne 0 ] || error "missing arguments"
local name
local _hash
for name; do
if [ -s "$name" ]; then
extract_manifest_filehash _hash "$name.manifest"
executeOk_servald rhizome extract file "$_hash" extracted
assert cmp "$name" extracted
fi
done
}

View File

@ -230,7 +230,7 @@ runTests() {
local start_time=$(_tfw_timestamp)
local finish_time=unknown
(
trap '_tfw_status=$?; _tfw_teardown; exit $_tfw_status' EXIT SIGHUP SIGINT SIGTERM
trap '_tfw_finalise; _tfw_teardown; _tfw_exit' EXIT SIGHUP SIGINT SIGTERM
_tfw_result=ERROR
mkdir $_tfw_tmp || exit 255
_tfw_setup
@ -239,12 +239,8 @@ runTests() {
tfw_log "# CALL test_$_tfw_test_name()"
$_tfw_trace && set -x
test_$_tfw_test_name
set +x
_tfw_result=PASS
case $_tfw_result in
PASS) exit 0;;
FAIL) exit 1;;
ERROR) exit 254;;
esac
exit 255
)
local stat=$?
@ -419,6 +415,10 @@ setup() {
:
}
# Default no-op finalise hook; tests override finally() or finally_<name>().
finally() {
:
}
# Default no-op teardown hook; tests override teardown() or teardown_<name>().
teardown() {
:
}
@ -478,6 +478,12 @@ escape_grep_extended() {
echo "$re"
}
# Return true if all the arguments arg2... match the given grep(1) regular
# expression arg1.
matches_rexp() {
# Public wrapper around the internal helper; returns 0 iff every argument
# after the first matches the grep regexp given as the first argument.
_tfw_matches_rexp "$@"
}
# Executes its arguments as a command:
# - captures the standard output and error in temporary files for later
# examination
@ -792,18 +798,38 @@ _tfw_setup() {
tfw_log '# END SETUP'
}
# Internal: run the per-test finalise hook after the test body and before
# teardown.  Prefers a test-specific finally_<name>() function when defined;
# otherwise calls the generic finally() hook with the test name.  Honours
# $_tfw_trace by enabling xtrace around the hook call.
_tfw_finalise() {
_tfw_phase=finalise
tfw_log '# FINALISE'
case `type -t finally_$_tfw_test_name` in
function)
tfw_log "# CALL finally_$_tfw_test_name()"
$_tfw_trace && set -x
finally_$_tfw_test_name
set +x
;;
*)
tfw_log "# CALL finally($_tfw_test_name)"
$_tfw_trace && set -x
finally $_tfw_test_name
set +x
;;
esac
tfw_log '# END FINALLY'
}
_tfw_teardown() {
_tfw_phase=teardown
tfw_log '# TEARDOWN'
case `type -t teardown_$_tfw_test_name` in
function)
tfw_log "# call teardown_$_tfw_test_name()"
tfw_log "# CALL teardown_$_tfw_test_name()"
$_tfw_trace && set -x
teardown_$_tfw_test_name
set +x
;;
*)
tfw_log "# call teardown($_tfw_test_name)"
tfw_log "# CALL teardown($_tfw_test_name)"
$_tfw_trace && set -x
teardown $_tfw_test_name
set +x
@ -812,6 +838,15 @@ _tfw_teardown() {
tfw_log '# END TEARDOWN'
}
# Map the recorded test result onto the process exit status that the test
# runner expects: PASS=0, FAIL=1, ERROR=254, anything else (unset or
# unrecognised) = 255.
_tfw_exit() {
   if [ "$_tfw_result" = PASS ]; then
      exit 0
   elif [ "$_tfw_result" = FAIL ]; then
      exit 1
   elif [ "$_tfw_result" = ERROR ]; then
      exit 254
   fi
   exit 255
}
# Executes $_tfw_executable with the given arguments.
_tfw_execute() {
executed=$(shellarg "${_tfw_executable##*/}" "$@")
@ -886,6 +921,7 @@ _tfw_assert() {
declare -a _tfw_opt_dump_on_fail
_tfw_dump_on_fail() {
local arg
for arg; do
local _found=false
local _f
@ -959,6 +995,7 @@ _tfw_getopts() {
_tfw_matches_rexp() {
local rexp="$1"
shift
local arg
for arg; do
if ! echo "$arg" | $GREP -q -e "$rexp"; then
return 1
@ -1260,7 +1297,7 @@ _tfw_find_tests() {
_tfw_failmsg() {
# A failure during setup or teardown is treated as an error.
case $_tfw_phase in
testcase)
testcase|finalise)
if ! $_tfw_opt_error_on_fail; then
tfw_log "FAIL: $*"
return 0;
@ -1289,13 +1326,22 @@ _tfw_failexit() {
# When exiting a test case due to a failure, log any diagnostic output that
# has been requested.
tfw_cat "${_tfw_opt_dump_on_fail[@]}"
# A failure during setup or teardown is treated as an error.
# A failure during setup or teardown is treated as an error. A failure
# during finalise does not terminate execution.
case $_tfw_phase in
testcase)
if ! $_tfw_opt_error_on_fail; then
exit 1
fi
;;
finalise)
if ! $_tfw_opt_error_on_fail; then
case $_tfw_result in
PASS) _tfw_result=FAIL; _tfw_status=1;;
esac
return 1
fi
;;
esac
_tfw_errorexit
}
@ -1321,10 +1367,10 @@ _tfw_error() {
}
_tfw_errorexit() {
# Do not exit process during teardown
# Do not exit process during finalise or teardown
_tfw_result=ERROR
case $_tfw_phase in
teardown) [ $_tfw_status -lt 254 ] && _tfw_status=254;;
finalise|teardown) [ $_tfw_status -lt 254 ] && _tfw_status=254;;
*) exit 254;;
esac
return 254

View File

@ -45,9 +45,9 @@ setup() {
}
# Per-test cleanup: stop all servers gracefully, kill any stragglers,
# assert nothing is left running, then dump each server's logs for the
# test report.
teardown() {
stop_all_servald_servers
kill_all_servald_processes
assert_no_servald_processes
report_all_servald_servers
}
is_published() {
@ -75,7 +75,7 @@ test_publish() {
assertStdoutLineCount '==' 2
assertStdoutGrep --matches=1 "^sid://$SIDC/local/$DIDC:$DIDC:$NAMEC\$"
assertStdoutGrep --matches=1 "^sid://$SIDD/local/$DIDD:$DIDD:$NAMED\$"
return
assert_status_all_servald_servers running
}
start_routing_instance() {
@ -130,5 +130,7 @@ test_routing() {
assertStdoutGrep --matches=1 "^sid://$SIDB/local/$DIDB:$DIDB:$NAMEB\$"
executeOk_servald mdp ping $SIDB 3
tfw_cat --stdout --stderr
assert_status_all_servald_servers running
}
runTests "$@"

View File

@ -29,10 +29,14 @@ setup() {
assert_all_instance_peers_complete +A
}
teardown() {
finally() {
stop_all_servald_servers
}
teardown() {
kill_all_servald_processes
assert_no_servald_processes
report_all_servald_servers
}
# Called by start_servald_instances immediately before starting the server

View File

@ -47,10 +47,14 @@ setup() {
executeOk_servald config set debug.all Yes
}
teardown() {
finally() {
stop_all_servald_servers
}
teardown() {
kill_all_servald_processes
assert_no_servald_processes
report_all_servald_servers
}
set_server_vars() {

View File

@ -141,7 +141,7 @@ test_AddEmpty() {
assertGrep .manifest '^name=$'
assertGrep .manifest '^filesize=0$'
executeOk_servald rhizome list ''
assert_rhizome_list @$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 ''
}
doc_AddThenList="List contains one file after one add"
@ -157,11 +157,11 @@ test_AddThenList() {
# Add first file
executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
# Add second file
executeOk_servald rhizome add file $SIDB1 '' file2 file2.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
}
doc_ExtractManifestAfterAdd="Extract manifest after one add"
@ -171,7 +171,7 @@ setup_ExtractManifestAfterAdd() {
echo "A test file" >file1
executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
extract_manifest_id manifestid file1.manifest
extract_manifest_version version file1.manifest
extract_manifest_filehash filehash file1.manifest
@ -198,7 +198,7 @@ setup_ExtractManifestAfterAddNoAuthor() {
echo "A test file" >file1
executeOk_servald rhizome add file '' '' file1 file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1!
assert_rhizome_list --fromhere=0 file1
extract_manifest_id manifestid file1.manifest
extract_manifest_version version file1.manifest
extract_manifest_filehash filehash file1.manifest
@ -254,7 +254,7 @@ setup_ExtractFileAfterAdd() {
executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
tfw_cat --stderr
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
extract_manifest_filehash filehash file1.manifest
}
test_ExtractFileAfterAdd() {
@ -312,7 +312,7 @@ setup_AddDuplicate() {
extract_stdout_secret file2_secret
# Make sure they are both in the list.
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
}
test_AddDuplicate() {
# Add first file again - nothing should change in its manifests, and it
@ -322,7 +322,7 @@ test_AddDuplicate() {
assert [ -s file1.manifestA ]
assert_stdout_add_file file1
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
strip_signatures file1.manifest file1.manifestA
assert diff file1.manifest file1.manifestA
# Repeat for second file.
@ -330,7 +330,7 @@ test_AddDuplicate() {
assert [ -s file2.manifestA ]
assert_stdout_add_file file2
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
strip_signatures file2.manifest file2.manifestA
assert diff file2.manifest file2.manifestA
}
@ -348,7 +348,7 @@ test_AddMismatched() {
assert cmp file1.manifest file1_2.manifest
# And rhizome store should be unchanged.
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
}
doc_AddUpdateSameVersion="Add new payload to existing manifest with same version fails"
@ -371,7 +371,7 @@ test_AddUpdateSameVersion() {
assert cmp file1_2.manifest file1_2.manifest.orig
# And rhizome store should be unchanged.
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
}
doc_AddUpdateNewVersion="Add new payload to existing manifest with new version"
@ -390,7 +390,7 @@ test_AddUpdateNewVersion() {
assert_manifest_newer file1.manifest file1_2.manifest
# Rhizome store contents reflect new payload.
executeOk_servald rhizome list ''
assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
}
doc_AddUpdateDiscoverAuthor="Add new payload to manifest with author discovery"
@ -403,7 +403,7 @@ test_AddUpdateDiscoverAuthor() {
tfw_cat --stderr
# Rhizome store contents have new payload.
executeOk_servald rhizome list ''
assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
}
doc_AddUpdateNoAuthor="Cannot add new payload to authorless manifest"
@ -418,7 +418,7 @@ test_AddUpdateNoAuthor() {
assertExitStatus '!=' 0
# Rhizome store contents have old payload.
executeOk_servald rhizome list ''
assert_rhizome_list file1@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
}
doc_AddUpdateNoAuthorWithSecret="Add new payload to authorless manifest with bundle secret"
@ -431,7 +431,7 @@ test_AddUpdateNoAuthorWithSecret() {
tfw_cat --stderr
# Rhizome store contents have new payload.
executeOk_servald rhizome list ''
assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
}
doc_AddUpdateAutoVersion="Add new payload to existing manifest with automatic version"
@ -447,7 +447,7 @@ test_AddUpdateAutoVersion() {
assert_manifest_newer file1.manifest file1_2.manifest
# Rhizome store contents reflect new payload.
executeOk_servald rhizome list ''
assert_rhizome_list file1_2 file2
assert_rhizome_list --fromhere=1 file1_2 file2
}
doc_AddUnsupportedService="Add with unsupported service fails"
@ -474,7 +474,7 @@ test_MeshMSAddCreate() {
assert_stdout_add_file file1
assert_manifest_complete file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
extract_manifest_filehash filehash file1.manifest
executeOk_servald rhizome extract file $filehash file1x
assert diff file1 file1x
@ -492,7 +492,7 @@ test_MeshMSAddGrow() {
assert_stdout_add_file file1
assert_manifest_complete file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
extract_manifest_id id file1.manifest
extract_manifest_filehash filehash file1.manifest
extract_manifest_BK bk file1.manifest
@ -503,7 +503,7 @@ test_MeshMSAddGrow() {
echo "Message$m" >>file1
executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
extract_manifest_id idx file1.manifest
extract_manifest_filehash filehashx file1.manifest
extract_manifest_BK bkx file1.manifest
@ -557,7 +557,7 @@ test_MeshMSAddMissingAuthor() {
assert_stdout_add_file file1
assert_manifest_complete file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
}
doc_MeshMSListFilter="List MeshMS manifests by filter"
@ -585,17 +585,17 @@ setup_MeshMSListFilter() {
assert_stdout_add_file file4
assert_manifest_complete file4.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1 file2 file3 file4
assert_rhizome_list --fromhere=1 file1 file2 file3 file4
}
test_MeshMSListFilter() {
executeOk_servald rhizome list '' file
assert_rhizome_list
executeOk_servald rhizome list '' MeshMS1
assert_rhizome_list file1 file2 file3 file4
assert_rhizome_list --fromhere=1 file1 file2 file3 file4
executeOk_servald rhizome list '' '' $SIDB1
assert_rhizome_list file1 file2 file3
assert_rhizome_list --fromhere=1 file1 file2 file3
executeOk_servald rhizome list '' '' $SIDB2
assert_rhizome_list file4
assert_rhizome_list --fromhere=1 file4
executeOk_servald rhizome list '' '' $SIDB3
assert_rhizome_list
executeOk_servald rhizome list '' '' $SIDB4
@ -603,19 +603,19 @@ test_MeshMSListFilter() {
executeOk_servald rhizome list '' '' '' $SIDB1
assert_rhizome_list
executeOk_servald rhizome list '' '' '' $SIDB2
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
executeOk_servald rhizome list '' '' '' $SIDB3
assert_rhizome_list file2 file4
assert_rhizome_list --fromhere=1 file2 file4
executeOk_servald rhizome list '' file '' $SIDB3
assert_rhizome_list
executeOk_servald rhizome list '' '' '' $SIDB4
assert_rhizome_list file3
assert_rhizome_list --fromhere=1 file3
executeOk_servald rhizome list '' '' $SIDB1 $SIDB4
assert_rhizome_list file3
assert_rhizome_list --fromhere=1 file3
executeOk_servald rhizome list '' '' $SIDB2 $SIDB4
assert_rhizome_list
executeOk_servald rhizome list '' '' $SIDB2 $SIDB3
assert_rhizome_list file4
assert_rhizome_list --fromhere=1 file4
}
doc_ImportForeignBundle="Can import a bundle created by another instance"
@ -632,7 +632,7 @@ test_ImportForeignBundle() {
executeOk_servald rhizome import bundle fileA fileA.manifest
assert_stdout_import_bundle fileA
executeOk_servald rhizome list ''
assert_rhizome_list fileA!
assert_rhizome_list --fromhere=0 fileA
}
doc_ImportOwnBundle="Can import a bundle created by same instance"
@ -654,7 +654,7 @@ test_ImportOwnBundle() {
assert_stdout_import_bundle fileB
# Bundle author and sender are unknown, so appears not to be from here
executeOk_servald rhizome list ''
assert_rhizome_list fileB!
assert_rhizome_list --fromhere=0 fileB
# Extracting the manifest discovers that it is ours.
executeOk_servald rhizome extract manifest $manifestid fileBx.manifest
tfw_cat --stderr
@ -671,7 +671,7 @@ test_ImportOwnBundle() {
assertStdoutGrep --matches=1 "^\.readonly:0\$"
# Now bundle author is known, so appears to be from here
executeOk_servald rhizome list ''
assert_rhizome_list fileB@$SIDB2
assert_rhizome_list --fromhere=1 --author=$SIDB2 fileB
}
runTests "$@"

View File

@ -2,7 +2,7 @@
# Tests for Serval rhizome protocol.
#
# Copyright 2012 Paul Gardner-Stephen
# Copyright 2012 Serval Project Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
@ -24,15 +24,14 @@ source "${0%/*}/../testdefs_rhizome.sh"
shopt -s extglob
teardown() {
finally() {
stop_all_servald_servers
kill_all_servald_processes
assert_no_servald_processes
}
setup_rhizome() {
foreach_instance +A +B create_single_identity
set_instance +B
teardown() {
kill_all_servald_processes
assert_no_servald_processes
report_all_servald_servers
}
# Called by start_servald_instances for each instance.
@ -48,35 +47,6 @@ configure_servald_server() {
executeOk_servald config set rhizome.fetch_interval_ms 100
}
# Predicate function:
# - return true if the file bundle identified by arg1=BID and arg2=VERSION has been
# received by all the given instances
# - does this by examining the server log files of the respective instances
# for tell-tale INFO messages
bundle_received_by() {
local BID="$1"
local VERSION="$2"
shift 2
# Tell-tale INFO line that the server logs when a manifest enters its store.
local rexp="RHIZOME ADD MANIFEST service=file bid=$BID version=$VERSION"
local I
for I; do
case "$I" in
+*)
# "+X" names an instance; search its server log file $LOGX
# (indirect expansion via the LOG<letter> variable).
local logvar="LOG${I#+}"
grep "$rexp" "${!logvar}" || return 1
;;
--stderr)
# Search the stderr of the most recent execute() call instead.
replayStderr | grep "$rexp" || return 1
;;
*)
error "invalid instance argument: $I"
return 1
;;
esac
done
return 0
}
setup_curl_7() {
case "$(curl --version | tr '\n' ' ')" in
curl\ @(7|8|9|[1-9][0-1]).*\ Protocols:*\ http\ *) ;;
@ -91,76 +61,33 @@ setup_curl_7() {
setup_common() {
setup_servald
setup_rhizome
assert_no_servald_processes
}
# Populate the global shell variables BID, VERSION, FILESIZE and FILEHASH
# from the given manifest file.  FILEHASH is left empty for a zero-length
# payload, because such manifests carry no filehash field.
extract_manifest_vars() {
local manifest="${1?}"
extract_manifest_id BID "$manifest"
extract_manifest_version VERSION "$manifest"
extract_manifest_filesize FILESIZE "$manifest"
FILEHASH=
if [ "$FILESIZE" != '0' ]; then
extract_manifest_filehash FILEHASH "$manifest"
fi
}
# Add a file as a new rhizome bundle on the current instance, creating the
# payload with default content if it does not exist.  Asserts the bundle
# appears in the local list, then exports BID/VERSION/FILESIZE/FILEHASH
# via extract_manifest_vars for later use by the test.
add_file() {
local name="$1"
[ -e "$name" ] || echo "File $name" >"$name"
# SID<instance letter>, expanded indirectly, is the bundle author.
local sidvar="SID$instance_name"
executeOk_servald rhizome add file "${!sidvar}" '' "$name" "$name.manifest"
executeOk_servald rhizome list ''
assert_rhizome_list "$name"
extract_manifest_vars "$name.manifest"
}
# Publish a new version of an existing bundle with a new payload file.
# Copies the old manifest (minus the fields the add must regenerate) under
# the new name, re-adds it, and re-exports BID/VERSION/FILESIZE/FILEHASH.
update_file() {
   local orig_name="$1"
   local new_name="$2"
   # Bug fix: use double quotes so the payload contains the actual file
   # name; the original single quotes wrote the literal string '$new_name'
   # (inconsistent with add_file's `echo "File $name"`).
   [ -e "$new_name" ] || echo "File $new_name" >"$new_name"
   local sidvar="SID$instance_name"
   [ "$new_name" != "$orig_name" ] && cp "$orig_name.manifest" "$new_name.manifest"
   # Drop the fields that the new add must regenerate.
   $SED -i -e '/^date=/d;/^filehash=/d;/^filesize=/d;/^version=/d;/^name=/d' "$new_name.manifest"
   executeOk_servald rhizome add file "${!sidvar}" '' "$new_name" "$new_name.manifest"
   executeOk_servald rhizome list ''
   assert_rhizome_list "$new_name"
   extract_manifest_vars "$new_name.manifest"
}
assert_received() {
local name="${1?}"
local _hash
if [ -s "$name" ]; then
extract_manifest_filehash _hash "$name.manifest"
executeOk_servald rhizome extract file "$_hash" extracted
assert cmp "$name" extracted
fi
foreach_instance +A +B create_single_identity
set_instance +B
}
doc_FileTransfer="New bundle and update transfer to one node"
setup_FileTransfer() {
setup_common
set_instance +A
add_file file1
rhizome_add_file file1
start_servald_instances +A +B
foreach_instance +A assert_peers_are_instances +B
foreach_instance +B assert_peers_are_instances +A
}
test_FileTransfer() {
wait_until bundle_received_by $BID $VERSION +B
wait_until bundle_received_by $BID:$VERSION +B
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file1!
assert_received file1
assert_rhizome_list --fromhere=0 file1
assert_rhizome_received file1
set_instance +A
update_file file1 file2
rhizome_update_file file1 file2
set_instance +B
wait_until bundle_received_by $BID $VERSION +B
wait_until bundle_received_by $BID:$VERSION +B
executeOk_servald rhizome list ''
assert_rhizome_list file2!
assert_received file2
assert_rhizome_list --fromhere=0 file2
assert_rhizome_received file2
}
doc_FileTransferBig="Big new bundle transfers to one node"
@ -170,24 +97,24 @@ setup_FileTransferBig() {
dd if=/dev/urandom of=file1 bs=1k count=1k 2>&1
echo x >>file1
ls -l file1
add_file file1
rhizome_add_file file1
start_servald_instances +A +B
foreach_instance +A assert_peers_are_instances +B
foreach_instance +B assert_peers_are_instances +A
}
test_FileTransferBig() {
wait_until bundle_received_by $BID $VERSION +B
wait_until bundle_received_by $BID:$VERSION +B
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file1!
assert_received file1
assert_rhizome_list --fromhere=0 file1
assert_rhizome_received file1
}
doc_FileTransferMulti="New bundle transfers to four nodes"
setup_FileTransferMulti() {
setup_common
set_instance +A
add_file file1
rhizome_add_file file1
start_servald_instances +A +B +C +D +E
foreach_instance +A assert_peers_are_instances +B +C +D +E
foreach_instance +B assert_peers_are_instances +A +C +D +E
@ -195,13 +122,12 @@ setup_FileTransferMulti() {
foreach_instance +D assert_peers_are_instances +A +B +C +E
}
test_FileTransferMulti() {
wait_until bundle_received_by $BID $VERSION +B +C +D +E
local I
for I in +B +C +D +E; do
set_instance $I
wait_until bundle_received_by $BID:$VERSION +B +C +D +E
for i in B C D E; do
set_instance +$i
executeOk_servald rhizome list ''
assert_rhizome_list file1!
assert_received file1
assert_rhizome_list --fromhere=0 file1
assert_rhizome_received file1
done
}
@ -209,21 +135,21 @@ doc_FileTransferDelete="Payload deletion transfers to one node"
setup_FileTransferDelete() {
setup_common
set_instance +A
add_file file1
rhizome_add_file file1
start_servald_instances +A +B
foreach_instance +A assert_peers_are_instances +B
foreach_instance +B assert_peers_are_instances +A
wait_until bundle_received_by $BID $VERSION +B
wait_until bundle_received_by $BID:$VERSION +B
set_instance +A
>file1_2
update_file file1 file1_2
rhizome_update_file file1 file1_2
}
test_FileTransferDelete() {
wait_until bundle_received_by $BID $VERSION +B
wait_until bundle_received_by $BID:$VERSION +B
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file1_2!
assert_received file1_2
assert_rhizome_list --fromhere=0 file1_2
assert_rhizome_received file1_2
}
doc_HttpImport="Import bundle using HTTP POST multi-part form."
@ -234,19 +160,19 @@ setup_HttpImport() {
When we were looking at implementing secure calls for OpenBTS it was suggested
that we configure Asterisk to use SIPS/ZRTP. This would have been relatively
easy to setup, however there are a few problems.
.
Number one is that when Asterisk checks the certificates it will either
validate the certificate (checking the chain of trust and so on) and then
check that the common name attribute on the certificate matches the hostname
of the peer, or it will do none of these checks. This code is in main/tcptls.c
line 206 (in version 1.8.14.1).
.
This is undesirable in a setup where there is limited or no infrastructure as
there is not likely to be a DNS server setup, or even rigid IP assignments
that would allow a static hosts file based setup. This situation would force
the administrator to disable the checks completely which would allow a trivial
man in the middle attack.
.
It would be possible to modify Asterisk to have a third way where it validates
the certificate and checks the chain of trust but does not look at the common
name. We decided against this approach as the VOMP channel driver was written
@ -272,8 +198,8 @@ test_HttpImport() {
"$addr_localhost:$PORTA/rhizome/import"
tfw_cat http.headers http.output
executeOk_servald rhizome list ''
assert_rhizome_list README.WHYNOTSIPS!
assert_received README.WHYNOTSIPS
assert_rhizome_list --fromhere=0 README.WHYNOTSIPS
assert_rhizome_received README.WHYNOTSIPS
}
doc_HttpAddLocal="Add file locally using HTTP, returns manifest"
@ -292,15 +218,15 @@ test_HttpAddLocal() {
executeOk curl --silent --form 'data=@file1' "http://$addr_localhost:$PORTA/rhizome/secretaddfile" --output file1.manifest
assert_manifest_complete file1.manifest
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
extract_manifest_name name file1.manifest
assert [ "$name" = file1 ]
assert_received file1
assert_rhizome_received file1
}
setup_sync() {
set_instance +A
add_file file1
rhizome_add_file file1
BID1=$BID
VERSION1=$VERSION
start_servald_instances dummy1 +A
@ -313,7 +239,7 @@ setup_sync() {
executeOk_servald config set debug.rhizomerx on
executeOk_servald config set rhizome.direct.peer.count "1"
executeOk_servald config set rhizome.direct.peer.0 "http://${addr_localhost}:${PORTA}"
add_file file2
rhizome_add_file file2
BID2=$BID
VERSION2=$VERSION
}
@ -327,14 +253,14 @@ test_DirectPush() {
set_instance +B
executeOk_servald rhizome direct push
tfw_cat --stdout --stderr
assert bundle_received_by $BID2 $VERSION2 +A
assert bundle_received_by $BID2:$VERSION2 +A
set_instance +A
executeOk_servald rhizome list ''
assert_rhizome_list file1 file2!
assert_received file2
assert_rhizome_list --fromhere=1 file1 --fromhere=0 file2
assert_rhizome_received file2
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file2
assert_rhizome_list --fromhere=1 file2
}
doc_DirectPull="One way pull bundle from unconnected node"
@ -346,14 +272,14 @@ test_DirectPull() {
set_instance +B
executeOk_servald rhizome direct pull
tfw_cat --stdout --stderr
assert bundle_received_by $BID1 $VERSION1 --stderr
assert bundle_received_by $BID1:$VERSION1 --stderr
set_instance +A
executeOk_servald rhizome list ''
assert_rhizome_list file1
assert_rhizome_list --fromhere=1 file1
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file1! file2
assert_received file1
assert_rhizome_list --fromhere=0 file1 --fromhere=1 file2
assert_rhizome_received file1
}
doc_DirectSync="Two-way sync bundles between unconnected nodes"
@ -365,16 +291,15 @@ test_DirectSync() {
set_instance +B
executeOk_servald rhizome direct sync
tfw_cat --stdout --stderr
assert bundle_received_by $BID1 $VERSION1 --stderr
assert bundle_received_by $BID2 $VERSION2 +A
assert bundle_received_by $BID1:$VERSION1 --stderr $BID2:$VERSION2 +A
set_instance +A
executeOk_servald rhizome list ''
assert_rhizome_list file1 file2!
assert_received file2
assert_rhizome_list --fromhere=1 file1 --fromhere=0 file2
assert_rhizome_received file2
set_instance +B
executeOk_servald rhizome list ''
assert_rhizome_list file1! file2
assert_received file1
assert_rhizome_list --fromhere=0 file1 --fromhere=1 file2
assert_rhizome_received file1
}
runTests "$@"

83
tests/rhizomestress Executable file
View File

@ -0,0 +1,83 @@
#!/bin/bash
# Stress tests for Serval rhizome protocol.
#
# Copyright 2012 Serval Project Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
source "${0%/*}/../testframework.sh"
source "${0%/*}/../testdefs.sh"
source "${0%/*}/../testdefs_rhizome.sh"
shopt -s extglob
# Finalise hook: stop servers gracefully while assertions may still fail
# the test (teardown failures are treated as errors instead).
finally() {
stop_all_servald_servers
}
# Teardown hook: kill any servald processes finally() left behind, verify
# none remain, then dump each server's logs for the test report.
teardown() {
kill_all_servald_processes
assert_no_servald_processes
report_all_servald_servers
}
# Called by start_servald_instances for each instance.
# Called by start_servald_instances for each instance: enable verbose
# rhizome tx/rx logging, disable respawn-on-signal so killed servers stay
# dead, and speed up MDP announcements to make the stress test converge.
configure_servald_server() {
executeOk_servald config set log.show_pid on
executeOk_servald config set log.show_time on
executeOk_servald config set debug.rhizome on
executeOk_servald config set debug.rhizometx on
executeOk_servald config set debug.rhizomerx on
executeOk_servald config set server.respawn_on_signal off
executeOk_servald config set mdp.wifi.tick_ms 500
executeOk_servald config set mdp.selfannounce.ticks_per_full_address 1
}
doc_FileTransferStress="Stress - five nodes each sharing 16 bundles"
# Create five instances (+A..+E), each publishing 16 single-file bundles.
# Each instance's bundles are recorded as BID:VERSION strings in an array
# named bundles<letter>; eval is used because bash cannot otherwise build
# array names dynamically.
setup_FileTransferStress() {
setup_servald
assert_no_servald_processes
foreach_instance +A +B +C +D +E create_single_identity
for i in A B C D E
do
eval "bundles$i=()"
set_instance +$i
for n in 1 2 3 4 5 6 7 8 9 a b c d e f g
do
rhizome_add_file file-$i-$n
# BID and VERSION are set by rhizome_add_file; escape the $ so they
# expand when eval runs, not when this string is built.
eval "bundles$i+=(\$BID:\$VERSION)"
done
done
}
# Start all five instances and wait (up to 180 s) until every bundle has
# propagated to every other instance, then stop the servers and verify each
# store lists its own 16 bundles as local and the other 64 as foreign, with
# payloads intact.  The extglob pattern !($i) matches any instance letter
# except the current one.
test_FileTransferStress() {
start_servald_instances +A +B +C +D +E
wait_until --timeout=180 bundle_received_by \
${bundlesA[*]} +B +C +D +E \
${bundlesB[*]} +A +C +D +E \
${bundlesC[*]} +A +B +D +E \
${bundlesD[*]} +A +B +C +E \
${bundlesE[*]} +A +B +C +D
stop_all_servald_servers
local i
for i in A B C D E; do
set_instance +$i
executeOk_servald rhizome list ''
assert_rhizome_list --fromhere=1 file-$i-? --fromhere=0 file-!($i)-?
assert_rhizome_received file-!($i)-?
done
}
runTests "$@"