Merge branch 'rhizomestress' into 'master'
commit 839de7557c
log.c (2 changed lines)

@@ -435,7 +435,7 @@ int log_backtrace(struct __sourceloc whence)
     return -1;
   int tmpfd = mkstemp(tempfile);
   if (tmpfd == -1)
-    return WHYF_perror("mkstemp(%s)",alloca_str_toprint(tempfile));
+    return WHYF_perror("mkstemp(%s)", alloca_str_toprint(tempfile));
   if (write_str(tmpfd, "backtrace\n") == -1) {
     close(tmpfd);
     unlink(tempfile);
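
The hunk above is in log_backtrace(), which creates a temporary command file with mkstemp() and writes a single backtrace command into it. A minimal, self-contained sketch of that mkstemp()-plus-write pattern, outside the servald WHYF_perror/write_str helpers; the file-name template and error handling here are illustrative assumptions, not the project's code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Create a temporary file, write one command into it, and return its
 * descriptor (or -1).  Mirrors the mkstemp() + write pattern in the
 * log.c hunk above; the template below is an assumption. */
static int write_command_file(char *tempfile, size_t len, const char *command)
{
  snprintf(tempfile, len, "/tmp/backtrace.XXXXXX");
  int tmpfd = mkstemp(tempfile);
  if (tmpfd == -1) {
    perror("mkstemp");
    return -1;
  }
  size_t n = strlen(command);
  if (write(tmpfd, command, n) != (ssize_t)n) {
    perror("write");
    close(tmpfd);
    unlink(tempfile);
    return -1;
  }
  return tmpfd;
}

int main(void)
{
  char tempfile[64];
  int fd = write_command_file(tempfile, sizeof tempfile, "backtrace\n");
  if (fd != -1) {
    printf("wrote command file %s\n", tempfile);
    close(fd);
    unlink(tempfile);
  }
  return 0;
}
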
rhizome.h (11 changed lines)

@@ -117,6 +117,8 @@ typedef struct rhizome_manifest {
   int fileHighestPriority;
   /* Absolute path of the file associated with the manifest */
   char *dataFileName;
+  /* If set, unlink(2) the associated file when freeing the manifest */
+  int dataFileUnlinkOnFree;
   /* Whether the paylaod is encrypted or not */
   int payloadEncryption;
 
@@ -281,7 +283,7 @@ int rhizome_find_duplicate(const rhizome_manifest *m, rhizome_manifest **found,
 int rhizome_manifest_to_bar(rhizome_manifest *m,unsigned char *bar);
 long long rhizome_bar_version(unsigned char *bar);
 unsigned long long rhizome_bar_bidprefix_ll(unsigned char *bar);
-int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peerip, int *manifest_kept);
+int rhizome_queue_manifest_import(rhizome_manifest *m, const struct sockaddr_in *peerip, int *manifest_kept);
 int rhizome_list_manifests(const char *service, const char *sender_sid, const char *recipient_sid, int limit, int offset);
 int rhizome_retrieve_manifest(const char *manifestid, rhizome_manifest **mp);
 int rhizome_retrieve_file(const char *fileid, const char *filepath,
@@ -539,11 +541,8 @@ extern unsigned char favicon_bytes[];
 extern int favicon_len;
 
 int rhizome_import_from_files(const char *manifestpath,const char *filepath);
-int rhizome_fetch_request_manifest_by_prefix(struct sockaddr_in *peerip,
-                                             unsigned char *prefix,
-                                             int prefix_length,
-                                             int importP);
-extern int rhizome_file_fetch_queue_count;
+int rhizome_fetch_request_manifest_by_prefix(const struct sockaddr_in *peerip, const unsigned char *prefix, size_t prefix_length);
+int rhizome_count_queued_imports();
 
 struct http_response_parts {
   int code;
@@ -525,8 +525,12 @@ void _rhizome_manifest_free(struct __sourceloc __whence, rhizome_manifest *m)
       m->signatories[i]=NULL;
     }
 
-  if (m->dataFileName) free(m->dataFileName);
-  m->dataFileName=NULL;
+  if (m->dataFileName) {
+    if (m->dataFileUnlinkOnFree && unlink(m->dataFileName) == -1)
+      WARNF_perror("unlink(%s)", alloca_str_toprint(m->dataFileName));
+    free(m->dataFileName);
+    m->dataFileName = NULL;
+  }
 
   manifest_free[mid]=1;
   manifest_free_whence[mid]=__whence;
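
The new dataFileUnlinkOnFree field (rhizome.h hunk above) and this _rhizome_manifest_free() change work together: when the flag is set, freeing the manifest also unlink(2)s the temporary payload file it points at. A hedged sketch of how a caller could use the flag; attach_temporary_payload() is hypothetical, only the two struct fields and the free-time behaviour come from the diff:

#include <string.h>
#include "rhizome.h"

/* Hypothetical helper: associate a manifest with a payload file that should
 * not outlive the manifest.  _rhizome_manifest_free() (above) will unlink(2)
 * the file and free the name when the manifest is released. */
static int attach_temporary_payload(rhizome_manifest *m, const char *path)
{
  if ((m->dataFileName = strdup(path)) == NULL)
    return -1;
  m->dataFileUnlinkOnFree = 1;   /* delete the file when m is freed */
  return 0;
}
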
@@ -876,14 +876,13 @@ void rhizome_direct_http_dispatch(rhizome_direct_sync_request *r)
         Then as noted above, we can use that to pull the file down using
         existing routines.
       */
-      if (!rhizome_fetch_request_manifest_by_prefix
-          (&addr,&actionlist[i+1],RHIZOME_BAR_PREFIX_BYTES,
-           1 /* import, getting file if needed */))
+      if (!rhizome_fetch_request_manifest_by_prefix(&addr,&actionlist[i+1],RHIZOME_BAR_PREFIX_BYTES))
         {
           /* Fetching the manifest, and then using it to see if we want to
              fetch the file for import is all handled asynchronously, so just
              wait for it to finish. */
-          while(rhizome_file_fetch_queue_count) fd_poll();
+          while (rhizome_count_queued_imports())
+            fd_poll();
         }
 
     } else if (type==1&&r->pushP) {
rhizome_fetch.c (545 changed lines)

@@ -31,34 +31,57 @@ extern int sigIoFlag;
 typedef struct rhizome_file_fetch_record {
   struct sched_ent alarm;
   rhizome_manifest *manifest;
-  char fileid[RHIZOME_FILEHASH_STRLEN + 1];
   FILE *file;
   char filename[1024];
 
   char request[1024];
   int request_len;
   int request_ofs;
 
   long long file_len;
   long long file_ofs;
 
   int state;
+#define RHIZOME_FETCH_FREE 0
 #define RHIZOME_FETCH_CONNECTING 1
 #define RHIZOME_FETCH_SENDINGHTTPREQUEST 2
 #define RHIZOME_FETCH_RXHTTPHEADERS 3
 #define RHIZOME_FETCH_RXFILE 4
 
   struct sockaddr_in peer;
 
 } rhizome_file_fetch_record;
 
-struct profile_total fetch_stats;
 
 /* List of queued transfers */
 #define MAX_QUEUED_FILES 4
-int rhizome_file_fetch_queue_count=0;
 rhizome_file_fetch_record file_fetch_queue[MAX_QUEUED_FILES];
 
+#define QUEUED (0)
+#define QSAMEPAYLOAD (1)
+#define QDATED (2)
+#define QBUSY (3)
+#define QSAME (4)
+#define QOLDER (5)
+#define QNEWER (6)
+#define QIMPORTED (7)
+
+int rhizome_count_queued_imports()
+{
+  int count = 0;
+  int i;
+  for (i = 0; i < MAX_QUEUED_FILES; ++i)
+    if (file_fetch_queue[i].state != RHIZOME_FETCH_FREE)
+      ++count;
+  return count;
+}
+
+static rhizome_file_fetch_record *rhizome_find_import_slot()
+{
+  // Find a free fetch queue slot.
+  int i;
+  for (i = 0; i < MAX_QUEUED_FILES; ++i)
+    if (file_fetch_queue[i].state == RHIZOME_FETCH_FREE)
+      return &file_fetch_queue[i];
+  return NULL;
+}
+
+struct profile_total fetch_stats;
+
 /*
    Queue a manifest for importing.
 
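
With rhizome_file_fetch_queue_count gone, queue occupancy is now derived from the per-slot state field: rhizome_count_queued_imports() counts the non-free slots and rhizome_find_import_slot() returns the first free one. The rhizome_direct hunk above already shows the intended caller-side idiom for draining the queue; here is the same busy-wait written out as a sketch (fd_poll() is the existing servald event pump that drives the fetch alarms):

/* Wait until every import slot has returned to RHIZOME_FETCH_FREE.
 * Mirrors the loop in rhizome_direct_http_dispatch() above. */
static void wait_for_imports_to_drain(void)
{
  while (rhizome_count_queued_imports())
    fd_poll();
}
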
@@ -449,7 +472,7 @@ int rhizome_suggest_queue_manifest_import(rhizome_manifest *m, struct sockaddr_i
   if (list_version >= m->version) {
     /* this version is older than the one in the list, so don't list this one */
     rhizome_manifest_free(m);
     RETURN(0);
   } else {
     /* replace listed version with this newer version */
     if (rhizome_manifest_verify(m)) {
@@ -524,22 +547,20 @@ int rhizome_suggest_queue_manifest_import(rhizome_manifest *m, struct sockaddr_i
 void rhizome_enqueue_suggestions(struct sched_ent *alarm)
 {
   int i;
-  for(i=0;i<candidate_count;i++)
-    {
-      if (rhizome_file_fetch_queue_count>=MAX_QUEUED_FILES)
-        break;
-      int manifest_kept = 0;
-      rhizome_queue_manifest_import(candidates[i].manifest,&candidates[i].peer, &manifest_kept);
-      if (!manifest_kept) {
-        rhizome_manifest_free(candidates[i].manifest);
-        candidates[i].manifest = NULL;
-      }
-    }
+  for (i = 0; i < candidate_count; ++i) {
+    int manifest_kept = 0;
+    int result = rhizome_queue_manifest_import(candidates[i].manifest, &candidates[i].peer, &manifest_kept);
+    if (result == QBUSY)
+      break;
+    if (!manifest_kept) {
+      rhizome_manifest_free(candidates[i].manifest);
+      candidates[i].manifest = NULL;
+    }
+  }
   if (i) {
     /* now shuffle up */
     int bytes=(candidate_count-i)*sizeof(rhizome_candidates);
-    if (0) DEBUGF("Moving slot %d to slot 0 (%d bytes = %d slots)",
-                  i,bytes,bytes/sizeof(rhizome_candidates));
+    if (0) DEBUGF("Moving slot %d to slot 0 (%d bytes = %d slots)", i,bytes,bytes/sizeof(rhizome_candidates));
     bcopy(&candidates[i],&candidates[0],bytes);
     candidate_count-=i;
   }
@@ -551,15 +572,95 @@ void rhizome_enqueue_suggestions(struct sched_ent *alarm)
     return;
   }
 
-int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peerip, int *manifest_kept)
+static int rhizome_queue_import(rhizome_file_fetch_record *q)
+{
+  int sock = -1;
+  FILE *file = NULL;
+  /* TODO Don't forget to implement resume */
+  /* TODO We should stream file straight into the database */
+  if (create_rhizome_import_dir() == -1)
+    goto bail;
+  if ((file = fopen(q->filename, "w")) == NULL) {
+    WHYF_perror("fopen(`%s`, \"w\")", q->filename);
+    goto bail;
+  }
+  if (q->peer.sin_family == AF_INET) {
+    /* Transfer via HTTP over IPv4 */
+    if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
+      WHY_perror("socket");
+      goto bail;
+    }
+    if (set_nonblock(sock) == -1)
+      goto bail;
+    char buf[INET_ADDRSTRLEN];
+    if (inet_ntop(AF_INET, &q->peer.sin_addr, buf, sizeof buf) == NULL) {
+      buf[0] = '*';
+      buf[1] = '\0';
+    }
+    if (connect(sock, (struct sockaddr*)&q->peer, sizeof q->peer) == -1) {
+      if (errno == EINPROGRESS) {
+        if (debug & DEBUG_RHIZOME_RX)
+          DEBUGF("connect() returned EINPROGRESS");
+      } else {
+        WHYF_perror("connect(%d, %s:%u)", sock, buf, ntohs(q->peer.sin_port));
+        goto bail;
+      }
+    }
+    INFOF("RHIZOME HTTP REQUEST family=%u addr=%s port=%u %s",
+        q->peer.sin_family, buf, ntohs(q->peer.sin_port), alloca_str_toprint(q->request)
+      );
+    q->alarm.poll.fd = sock;
+    q->request_ofs=0;
+    q->state=RHIZOME_FETCH_CONNECTING;
+    q->file = file;
+    q->file_len=-1;
+    q->file_ofs=0;
+    /* Watch for activity on the socket */
+    q->alarm.function=rhizome_fetch_poll;
+    fetch_stats.name="rhizome_fetch_poll";
+    q->alarm.stats=&fetch_stats;
+    q->alarm.poll.events=POLLIN|POLLOUT;
+    watch(&q->alarm);
+    /* And schedule a timeout alarm */
+    q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
+    q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
+    schedule(&q->alarm);
+    return 0;
+  } else {
+    /* TODO: Fetch via overlay */
+    WHY("Rhizome fetching via overlay not implemented");
+  }
+bail:
+  if (sock != -1)
+    close(sock);
+  if (file != NULL) {
+    fclose(file);
+    unlink(q->filename);
+  }
+  return -1;
+}
+
+/* Returns QUEUED (0) if the fetch was queued.
+ * Returns QSAMEPAYLOAD if a fetch of the same payload (file ID) is already queued.
+ * Returns QDATED if the fetch was not queued because a newer version of the same bundle is already
+ * present.
+ * Returns QBUSY if the fetch was not queued because the queue is full.
+ * Returns QSAME if the same fetch is already queued.
+ * Returns QOLDER if a fetch of an older version of the same bundle is already queued.
+ * Returns QNEWER if a fetch of a newer version of the same bundle is already queued.
+ * Returns QIMPORTED if a fetch was not queued because the payload is nil or already held, so the
+ * import was performed instead.
+ * Returns -1 on error.
+ */
+int rhizome_queue_manifest_import(rhizome_manifest *m, const struct sockaddr_in *peerip, int *manifest_kept)
 {
   *manifest_kept = 0;
 
   const char *bid = alloca_tohex_bid(m->cryptoSignPublic);
   long long filesize = rhizome_manifest_get_ll(m, "filesize");
 
-  /* Do the quick rejection tests first, before the more expensive once,
+  /* Do the quick rejection tests first, before the more expensive ones,
      like querying the database for manifests.
 
     We probably need a cache of recently rejected manifestid:versionid
     pairs so that we can avoid database lookups in most cases. Probably
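
The return-code comment above lets callers separate "try again later" (QBUSY) from "nothing left to do" (QIMPORTED, QDATED, QSAME, QOLDER, QNEWER, QSAMEPAYLOAD). A hedged sketch of a dispatcher on those codes: handle_suggested_manifest() is an illustrative wrapper that would have to live in rhizome_fetch.c, since the Q* codes are file-local there; only the constants, rhizome_queue_manifest_import() and the manifest_kept ownership rule come from the diff (compare the rhizome_enqueue_suggestions() hunk above):

/* Illustrative wrapper: queue a fetch for one suggested manifest and report
 * whether the caller should keep draining its candidate list (1) or stop
 * because every slot is busy (0). */
static int handle_suggested_manifest(rhizome_manifest *m, const struct sockaddr_in *peer)
{
  int manifest_kept = 0;
  int result = rhizome_queue_manifest_import(m, peer, &manifest_kept);
  if (result == QBUSY)
    return 0;                    /* all MAX_QUEUED_FILES slots occupied -- keep the candidate */
  /* QUEUED: fetch started; QIMPORTED/QDATED/QSAME/QOLDER/QNEWER/QSAMEPAYLOAD:
   * nothing further to do; -1: error already reported via WHY(). */
  if (!manifest_kept)
    rhizome_manifest_free(m);    /* the queue did not take ownership of m */
  return 1;
}
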
@@ -572,33 +673,56 @@ int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peeri
     the cache slot number to implicitly store the first bits.
   */
 
-  if (1||debug & DEBUG_RHIZOME_RX)
-    DEBUGF("Fetching manifest bid=%s version=%lld size=%lld:", bid, m->version, filesize);
+  if (1||(debug & DEBUG_RHIZOME_RX))
+    DEBUGF("Fetching bundle bid=%s version=%lld size=%lld:", bid, m->version, filesize);
 
-  if (rhizome_manifest_version_cache_lookup(m)) {
-    /* We already have this version or newer */
+  // If the payload is empty, no need to fetch, so import now.
+  if (filesize == 0) {
     if (debug & DEBUG_RHIZOME_RX)
-      DEBUG(" already have that version or newer");
-    return 1;
+      DEBUGF(" manifest fetch not started -- nil payload, so importing instead");
+    if (rhizome_bundle_import(m, m->ttl-1) == -1)
+      return WHY("bundle import failed");
+    return QIMPORTED;
+  }
+
+  // Ensure there is a slot available before doing more expensive checks.
+  rhizome_file_fetch_record *q = rhizome_find_import_slot();
+  if (q == NULL) {
+    if (debug & DEBUG_RHIZOME_RX)
+      DEBUG(" fetch not started - all slots full");
+    return QBUSY;
+  }
+
+  // If we already have this version or newer, do not fetch.
+  if (rhizome_manifest_version_cache_lookup(m)) {
+    if (debug & DEBUG_RHIZOME_RX)
+      DEBUG(" fetch not started -- already have that version or newer");
+    return QDATED;
   }
   if (debug & DEBUG_RHIZOME_RX)
     DEBUGF(" is new");
 
-  /* Don't queue if queue slots already full */
-  if (rhizome_file_fetch_queue_count >= MAX_QUEUED_FILES) {
-    if (debug & DEBUG_RHIZOME_RX)
-      DEBUG(" all fetch queue slots full");
-    return 2;
-  }
-
-  /* Don't queue if already queued */
+  /* Don't fetch if already in progress. If a fetch of an older version is already in progress,
+   * then this logic will let it run to completion before the fetch of the newer version is queued.
+   * This avoids the problem of indefinite postponement of fetching if new versions are constantly
+   * being published faster than we can fetch them.
+   */
   int i;
-  for (i = 0; i < rhizome_file_fetch_queue_count; ++i) {
-    if (file_fetch_queue[i].manifest) {
-      if (memcmp(m->cryptoSignPublic, file_fetch_queue[i].manifest->cryptoSignPublic, RHIZOME_MANIFEST_ID_BYTES) == 0) {
-        if (debug & DEBUG_RHIZOME_RX)
-          DEBUGF(" manifest fetch already queued");
-        return 3;
+  for (i = 0; i < MAX_QUEUED_FILES; ++i) {
+    rhizome_manifest *qm = file_fetch_queue[i].manifest;
+    if (qm && memcmp(m->cryptoSignPublic, qm->cryptoSignPublic, RHIZOME_MANIFEST_ID_BYTES) == 0) {
+      if (qm->version < m->version) {
+        if (debug & DEBUG_RHIZOME_RX)
+          DEBUGF(" fetch already in progress -- older version");
+        return QOLDER;
+      } else if (qm->version > m->version) {
+        if (debug & DEBUG_RHIZOME_RX)
+          DEBUGF(" fetch already in progress -- newer version");
+        return QNEWER;
+      } else {
+        if (debug & DEBUG_RHIZOME_RX)
+          DEBUGF(" fetch already in progress -- same version");
+        return QSAME;
       }
     }
   }
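
The retained comment above sketches a cache of recently rejected manifestid:versionid pairs, keyed by a few bits of the manifest ID with the slot number implicitly storing the first bits, so that repeat offers can be refused without a database query. A minimal illustration of that idea; every name and size here is an assumption, none of it exists in the code:

#include <string.h>

/* Sketch of the rejection cache described in the comment above: a small
 * direct-mapped table keyed by the leading byte of the manifest ID, so a
 * repeat offer of an already-rejected (bid, version) pair can be refused
 * without touching the database.  Sizes and names are assumptions;
 * BID_BYTES stands in for RHIZOME_MANIFEST_ID_BYTES. */
#define REJECT_CACHE_SLOTS 64
#define BID_BYTES 32

struct reject_entry {
  unsigned char bid[BID_BYTES];
  long long version;
  int valid;
};

static struct reject_entry reject_cache[REJECT_CACHE_SLOTS];

static unsigned reject_slot(const unsigned char *bid)
{
  /* The first byte picks the slot, implicitly storing those bits. */
  return bid[0] % REJECT_CACHE_SLOTS;
}

static int recently_rejected(const unsigned char *bid, long long version)
{
  const struct reject_entry *e = &reject_cache[reject_slot(bid)];
  return e->valid && e->version >= version && memcmp(e->bid, bid, BID_BYTES) == 0;
}

static void remember_rejection(const unsigned char *bid, long long version)
{
  struct reject_entry *e = &reject_cache[reject_slot(bid)];
  memcpy(e->bid, bid, BID_BYTES);
  e->version = version;
  e->valid = 1;
}
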
@@ -610,148 +734,106 @@ int rhizome_queue_manifest_import(rhizome_manifest *m, struct sockaddr_in *peeri
   str_toupper_inplace(m->fileHexHash);
   m->fileHashedP = 1;
 
-  if (filesize > 0 && m->fileHexHash[0]) {
-    if (debug & DEBUG_RHIZOME_RX)
-      DEBUGF(" Getting ready to fetch filehash=%s for bid=%s", m->fileHexHash, bid);
-    long long gotfile = 0;
-    if (sqlite_exec_int64(&gotfile, "SELECT COUNT(*) FROM FILES WHERE ID='%s' and datavalid=1;", m->fileHexHash) != 1)
-      return WHY("select failed");
-    if (gotfile == 0) {
-      /* We need to get the file, unless already queued */
-      int i;
-      for (i = 0; i < rhizome_file_fetch_queue_count; ++i) {
-        if (strcasecmp(m->fileHexHash, file_fetch_queue[i].fileid) == 0) {
-          if (debug & DEBUG_RHIZOME_RX)
-            DEBUGF("Payload fetch already queued, slot %d filehash=%s", m->fileHexHash);
-          return 0;
-        }
-      }
-
-      if (peerip) {
-        /* Transfer via HTTP over IPv4 */
-        int sock = socket(AF_INET, SOCK_STREAM, 0);
-        if (sock == -1)
-          return WHY_perror("socket");
-        if (set_nonblock(sock) == -1) {
-          close(sock);
-          return -1;
-        }
-        dump("copying peerip",peerip,sizeof(*peerip));
-        struct sockaddr_in addr = *peerip;
-        addr.sin_family = AF_INET;
-        char buf[INET_ADDRSTRLEN];
-        if (inet_ntop(AF_INET, &addr.sin_addr, buf, sizeof buf) == NULL) {
-          buf[0] = '*';
-          buf[1] = '\0';
-        }
-        INFOF("RHIZOME HTTP REQUEST, CONNECT family=%u port=%u addr=%s", addr.sin_family, ntohs(addr.sin_port), buf);
-        if (connect(sock, (struct sockaddr*)&addr, sizeof addr) == -1) {
-          if (errno == EINPROGRESS) {
-            if (debug & DEBUG_RHIZOME_RX)
-              DEBUGF("connect() returned EINPROGRESS");
-          } else {
-            WHY_perror("connect");
-            WHY("Failed to open socket to peer's rhizome web server");
-            close(sock);
-            return -1;
-          }
-        }
-        rhizome_file_fetch_record *q=&file_fetch_queue[rhizome_file_fetch_queue_count];
-        q->manifest = m;
-        *manifest_kept = 1;
-        bcopy(&addr,&q->peer,sizeof(q->peer));
-        q->alarm.poll.fd=sock;
-        strncpy(q->fileid, m->fileHexHash, RHIZOME_FILEHASH_STRLEN + 1);
-        q->request_len = snprintf(q->request, sizeof q->request, "GET /rhizome/file/%s HTTP/1.0\r\n\r\n", q->fileid);
-        q->request_ofs=0;
-        q->state=RHIZOME_FETCH_CONNECTING;
-        q->file_len=-1;
-        q->file_ofs=0;
-
-        /* XXX Don't forget to implement resume */
-        /* XXX We should stream file straight into the database */
-        const char *id = rhizome_manifest_get(q->manifest, "id", NULL, 0);
-        if (id == NULL) {
-          close(sock);
-          return WHY("Manifest missing ID");
-        }
-        if (create_rhizome_import_dir() == -1)
-          return -1;
-        char filename[1024];
-        if (!FORM_RHIZOME_IMPORT_PATH(filename, "file.%s", id)) {
-          close(sock);
-          return -1;
-        }
-        q->manifest->dataFileName = strdup(filename);
-        if ((q->file = fopen(q->manifest->dataFileName, "w")) == NULL) {
-          WHY_perror("fopen");
-          if (debug & DEBUG_RHIZOME_RX)
-            DEBUGF("Could not open '%s' to write received file", q->manifest->dataFileName);
-          close(sock);
-          return -1;
-        }
-
-        INFOF("RHIZOME HTTP REQUEST, GET \"/rhizome/file/%s\"", q->fileid);
-
-        /* Watch for activity on the socket */
-        q->alarm.function=rhizome_fetch_poll;
-        fetch_stats.name="rhizome_fetch_poll";
-        q->alarm.stats=&fetch_stats;
-        q->alarm.poll.events=POLLIN|POLLOUT;
-        watch(&q->alarm);
-        /* And schedule a timeout alarm */
-        q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
-        q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
-
-        schedule(&q->alarm);
-
-        rhizome_file_fetch_queue_count++;
-        if (debug & DEBUG_RHIZOME_RX)
-          DEBUGF("Queued file for fetching into %s (%d in queue)",
-                 q->manifest->dataFileName, rhizome_file_fetch_queue_count);
-        return 0;
-      } else {
-        /* TODO: fetch via overlay */
-        return WHY("Rhizome fetching via overlay not implemented");
-      }
-    } else {
-      if (debug & DEBUG_RHIZOME_RX)
-        DEBUGF("We already have the file for this manifest; importing from manifest alone.");
-      rhizome_bundle_import(m, m->ttl-1);
+  // If the payload is already available, no need to fetch, so import now.
+  long long gotfile = 0;
+  if (sqlite_exec_int64(&gotfile, "SELECT COUNT(*) FROM FILES WHERE ID='%s' and datavalid=1;", m->fileHexHash) != 1)
+    return WHY("select failed");
+  if (gotfile) {
+    if (debug & DEBUG_RHIZOME_RX)
+      DEBUGF(" fetch not started - payload already present, so importing instead");
+    if (rhizome_bundle_import(m, m->ttl-1) == -1)
+      return WHY("bundle import failed");
+    return QIMPORTED;
+  }
+
+  // Fetch the file, unless already queued.
+  for (i = 0; i < MAX_QUEUED_FILES; ++i) {
+    if (file_fetch_queue[i].manifest && strcasecmp(m->fileHexHash, file_fetch_queue[i].manifest->fileHexHash) == 0) {
+      if (debug & DEBUG_RHIZOME_RX)
+        DEBUGF(" fetch already in progress, slot=%d filehash=%s", i, m->fileHexHash);
+      return QSAMEPAYLOAD;
     }
   }
-  return 0;
+
+  // Queue the fetch.
+  //dump("peerip", peerip, sizeof *peerip);
+  q->peer = *peerip;
+  strbuf r = strbuf_local(q->request, sizeof q->request);
+  strbuf_sprintf(r, "GET /rhizome/file/%s HTTP/1.0\r\n\r\n", m->fileHexHash);
+  if (strbuf_overrun(r))
+    return WHY("request overrun");
+  q->request_len = strbuf_len(r);
+  if (!FORM_RHIZOME_IMPORT_PATH(q->filename, "payload.%s", bid))
+    return -1;
+  m->dataFileName = strdup(q->filename);
+  m->dataFileUnlinkOnFree = 0;
+  q->manifest = m;
+  if (rhizome_queue_import(q) == -1) {
+    q->filename[0] = '\0';
+    return -1;
+  }
+  *manifest_kept = 1;
+  if (debug & DEBUG_RHIZOME_RX)
+    DEBUGF(" started fetch into %s, slot=%d filehash=%s", q->manifest->dataFileName, q - file_fetch_queue, m->fileHexHash);
+  return QUEUED;
 }
 
-int rhizome_fetch_close(rhizome_file_fetch_record *q){
-  /* Free ephemeral data */
-  if (q->file)
-    fclose(q->file);
-  q->file=NULL;
-  if (q->manifest)
-    rhizome_manifest_free(q->manifest);
-  q->manifest=NULL;
+int rhizome_fetch_request_manifest_by_prefix(const struct sockaddr_in *peerip, const unsigned char *prefix, size_t prefix_length)
+{
+  assert(peerip);
+  rhizome_file_fetch_record *q = rhizome_find_import_slot();
+  if (q == NULL)
+    return QBUSY;
+  q->peer = *peerip;
+  q->manifest = NULL;
+  strbuf r = strbuf_local(q->request, sizeof q->request);
+  strbuf_sprintf(r, "GET /rhizome/manifestbyprefix/%s HTTP/1.0\r\n\r\n", alloca_tohex(prefix, prefix_length));
+  if (strbuf_overrun(r))
+    return WHY("request overrun");
+  q->request_len = strbuf_len(r);
+  if (!FORM_RHIZOME_IMPORT_PATH(q->filename, "manifest.%s", alloca_tohex(prefix, prefix_length)))
+    return -1;
+  if (rhizome_queue_import(q) == -1) {
+    q->filename[0] = '\0';
+    return -1;
+  }
+  return QUEUED;
+}
+
+int rhizome_fetch_close(rhizome_file_fetch_record *q)
+{
+  if (debug & DEBUG_RHIZOME_RX)
+    DEBUGF("Release Rhizome fetch slot=%d", q - file_fetch_queue);
+  assert(q->state != RHIZOME_FETCH_FREE);
 
   /* close socket and stop watching it */
   unwatch(&q->alarm);
   unschedule(&q->alarm);
   close(q->alarm.poll.fd);
-  q->alarm.poll.fd=-1;
+  q->alarm.poll.fd = -1;
 
-  /* Reduce count of open connections */
-  if (rhizome_file_fetch_queue_count>0)
-    rhizome_file_fetch_queue_count--;
-  else
-    WHY("rhizome_file_fetch_queue_count is already zero, is a fetch record being double freed?");
-
-  if (debug & DEBUG_RHIZOME_RX)
-    DEBUGF("Released rhizome fetch slot (%d used)", rhizome_file_fetch_queue_count);
+  /* Free ephemeral data */
+  if (q->file)
+    fclose(q->file);
+  q->file = NULL;
+  if (q->manifest)
+    rhizome_manifest_free(q->manifest);
+  q->manifest = NULL;
+  if (q->filename[0])
+    unlink(q->filename);
+  q->filename[0] = '\0';
+
+  // Release the import queue slot.
+  q->state = RHIZOME_FETCH_FREE;
+
+  // Fill the slot with the next suggestion.
+  rhizome_enqueue_suggestions(NULL);
 
   return 0;
 }
 
-void rhizome_fetch_write(rhizome_file_fetch_record *q){
+void rhizome_fetch_write(rhizome_file_fetch_record *q)
+{
   if (debug & DEBUG_RHIZOME_RX)
     DEBUGF("write_nonblock(%d, %s)", q->alarm.poll.fd, alloca_toprint(-1, &q->request[q->request_ofs], q->request_len-q->request_ofs));
   int bytes = write_nonblock(q->alarm.poll.fd, &q->request[q->request_ofs], q->request_len-q->request_ofs);
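
Both queueing paths above now format their HTTP request line into the record's fixed request[1024] buffer through strbuf_local()/strbuf_sprintf() and then check strbuf_overrun(), so an over-long request is rejected rather than silently truncated. The same pattern in isolation, as a hedged sketch: build_file_request() and its arguments are hypothetical, the strbuf calls are the ones used above.

#include <stddef.h>
#include "strbuf.h"

/* Hypothetical helper showing the fixed-buffer request pattern used above:
 * format the GET line into the caller's buffer, fail on overrun instead of
 * sending a truncated request, and report the final length. */
static int build_file_request(char *buf, size_t bufsz, const char *filehash, int *lenp)
{
  strbuf r = strbuf_local(buf, bufsz);
  strbuf_sprintf(r, "GET /rhizome/file/%s HTTP/1.0\r\n\r\n", filehash);
  if (strbuf_overrun(r))
    return -1;                   /* the code above returns WHY("request overrun") here */
  *lenp = (int) strbuf_len(r);
  return 0;
}
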
@@ -777,54 +859,48 @@ void rhizome_fetch_write(rhizome_file_fetch_record *q){
   }
 }
 
-void rhizome_write_content(rhizome_file_fetch_record *q, char *buffer, int bytes){
+void rhizome_write_content(rhizome_file_fetch_record *q, char *buffer, int bytes)
+{
   if (bytes>(q->file_len-q->file_ofs))
     bytes=q->file_len-q->file_ofs;
-  if (fwrite(buffer,bytes,1,q->file)!=1)
-    {
+  if (fwrite(buffer,bytes,1,q->file) != 1) {
     if (debug & DEBUG_RHIZOME_RX)
       DEBUGF("Failed to write %d bytes to file @ offset %d", bytes, q->file_ofs);
     rhizome_fetch_close(q);
     return;
   }
   q->file_ofs+=bytes;
-  if (q->file_ofs>=q->file_len)
-    {
+  if (q->file_ofs>=q->file_len) {
     /* got all of file */
     if (debug & DEBUG_RHIZOME_RX)
       DEBUGF("Received all of file via rhizome -- now to import it");
     fclose(q->file);
     q->file = NULL;
     if (q->manifest) {
+      // Were fetching payload, now we have it.
       rhizome_import_received_bundle(q->manifest);
-      rhizome_manifest_free(q->manifest);
-      q->manifest = NULL;
     } else {
       /* This was to fetch the manifest, so now fetch the file if needed */
       DEBUGF("Received a manifest in response to supplying a manifest prefix.");
-      DEBUGF("XXX Not implemented scheduling the fetching of the file itself.");
       /* Read the manifest and add it to suggestion queue, then immediately
          call schedule queued items. */
-      rhizome_manifest *m=rhizome_new_manifest();
+      rhizome_manifest *m = rhizome_new_manifest();
       if (m) {
-        if (rhizome_read_manifest_file(m,q->filename,0)==-1) {
+        if (rhizome_read_manifest_file(m, q->filename, 0) == -1) {
          DEBUGF("Couldn't read manifest from %s",q->filename);
          rhizome_manifest_free(m);
        } else {
-          DEBUGF("All looks good for importing manifest %p",m);
+          DEBUGF("All looks good for importing manifest id=%s", alloca_tohex_bid(m->cryptoSignPublic));
          dump("q->peer",&q->peer,sizeof(q->peer));
-          rhizome_suggest_queue_manifest_import(m,&q->peer);
-          rhizome_enqueue_suggestions(NULL);
+          rhizome_suggest_queue_manifest_import(m, &q->peer);
+          //rhizome_enqueue_suggestions(NULL); XXX Now called in rhizome_fetch_close()
        }
      }
    }
    rhizome_fetch_close(q);
    return;
  }
-  // reset timeout due to activity
+  // reset inactivity timeout
  unschedule(&q->alarm);
  q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
  q->alarm.deadline = q->alarm.alarm+RHIZOME_IDLE_TIMEOUT;
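
rhizome_write_content() above ends by pushing the idle alarm forward on every received chunk, so a transfer only times out after RHIZOME_IDLE_TIMEOUT of inactivity rather than after a fixed overall duration. The reset sequence as a small sketch; the helper name is hypothetical and the trailing schedule() call is assumed from the surrounding code, the rest are the calls in the hunk:

/* Hypothetical helper wrapping the timeout reset done above: push the idle
 * alarm RHIZOME_IDLE_TIMEOUT past the latest activity from the peer. */
static void reset_fetch_timeout(rhizome_file_fetch_record *q)
{
  unschedule(&q->alarm);
  q->alarm.alarm = gettime_ms() + RHIZOME_IDLE_TIMEOUT;
  q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
  schedule(&q->alarm);
}
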
@@ -993,90 +1069,3 @@ int unpack_http_response(char *response, struct http_response_parts *parts)
   parts->content_start = p;
   return 0;
 }
-
-int rhizome_fetch_request_manifest_by_prefix(struct sockaddr_in *peerip,
-                                             unsigned char *prefix,
-                                             int prefix_length,
-                                             int importP)
-{
-  assert(peerip);
-  /* Transfer via HTTP over IPv4 */
-  int sock = socket(AF_INET, SOCK_STREAM, 0);
-  if (sock == -1)
-    return WHY_perror("socket");
-  if (set_nonblock(sock) == -1) {
-    close(sock);
-    return -1;
-  }
-  struct sockaddr_in addr = *peerip;
-  addr.sin_family = AF_INET;
-  INFOF("RHIZOME HTTP REQUEST, CONNECT family=%u port=%u addr=%u.%u.%u.%u",
-      addr.sin_family, ntohs(addr.sin_port),
-      ((unsigned char*)&addr.sin_addr.s_addr)[0],
-      ((unsigned char*)&addr.sin_addr.s_addr)[1],
-      ((unsigned char*)&addr.sin_addr.s_addr)[2],
-      ((unsigned char*)&addr.sin_addr.s_addr)[3]
-    );
-  if (connect(sock, (struct sockaddr*)&addr, sizeof addr) == -1) {
-    if (errno == EINPROGRESS) {
-      if (debug & DEBUG_RHIZOME_RX)
-        DEBUGF("connect() returned EINPROGRESS");
-    } else {
-      WHY_perror("connect");
-      WHY("Failed to open socket to peer's rhizome web server");
-      close(sock);
-      return -1;
-    }
-  }
-  rhizome_file_fetch_record *q=&file_fetch_queue[rhizome_file_fetch_queue_count];
-  q->manifest = NULL;
-  q->alarm.poll.fd=sock;
-  bzero(q->fileid, sizeof(q->fileid));
-  q->peer=*peerip;
-  q->request_len = snprintf(q->request, sizeof q->request, "GET /rhizome/manifestbyprefix/%s HTTP/1.0\r\n\r\n", alloca_tohex(prefix,prefix_length));
-  q->request_ofs=0;
-  q->state=RHIZOME_FETCH_CONNECTING;
-  q->file_len=-1;
-  q->file_ofs=0;
-
-  if (create_rhizome_import_dir() == -1)
-    return -1;
-  char filename[1024];
-  if (!FORM_RHIZOME_IMPORT_PATH(filename, "file.%s",
-        alloca_tohex(prefix,prefix_length))) {
-    close(sock);
-    return -1;
-  }
-  if (strlen(q->filename)<sizeof(q->filename))
-    strcpy(q->filename,filename);
-  else q->filename[0]=0;
-
-  if ((q->file = fopen(filename, "w")) == NULL) {
-    WHY_perror("fopen");
-    // if (debug & DEBUG_RHIZOME_RX)
-      DEBUGF("Could not open '%s' to write received file", filename);
-    close(sock);
-    return -1;
-  }
-
-  INFOF("RHIZOME HTTP REQUEST, GET \"/rhizome/manifestbyprefix/%s\"",
-      alloca_tohex(prefix,prefix_length));
-
-  /* Watch for activity on the socket */
-  q->alarm.function=rhizome_fetch_poll;
-  fetch_stats.name="rhizome_fetch_poll";
-  q->alarm.stats=&fetch_stats;
-  q->alarm.poll.events=POLLIN|POLLOUT;
-  watch(&q->alarm);
-  /* And schedule a timeout alarm */
-  q->alarm.alarm=gettime_ms() + RHIZOME_IDLE_TIMEOUT;
-  q->alarm.deadline = q->alarm.alarm + RHIZOME_IDLE_TIMEOUT;
-
-  schedule(&q->alarm);
-
-  rhizome_file_fetch_queue_count++;
-  if (debug & DEBUG_RHIZOME_RX)
-    DEBUGF("Queued file for fetching into %s (%d in queue)",
-           filename, rhizome_file_fetch_queue_count);
-  return 0;
-}
testdefs.sh (74 changed lines)

@@ -209,6 +209,23 @@ foreach_instance() {
    return $ret
 }
 
+# Composition function:
+# - invoke a command once in every instance that contains a server pidfile
+# - takes the same options as foreach_instance()
+foreach_instance_with_pidfile() {
+   push_instance
+   local -a instances=()
+   if pushd "${servald_instances_dir?:}" >/dev/null; then
+      for name in *; do
+         set_instance "+$name"
+         get_servald_server_pidfile && instances+=("+$name")
+      done
+      popd >/dev/null
+   fi
+   pop_instance
+   foreach_instance "${instances[@]}" "$@"
+}
+
 # Utility function for setting up servald JNI fixtures:
 # - check that libservald.so is present
 # - set LD_LIBRARY_PATH so that libservald.so can be found
@@ -307,13 +324,6 @@ stop_servald_server() {
         assert --message="servald process still running" [ "$apid" -ne "$servald_pid" ]
      done
    fi
-   # Append the server log file to the test log.
-   [ -s "$instance_servald_log" ] && tfw_cat "$instance_servald_log"
-   # Append a core dump backtrace to the test log.
-   if [ -s "$instance_dir/core" ]; then
-      tfw_core_backtrace "$servald" "$instance_dir/core"
-      rm -f "$instance_dir/core"
-   fi
    # Check there is at least one fewer servald processes running.
    for bpid in ${before_pids[*]}; do
      local isgone=true
@@ -330,6 +340,18 @@ stop_servald_server() {
    pop_instance
 }
 
+# Utility function:
+# - cat a servald server log file and core dump information into the test log
+report_servald_server() {
+   # Append the server log file to the test log.
+   [ -s "$instance_servald_log" ] && tfw_cat "$instance_servald_log"
+   # Append a core dump backtrace to the test log.
+   if [ -s "$instance_dir/core" ]; then
+      tfw_core_backtrace "$servald" "$instance_dir/core"
+      rm -f "$instance_dir/core"
+   fi
+}
+
 # Utility function:
 # - test whether the pidfile for a given server instance exists and is valid
 # - if it exists and is valid, set named variable to PID (and second named
@@ -372,18 +394,36 @@ assert_servald_server_pidfile() {
    assert get_servald_server_pidfile "$@"
 }
 
+# Assertion function:
+# - assert that the given instance's server has the given status ('running' or 'stopped')
+assert_servald_server_status() {
+   push_instance
+   set_instance_fromarg "$1" && shift
+   [ $# -eq 1 ] || error "invalid arguments"
+   executeOk_servald status
+   local status
+   extract_stdout_keyvalue status status '.*'
+   assert --message="instance +$instance_name servald server status is '$1'" [ "$status" = "$1" ]
+   pop_instance
+}
+
+# Assertion function:
+# - asserts that all servald instances with a pidfile have a server in a given
+#   state
+assert_status_all_servald_servers() {
+   foreach_instance_with_pidfile assert_servald_server_status "$@"
+}
+
 # Utility function for tearing down servald fixtures:
 # - stop all servald server process instances in an orderly fashion
 stop_all_servald_servers() {
-   push_instance
-   if pushd "${servald_instances_dir?:}" >/dev/null; then
-      for name in *; do
-         set_instance "+$name"
-         get_servald_server_pidfile && stop_servald_server
-      done
-      popd >/dev/null
-   fi
-   pop_instance
+   foreach_instance_with_pidfile stop_servald_server
+}
+
+# Utility function for tearing down servald fixtures:
+# - log a report of the execution of all servald server process instances
+report_all_servald_servers() {
+   foreach_instance +{A..Z} report_servald_server
 }
 
 # Utility function for tearing down servald fixtures:
@@ -458,7 +498,7 @@ get_servald_pids() {
 assert_no_servald_processes() {
    local pids
    get_servald_pids pids
-   assert --message="$servald_basename process(es) running: $pids" [ -z "$pids" ]
+   assert --message="no $servald_basename process(es) running" [ -z "$pids" ]
    return 0
 }
 
@@ -45,25 +45,31 @@ assert_manifest_complete() {
    fi
 }
 
+# Assertion function:
+# - assert that the output of a "servald rhizome list" command exactly describes the given files
 assert_rhizome_list() {
-   assertStdoutLineCount --stderr '==' $(($# + 2))
    assertStdoutIs --stderr --line=1 -e '12\n'
    assertStdoutIs --stderr --line=2 -e 'service:id:version:date:.inserttime:.author:.fromhere:filesize:filehash:sender:recipient:name\n'
    local filename
-   local re__author="\($rexp_sid\)\{0,1\}"
-   local re__fromhere
+   local exactly=true
    local re__inserttime="$rexp_date"
+   local re__fromhere='[01]'
+   local re__author="\($rexp_sid\)\{0,1\}"
+   local files=0
    for filename; do
-      re__fromhere=1
      case "$filename" in
-      *@*) re__author="${filename##*@}"; filename="${filename%@*}";;
+      --fromhere=*) re__fromhere="${filename#*=}";;
+      --author=*) re__author="${filename#*=}";;
+      --and-others) exactly=false;;
+      --*) error "unsupported option: $filename";;
+      *)
+         unpack_manifest_for_grep "$filename"
+         assertStdoutGrep --stderr --matches=1 "^$re_service:$re_manifestid:$re_version:$re_date:$re__inserttime:$re__author:$re__fromhere:$re_filesize:$re_filehash:$re_sender:$re_recipient:$re_name\$"
+         let files+=1
+         ;;
      esac
-      case "$filename" in
-      *!) re__fromhere=0; filename="${filename%!}";;
-      esac
-      unpack_manifest_for_grep "$filename"
-      assertStdoutGrep --stderr --matches=1 "^$re_service:$re_manifestid:$re_version:$re_date:$re__inserttime:$re__author:$re__fromhere:$re_filesize:$re_filehash:$re_sender:$re_recipient:$re_name\$"
    done
+   $exactly && assertStdoutLineCount --stderr '==' $(($files + 2))
 }
 
 assert_stdout_add_file() {
@@ -244,3 +250,96 @@ get_rhizome_server_port() {
    fi
    return 0
 }
+
+# Predicate function:
+# - return true if the file bundles identified by args BID[:VERSION] has been
+#   received by all the given instances args +I
+# - does this by examining the server log files of the respective instances
+#   for tell-tale INFO messages
+bundle_received_by() {
+   local -a rexps
+   local restart=true
+   local arg bid version rexp
+   for arg; do
+      case "$arg" in
+      +([0-9A-F])?(:+([0-9])))
+         $restart && rexps=()
+         restart=false
+         bid="${arg%%:*}"
+         matches_rexp "$rexp_manifestid" "$bid" || error "invalid bundle ID: $bid"
+         if [ "$bid" = "$arg" ]; then
+            rexps+=("RHIZOME ADD MANIFEST service=file bid=$bid")
+         else
+            version="${arg#*:}"
+            rexps+=("RHIZOME ADD MANIFEST service=file bid=$bid version=$version")
+         fi
+         ;;
+      +[A-Z])
+         local logvar="LOG${arg#+}"
+         for rexp in "${rexps[@]}"; do
+            grep "$rexp" "${!logvar}" || return 1
+         done
+         restart=true
+         ;;
+      --stderr)
+         for rexp in "${rexps[@]}"; do
+            replayStderr | grep "$rexp" || return 1
+         done
+         restart=true
+         ;;
+      *)
+         error "invalid argument: $arg"
+         return 1
+         ;;
+      esac
+   done
+   return 0
+}
+
+extract_manifest_vars() {
+   local manifest="${1?}"
+   extract_manifest_id BID "$manifest"
+   extract_manifest_version VERSION "$manifest"
+   extract_manifest_filesize FILESIZE "$manifest"
+   FILEHASH=
+   if [ "$FILESIZE" != '0' ]; then
+      extract_manifest_filehash FILEHASH "$manifest"
+   fi
+}
+
+rhizome_add_file() {
+   local name="$1"
+   [ -e "$name" ] || echo "File $name" >"$name"
+   local sidvar="SID$instance_name"
+   executeOk_servald rhizome add file "${!sidvar}" '' "$name" "$name.manifest"
+   executeOk_servald rhizome list ''
+   assert_rhizome_list --fromhere=1 --author="${!sidvar}" "$name" --and-others
+   extract_manifest_vars "$name.manifest"
+}
+
+rhizome_update_file() {
+   local orig_name="$1"
+   local new_name="$2"
+   [ -e "$new_name" ] || echo 'File $new_name' >"$new_name"
+   local sidvar="SID$instance_name"
+   [ "$new_name" != "$orig_name" ] && cp "$orig_name.manifest" "$new_name.manifest"
+   $SED -i -e '/^date=/d;/^filehash=/d;/^filesize=/d;/^version=/d;/^name=/d' "$new_name.manifest"
+   executeOk_servald rhizome add file "${!sidvar}" '' "$new_name" "$new_name.manifest"
+   executeOk_servald rhizome list ''
+   assert_rhizome_list --fromhere=1 "$new_name"
+   extract_manifest_vars "$new_name.manifest"
+}
+
+assert_rhizome_received() {
+   [ $# -ne 0 ] || error "missing arguments"
+   local name
+   local _hash
+   for name; do
+      if [ -s "$name" ]; then
+         extract_manifest_filehash _hash "$name.manifest"
+         executeOk_servald rhizome extract file "$_hash" extracted
+         assert cmp "$name" extracted
+      fi
+   done
+}
@@ -230,7 +230,7 @@ runTests() {
       local start_time=$(_tfw_timestamp)
       local finish_time=unknown
       (
-         trap '_tfw_status=$?; _tfw_teardown; exit $_tfw_status' EXIT SIGHUP SIGINT SIGTERM
+         trap '_tfw_finalise; _tfw_teardown; _tfw_exit' EXIT SIGHUP SIGINT SIGTERM
         _tfw_result=ERROR
         mkdir $_tfw_tmp || exit 255
         _tfw_setup
@@ -239,12 +239,8 @@ runTests() {
         tfw_log "# CALL test_$_tfw_test_name()"
         $_tfw_trace && set -x
         test_$_tfw_test_name
+         set +x
         _tfw_result=PASS
-         case $_tfw_result in
-         PASS) exit 0;;
-         FAIL) exit 1;;
-         ERROR) exit 254;;
-         esac
         exit 255
       )
       local stat=$?
@@ -419,6 +415,10 @@ setup() {
   :
 }
 
+finally() {
+   :
+}
+
 teardown() {
   :
 }
@@ -478,6 +478,12 @@ escape_grep_extended() {
   echo "$re"
 }
 
+# Return true if all the arguments arg2... match the given grep(1) regular
+# expression arg1.
+matches_rexp() {
+   _tfw_matches_rexp "$@"
+}
+
 # Executes its arguments as a command:
 # - captures the standard output and error in temporary files for later
 #   examination
@@ -792,18 +798,38 @@ _tfw_setup() {
   tfw_log '# END SETUP'
 }
 
+_tfw_finalise() {
+   _tfw_phase=finalise
+   tfw_log '# FINALISE'
+   case `type -t finally_$_tfw_test_name` in
+   function)
+      tfw_log "# CALL finally_$_tfw_test_name()"
+      $_tfw_trace && set -x
+      finally_$_tfw_test_name
+      set +x
+      ;;
+   *)
+      tfw_log "# CALL finally($_tfw_test_name)"
+      $_tfw_trace && set -x
+      finally $_tfw_test_name
+      set +x
+      ;;
+   esac
+   tfw_log '# END FINALLY'
+}
+
 _tfw_teardown() {
   _tfw_phase=teardown
   tfw_log '# TEARDOWN'
   case `type -t teardown_$_tfw_test_name` in
   function)
-      tfw_log "# call teardown_$_tfw_test_name()"
+      tfw_log "# CALL teardown_$_tfw_test_name()"
      $_tfw_trace && set -x
      teardown_$_tfw_test_name
      set +x
      ;;
   *)
-      tfw_log "# call teardown($_tfw_test_name)"
+      tfw_log "# CALL teardown($_tfw_test_name)"
      $_tfw_trace && set -x
      teardown $_tfw_test_name
      set +x
@@ -812,6 +838,15 @@ _tfw_teardown() {
   tfw_log '# END TEARDOWN'
 }
 
+_tfw_exit() {
+   case $_tfw_result in
+   PASS) exit 0;;
+   FAIL) exit 1;;
+   ERROR) exit 254;;
+   esac
+   exit 255
+}
+
 # Executes $_tfw_executable with the given arguments.
 _tfw_execute() {
   executed=$(shellarg "${_tfw_executable##*/}" "$@")
@@ -886,6 +921,7 @@ _tfw_assert() {
 declare -a _tfw_opt_dump_on_fail
 
 _tfw_dump_on_fail() {
+   local arg
   for arg; do
      local _found=false
      local _f
@@ -959,6 +995,7 @@ _tfw_getopts() {
 _tfw_matches_rexp() {
   local rexp="$1"
   shift
+   local arg
  for arg; do
     if ! echo "$arg" | $GREP -q -e "$rexp"; then
        return 1
@@ -1260,7 +1297,7 @@ _tfw_find_tests() {
 _tfw_failmsg() {
   # A failure during setup or teardown is treated as an error.
   case $_tfw_phase in
-   testcase)
+   testcase|finalise)
      if ! $_tfw_opt_error_on_fail; then
         tfw_log "FAIL: $*"
         return 0;
@@ -1289,13 +1326,22 @@ _tfw_failexit() {
   # When exiting a test case due to a failure, log any diagnostic output that
   # has been requested.
   tfw_cat "${_tfw_opt_dump_on_fail[@]}"
-   # A failure during setup or teardown is treated as an error.
+   # A failure during setup or teardown is treated as an error. A failure
+   # during finalise does not terminate execution.
   case $_tfw_phase in
   testcase)
      if ! $_tfw_opt_error_on_fail; then
         exit 1
      fi
      ;;
+   finalise)
+      if ! $_tfw_opt_error_on_fail; then
+         case $_tfw_result in
+         PASS) _tfw_result=FAIL; _tfw_status=1;;
+         esac
+         return 1
+      fi
+      ;;
   esac
   _tfw_errorexit
 }
@@ -1321,10 +1367,10 @@ _tfw_error() {
 }
 
 _tfw_errorexit() {
-   # Do not exit process during teardown
+   # Do not exit process during finalise or teardown
   _tfw_result=ERROR
   case $_tfw_phase in
-   teardown) [ $_tfw_status -lt 254 ] && _tfw_status=254;;
+   finalise|teardown) [ $_tfw_status -lt 254 ] && _tfw_status=254;;
   *) exit 254;;
   esac
   return 254
|
@ -45,9 +45,9 @@ setup() {
 }

 teardown() {
-   stop_all_servald_servers
    kill_all_servald_processes
    assert_no_servald_processes
+   report_all_servald_servers
 }

 is_published() {
@ -75,7 +75,7 @@ test_publish() {
    assertStdoutLineCount '==' 2
    assertStdoutGrep --matches=1 "^sid://$SIDC/local/$DIDC:$DIDC:$NAMEC\$"
    assertStdoutGrep --matches=1 "^sid://$SIDD/local/$DIDD:$DIDD:$NAMED\$"
-   return
+   assert_status_all_servald_servers running
 }

 start_routing_instance() {
@ -130,5 +130,7 @@ test_routing() {
    assertStdoutGrep --matches=1 "^sid://$SIDB/local/$DIDB:$DIDB:$NAMEB\$"
    executeOk_servald mdp ping $SIDB 3
    tfw_cat --stdout --stderr
+   assert_status_all_servald_servers running
 }

 runTests "$@"
@ -29,10 +29,14 @@ setup() {
    assert_all_instance_peers_complete +A
 }

-teardown() {
+finally() {
    stop_all_servald_servers
+}
+
+teardown() {
    kill_all_servald_processes
    assert_no_servald_processes
+   report_all_servald_servers
 }

 # Called by start_servald_instances immediately before starting the server
@ -47,10 +47,14 @@ setup() {
    executeOk_servald config set debug.all Yes
 }

-teardown() {
+finally() {
    stop_all_servald_servers
+}
+
+teardown() {
    kill_all_servald_processes
    assert_no_servald_processes
+   report_all_servald_servers
 }

 set_server_vars() {
@ -141,7 +141,7 @@ test_AddEmpty() {
    assertGrep .manifest '^name=$'
    assertGrep .manifest '^filesize=0$'
    executeOk_servald rhizome list ''
-   assert_rhizome_list @$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 ''
 }

 doc_AddThenList="List contains one file after one add"
@ -157,11 +157,11 @@ test_AddThenList() {
    # Add first file
    executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
    # Add second file
    executeOk_servald rhizome add file $SIDB1 '' file2 file2.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
 }

 doc_ExtractManifestAfterAdd="Extract manifest after one add"
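
From here on, the rhizome test hunks repeat one mechanical substitution: assert_rhizome_list no longer encodes provenance in the file argument itself (file1@$SIDB1 meaning "from here, authored by $SIDB1", file1! meaning "not from here") but takes explicit --fromhere= and --author= options that apply to the file arguments following them. Both notations below are taken from this diff and paired up only as an illustration:

# old suffix notation                     # new option notation
assert_rhizome_list file1@$SIDB1          assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
assert_rhizome_list fileA!                assert_rhizome_list --fromhere=0 fileA
assert_rhizome_list file1 file2!          assert_rhizome_list --fromhere=1 file1 --fromhere=0 file2
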
@ -171,7 +171,7 @@ setup_ExtractManifestAfterAdd() {
    echo "A test file" >file1
    executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
    extract_manifest_id manifestid file1.manifest
    extract_manifest_version version file1.manifest
    extract_manifest_filehash filehash file1.manifest
@ -198,7 +198,7 @@ setup_ExtractManifestAfterAddNoAuthor() {
    echo "A test file" >file1
    executeOk_servald rhizome add file '' '' file1 file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1!
+   assert_rhizome_list --fromhere=0 file1
    extract_manifest_id manifestid file1.manifest
    extract_manifest_version version file1.manifest
    extract_manifest_filehash filehash file1.manifest
@ -254,7 +254,7 @@ setup_ExtractFileAfterAdd() {
    executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
    tfw_cat --stderr
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1
    extract_manifest_filehash filehash file1.manifest
 }
 test_ExtractFileAfterAdd() {
@ -312,7 +312,7 @@ setup_AddDuplicate() {
    extract_stdout_secret file2_secret
    # Make sure they are both in the list.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
 }
 test_AddDuplicate() {
    # Add first file again - nothing should change in its manifests, and it
@ -322,7 +322,7 @@ test_AddDuplicate() {
    assert [ -s file1.manifestA ]
    assert_stdout_add_file file1
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
    strip_signatures file1.manifest file1.manifestA
    assert diff file1.manifest file1.manifestA
    # Repeat for second file.
@ -330,7 +330,7 @@ test_AddDuplicate() {
    assert [ -s file2.manifestA ]
    assert_stdout_add_file file2
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
    strip_signatures file2.manifest file2.manifestA
    assert diff file2.manifest file2.manifestA
 }
@ -348,7 +348,7 @@ test_AddMismatched() {
    assert cmp file1.manifest file1_2.manifest
    # And rhizome store should be unchanged.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
 }

 doc_AddUpdateSameVersion="Add new payload to existing manifest with same version fails"
@ -371,7 +371,7 @@ test_AddUpdateSameVersion() {
    assert cmp file1_2.manifest file1_2.manifest.orig
    # And rhizome store should be unchanged.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
 }

 doc_AddUpdateNewVersion="Add new payload to existing manifest with new version"
@ -390,7 +390,7 @@ test_AddUpdateNewVersion() {
    assert_manifest_newer file1.manifest file1_2.manifest
    # Rhizome store contents reflect new payload.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
 }

 doc_AddUpdateDiscoverAuthor="Add new payload to manifest with author discovery"
@ -403,7 +403,7 @@ test_AddUpdateDiscoverAuthor() {
    tfw_cat --stderr
    # Rhizome store contents have new payload.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
 }

 doc_AddUpdateNoAuthor="Cannot add new payload to authorless manifest"
@ -418,7 +418,7 @@ test_AddUpdateNoAuthor() {
    assertExitStatus '!=' 0
    # Rhizome store contents have old payload.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1 file2
 }

 doc_AddUpdateNoAuthorWithSecret="Add new payload to authorless manifest with bundle secret"
@ -431,7 +431,7 @@ test_AddUpdateNoAuthorWithSecret() {
    tfw_cat --stderr
    # Rhizome store contents have new payload.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1_2@$SIDB1 file2@$SIDB1
+   assert_rhizome_list --fromhere=1 --author=$SIDB1 file1_2 file2
 }

 doc_AddUpdateAutoVersion="Add new payload to existing manifest with automatic version"
@ -447,7 +447,7 @@ test_AddUpdateAutoVersion() {
    assert_manifest_newer file1.manifest file1_2.manifest
    # Rhizome store contents reflect new payload.
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1_2 file2
+   assert_rhizome_list --fromhere=1 file1_2 file2
 }

 doc_AddUnsupportedService="Add with unsupported service fails"
@ -474,7 +474,7 @@ test_MeshMSAddCreate() {
    assert_stdout_add_file file1
    assert_manifest_complete file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    extract_manifest_filehash filehash file1.manifest
    executeOk_servald rhizome extract file $filehash file1x
    assert diff file1 file1x
@ -492,7 +492,7 @@ test_MeshMSAddGrow() {
    assert_stdout_add_file file1
    assert_manifest_complete file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    extract_manifest_id id file1.manifest
    extract_manifest_filehash filehash file1.manifest
    extract_manifest_BK bk file1.manifest
@ -503,7 +503,7 @@ test_MeshMSAddGrow() {
    echo "Message$m" >>file1
    executeOk_servald rhizome add file $SIDB1 '' file1 file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    extract_manifest_id idx file1.manifest
    extract_manifest_filehash filehashx file1.manifest
    extract_manifest_BK bkx file1.manifest
@ -557,7 +557,7 @@ test_MeshMSAddMissingAuthor() {
    assert_stdout_add_file file1
    assert_manifest_complete file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
 }

 doc_MeshMSListFilter="List MeshMS manifests by filter"
@ -585,17 +585,17 @@ setup_MeshMSListFilter() {
    assert_stdout_add_file file4
    assert_manifest_complete file4.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1 file2 file3 file4
+   assert_rhizome_list --fromhere=1 file1 file2 file3 file4
 }
 test_MeshMSListFilter() {
    executeOk_servald rhizome list '' file
    assert_rhizome_list
    executeOk_servald rhizome list '' MeshMS1
-   assert_rhizome_list file1 file2 file3 file4
+   assert_rhizome_list --fromhere=1 file1 file2 file3 file4
    executeOk_servald rhizome list '' '' $SIDB1
-   assert_rhizome_list file1 file2 file3
+   assert_rhizome_list --fromhere=1 file1 file2 file3
    executeOk_servald rhizome list '' '' $SIDB2
-   assert_rhizome_list file4
+   assert_rhizome_list --fromhere=1 file4
    executeOk_servald rhizome list '' '' $SIDB3
    assert_rhizome_list
    executeOk_servald rhizome list '' '' $SIDB4
@ -603,19 +603,19 @@ test_MeshMSListFilter() {
    executeOk_servald rhizome list '' '' '' $SIDB1
    assert_rhizome_list
    executeOk_servald rhizome list '' '' '' $SIDB2
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    executeOk_servald rhizome list '' '' '' $SIDB3
-   assert_rhizome_list file2 file4
+   assert_rhizome_list --fromhere=1 file2 file4
    executeOk_servald rhizome list '' file '' $SIDB3
    assert_rhizome_list
    executeOk_servald rhizome list '' '' '' $SIDB4
-   assert_rhizome_list file3
+   assert_rhizome_list --fromhere=1 file3
    executeOk_servald rhizome list '' '' $SIDB1 $SIDB4
-   assert_rhizome_list file3
+   assert_rhizome_list --fromhere=1 file3
    executeOk_servald rhizome list '' '' $SIDB2 $SIDB4
    assert_rhizome_list
    executeOk_servald rhizome list '' '' $SIDB2 $SIDB3
-   assert_rhizome_list file4
+   assert_rhizome_list --fromhere=1 file4
 }

 doc_ImportForeignBundle="Can import a bundle created by another instance"
@ -632,7 +632,7 @@ test_ImportForeignBundle() {
    executeOk_servald rhizome import bundle fileA fileA.manifest
    assert_stdout_import_bundle fileA
    executeOk_servald rhizome list ''
-   assert_rhizome_list fileA!
+   assert_rhizome_list --fromhere=0 fileA
 }

 doc_ImportOwnBundle="Can import a bundle created by same instance"
@ -654,7 +654,7 @@ test_ImportOwnBundle() {
    assert_stdout_import_bundle fileB
    # Bundle author and sender are unknown, so appears not to be from here
    executeOk_servald rhizome list ''
-   assert_rhizome_list fileB!
+   assert_rhizome_list --fromhere=0 fileB
    # Extracting the manifest discovers that it is ours.
    executeOk_servald rhizome extract manifest $manifestid fileBx.manifest
    tfw_cat --stderr
@ -671,7 +671,7 @@ test_ImportOwnBundle() {
    assertStdoutGrep --matches=1 "^\.readonly:0\$"
    # Now bundle author is known, so appears to be from here
    executeOk_servald rhizome list ''
-   assert_rhizome_list fileB@$SIDB2
+   assert_rhizome_list --fromhere=1 --author=$SIDB2 fileB
 }

 runTests "$@"
@ -2,7 +2,7 @@

 # Tests for Serval rhizome protocol.
 #
-# Copyright 2012 Paul Gardner-Stephen
+# Copyright 2012 Serval Project Inc.
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
@ -24,15 +24,14 @@ source "${0%/*}/../testdefs_rhizome.sh"

 shopt -s extglob

-teardown() {
+finally() {
    stop_all_servald_servers
-   kill_all_servald_processes
-   assert_no_servald_processes
 }

-setup_rhizome() {
-   foreach_instance +A +B create_single_identity
-   set_instance +B
+teardown() {
+   kill_all_servald_processes
+   assert_no_servald_processes
+   report_all_servald_servers
 }

 # Called by start_servald_instances for each instance.
@ -48,35 +47,6 @@ configure_servald_server() {
    executeOk_servald config set rhizome.fetch_interval_ms 100
 }

-# Predicate function:
-# - return true if the file bundle identified by arg1=BID and arg2=VERSION has been
-#   received by all the given instances
-# - does this by examining the server log files of the respective instances
-#   for tell-tale INFO messages
-bundle_received_by() {
-   local BID="$1"
-   local VERSION="$2"
-   shift 2
-   local rexp="RHIZOME ADD MANIFEST service=file bid=$BID version=$VERSION"
-   local I
-   for I; do
-      case "$I" in
-      +*)
-         local logvar="LOG${I#+}"
-         grep "$rexp" "${!logvar}" || return 1
-         ;;
-      --stderr)
-         replayStderr | grep "$rexp" || return 1
-         ;;
-      *)
-         error "invalid instance argument: $I"
-         return 1
-         ;;
-      esac
-   done
-   return 0
-}
-
 setup_curl_7() {
    case "$(curl --version | tr '\n' ' ')" in
    curl\ @(7|8|9|[1-9][0-1]).*\ Protocols:*\ http\ *) ;;
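
The predicate deleted above took the bundle ID and version as two separate leading arguments, so it could only check one bundle per call. The updated call sites in this commit (and the new stress test at the end) instead pass BID:VERSION tokens interleaved with the instances that must have logged the bundle, e.g. `assert bundle_received_by $BID1:$VERSION1 --stderr $BID2:$VERSION2 +A`, which implies the helper has been reworked elsewhere (presumably in testdefs_rhizome.sh, which is not part of this excerpt). The following is only a sketch of a predicate with that calling convention, reusing the log-variable, replayStderr and error helpers from the deleted version; it is not the implementation added by the commit:

bundle_received_by() {
   # Illustrative sketch, not the real testdefs_rhizome.sh implementation.
   local rexp=
   local arg
   for arg; do
      case "$arg" in
      *:*)
         # a BID:VERSION token selects which bundle the following instances are checked for
         rexp="RHIZOME ADD MANIFEST service=file bid=${arg%%:*} version=${arg#*:}"
         ;;
      +*)
         # an instance label: search that instance's server log
         local logvar="LOG${arg#+}"
         grep "$rexp" "${!logvar}" || return 1
         ;;
      --stderr)
         replayStderr | grep "$rexp" || return 1
         ;;
      *)
         error "invalid argument: $arg"
         return 1
         ;;
      esac
   done
   return 0
}
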
@ -91,76 +61,33 @@ setup_curl_7() {

 setup_common() {
    setup_servald
-   setup_rhizome
    assert_no_servald_processes
+   foreach_instance +A +B create_single_identity
+   set_instance +B
-}
-
-extract_manifest_vars() {
-   local manifest="${1?}"
-   extract_manifest_id BID "$manifest"
-   extract_manifest_version VERSION "$manifest"
-   extract_manifest_filesize FILESIZE "$manifest"
-   FILEHASH=
-   if [ "$FILESIZE" != '0' ]; then
-      extract_manifest_filehash FILEHASH "$manifest"
-   fi
-}
-
-add_file() {
-   local name="$1"
-   [ -e "$name" ] || echo "File $name" >"$name"
-   local sidvar="SID$instance_name"
-   executeOk_servald rhizome add file "${!sidvar}" '' "$name" "$name.manifest"
-   executeOk_servald rhizome list ''
-   assert_rhizome_list "$name"
-   extract_manifest_vars "$name.manifest"
-}
-
-update_file() {
-   local orig_name="$1"
-   local new_name="$2"
-   [ -e "$new_name" ] || echo 'File $new_name' >"$new_name"
-   local sidvar="SID$instance_name"
-   [ "$new_name" != "$orig_name" ] && cp "$orig_name.manifest" "$new_name.manifest"
-   $SED -i -e '/^date=/d;/^filehash=/d;/^filesize=/d;/^version=/d;/^name=/d' "$new_name.manifest"
-   executeOk_servald rhizome add file "${!sidvar}" '' "$new_name" "$new_name.manifest"
-   executeOk_servald rhizome list ''
-   assert_rhizome_list "$new_name"
-   extract_manifest_vars "$new_name.manifest"
-}
-
-assert_received() {
-   local name="${1?}"
-   local _hash
-   if [ -s "$name" ]; then
-      extract_manifest_filehash _hash "$name.manifest"
-      executeOk_servald rhizome extract file "$_hash" extracted
-      assert cmp "$name" extracted
-   fi
 }

 doc_FileTransfer="New bundle and update transfer to one node"
 setup_FileTransfer() {
    setup_common
    set_instance +A
-   add_file file1
+   rhizome_add_file file1
    start_servald_instances +A +B
    foreach_instance +A assert_peers_are_instances +B
    foreach_instance +B assert_peers_are_instances +A
 }
 test_FileTransfer() {
-   wait_until bundle_received_by $BID $VERSION +B
+   wait_until bundle_received_by $BID:$VERSION +B
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1!
+   assert_rhizome_list --fromhere=0 file1
-   assert_received file1
+   assert_rhizome_received file1
    set_instance +A
-   update_file file1 file2
+   rhizome_update_file file1 file2
    set_instance +B
-   wait_until bundle_received_by $BID $VERSION +B
+   wait_until bundle_received_by $BID:$VERSION +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file2!
+   assert_rhizome_list --fromhere=0 file2
-   assert_received file2
+   assert_rhizome_received file2
 }

 doc_FileTransferBig="Big new bundle transfers to one node"
@ -170,24 +97,24 @@ setup_FileTransferBig() {
    dd if=/dev/urandom of=file1 bs=1k count=1k 2>&1
    echo x >>file1
    ls -l file1
-   add_file file1
+   rhizome_add_file file1
    start_servald_instances +A +B
    foreach_instance +A assert_peers_are_instances +B
    foreach_instance +B assert_peers_are_instances +A
 }
 test_FileTransferBig() {
-   wait_until bundle_received_by $BID $VERSION +B
+   wait_until bundle_received_by $BID:$VERSION +B
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1!
+   assert_rhizome_list --fromhere=0 file1
-   assert_received file1
+   assert_rhizome_received file1
 }

 doc_FileTransferMulti="New bundle transfers to four nodes"
 setup_FileTransferMulti() {
    setup_common
    set_instance +A
-   add_file file1
+   rhizome_add_file file1
    start_servald_instances +A +B +C +D +E
    foreach_instance +A assert_peers_are_instances +B +C +D +E
    foreach_instance +B assert_peers_are_instances +A +C +D +E
@ -195,13 +122,12 @@ setup_FileTransferMulti() {
    foreach_instance +D assert_peers_are_instances +A +B +C +E
 }
 test_FileTransferMulti() {
-   wait_until bundle_received_by $BID $VERSION +B +C +D +E
+   wait_until bundle_received_by $BID:$VERSION +B +C +D +E
-   local I
-   for I in +B +C +D +E; do
-      set_instance $I
+   for i in B C D E; do
+      set_instance +$i
       executeOk_servald rhizome list ''
-      assert_rhizome_list file1!
+      assert_rhizome_list --fromhere=0 file1
-      assert_received file1
+      assert_rhizome_received file1
    done
 }

@ -209,21 +135,21 @@ doc_FileTransferDelete="Payload deletion transfers to one node"
 setup_FileTransferDelete() {
    setup_common
    set_instance +A
-   add_file file1
+   rhizome_add_file file1
    start_servald_instances +A +B
    foreach_instance +A assert_peers_are_instances +B
    foreach_instance +B assert_peers_are_instances +A
-   wait_until bundle_received_by $BID $VERSION +B
+   wait_until bundle_received_by $BID:$VERSION +B
    set_instance +A
    >file1_2
-   update_file file1 file1_2
+   rhizome_update_file file1 file1_2
 }
 test_FileTransferDelete() {
-   wait_until bundle_received_by $BID $VERSION +B
+   wait_until bundle_received_by $BID:$VERSION +B
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1_2!
+   assert_rhizome_list --fromhere=0 file1_2
-   assert_received file1_2
+   assert_rhizome_received file1_2
 }

 doc_HttpImport="Import bundle using HTTP POST multi-part form."
@ -234,19 +160,19 @@ setup_HttpImport() {
 When we were looking at implementing secure calls for OpenBTS it was suggested
 that we configure Asterisk to use SIPS/ZRTP. This would have been relatively
 easy to setup, however there are a few problems.
-
+.
 Number one is that when Asterisk checks the certificates it will either
 validate the certificate (checking the chain of trust and so on) and then
 check that the common name attribute on the certificate matches the hostname
 of the peer, or it will do none of these checks. This code is in main/tcptls.c
 line 206 (in version 1.8.14.1).
-
+.
 This is undesirable in a setup where there is limited or no infrastructure as
 there is not likely to be a DNS server setup, or even rigid IP assignments
 that would allow a static hosts file based setup. This situation would force
 the administrator to disable the checks completely which would allow a trivial
 man in the middle attack.
-
+.
 It would be possible to modify Asterisk to have a third way where it validates
 the certificate and checks the chain of trust but does not look at the common
 name. We decided against this approach as the VOMP channel driver was written
@ -272,8 +198,8 @@ test_HttpImport() {
    "$addr_localhost:$PORTA/rhizome/import"
    tfw_cat http.headers http.output
    executeOk_servald rhizome list ''
-   assert_rhizome_list README.WHYNOTSIPS!
+   assert_rhizome_list --fromhere=0 README.WHYNOTSIPS
-   assert_received README.WHYNOTSIPS
+   assert_rhizome_received README.WHYNOTSIPS
 }

 doc_HttpAddLocal="Add file locally using HTTP, returns manifest"
@ -292,15 +218,15 @@ test_HttpAddLocal() {
    executeOk curl --silent --form 'data=@file1' "http://$addr_localhost:$PORTA/rhizome/secretaddfile" --output file1.manifest
    assert_manifest_complete file1.manifest
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    extract_manifest_name name file1.manifest
    assert [ "$name" = file1 ]
-   assert_received file1
+   assert_rhizome_received file1
 }

 setup_sync() {
    set_instance +A
-   add_file file1
+   rhizome_add_file file1
    BID1=$BID
    VERSION1=$VERSION
    start_servald_instances dummy1 +A
@ -313,7 +239,7 @@ setup_sync() {
    executeOk_servald config set debug.rhizomerx on
    executeOk_servald config set rhizome.direct.peer.count "1"
    executeOk_servald config set rhizome.direct.peer.0 "http://${addr_localhost}:${PORTA}"
-   add_file file2
+   rhizome_add_file file2
    BID2=$BID
    VERSION2=$VERSION
 }
@ -327,14 +253,14 @@ test_DirectPush() {
    set_instance +B
    executeOk_servald rhizome direct push
    tfw_cat --stdout --stderr
-   assert bundle_received_by $BID2 $VERSION2 +A
+   assert bundle_received_by $BID2:$VERSION2 +A
    set_instance +A
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1 file2!
+   assert_rhizome_list --fromhere=1 file1 --fromhere=0 file2
-   assert_received file2
+   assert_rhizome_received file2
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file2
+   assert_rhizome_list --fromhere=1 file2
 }

 doc_DirectPull="One way pull bundle from unconnected node"
@ -346,14 +272,14 @@ test_DirectPull() {
    set_instance +B
    executeOk_servald rhizome direct pull
    tfw_cat --stdout --stderr
-   assert bundle_received_by $BID1 $VERSION1 --stderr
+   assert bundle_received_by $BID1:$VERSION1 --stderr
    set_instance +A
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1
+   assert_rhizome_list --fromhere=1 file1
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1! file2
+   assert_rhizome_list --fromhere=0 file1 --fromhere=1 file2
-   assert_received file1
+   assert_rhizome_received file1
 }

 doc_DirectSync="Two-way sync bundles between unconnected nodes"
@ -365,16 +291,15 @@ test_DirectSync() {
    set_instance +B
    executeOk_servald rhizome direct sync
    tfw_cat --stdout --stderr
-   assert bundle_received_by $BID1 $VERSION1 --stderr
-   assert bundle_received_by $BID2 $VERSION2 +A
+   assert bundle_received_by $BID1:$VERSION1 --stderr $BID2:$VERSION2 +A
    set_instance +A
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1 file2!
+   assert_rhizome_list --fromhere=1 file1 --fromhere=0 file2
-   assert_received file2
+   assert_rhizome_received file2
    set_instance +B
    executeOk_servald rhizome list ''
-   assert_rhizome_list file1! file2
+   assert_rhizome_list --fromhere=0 file1 --fromhere=1 file2
-   assert_received file1
+   assert_rhizome_received file1
 }

 runTests "$@"

tests/rhizomestress  (new executable file, 83 lines added)
@ -0,0 +1,83 @@
+#!/bin/bash
+
+# Stress tests for Serval rhizome protocol.
+#
+# Copyright 2012 Serval Project Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+source "${0%/*}/../testframework.sh"
+source "${0%/*}/../testdefs.sh"
+source "${0%/*}/../testdefs_rhizome.sh"
+
+shopt -s extglob
+
+finally() {
+   stop_all_servald_servers
+}
+
+teardown() {
+   kill_all_servald_processes
+   assert_no_servald_processes
+   report_all_servald_servers
+}
+
+# Called by start_servald_instances for each instance.
+configure_servald_server() {
+   executeOk_servald config set log.show_pid on
+   executeOk_servald config set log.show_time on
+   executeOk_servald config set debug.rhizome on
+   executeOk_servald config set debug.rhizometx on
+   executeOk_servald config set debug.rhizomerx on
+   executeOk_servald config set server.respawn_on_signal off
+   executeOk_servald config set mdp.wifi.tick_ms 500
+   executeOk_servald config set mdp.selfannounce.ticks_per_full_address 1
+}
+
+doc_FileTransferStress="Stress - five nodes each sharing 16 bundles"
+setup_FileTransferStress() {
+   setup_servald
+   assert_no_servald_processes
+   foreach_instance +A +B +C +D +E create_single_identity
+   for i in A B C D E
+   do
+      eval "bundles$i=()"
+      set_instance +$i
+      for n in 1 2 3 4 5 6 7 8 9 a b c d e f g
+      do
+         rhizome_add_file file-$i-$n
+         eval "bundles$i+=(\$BID:\$VERSION)"
+      done
+   done
+}
+test_FileTransferStress() {
+   start_servald_instances +A +B +C +D +E
+   wait_until --timeout=180 bundle_received_by \
+      ${bundlesA[*]} +B +C +D +E \
+      ${bundlesB[*]} +A +C +D +E \
+      ${bundlesC[*]} +A +B +D +E \
+      ${bundlesD[*]} +A +B +C +E \
+      ${bundlesE[*]} +A +B +C +D
+   stop_all_servald_servers
+   local i
+   for i in A B C D E; do
+      set_instance +$i
+      executeOk_servald rhizome list ''
+      assert_rhizome_list --fromhere=1 file-$i-? --fromhere=0 file-!($i)-?
+      assert_rhizome_received file-!($i)-?
+   done
+}
+
+runTests "$@"
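
The closing assertions of the stress test rely on bash extended globbing (enabled by `shopt -s extglob` near the top of the file): `file-$i-?` expands to the sixteen bundles added by the local instance, while `file-!($i)-?` expands to the bundles added by the other four instances, because `!(...)` matches anything except the parenthesised pattern. A quick illustration of the semantics, not part of the commit, assuming files file-A-1 through file-E-g exist in the working directory:

shopt -s extglob
echo file-A-?        # the sixteen bundles added by instance A: file-A-1 ... file-A-g
echo file-!(A)-?     # every other instance's bundles; file-A-* is excluded
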