Logic may be clearer this way

Eric Fischer 2016-04-11 14:51:04 -07:00
parent e846b11ce7
commit c6d2988485

@@ -1721,11 +1721,11 @@ int read_json(int argc, struct source **sourcelist, char *fname, const char *lay
 		ahead += n;
 		if (buf[n - 1] == '\n' && ahead > PARSE_MIN) {
-			int already_waited = 0;
-
-			// Don't let the streaming reader get too far ahead of the parsers.
-			// If the buffered input gets huge, block until the parsers are done.
-			while (ahead >= PARSE_MAX && is_parsing != 0) {
+			// If the buffered input gets huge, even if the parsers are still running,
+			// wait for the parser thread instead of continuing to stream input.
+			if (is_parsing == 0 || ahead >= PARSE_MAX) {
 				if (initial_offset != 0) {
 					if (pthread_join(parallel_parser, NULL) != 0) {
 						perror("pthread_join");
@@ -1733,19 +1733,6 @@ int read_json(int argc, struct source **sourcelist, char *fname, const char *lay
 					}
 				}
-				already_waited = 1;
-			}
-			if (is_parsing == 0) {
-				if (!already_waited) {
-					if (initial_offset != 0) {
-						if (pthread_join(parallel_parser, NULL) != 0) {
-							perror("pthread_join");
-							exit(EXIT_FAILURE);
-						}
-					}
-				}
 				fflush(readfp);
 				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, reading, reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y);
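
The shape of the new control flow is: once enough input is buffered, hand it to the parser whenever the previous parse has finished, or block on pthread_join when the buffer has grown past PARSE_MAX even though a parse is still running. The following is a minimal, self-contained sketch of that pattern; the PARSE_MIN/PARSE_MAX values, the parse_chunk() stand-in, and the fake input loop are hypothetical illustrations, not tippecanoe's actual reader or start_parsing() machinery.

/*
 * Sketch of the reader/parser handoff pattern in this commit.
 * parse_chunk(), the constants, and the input loop are stand-ins.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PARSE_MIN 16 /* don't bother parsing until this much is buffered */
#define PARSE_MAX 64 /* don't buffer more than this while a parse is running */

static atomic_int is_parsing = 0; /* cleared by the parser thread when it finishes */

struct parse_job {
	char *data;
	size_t len;
};

/* Stand-in for the real parser thread: consume the chunk and clear the flag. */
static void *parse_chunk(void *v) {
	struct parse_job *job = v;
	printf("parsed %zu bytes\n", job->len);
	free(job->data);
	free(job);
	is_parsing = 0;
	return NULL;
}

int main(void) {
	pthread_t parser;
	int started = 0; /* has a parser thread ever been created? */
	char buf[PARSE_MAX];
	size_t ahead = 0;

	for (int i = 0; i < 500; i++) { /* fake streaming input, one byte at a time */
		buf[ahead++] = 'x';

		if (ahead > PARSE_MIN) {
			/* Same shape as the new condition in the diff: hand off if the
			 * previous parse is done, or if the buffer has grown too large
			 * even though the parser is still running (the join then blocks
			 * instead of letting the reader stream further ahead). */
			if (is_parsing == 0 || ahead >= PARSE_MAX) {
				if (started) {
					if (pthread_join(parser, NULL) != 0) {
						perror("pthread_join");
						exit(EXIT_FAILURE);
					}
				}

				struct parse_job *job = malloc(sizeof *job);
				job->data = malloc(ahead);
				memcpy(job->data, buf, ahead);
				job->len = ahead;

				is_parsing = 1;
				if (pthread_create(&parser, NULL, parse_chunk, job) != 0) {
					perror("pthread_create");
					exit(EXIT_FAILURE);
				}
				started = 1;
				ahead = 0;
			}
		}
	}

	if (started && pthread_join(parser, NULL) != 0) {
		perror("pthread_join");
		exit(EXIT_FAILURE);
	}
	return 0;
}

Compared with the old while/already_waited arrangement, the single condition states the two reasons for joining the parser thread (it has finished, or the buffer is too big) in one place, with one join path instead of two.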