From bc5a7b251f97dd6c76a514500521394f7e051989 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 11:20:56 -0700 Subject: [PATCH 01/14] Check return values after allocating memory --- geojson.c | 2 +- jsonpull.c | 63 +++++++++++++++++++++++++++++++++++++++++------------- jsonpull.h | 10 ++++----- 3 files changed, 54 insertions(+), 21 deletions(-) diff --git a/geojson.c b/geojson.c index 4c4382f..baefbe9 100644 --- a/geojson.c +++ b/geojson.c @@ -800,7 +800,7 @@ struct jsonmap { long long end; }; -int json_map_read(struct json_pull *jp, char *buffer, int n) { +ssize_t json_map_read(struct json_pull *jp, char *buffer, size_t n) { struct jsonmap *jm = jp->source; if (jm->off + n >= jm->end) { diff --git a/jsonpull.c b/jsonpull.c index e56c48b..cfbe3cf 100644 --- a/jsonpull.c +++ b/jsonpull.c @@ -8,8 +8,12 @@ #define BUFFER 10000 -json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void *source) { +json_pull *json_begin(ssize_t (*read)(struct json_pull *, char *buffer, size_t n), void *source) { json_pull *j = malloc(sizeof(json_pull)); + if (j == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } j->error = NULL; j->line = 1; @@ -18,10 +22,15 @@ json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void j->read = read; j->source = source; - j->buffer = malloc(BUFFER); j->buffer_head = 0; j->buffer_tail = 0; + j->buffer = malloc(BUFFER); + if (j->buffer == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + return j; } @@ -51,7 +60,7 @@ static inline int next(json_pull *j) { } } -static int read_file(json_pull *j, char *buffer, int n) { +static ssize_t read_file(json_pull *j, char *buffer, size_t n) { return fread(buffer, 1, n, j->source); } @@ -59,7 +68,7 @@ json_pull *json_begin_file(FILE *f) { return json_begin(read_file, f); } -static int read_string(json_pull *j, char *buffer, int n) { +static ssize_t read_string(json_pull *j, char *buffer, size_t n) { char *cp = j->source; int out = 0; @@ -95,6 +104,10 @@ static inline int read_wrap(json_pull *j) { static json_object *fabricate_object(json_object *parent, json_type type) { json_object *o = malloc(sizeof(struct json_object)); + if (o == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } o->type = type; o->parent = parent; o->array = NULL; @@ -113,6 +126,10 @@ static json_object *add_object(json_pull *j, json_type type) { if (c->expect == JSON_ITEM) { if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { c->array = realloc(c->array, SIZE_FOR(c->length + 1) * sizeof(json_object *)); + if (c->array == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } } c->array[c->length++] = o; @@ -136,6 +153,10 @@ static json_object *add_object(json_pull *j, json_type type) { if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { c->keys = realloc(c->keys, SIZE_FOR(c->length + 1) * sizeof(json_object *)); c->values = realloc(c->values, SIZE_FOR(c->length + 1) * sizeof(json_object *)); + if (c->keys == NULL || c->values == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } } c->keys[c->length] = o; @@ -160,7 +181,7 @@ json_object *json_hash_get(json_object *o, const char *s) { return NULL; } - int i; + size_t i; for (i = 0; i < o->length; i++) { if (o->keys[i] != NULL && o->keys[i]->type == JSON_STRING) { if (strcmp(o->keys[i]->string, s) == 0) { @@ -174,13 +195,17 @@ json_object *json_hash_get(json_object *o, const char *s) { struct string { char *buf; - int n; - int nalloc; + size_t n; + size_t nalloc; }; static void string_init(struct string 
*s) { s->nalloc = 500; s->buf = malloc(s->nalloc); + if (s->buf == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } s->n = 0; s->buf[0] = '\0'; } @@ -189,6 +214,10 @@ static void string_append(struct string *s, char c) { if (s->n + 2 >= s->nalloc) { s->nalloc += 500; s->buf = realloc(s->buf, s->nalloc); + if (s->buf == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } } s->buf[s->n++] = c; @@ -196,11 +225,15 @@ static void string_append(struct string *s, char c) { } static void string_append_string(struct string *s, char *add) { - int len = strlen(add); + size_t len = strlen(add); if (s->n + len + 1 >= s->nalloc) { s->nalloc += 500 + len; s->buf = realloc(s->buf, s->nalloc); + if (s->buf == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } } for (; *add != '\0'; add++) { @@ -541,7 +574,7 @@ json_object *json_read_tree(json_pull *p) { } void json_free(json_object *o) { - int i; + size_t i; if (o == NULL) { return; @@ -551,7 +584,7 @@ void json_free(json_object *o) { if (o->type == JSON_ARRAY) { json_object **a = o->array; - int n = o->length; + size_t n = o->length; o->array = NULL; o->length = 0; @@ -564,7 +597,7 @@ void json_free(json_object *o) { } else if (o->type == JSON_HASH) { json_object **k = o->keys; json_object **v = o->values; - int n = o->length; + size_t n = o->length; o->keys = NULL; o->values = NULL; @@ -592,7 +625,7 @@ void json_disconnect(json_object *o) { if (o->parent != NULL) { if (o->parent->type == JSON_ARRAY) { - int i; + size_t i; for (i = 0; i < o->parent->length; i++) { if (o->parent->array[i] == o) { @@ -607,7 +640,7 @@ void json_disconnect(json_object *o) { } if (o->parent->type == JSON_HASH) { - int i; + size_t i; for (i = 0; i < o->parent->length; i++) { if (o->parent->keys[i] == o) { @@ -683,7 +716,7 @@ static void json_print(struct string *val, json_object *o) { } else if (o->type == JSON_HASH) { string_append(val, '{'); - int i; + size_t i; for (i = 0; i < o->length; i++) { json_print(val, o->keys[i]); string_append(val, ':'); @@ -695,7 +728,7 @@ static void json_print(struct string *val, json_object *o) { string_append(val, '}'); } else if (o->type == JSON_ARRAY) { string_append(val, '['); - int i; + size_t i; for (i = 0; i < o->length; i++) { json_print(val, o->array[i]); if (i + 1 < o->length) { diff --git a/jsonpull.h b/jsonpull.h index d5cc5aa..9a715f2 100644 --- a/jsonpull.h +++ b/jsonpull.h @@ -28,7 +28,7 @@ typedef struct json_object { struct json_object **array; struct json_object **keys; struct json_object **values; - int length; + size_t length; int expect; } json_object; @@ -37,11 +37,11 @@ typedef struct json_pull { char *error; int line; - int (*read)(struct json_pull *, char *buf, int n); + ssize_t (*read)(struct json_pull *, char *buf, size_t n); void *source; char *buffer; - int buffer_tail; - int buffer_head; + ssize_t buffer_tail; + ssize_t buffer_head; json_object *container; json_object *root; @@ -50,7 +50,7 @@ typedef struct json_pull { json_pull *json_begin_file(FILE *f); json_pull *json_begin_string(char *s); -json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void *source); +json_pull *json_begin(ssize_t (*read)(struct json_pull *, char *buffer, size_t n), void *source); void json_end(json_pull *p); typedef void (*json_separator_callback)(json_type type, json_pull *j, void *state); From eee596d5f505662b3a3d3daae50f4b1a2b5e63f6 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 11:57:33 -0700 Subject: [PATCH 02/14] Check return values from memory allocation 
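
Both this patch and the previous one apply the same recovery pattern after every allocation: test the result for NULL, report the failure with perror("Out of memory"), and exit(EXIT_FAILURE) rather than continue with a bad pointer. A minimal sketch of that pattern as standalone helpers, for reference only; the xmalloc, xrealloc, and xstrdup names are illustrative, and the patches themselves add the checks inline at each call site:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate or die, mirroring the inline checks these patches add. */
static void *xmalloc(size_t n) {
	void *p = malloc(n);
	if (p == NULL) {
		perror("Out of memory");
		exit(EXIT_FAILURE);
	}
	return p;
}

/* Same pattern for realloc; the result is checked before anything uses it. */
static void *xrealloc(void *old, size_t n) {
	void *p = realloc(old, n);
	if (p == NULL) {
		perror("Out of memory");
		exit(EXIT_FAILURE);
	}
	return p;
}

/* strdup also returns NULL when allocation fails. */
static char *xstrdup(const char *s) {
	char *copy = strdup(s);
	if (copy == NULL) {
		perror("Out of memory");
		exit(EXIT_FAILURE);
	}
	return copy;
}

Exiting immediately is what makes it safe for the patched realloc call sites to overwrite the old pointer before checking; code that wanted to recover instead would have to keep the original pointer until the check passes.
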
--- geojson.c | 27 +++++++++++++++++++++++++++ mbtiles.c | 8 ++++++++ tile-join.cc | 50 ++++++++++++++++++++++++++++++++++++++++++++------ tile.cc | 11 ++++++++++- 4 files changed, 89 insertions(+), 7 deletions(-) diff --git a/geojson.c b/geojson.c index baefbe9..458b6fe 100644 --- a/geojson.c +++ b/geojson.c @@ -815,6 +815,10 @@ ssize_t json_map_read(struct json_pull *jp, char *buffer, size_t n) { struct json_pull *json_begin_map(char *map, long long len) { struct jsonmap *jm = malloc(sizeof(struct jsonmap)); + if (jm == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } jm->map = map; jm->off = 0; @@ -1046,6 +1050,11 @@ void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile i *is_parsing = 1; struct read_parallel_arg *rpa = malloc(sizeof(struct read_parallel_arg)); + if (rpa == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + rpa->fd = fd; rpa->fp = fp; rpa->offset = offset; @@ -1088,6 +1097,11 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max r->geomname = malloc(strlen(tmpdir) + strlen("/geom.XXXXXXXX") + 1); r->indexname = malloc(strlen(tmpdir) + strlen("/index.XXXXXXXX") + 1); + if (r->metaname == NULL || r->poolname == NULL || r->treename == NULL || r->geomname == NULL || r->indexname == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + sprintf(r->metaname, "%s%s", tmpdir, "/meta.XXXXXXXX"); sprintf(r->poolname, "%s%s", tmpdir, "/pool.XXXXXXXX"); sprintf(r->treename, "%s%s", tmpdir, "/tree.XXXXXXXX"); @@ -1162,6 +1176,10 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max } r->file_bbox = malloc(4 * sizeof(long long)); + if (r->file_bbox == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } r->file_bbox[0] = r->file_bbox[1] = UINT_MAX; r->file_bbox[2] = r->file_bbox[3] = 0; } @@ -1364,6 +1382,10 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max for (i = 0; i < nlayers; i++) { if (layername != NULL) { layernames[i] = strdup(layername); + if (layernames[i] == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } } else { char *src = argv[i]; if (argc < 1) { @@ -1371,6 +1393,11 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max } char *trunc = layernames[i] = malloc(strlen(src) + 1); + if (trunc == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + const char *ocp, *use = src; for (ocp = src; *ocp; ocp++) { if (*ocp == '/' && ocp[1] != '\0') { diff --git a/mbtiles.c b/mbtiles.c index 02a1a40..d46f16e 100644 --- a/mbtiles.c +++ b/mbtiles.c @@ -118,6 +118,10 @@ static void aprintf(char **buf, const char *format, ...) 
{ va_end(ap); *buf = realloc(*buf, strlen(*buf) + strlen(tmp) + 1); + if (*buf == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } strcat(*buf, tmp); free(tmp); } @@ -219,6 +223,10 @@ void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, char **layername, sqlite3_free(sql); char *buf = strdup("{"); + if (buf == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } aprintf(&buf, "\"vector_layers\": [ "); int i; diff --git a/tile-join.cc b/tile-join.cc index 36e183c..73d6bfc 100644 --- a/tile-join.cc +++ b/tile-join.cc @@ -141,6 +141,10 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi pool_init(&((*file_keys)[ll]), 0); (*layernames)[ll] = strdup(ln); + if ((*layernames)[ll] == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } *nlayers = ll + 1; } @@ -161,6 +165,10 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi if (val.has_string_value()) { value = strdup(val.string_value().c_str()); + if (value == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } type = VT_STRING; } else if (val.has_int_value()) { if (asprintf(&value, "%lld", (long long) val.int_value()) >= 0) { @@ -196,7 +204,12 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi if (!is_pooled(exclude, key, VT_STRING)) { if (!is_pooled(&((*file_keys)[ll]), key, type)) { - pool(&((*file_keys)[ll]), strdup(key), type); + char *copy = strdup(key); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + pool(&((*file_keys)[ll]), copy, type); } struct pool_val *k, *v; @@ -204,13 +217,23 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi if (is_pooled(&keys, key, VT_STRING)) { k = pool(&keys, key, VT_STRING); } else { - k = pool(&keys, strdup(key), VT_STRING); + char *copy = strdup(key); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + k = pool(&keys, copy, VT_STRING); } if (is_pooled(&values, value, type)) { v = pool(&values, value, type); } else { - v = pool(&values, strdup(value), type); + char *copy = strdup(value); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + v = pool(&values, copy, type); } feature_tags.push_back(k->n); @@ -242,7 +265,12 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi if (!is_pooled(exclude, sjoinkey, VT_STRING)) { if (!is_pooled(&((*file_keys)[ll]), sjoinkey, type)) { - pool(&((*file_keys)[ll]), strdup(sjoinkey), type); + char *copy = strdup(sjoinkey); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + pool(&((*file_keys)[ll]), copy, type); } struct pool_val *k, *v; @@ -250,13 +278,23 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi if (is_pooled(&keys, sjoinkey, VT_STRING)) { k = pool(&keys, sjoinkey, VT_STRING); } else { - k = pool(&keys, strdup(sjoinkey), VT_STRING); + char *copy = strdup(sjoinkey); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + k = pool(&keys, copy, VT_STRING); } if (is_pooled(&values, sjoinval, type)) { v = pool(&values, sjoinval, type); } else { - v = pool(&values, strdup(sjoinval), type); + char *copy = strdup(sjoinval); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + v = pool(&values, copy, type); } feature_tags.push_back(k->n); diff --git a/tile.cc b/tile.cc index 0838f42..90c53ee 100644 --- a/tile.cc +++ b/tile.cc @@ -232,7 +232,12 @@ void decode_meta(char **meta, char 
*stringpool, struct pool *keys, struct pool * } // Dup to retain after munmap - pool(file_keys, strdup(key->s), value->type); + char *copy = strdup(key->s); + if (copy == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } + pool(file_keys, copy, value->type); if (pthread_mutex_unlock(&var_lock) != 0) { perror("pthread_mutex_unlock"); @@ -1109,6 +1114,10 @@ void *run_thread(void *vargs) { if (len < 0) { int *err = (int *) malloc(sizeof(int)); + if (err == NULL) { + perror("Out of memory"); + exit(EXIT_FAILURE); + } *err = z - 1; return err; } From c2231318bd1c8185aee969c7815144d5c9efb174 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 12:20:32 -0700 Subject: [PATCH 03/14] Many places where I used unsigned array indices but meant size_t --- decode.cc | 20 +++++------ geometry.cc | 93 ++++++++++++++++++++++++---------------------------- tile-join.cc | 13 ++++---- tile.cc | 35 +++++++++----------- 4 files changed, 73 insertions(+), 88 deletions(-) diff --git a/decode.cc b/decode.cc index 865b8d4..5905e93 100644 --- a/decode.cc +++ b/decode.cc @@ -196,7 +196,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { uint32_t count = geom >> 3; if (op == VT_MOVETO || op == VT_LINETO) { - for (unsigned k = 0; k < count; k++) { + for (size_t k = 0; k < count; k++) { px += dezig(feat.geometry(g + 1)); py += dezig(feat.geometry(g + 2)); g += 2; @@ -220,7 +220,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { printf("\"type\": \"Point\", \"coordinates\": [ %f, %f ]", ops[0].lon, ops[0].lat); } else { printf("\"type\": \"MultiPoint\", \"coordinates\": [ "); - for (unsigned i = 0; i < ops.size(); i++) { + for (size_t i = 0; i < ops.size(); i++) { if (i != 0) { printf(", "); } @@ -230,7 +230,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { } } else if (feat.type() == VT_LINE) { int movetos = 0; - for (unsigned i = 0; i < ops.size(); i++) { + for (size_t i = 0; i < ops.size(); i++) { if (ops[i].op == VT_MOVETO) { movetos++; } @@ -238,7 +238,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { if (movetos < 2) { printf("\"type\": \"LineString\", \"coordinates\": [ "); - for (unsigned i = 0; i < ops.size(); i++) { + for (size_t i = 0; i < ops.size(); i++) { if (i != 0) { printf(", "); } @@ -248,7 +248,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { } else { printf("\"type\": \"MultiLineString\", \"coordinates\": [ [ "); int state = 0; - for (unsigned i = 0; i < ops.size(); i++) { + for (size_t i = 0; i < ops.size(); i++) { if (ops[i].op == VT_MOVETO) { if (state == 0) { printf("[ %f, %f ]", ops[i].lon, ops[i].lat); @@ -268,7 +268,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { std::vector > rings; std::vector areas; - for (unsigned i = 0; i < ops.size(); i++) { + for (size_t i = 0; i < ops.size(); i++) { if (ops[i].op == VT_MOVETO) { rings.push_back(std::vector()); areas.push_back(0); @@ -282,9 +282,9 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { int outer = 0; - for (unsigned i = 0; i < rings.size(); i++) { + for (size_t i = 0; i < rings.size(); i++) { double area = 0; - for (unsigned k = 0; k < rings[i].size(); k++) { + for (size_t k = 0; k < rings[i].size(); k++) { if (rings[i][k].op != VT_CLOSEPATH) { area += rings[i][k].lon * rings[i][(k + 1) % rings[i].size()].lat; area -= rings[i][k].lat * rings[i][(k + 1) % rings[i].size()].lon; @@ 
-306,7 +306,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { } int state = 0; - for (unsigned i = 0; i < rings.size(); i++) { + for (size_t i = 0; i < rings.size(); i++) { if (areas[i] <= 0) { if (state != 0) { // new multipolygon @@ -320,7 +320,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, int describe) { printf(" ], [ "); } - for (unsigned j = 0; j < rings[i].size(); j++) { + for (size_t j = 0; j < rings[i].size(); j++) { if (rings[i][j].op != VT_CLOSEPATH) { if (j != 0) { printf(", "); diff --git a/geometry.cc b/geometry.cc index a3af1ac..e7aabff 100644 --- a/geometry.cc +++ b/geometry.cc @@ -77,9 +77,7 @@ drawvec decode_geometry(char **meta, int z, unsigned tx, unsigned ty, int detail } void to_tile_scale(drawvec &geom, int z, int detail) { - unsigned i; - - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { geom[i].x >>= (32 - detail - z); geom[i].y >>= (32 - detail - z); } @@ -90,9 +88,8 @@ drawvec remove_noop(drawvec geom, int type, int shift) { long long x = 0, y = 0; drawvec out; - unsigned i; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_LINETO && (geom[i].x >> shift) == x && (geom[i].y >> shift) == y) { continue; } @@ -112,7 +109,7 @@ drawvec remove_noop(drawvec geom, int type, int shift) { geom = out; out.resize(0); - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { if (i + 1 >= geom.size()) { continue; @@ -139,7 +136,7 @@ drawvec remove_noop(drawvec geom, int type, int shift) { geom = out; out.resize(0); - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { if (i > 0 && geom[i - 1].op == VT_LINETO && (geom[i - 1].x >> shift) == (geom[i].x >> shift) && (geom[i - 1].y >> shift) == (geom[i].y >> shift)) { continue; @@ -159,10 +156,9 @@ drawvec shrink_lines(drawvec &geom, int z, int detail, int basezoom, long long * long long res = 200LL << (32 - 8 - z); long long portion = res / exp(log(sqrt(droprate)) * (basezoom - z)); - unsigned i; drawvec out; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (i > 0 && (geom[i - 1].op == VT_MOVETO || geom[i - 1].op == VT_LINETO) && geom[i].op == VT_LINETO) { double dx = (geom[i].x - geom[i - 1].x); double dy = (geom[i].y - geom[i - 1].y); @@ -214,7 +210,7 @@ static void decode_clipped(ClipperLib::PolyNode *t, drawvec &out) { // to do any outer-ring children of those children as a new top level. ClipperLib::Path p = t->Contour; - for (unsigned i = 0; i < p.size(); i++) { + for (size_t i = 0; i < p.size(); i++) { out.push_back(draw((i == 0) ? VT_MOVETO : VT_LINETO, p[i].X, p[i].Y)); } if (p.size() > 0) { @@ -223,7 +219,7 @@ static void decode_clipped(ClipperLib::PolyNode *t, drawvec &out) { for (int n = 0; n < t->ChildCount(); n++) { ClipperLib::Path p = t->Childs[n]->Contour; - for (unsigned i = 0; i < p.size(); i++) { + for (size_t i = 0; i < p.size(); i++) { out.push_back(draw((i == 0) ? 
VT_MOVETO : VT_LINETO, p[i].X, p[i].Y)); } if (p.size() > 0) { @@ -243,9 +239,9 @@ drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool cl bool has_area = false; - for (unsigned i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -253,7 +249,7 @@ drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool cl } double area = 0; - for (unsigned k = i; k < j; k++) { + for (size_t k = i; k < j; k++) { area += (long double) geom[k].x * (long double) geom[i + ((k - i + 1) % (j - i))].y; area -= (long double) geom[k].y * (long double) geom[i + ((k - i + 1) % (j - i))].x; } @@ -265,14 +261,14 @@ drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool cl ClipperLib::Path path; drawvec tmp; - for (unsigned k = i; k < j; k++) { + for (size_t k = i; k < j; k++) { path.push_back(ClipperLib::IntPoint(geom[k].x, geom[k].y)); } if (!clipper.AddPath(path, ClipperLib::ptSubject, true)) { #if 0 fprintf(stderr, "Couldn't add polygon for clipping:"); - for (unsigned k = i; k < j; k++) { + for (size_t k = i; k < j; k++) { fprintf(stderr, " %lld,%lld", geom[k].x, geom[k].y); } fprintf(stderr, "\n"); @@ -331,9 +327,9 @@ drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool cl drawvec close_poly(drawvec &geom) { drawvec out; - for (unsigned i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -346,7 +342,7 @@ drawvec close_poly(drawvec &geom) { } } - for (unsigned n = i; n < j - 1; n++) { + for (size_t n = i; n < j - 1; n++) { out.push_back(geom[n]); } out.push_back(draw(VT_CLOSEPATH, 0, 0)); @@ -425,7 +421,7 @@ static drawvec clip_poly1(drawvec &geom, long long minx, long long miny, long lo draw S = in[in.size() - 1]; - for (unsigned e = 0; e < in.size(); e++) { + for (size_t e = 0; e < in.size(); e++) { draw E = in[e]; if (inside(E, edge, minx, miny, maxx, maxy)) { @@ -459,7 +455,7 @@ static drawvec clip_poly1(drawvec &geom, long long minx, long long miny, long lo } out[0].op = VT_MOVETO; - for (unsigned i = 1; i < out.size(); i++) { + for (size_t i = 1; i < out.size(); i++) { out[i].op = VT_LINETO; } } @@ -470,9 +466,9 @@ static drawvec clip_poly1(drawvec &geom, long long minx, long long miny, long lo drawvec simple_clip_poly(drawvec &geom, long long minx, long long miny, long long maxx, long long maxy) { drawvec out; - for (unsigned i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -480,7 +476,7 @@ drawvec simple_clip_poly(drawvec &geom, long long minx, long long miny, long lon } drawvec tmp; - for (unsigned k = i; k < j; k++) { + for (size_t k = i; k < j; k++) { tmp.push_back(geom[k]); } tmp = clip_poly1(tmp, minx, miny, maxx, maxy); @@ -490,7 +486,7 @@ drawvec simple_clip_poly(drawvec &geom, long long minx, long long miny, long lon exit(EXIT_FAILURE); } } - for (unsigned k = 0; k < tmp.size(); k++) { + for (size_t k = 0; k < tmp.size(); k++) { out.push_back(tmp[k]); } @@ -522,9 +518,9 @@ drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double *reduced = true; bool included_last_outer = false; - for (unsigned i = 0; i < 
geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -532,7 +528,7 @@ drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double } double area = 0; - for (unsigned k = i; k < j; k++) { + for (size_t k = i; k < j; k++) { area += (long double) geom[k].x * (long double) geom[i + ((k - i + 1) % (j - i))].y; area -= (long double) geom[k].y * (long double) geom[i + ((k - i + 1) % (j - i))].x; } @@ -571,7 +567,7 @@ drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double } else { // printf("area is %f so keeping instead of %lld\n", area, pixel * pixel); - for (unsigned k = i; k <= j && k < geom.size(); k++) { + for (size_t k = i; k <= j && k < geom.size(); k++) { out.push_back(geom[k]); } @@ -587,7 +583,7 @@ drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double } else { fprintf(stderr, "how did we get here with %d in %d?\n", geom[i].op, (int) geom.size()); - for (unsigned n = 0; n < geom.size(); n++) { + for (size_t n = 0; n < geom.size(); n++) { fprintf(stderr, "%d/%lld/%lld ", geom[n].op, geom[n].x, geom[n].y); } fprintf(stderr, "\n"); @@ -601,7 +597,6 @@ drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double drawvec clip_point(drawvec &geom, int z, int detail, long long buffer) { drawvec out; - unsigned i; long long min = 0; long long area = 0xFFFFFFFF; @@ -612,7 +607,7 @@ drawvec clip_point(drawvec &geom, int z, int detail, long long buffer) { area += buffer * area / 256; } - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].x >= min && geom[i].y >= min && geom[i].x <= area && geom[i].y <= area) { out.push_back(geom[i]); } @@ -650,7 +645,6 @@ int quick_check(long long *bbox, int z, int detail, long long buffer) { drawvec clip_lines(drawvec &geom, int z, int detail, long long buffer) { drawvec out; - unsigned i; long long min = 0; long long area = 0xFFFFFFFF; @@ -661,7 +655,7 @@ drawvec clip_lines(drawvec &geom, int z, int detail, long long buffer) { area += buffer * area / 256; } - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (i > 0 && (geom[i - 1].op == VT_MOVETO || geom[i - 1].op == VT_LINETO) && geom[i].op == VT_LINETO) { double x1 = geom[i - 1].x; double y1 = geom[i - 1].y; @@ -773,7 +767,7 @@ static void douglas_peucker(drawvec &geom, int start, int n, double e) { drawvec impose_tile_boundaries(drawvec &geom, long long extent) { drawvec out; - for (unsigned i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (i > 0 && geom[i].op == VT_LINETO && (geom[i - 1].op == VT_MOVETO || geom[i - 1].op == VT_LINETO)) { double x1 = geom[i - 1].x; double y1 = geom[i - 1].y; @@ -808,8 +802,7 @@ drawvec simplify_lines(drawvec &geom, int z, int detail) { area = 1LL << (32 - z); } - unsigned i; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { geom[i].necessary = 1; } else if (geom[i].op == VT_LINETO) { @@ -821,9 +814,9 @@ drawvec simplify_lines(drawvec &geom, int z, int detail) { geom = impose_tile_boundaries(geom, area); - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -839,7 +832,7 @@ drawvec simplify_lines(drawvec 
&geom, int z, int detail) { } drawvec out; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].necessary) { out.push_back(geom[i]); } @@ -855,8 +848,7 @@ drawvec reorder_lines(drawvec &geom) { return geom; } - unsigned i; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_MOVETO) { if (i != 0) { return geom; @@ -879,7 +871,7 @@ drawvec reorder_lines(drawvec &geom) { if (l1 > l2) { drawvec out; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { out.push_back(geom[geom.size() - 1 - i]); } out[0].op = VT_MOVETO; @@ -894,14 +886,13 @@ drawvec fix_polygon(drawvec &geom) { int outer = 1; drawvec out; - unsigned i; - for (i = 0; i < geom.size(); i++) { + for (size_t i = 0; i < geom.size(); i++) { if (geom[i].op == VT_CLOSEPATH) { outer = 1; } else if (geom[i].op == VT_MOVETO) { // Find the end of the ring - unsigned j; + size_t j; for (j = i + 1; j < geom.size(); j++) { if (geom[j].op != VT_LINETO) { break; @@ -912,7 +903,7 @@ drawvec fix_polygon(drawvec &geom) { // Close it if it isn't closed. drawvec ring; - for (unsigned a = i; a < j; a++) { + for (size_t a = i; a < j; a++) { ring.push_back(geom[a]); } if (j - i != 0 && (ring[0].x != ring[j - i - 1].x || ring[0].y != ring[j - i - 1].y)) { @@ -923,7 +914,7 @@ drawvec fix_polygon(drawvec &geom) { // inner/outer expectation double area = 0; - for (unsigned k = 0; k < ring.size(); k++) { + for (size_t k = 0; k < ring.size(); k++) { area += (long double) ring[k].x * (long double) ring[(k + 1) % ring.size()].y; area -= (long double) ring[k].y * (long double) ring[(k + 1) % ring.size()].x; } @@ -939,7 +930,7 @@ drawvec fix_polygon(drawvec &geom) { // Copy ring into output, fixing the moveto/lineto ops if necessary because of // reversal or closing - for (unsigned a = 0; a < ring.size(); a++) { + for (size_t a = 0; a < ring.size(); a++) { if (a == 0) { out.push_back(draw(VT_MOVETO, ring[a].x, ring[a].y)); } else { @@ -966,12 +957,12 @@ std::vector chop_polygon(std::vector &geoms) { bool again = false; std::vector out; - for (unsigned i = 0; i < geoms.size(); i++) { + for (size_t i = 0; i < geoms.size(); i++) { if (geoms[i].size() > 700) { long long midx = 0, midy = 0, count = 0; long long maxx = LONG_LONG_MIN, maxy = LONG_LONG_MIN, minx = LONG_LONG_MAX, miny = LONG_LONG_MAX; - for (unsigned j = 0; j < geoms[i].size(); j++) { + for (size_t j = 0; j < geoms[i].size(); j++) { if (geoms[i][j].op == VT_MOVETO || geoms[i][j].op == VT_LINETO) { midx += geoms[i][j].x; midy += geoms[i][j].y; diff --git a/tile-join.cc b/tile-join.cc index 73d6bfc..c5c6c5b 100644 --- a/tile-join.cc +++ b/tile-join.cc @@ -247,7 +247,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi std::vector fields = ii->second; matched = 1; - for (unsigned i = 1; i < fields.size(); i++) { + for (size_t i = 1; i < fields.size(); i++) { std::string joinkey = header[i]; std::string joinval = fields[i]; int type = VT_STRING; @@ -311,11 +311,11 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi mapnik::vector::tile_feature *outfeature = outlayer->add_features(); outfeature->set_type(feat.type()); - for (int g = 0; g < feat.geometry_size(); g++) { + for (size_t g = 0; g < feat.geometry_size(); g++) { outfeature->add_geometry(feat.geometry(g)); } - for (unsigned i = 0; i < feature_tags.size(); i++) { + for (size_t i = 0; i < feature_tags.size(); i++) { outfeature->add_tags(feature_tags[i]); } @@ -485,8 
+485,7 @@ std::vector split(char *s) { std::string dequote(std::string s) { std::string out; - unsigned i; - for (i = 0; i < s.size(); i++) { + for (size_t i = 0; i < s.size(); i++) { if (s[i] == '"') { if (i + 1 < s.size() && s[i + 1] == '"') { out.push_back('"'); @@ -509,7 +508,7 @@ void readcsv(char *fn, std::vector &header, std::map &header, std::map >(line[0], line)); } diff --git a/tile.cc b/tile.cc index 90c53ee..d71d6b5 100644 --- a/tile.cc +++ b/tile.cc @@ -165,8 +165,7 @@ int coalcmp(const void *v1, const void *v2) { return cmp; } - unsigned i; - for (i = 0; i < c1->meta.size() && i < c2->meta.size(); i++) { + for (size_t i = 0; i < c1->meta.size() && i < c2->meta.size(); i++) { cmp = c1->meta[i] - c2->meta[i]; if (cmp != 0) { @@ -293,8 +292,7 @@ mapnik::vector::tile create_tile(char **layernames, int line_detail, std::vector layer->set_version(1); layer->set_extent(1 << line_detail); - unsigned x; - for (x = 0; x < features[i].size(); x++) { + for (size_t x = 0; x < features[i].size(); x++) { if (features[i][x].type == VT_LINE || features[i][x].type == VT_POLYGON) { features[i][x].geom = remove_noop(features[i][x].geom, features[i][x].type, 0); } @@ -314,8 +312,7 @@ mapnik::vector::tile create_tile(char **layernames, int line_detail, std::vector to_feature(features[i][x].geom, feature); *count += features[i][x].geom.size(); - unsigned y; - for (y = 0; y < features[i][x].meta.size(); y++) { + for (size_t y = 0; y < features[i][x].meta.size(); y++) { feature->add_tags(features[i][x].meta[y]); } } @@ -452,7 +449,7 @@ void rewrite(drawvec &geom, int z, int nextzoom, int maxzoom, long long *bbox, u serialize_long_long(geomfile[j], metastart, &geompos[j], fname); long long wx = initial_x[segment], wy = initial_y[segment]; - for (unsigned u = 0; u < geom.size(); u++) { + for (size_t u = 0; u < geom.size(); u++) { serialize_byte(geomfile[j], geom[u].op, &geompos[j], fname); if (geom[u].op != VT_CLOSEPATH) { @@ -498,7 +495,7 @@ void *partial_feature_worker(void *v) { struct partial_arg *a = (struct partial_arg *) v; std::vector *partials = a->partials; - for (unsigned i = a->task; i < (*partials).size(); i += a->tasks) { + for (size_t i = a->task; i < (*partials).size(); i += a->tasks) { drawvec geom = (*partials)[i].geoms[0]; // XXX assumption of a single geometry at the beginning (*partials)[i].geoms.clear(); // avoid keeping two copies in memory signed char t = (*partials)[i].t; @@ -540,7 +537,7 @@ void *partial_feature_worker(void *v) { if (t == VT_POLYGON) { // Scaling may have made the polygon degenerate. // Give Clipper a chance to try to fix it. - for (unsigned i = 0; i < geoms.size(); i++) { + for (size_t i = 0; i < geoms.size(); i++) { geoms[i] = clean_or_clip_poly(geoms[i], 0, 0, 0, false); geoms[i] = close_poly(geoms[i]); } @@ -733,16 +730,16 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi // If the geometry extends off the edge of the world, concatenate on another copy // shifted by 360 degrees, and then make sure both copies get clipped down to size. 
- unsigned n = geom.size(); + size_t n = geom.size(); if (bbox[0] < 0) { - for (unsigned i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { geom.push_back(draw(geom[i].op, geom[i].x + (1LL << 32), geom[i].y)); } } if (bbox[2] > 1LL << 32) { - for (unsigned i = 0; i < n; i++) { + for (size_t i = 0; i < n; i++) { geom.push_back(draw(geom[i].op, geom[i].x - (1LL << 32), geom[i].y)); } } @@ -873,7 +870,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi } // This is serial because decode_meta() unifies duplicates - for (unsigned i = 0; i < partials.size(); i++) { + for (size_t i = 0; i < partials.size(); i++) { std::vector geoms = partials[i].geoms; partials[i].geoms.clear(); // avoid keeping two copies in memory long long layer = partials[i].layer; @@ -883,7 +880,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi // A complex polygon may have been split up into multiple geometries. // Break them out into multiple features if necessary. - for (unsigned j = 0; j < geoms.size(); j++) { + for (size_t j = 0; j < geoms.size(); j++) { if (t == VT_POINT || to_feature(geoms[j], NULL)) { struct coalesce c; char *meta = partials[i].meta; @@ -915,9 +912,8 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi } std::vector out; - unsigned x; - for (x = 0; x < features[j].size(); x++) { - unsigned y = out.size() - 1; + for (size_t x = 0; x < features[j].size(); x++) { + size_t y = out.size() - 1; #if 0 if (out.size() > 0 && coalcmp(&features[j][x], &out[y]) < 0) { @@ -926,8 +922,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi #endif if (additional[A_COALESCE] && out.size() > 0 && out[y].geom.size() + features[j][x].geom.size() < 700 && coalcmp(&features[j][x], &out[y]) == 0 && features[j][x].type != VT_POINT) { - unsigned z; - for (z = 0; z < features[j][x].geom.size(); z++) { + for (size_t z = 0; z < features[j][x].geom.size(); z++) { out[y].geom.push_back(features[j][x].geom[z]); } out[y].coalesced = true; @@ -939,7 +934,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsi features[j] = out; out.clear(); - for (x = 0; x < features[j].size(); x++) { + for (size_t x = 0; x < features[j].size(); x++) { if (features[j][x].coalesced && features[j][x].type == VT_LINE) { features[j][x].geom = remove_noop(features[j][x].geom, features[j][x].type, 0); features[j][x].geom = simplify_lines(features[j][x].geom, 32, 0); From 52dbed813221faf6b21a47bdcece8da9750b004d Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 13:03:57 -0700 Subject: [PATCH 04/14] Fix a few warnings about globals --- geojson.c | 16 ++++++++-------- tile.h | 1 - 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/geojson.c b/geojson.c index 458b6fe..16e1470 100644 --- a/geojson.c +++ b/geojson.c @@ -28,11 +28,11 @@ #include "version.h" #include "memfile.h" -int low_detail = 12; -int full_detail = -1; -int min_detail = 7; -int quiet = 0; +static int low_detail = 12; +static int full_detail = -1; +static int min_detail = 7; +int quiet = 0; int geometry_scale = 0; #define GEOM_POINT 0 /* array of positions */ @@ -43,11 +43,11 @@ int geometry_scale = 0; #define GEOM_MULTIPOLYGON 5 /* array of arrays of arrays of arrays of positions */ #define GEOM_TYPES 6 -const char *geometry_names[GEOM_TYPES] = { +static const char *geometry_names[GEOM_TYPES] = { "Point", "MultiPoint", "LineString", "MultiLineString", "Polygon", "MultiPolygon", }; -int 
geometry_within[GEOM_TYPES] = { +static int geometry_within[GEOM_TYPES] = { -1, /* point */ GEOM_POINT, /* multipoint */ GEOM_POINT, /* linestring */ @@ -56,7 +56,7 @@ int geometry_within[GEOM_TYPES] = { GEOM_POLYGON, /* multipolygon */ }; -int mb_geometry[GEOM_TYPES] = { +static int mb_geometry[GEOM_TYPES] = { VT_POINT, VT_POINT, VT_LINE, VT_LINE, VT_POLYGON, VT_POLYGON, }; @@ -146,7 +146,7 @@ void parse_geometry(int t, json_object *j, long long *bbox, long long *fpos, FIL int within = geometry_within[t]; if (within >= 0) { - int i; + size_t i; for (i = 0; i < j->length; i++) { if (within == GEOM_POINT) { if (i == 0 || mb_geometry[t] == GEOM_MULTIPOINT) { diff --git a/tile.h b/tile.h index 3a4db27..0930fc4 100644 --- a/tile.h +++ b/tile.h @@ -31,7 +31,6 @@ int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpo int manage_gap(unsigned long long index, unsigned long long *previndex, double scale, double gamma, double *gap); -extern unsigned initial_x, initial_y; extern int geometry_scale; extern int quiet; From 12be3e5a3299c2a99f2151ba103c14600907fceb Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 13:21:32 -0700 Subject: [PATCH 05/14] This one really is an int upstream --- tile-join.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tile-join.cc b/tile-join.cc index c5c6c5b..25794ce 100644 --- a/tile-join.cc +++ b/tile-join.cc @@ -311,7 +311,7 @@ void handle(std::string message, int z, unsigned x, unsigned y, struct pool **fi mapnik::vector::tile_feature *outfeature = outlayer->add_features(); outfeature->set_type(feat.type()); - for (size_t g = 0; g < feat.geometry_size(); g++) { + for (int g = 0; g < feat.geometry_size(); g++) { outfeature->add_geometry(feat.geometry(g)); } From 1e5d420b66bdce43e31f0e3639c3066a69dc3988 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Fri, 25 Mar 2016 13:45:28 -0700 Subject: [PATCH 06/14] Fix warnings about unused arguments --- geojson.c | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/geojson.c b/geojson.c index 16e1470..9bf42ce 100644 --- a/geojson.c +++ b/geojson.c @@ -299,7 +299,7 @@ struct merge { struct merge *next; }; -static void insert(struct merge *m, struct merge **head, unsigned char *map, int bytes) { +static void insert(struct merge *m, struct merge **head, unsigned char *map) { while (*head != NULL && indexcmp(map + m->start, map + (*head)->start) > 0) { head = &((*head)->next); } @@ -316,7 +316,7 @@ static void merge(struct merge *merges, int nmerges, unsigned char *map, FILE *f for (i = 0; i < nmerges; i++) { if (merges[i].start < merges[i].end) { - insert(&(merges[i]), &head, map, bytes); + insert(&(merges[i]), &head, map); } } @@ -333,7 +333,7 @@ static void merge(struct merge *merges, int nmerges, unsigned char *map, FILE *f m->next = NULL; if (m->start < m->end) { - insert(m, &head, map, bytes); + insert(m, &head, map); } along++; @@ -445,7 +445,7 @@ long long addpool(struct memfile *poolfile, struct memfile *treefile, char *s, c return off; } -int serialize_geometry(json_object *geometry, json_object *properties, const char *reading, int line, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, struct pool *exclude, struct pool *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, const char *fname, int maxzoom, int basezoom, int layer, double droprate, long long 
*file_bbox, json_object *tippecanoe, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y) { +int serialize_geometry(json_object *geometry, json_object *properties, const char *reading, int line, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, struct pool *exclude, struct pool *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, const char *fname, int basezoom, int layer, double droprate, long long *file_bbox, json_object *tippecanoe, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y) { json_object *geometry_type = json_hash_get(geometry, "type"); if (geometry_type == NULL) { static int warned = 0; @@ -646,7 +646,7 @@ int serialize_geometry(json_object *geometry, json_object *properties, const cha return 1; } -void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, struct pool *exclude, struct pool *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int maxzoom, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y) { +void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, struct pool *exclude, struct pool *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y) { long long found_hashes = 0; long long found_features = 0; long long found_geometries = 0; @@ -711,7 +711,7 @@ void parse_json(json_pull *jp, const char *reading, volatile long long *layer_se } found_geometries++; - serialize_geometry(j, NULL, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, maxzoom, basezoom, layer, droprate, file_bbox, NULL, segment, initialized, initial_x, initial_y); + serialize_geometry(j, NULL, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, NULL, segment, initialized, initial_x, initial_y); json_free(j); continue; } @@ -744,12 +744,12 @@ void parse_json(json_pull *jp, const char *reading, volatile long long *layer_se json_object *geometries = json_hash_get(geometry, "geometries"); if (geometries != NULL) { - int g; + size_t g; for (g = 0; g < geometries->length; g++) { - serialize_geometry(geometries->array[g], properties, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, maxzoom, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y); + serialize_geometry(geometries->array[g], properties, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, 
tippecanoe, segment, initialized, initial_x, initial_y); } } else { - serialize_geometry(geometry, properties, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, maxzoom, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y); + serialize_geometry(geometry, properties, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y); } json_free(j); @@ -775,7 +775,6 @@ struct parse_json_args { struct memfile *poolfile; struct memfile *treefile; char *fname; - int maxzoom; int basezoom; int layer; double droprate; @@ -789,15 +788,15 @@ struct parse_json_args { void *run_parse_json(void *v) { struct parse_json_args *pja = v; - parse_json(pja->jp, pja->reading, pja->layer_seq, pja->progress_seq, pja->metapos, pja->geompos, pja->indexpos, pja->exclude, pja->include, pja->exclude_all, pja->metafile, pja->geomfile, pja->indexfile, pja->poolfile, pja->treefile, pja->fname, pja->maxzoom, pja->basezoom, pja->layer, pja->droprate, pja->file_bbox, pja->segment, pja->initialized, pja->initial_x, pja->initial_y); + parse_json(pja->jp, pja->reading, pja->layer_seq, pja->progress_seq, pja->metapos, pja->geompos, pja->indexpos, pja->exclude, pja->include, pja->exclude_all, pja->metafile, pja->geomfile, pja->indexfile, pja->poolfile, pja->treefile, pja->fname, pja->basezoom, pja->layer, pja->droprate, pja->file_bbox, pja->segment, pja->initialized, pja->initial_x, pja->initial_y); return NULL; } struct jsonmap { char *map; - long long off; - long long end; + unsigned long long off; + unsigned long long end; }; ssize_t json_map_read(struct json_pull *jp, char *buffer, size_t n) { @@ -916,7 +915,7 @@ void *run_sort(void *v) { return NULL; } -void do_read_parallel(char *map, long long len, long long initial_offset, const char *reading, struct reader *reader, volatile long long *progress_seq, struct pool *exclude, struct pool *include, int exclude_all, char *fname, int maxzoom, int basezoom, int source, int nlayers, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y) { +void do_read_parallel(char *map, long long len, long long initial_offset, const char *reading, struct reader *reader, volatile long long *progress_seq, struct pool *exclude, struct pool *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y) { long long segs[CPUS + 1]; segs[0] = 0; segs[CPUS] = len; @@ -957,7 +956,6 @@ void do_read_parallel(char *map, long long len, long long initial_offset, const pja[i].poolfile = reader[i].poolfile; pja[i].treefile = reader[i].treefile; pja[i].fname = fname; - pja[i].maxzoom = maxzoom; pja[i].basezoom = basezoom; pja[i].layer = source < nlayers ? 
source : 0; pja[i].droprate = droprate; @@ -1027,7 +1025,7 @@ void *run_read_parallel(void *v) { exit(EXIT_FAILURE); } - do_read_parallel(map, a->len, a->offset, a->reading, a->reader, a->progress_seq, a->exclude, a->include, a->exclude_all, a->fname, a->maxzoom, a->basezoom, a->source, a->nlayers, a->droprate, a->initialized, a->initial_x, a->initial_y); + do_read_parallel(map, a->len, a->offset, a->reading, a->reader, a->progress_seq, a->exclude, a->include, a->exclude_all, a->fname, a->basezoom, a->source, a->nlayers, a->droprate, a->initialized, a->initial_x, a->initial_y); if (munmap(map, a->len) != 0) { perror("munmap source file"); @@ -1042,7 +1040,7 @@ void *run_read_parallel(void *v) { return NULL; } -void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile int *is_parsing, pthread_t *parallel_parser, const char *reading, struct reader *reader, volatile long long *progress_seq, struct pool *exclude, struct pool *include, int exclude_all, char *fname, int maxzoom, int basezoom, int source, int nlayers, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y) { +void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile int *is_parsing, pthread_t *parallel_parser, const char *reading, struct reader *reader, volatile long long *progress_seq, struct pool *exclude, struct pool *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y) { // This has to kick off an intermediate thread to start the parser threads, // so the main thread can get back to reading the next input stage while // the intermediate thread waits for the completion of the parser threads. @@ -1068,7 +1066,6 @@ void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile i rpa->include = include; rpa->exclude_all = exclude_all; rpa->fname = fname; - rpa->maxzoom = maxzoom; rpa->basezoom = basezoom; rpa->source = source; rpa->nlayers = nlayers; @@ -1240,7 +1237,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max } if (map != NULL && map != MAP_FAILED) { - do_read_parallel(map, st.st_size - off, overall_offset, reading, reader, &progress_seq, exclude, include, exclude_all, fname, maxzoom, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); + do_read_parallel(map, st.st_size - off, overall_offset, reading, reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); overall_offset += st.st_size - off; if (munmap(map, st.st_size - off) != 0) { @@ -1294,7 +1291,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max } fflush(readfp); - start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, ¶llel_parser, reading, reader, &progress_seq, exclude, include, exclude_all, fname, maxzoom, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); + start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, ¶llel_parser, reading, reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); initial_offset += ahead; overall_offset += ahead; @@ -1328,7 +1325,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max fflush(readfp); if (ahead > 0) { - start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, ¶llel_parser, reading, reader, &progress_seq, exclude, include, 
exclude_all, fname, maxzoom, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); + start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, ¶llel_parser, reading, reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, droprate, initialized, initial_x, initial_y); if (pthread_join(parallel_parser, NULL) != 0) { perror("pthread_join"); @@ -1341,7 +1338,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max long long layer_seq = overall_offset; json_pull *jp = json_begin_file(fp); - parse_json(jp, reading, &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, maxzoom, basezoom, source < nlayers ? source : 0, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0]); + parse_json(jp, reading, &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, basezoom, source < nlayers ? source : 0, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0]); json_end(jp); overall_offset = layer_seq; } From 8c9aa53bb5a69793c2256df9c6aaeae4c4407b64 Mon Sep 17 00:00:00 2001 From: Steve Bennett Date: Sat, 26 Mar 2016 12:28:23 +1100 Subject: [PATCH 07/14] Warn of spurious errors (as per #191) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 03f8755..f86fc63 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,8 @@ Options * -t _directory_: Put the temporary files in _directory_. * -P: Use multiple threads to read different parts of each input file at once. This will only work if the input is line-delimited JSON with each Feature on its - own line, because it knows nothing of the top-level structure around the Features. + own line, because it knows nothing of the top-level structure around the Features. Spurious "EOF" error + messages may result otherwise. Performance will be better if the input is a named file that can be mapped into memory rather than a stream that can only be read sequentially. 
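
The restriction above is structural: a parallel reader can only hand each thread a byte range of the input, so every range has to begin and end on a line boundary for the Features inside it to parse independently. A minimal sketch of that segmenting step, assuming a memory-mapped buffer and a caller-supplied segs array; split_segments is a hypothetical helper for illustration, not the project's actual reader:

/* Split a mapped, line-delimited buffer into nthreads segments, moving each
 * split point forward to the byte after the next newline so no Feature line
 * is divided between two threads. Hypothetical helper for illustration. */
static void split_segments(const char *map, long long len, long long *segs, int nthreads) {
	segs[0] = 0;
	segs[nthreads] = len;

	for (int i = 1; i < nthreads; i++) {
		long long off = len * i / nthreads;

		while (off < len && map[off] != '\n') {
			off++;
		}
		if (off < len) {
			off++; /* begin the next segment just past the newline */
		}
		if (off < segs[i - 1]) {
			off = segs[i - 1]; /* a short file can leave some segments empty */
		}

		segs[i] = off;
	}
}

With boundaries snapped this way, a Feature that would straddle a naive byte split is always parsed by exactly one thread; input that is not line-delimited has no such safe split points, which is where the spurious "EOF" errors mentioned above come from.
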
From 21a635fb7abe4e66830d38c27509f81d81afe1be Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 12:00:55 -0700 Subject: [PATCH 08/14] Check for string length overflow --- jsonpull.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/jsonpull.c b/jsonpull.c index cfbe3cf..869e913 100644 --- a/jsonpull.c +++ b/jsonpull.c @@ -212,7 +212,12 @@ static void string_init(struct string *s) { static void string_append(struct string *s, char c) { if (s->n + 2 >= s->nalloc) { + size_t prev = s->nalloc; s->nalloc += 500; + if (s->nalloc <= prev) { + fprintf(stderr, "String size overflowed\n"); + exit(EXIT_FAILURE); + } s->buf = realloc(s->buf, s->nalloc); if (s->buf == NULL) { perror("Out of memory"); @@ -228,7 +233,12 @@ static void string_append_string(struct string *s, char *add) { size_t len = strlen(add); if (s->n + len + 1 >= s->nalloc) { + size_t prev = s->nalloc; s->nalloc += 500 + len; + if (s->nalloc <= prev) { + fprintf(stderr, "String size overflowed\n"); + exit(EXIT_FAILURE); + } s->buf = realloc(s->buf, s->nalloc); if (s->buf == NULL) { perror("Out of memory"); From 356575d0e0d1cea8f00a8b63bca5a9a3f55fe681 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 12:25:33 -0700 Subject: [PATCH 09/14] Check for JSON array and hash overflows --- jsonpull.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/jsonpull.c b/jsonpull.c index 869e913..4147b4f 100644 --- a/jsonpull.c +++ b/jsonpull.c @@ -100,7 +100,7 @@ static inline int read_wrap(json_pull *j) { return c; } -#define SIZE_FOR(i) (((i) + 31) & ~31) +#define SIZE_FOR(i) ((size_t)(((i) + 31) & ~31)) static json_object *fabricate_object(json_object *parent, json_type type) { json_object *o = malloc(sizeof(struct json_object)); @@ -125,6 +125,10 @@ static json_object *add_object(json_pull *j, json_type type) { if (c->type == JSON_ARRAY) { if (c->expect == JSON_ITEM) { if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { + if (SIZE_FOR(c->length + 1) < SIZE_FOR(c->length)) { + fprintf(stderr, "Array size overflow\n"); + exit(EXIT_FAILURE); + } c->array = realloc(c->array, SIZE_FOR(c->length + 1) * sizeof(json_object *)); if (c->array == NULL) { perror("Out of memory"); @@ -151,6 +155,10 @@ static json_object *add_object(json_pull *j, json_type type) { } if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { + if (SIZE_FOR(c->length + 1) < SIZE_FOR(c->length)) { + fprintf(stderr, "Hash size overflow\n"); + exit(EXIT_FAILURE); + } c->keys = realloc(c->keys, SIZE_FOR(c->length + 1) * sizeof(json_object *)); c->values = realloc(c->values, SIZE_FOR(c->length + 1) * sizeof(json_object *)); if (c->keys == NULL || c->values == NULL) { From cf2abf67d2c36dc81c290449e455e5a666540a73 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 13:08:54 -0700 Subject: [PATCH 10/14] Oops: need to check array size in bytes, not number of objects --- jsonpull.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/jsonpull.c b/jsonpull.c index 4147b4f..3c9a2c9 100644 --- a/jsonpull.c +++ b/jsonpull.c @@ -100,7 +100,7 @@ static inline int read_wrap(json_pull *j) { return c; } -#define SIZE_FOR(i) ((size_t)(((i) + 31) & ~31)) +#define SIZE_FOR(i, size) ((size_t)((((i) + 31) & ~31) * size)) static json_object *fabricate_object(json_object *parent, json_type type) { json_object *o = malloc(sizeof(struct json_object)); @@ -124,12 +124,12 @@ static json_object *add_object(json_pull *j, json_type type) { if (c != NULL) { if (c->type == JSON_ARRAY) { if (c->expect 
== JSON_ITEM) { - if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { - if (SIZE_FOR(c->length + 1) < SIZE_FOR(c->length)) { + if (SIZE_FOR(c->length + 1, sizeof(json_object *)) != SIZE_FOR(c->length, sizeof(json_object *))) { + if (SIZE_FOR(c->length + 1, sizeof(json_object *)) < SIZE_FOR(c->length, sizeof(json_object *))) { fprintf(stderr, "Array size overflow\n"); exit(EXIT_FAILURE); } - c->array = realloc(c->array, SIZE_FOR(c->length + 1) * sizeof(json_object *)); + c->array = realloc(c->array, SIZE_FOR(c->length + 1, sizeof(json_object *))); if (c->array == NULL) { perror("Out of memory"); exit(EXIT_FAILURE); @@ -154,13 +154,13 @@ static json_object *add_object(json_pull *j, json_type type) { return NULL; } - if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) { - if (SIZE_FOR(c->length + 1) < SIZE_FOR(c->length)) { + if (SIZE_FOR(c->length + 1, sizeof(json_object *)) != SIZE_FOR(c->length, sizeof(json_object *))) { + if (SIZE_FOR(c->length + 1, sizeof(json_object *)) < SIZE_FOR(c->length, sizeof(json_object *))) { fprintf(stderr, "Hash size overflow\n"); exit(EXIT_FAILURE); } - c->keys = realloc(c->keys, SIZE_FOR(c->length + 1) * sizeof(json_object *)); - c->values = realloc(c->values, SIZE_FOR(c->length + 1) * sizeof(json_object *)); + c->keys = realloc(c->keys, SIZE_FOR(c->length + 1, sizeof(json_object *))); + c->values = realloc(c->values, SIZE_FOR(c->length + 1, sizeof(json_object *))); if (c->keys == NULL || c->values == NULL) { perror("Out of memory"); exit(EXIT_FAILURE); From 5a2a1b793aa31852532b81f00538645211ae436e Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 13:22:03 -0700 Subject: [PATCH 11/14] Exit cleanly if there was no valid input instead of giving an mmap error --- geojson.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/geojson.c b/geojson.c index 9bf42ce..1e8284d 100644 --- a/geojson.c +++ b/geojson.c @@ -891,7 +891,7 @@ void *run_sort(void *v) { // MAP_PRIVATE to avoid disk writes if it fits in memory void *map = mmap(NULL, end - start, PROT_READ | PROT_WRITE, MAP_PRIVATE, a->indexfd, start); if (map == MAP_FAILED) { - perror("mmap"); + perror("mmap in run_sort"); exit(EXIT_FAILURE); } @@ -1450,7 +1450,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max if (reader[i].indexpos > 0) { void *map = mmap(NULL, reader[i].indexpos, PROT_READ, MAP_PRIVATE, reader[i].indexfd, 0); if (map == MAP_FAILED) { - perror("mmap"); + perror("mmap reunifying index"); exit(EXIT_FAILURE); } if (fwrite(map, reader[i].indexpos, 1, indexfile) != 1) { @@ -1468,6 +1468,11 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max } fclose(indexfile); + if (indexpos == 0) { + fprintf(stderr, "Did not read any valid geometries\n"); + exit(EXIT_FAILURE); + } + char geomname[strlen(tmpdir) + strlen("/geom.XXXXXXXX") + 1]; FILE *geomfile; @@ -1544,7 +1549,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max char *map = mmap(NULL, indexpos, PROT_READ | PROT_WRITE, MAP_SHARED, indexfd, 0); if (map == MAP_FAILED) { - perror("mmap"); + perror("mmap unified index"); exit(EXIT_FAILURE); } @@ -1599,7 +1604,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max long long pre_merged_geompos = geompos; char *geom_map = mmap(NULL, geompos, PROT_READ, MAP_PRIVATE, geomfd, 0); if (geom_map == MAP_FAILED) { - perror("mmap"); + perror("mmap geometry"); exit(EXIT_FAILURE); } @@ -1664,7 +1669,7 @@ int read_json(int argc, char 
**argv, char *fname, const char *layername, int max if (basezoom < 0 || droprate < 0) { struct index *map = mmap(NULL, indexpos, PROT_READ, MAP_PRIVATE, indexfd, 0); if (map == MAP_FAILED) { - perror("mmap"); + perror("mmap index for basezoom"); exit(EXIT_FAILURE); } @@ -1915,7 +1920,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max if (reader[i].metapos > 0) { void *map = mmap(NULL, reader[i].metapos, PROT_READ, MAP_PRIVATE, reader[i].metafd, 0); if (map == MAP_FAILED) { - perror("mmap"); + perror("mmap unmerged meta"); exit(EXIT_FAILURE); } if (fwrite(map, reader[i].metapos, 1, metafile) != 1) { From b47653e2e60d10b903451df8c18cb95cf226b361 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 14:00:01 -0700 Subject: [PATCH 12/14] Remove temporary files that were accidentally left in place --- geojson.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/geojson.c b/geojson.c index 1e8284d..e43c755 100644 --- a/geojson.c +++ b/geojson.c @@ -1491,6 +1491,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max perror(geomname); exit(EXIT_FAILURE); } + unlink(geomname); int geomfd2; /* Sort the index by geometry */ @@ -1625,6 +1626,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max perror(geomname); exit(EXIT_FAILURE); } + unlink(geomname); for (i = 0; i < CPUS; i++) { if (reader[i].geomst.st_size > 0) { From edce0f088d3630d3919fe808f7568d83378c99f1 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 15:03:28 -0700 Subject: [PATCH 13/14] Add GNU-style long options --- CHANGELOG.md | 6 +++++ README.md | 62 ++++++++++++++++++++++++++-------------------------- geojson.c | 52 +++++++++++++++++++++++++++++++++++++++---- tile.cc | 16 +++++++------- tile.h | 4 ++-- version.h | 2 +- 6 files changed, 96 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eebed5f..a686a0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.9.5 + +* Remove temporary files that were accidentally left behind +* Be more careful about checking memory allocations and array bounds +* Add GNU-style long options + ## 1.9.4 * Tippecanoe-decode can decode .pbf files that aren't in an .mbtiles container diff --git a/README.md b/README.md index f86fc63..60c6029 100644 --- a/README.md +++ b/README.md @@ -57,18 +57,18 @@ Options ### Naming - * -l _name_: Layer name (default "file" if source is file.json or output is file.mbtiles). If there are multiple input files + * -l _name_ or --layer=_name_: Layer name (default "file" if source is file.json or output is file.mbtiles). If there are multiple input files specified, the files are all merged into the single named layer. - * -n _name_: Human-readable name (default file.json) + * -n _name_ or --name=_name_: Human-readable name (default file.json) ### File control - * -o _file_.mbtiles: Name the output file. - * -f: Delete the mbtiles file if it already exists instead of giving an error - * -F: Proceed (without deleting existing data) if the metadata or tiles table already exists + * -o _file_.mbtiles or --output=_file_.mbtiles: Name the output file. + * -f or --force: Delete the mbtiles file if it already exists instead of giving an error + * -F or --allow-existing: Proceed (without deleting existing data) if the metadata or tiles table already exists or if metadata fields can't be set - * -t _directory_: Put the temporary files in _directory_. 
- * -P: Use multiple threads to read different parts of each input file at once. + * -t _directory_ or --temporary-directory=_directory_: Put the temporary files in _directory_. + * -P or --read-parallel: Use multiple threads to read different parts of each input file at once. This will only work if the input is line-delimited JSON with each Feature on its own line, because it knows nothing of the top-level structure around the Features. Spurious "EOF" error messages may result otherwise. @@ -77,16 +77,16 @@ Options ### Zoom levels and resolution - * -z _zoom_: Maxzoom: the highest zoom level for which tiles are generated (default 14) - * -Z _zoom_: Minzoom: the lowest zoom level for which tiles are generated (default 0) - * -B _zoom_: Base zoom, the level at and above which all points are included in the tiles (default maxzoom). + * -z _zoom_ or --maximum-zoom=_zoom_: Maxzoom: the highest zoom level for which tiles are generated (default 14) + * -Z _zoom_ or --minimum-zoom=_zoom_: Minzoom: the lowest zoom level for which tiles are generated (default 0) + * -B _zoom_ or --base-zoom=_zoom_: Base zoom, the level at and above which all points are included in the tiles (default maxzoom). If you use -Bg, it will guess a zoom level that will keep at most 50,000 features in the densest tile. You can also specify a marker-width with -Bg*width* to allow fewer features in the densest tile to compensate for the larger marker, or -Bf*number* to allow at most *number* features in the densest tile. - * -d _detail_: Detail at max zoom level (default 12, for tile resolution of 4096) - * -D _detail_: Detail at lower zoom levels (default 12, for tile resolution of 4096) - * -m _detail_: Minimum detail that it will try if tiles are too big at regular detail (default 7) - * -b _pixels_: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"--1/256th of the tile width or height. (default 5) + * -d _detail_ or --full-detail=_detail_: Detail at max zoom level (default 12, for tile resolution of 4096) + * -D _detail_ or --low-detail=_detail_: Detail at lower zoom levels (default 12, for tile resolution of 4096) + * -m _detail_ or --minimum-detail=_detail_: Minimum detail that it will try if tiles are too big at regular detail (default 7) + * -b _pixels_ or --buffer=_pixels_: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"--1/256th of the tile width or height. (default 5) All internal math is done in terms of a 32-bit tile coordinate system, so 1/(2^32) of the size of Earth, or about 1cm, is the smallest distinguishable distance. If _maxzoom_ + _detail_ > 32, no additional @@ -94,35 +94,35 @@ resolution is obtained than by using a smaller _maxzoom_ or _detail_. ### Properties - * -x _name_: Exclude the named properties from all features - * -y _name_: Include the named properties in all features, excluding all those not explicitly named - * -X: Exclude all properties and encode only geometries + * -x _name_ or --exclude=_name_: Exclude the named properties from all features + * -y _name_ or --include=_name_: Include the named properties in all features, excluding all those not explicitly named + * -X or --exclude-all: Exclude all properties and encode only geometries ### Point simplification - * -r _rate_: Rate at which dots are dropped at zoom levels below basezoom (default 2.5). + * -r _rate_ or --drop-rate=_rate_: Rate at which dots are dropped at zoom levels below basezoom (default 2.5). 
If you use -rg, it will guess a drop rate that will keep at most 50,000 features in the densest tile. You can also specify a marker-width with -rg*width* to allow fewer features in the densest tile to compensate for the larger marker, or -rf*number* to allow at most *number* features in the densest tile. - * -g _gamma_: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number. + * -g _gamma_ or --gamma=_gamma_: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number. ### Doing more - * -ac: Coalesce adjacent line and polygon features that have the same properties - * -ar: Try reversing the directions of lines to make them coalesce and compress better - * -ao: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce - * -al: Let "dot" dropping at lower zooms apply to lines too + * -ac or --coalesce: Coalesce adjacent line and polygon features that have the same properties + * -ar or --reverse: Try reversing the directions of lines to make them coalesce and compress better + * -ao or --reorder: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce + * -al or --drop-lines: Let "dot" dropping at lower zooms apply to lines too ### Doing less - * -ps: Don't simplify lines - * -pS: Don't simplify lines at maxzoom (but do simplify at lower zooms) - * -pf: Don't limit tiles to 200,000 features - * -pk: Don't limit tiles to 500K bytes - * -pd: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries. - * -pi: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot-dropping is still geographic, which means it also undoes -ao). - * -pp: Don't split complex polygons (over 700 vertices after simplification) into multiple features. - * -q: Work quietly instead of reporting progress + * -ps or --no-line-simplification: Don't simplify lines + * -pS or --simplify-only-low-zooms: Don't simplify lines at maxzoom (but do simplify at lower zooms) + * -pf or --no-feature-limit: Don't limit tiles to 200,000 features + * -pk or --no-tile-size-limit: Don't limit tiles to 500K bytes + * -pd or --force-feature-limit: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries. + * -pi or --preserve-input-order: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot-dropping is still geographic, which means it also undoes -ao). + * -pp or --no-polygon-splitting: Don't split complex polygons (over 700 vertices after simplification) into multiple features. 
+ * -q or --quiet: Work quietly instead of reporting progress Example ------- diff --git a/geojson.c b/geojson.c index e43c755..19127e1 100644 --- a/geojson.c +++ b/geojson.c @@ -19,6 +19,7 @@ #include #include #include +#include <getopt.h> #include "jsonpull.h" #include "tile.h" @@ -35,6 +36,9 @@ static int min_detail = 7; int quiet = 0; int geometry_scale = 0; +static int prevent[256]; +static int additional[256]; + #define GEOM_POINT 0 /* array of positions */ #define GEOM_MULTIPOINT 1 /* array of arrays of positions */ #define GEOM_LINESTRING 2 /* array of arrays of positions */ @@ -1080,7 +1084,7 @@ void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile i } } -int read_json(int argc, char **argv, char *fname, const char *layername, int maxzoom, int minzoom, int basezoom, double basezoom_marker_width, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, char *prevent, char *additional, int read_parallel, int forcetable) { +int read_json(int argc, char **argv, char *fname, const char *layername, int maxzoom, int minzoom, int basezoom, double basezoom_marker_width, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, int *prevent, int *additional, int read_parallel, int forcetable) { int ret = EXIT_SUCCESS; struct reader reader[CPUS]; @@ -2108,8 +2112,6 @@ int main(int argc, char **argv) { double gamma = 0; int buffer = 5; const char *tmpdir = "/tmp"; - char prevent[256]; - char additional[256]; struct pool exclude, include; pool_init(&exclude, 0); @@ -2122,7 +2124,49 @@ int main(int argc, char **argv) { additional[i] = 0; } - while ((i = getopt(argc, argv, "l:n:z:Z:d:D:m:o:x:y:r:b:fFXt:g:p:vqa:B:P")) != -1) { + static struct option long_options[] = { + {"name", required_argument, 0, 'n'}, + {"layer", required_argument, 0, 'l'}, + {"maximum-zoom", required_argument, 0, 'z'}, + {"minimum-zoom", required_argument, 0, 'Z'}, + {"base-zoom", required_argument, 0, 'B'}, + {"full-detail", required_argument, 0, 'd'}, + {"low-detail", required_argument, 0, 'D'}, + {"minimum-detail", required_argument, 0, 'm'}, + {"output", required_argument, 0, 'o'}, + {"exclude", required_argument, 0, 'x'}, + {"include", required_argument, 0, 'y'}, + {"drop-rate", required_argument, 0, 'r'}, + {"buffer", required_argument, 0, 'b'}, + {"temporary-directory", required_argument, 0, 't'}, + {"gamma", required_argument, 0, 'g'}, + {"prevent", required_argument, 0, 'p'}, + {"additional", required_argument, 0, 'a'}, + + {"exclude-all", no_argument, 0, 'X'}, + {"force", no_argument, 0, 'f'}, + {"allow-existing", no_argument, 0, 'F'}, + {"quiet", no_argument, 0, 'q'}, + {"version", no_argument, 0, 'v'}, + {"read-parallel", no_argument, 0, 'P'}, + + {"coalesce", no_argument, &additional[A_COALESCE], 1}, + {"reverse", no_argument, &additional[A_REVERSE], 1}, + {"reorder", no_argument, &additional[A_REORDER], 1}, + {"drop-lines", no_argument, &additional[A_LINE_DROP], 1}, + + {"no-line-simplification", no_argument, &prevent[P_SIMPLIFY], 1}, + {"simplify-only-low-zooms", no_argument, &prevent[P_SIMPLIFY_LOW], 1}, + {"no-feature-limit", no_argument, &prevent[P_FEATURE_LIMIT], 1}, + {"no-tile-size-limit", no_argument, &prevent[P_KILOBYTE_LIMIT], 1}, + {"force-feature-limit", no_argument, &prevent[P_DYNAMIC_DROP], 1}, + {"preserve-input-order", no_argument, &prevent[P_INPUT_ORDER], 1}, + {"no-polygon-splitting", no_argument, 
&prevent[P_POLYGON_SPLIT], 1}, + + {0, 0, 0, 0}, + }; + + while ((i = getopt_long(argc, argv, "n:l:z:Z:B:d:D:m:o:x:y:r:b:t:g:p:a:XfFqvP", long_options, NULL)) != -1) { switch (i) { case 'n': name = optarg; diff --git a/tile.cc b/tile.cc index d71d6b5..7b61d11 100644 --- a/tile.cc +++ b/tile.cc @@ -480,8 +480,8 @@ struct partial { unsigned long long index2; int z; int line_detail; - char *prevent; - char *additional; + int *prevent; + int *additional; int maxzoom; }; @@ -501,8 +501,8 @@ void *partial_feature_worker(void *v) { signed char t = (*partials)[i].t; int z = (*partials)[i].z; int line_detail = (*partials)[i].line_detail; - char *prevent = (*partials)[i].prevent; - char *additional = (*partials)[i].additional; + int *prevent = (*partials)[i].prevent; + int *additional = (*partials)[i].additional; int maxzoom = (*partials)[i].maxzoom; if ((t == VT_LINE || t == VT_POLYGON) && !(prevent[P_SIMPLIFY] || (z == maxzoom && prevent[P_SIMPLIFY_LOW]))) { @@ -597,7 +597,7 @@ int manage_gap(unsigned long long index, unsigned long long *previndex, double s return 0; } -long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsigned tx, unsigned ty, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int minzoom, int maxzoom, double todo, char *geomstart, volatile long long *along, double gamma, int nlayers, char *prevent, char *additional, int child_shards, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, volatile int *running) { +long long write_tile(char **geoms, char *metabase, char *stringpool, int z, unsigned tx, unsigned ty, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int minzoom, int maxzoom, double todo, char *geomstart, volatile long long *along, double gamma, int nlayers, int *prevent, int *additional, int child_shards, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, volatile int *running) { int line_detail; double fraction = 1; @@ -1049,8 +1049,8 @@ struct write_tile_args { volatile long long *along; double gamma; int nlayers; - char *prevent; - char *additional; + int *prevent; + int *additional; int child_shards; int *geomfd; off_t *geom_size; @@ -1157,7 +1157,7 @@ void *run_thread(void *vargs) { return NULL; } -int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, char *additional, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y) { +int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, int *prevent, int *additional, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y) { int i; for (i = 0; i <= maxzoom; i++) { long long most = 0; diff --git a/tile.h b/tile.h index 0930fc4..8d58deb 
100644 --- a/tile.h +++ b/tile.h @@ -25,9 +25,9 @@ void deserialize_uint(char **f, unsigned *n); void deserialize_byte(char **f, signed char *n); struct pool_val *deserialize_string(char **f, struct pool *p, int type); -long long write_tile(char **geom, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, char *prevent, char *additional); +long long write_tile(char **geom, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, int *prevent, int *additional); -int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, char *additional, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y); +int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, int *prevent, int *additional, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y); int manage_gap(unsigned long long index, unsigned long long *previndex, double scale, double gamma, double *gap); diff --git a/version.h b/version.h index 4c2bdf0..ddb3937 100644 --- a/version.h +++ b/version.h @@ -1 +1 @@ -#define VERSION "tippecanoe v1.9.4\n" +#define VERSION "tippecanoe v1.9.5\n" From 448d1a124e7571239c2ab2b3d8beb55d4fc12481 Mon Sep 17 00:00:00 2001 From: Eric Fischer Date: Mon, 28 Mar 2016 15:10:04 -0700 Subject: [PATCH 14/14] Handle case of options that aren't processed individually --- geojson.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/geojson.c b/geojson.c index 19127e1..ab6a0c5 100644 --- a/geojson.c +++ b/geojson.c @@ -2168,6 +2168,9 @@ int main(int argc, char **argv) { while ((i = getopt_long(argc, argv, "n:l:z:Z:B:d:D:m:o:x:y:r:b:t:g:p:a:XfFqvP", long_options, NULL)) != -1) { switch (i) { + case 0: + break; + case 'n': name = optarg; break;
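The checks added in patches 08 through 10 all follow one pattern: before growing a size_t-based buffer or array, compute the new size in bytes, confirm it did not wrap around, and only then call realloc (still checking for NULL). Below is a minimal standalone sketch of that pattern. SIZE_FOR mirrors the macro that patch 10 leaves in jsonpull.c; append_ptr and the demo main are hypothetical helpers written only for illustration and are not code from these patches.

#include <stdio.h>
#include <stdlib.h>

/* Round a count up to the next multiple of 32 and convert it to bytes,
 * as jsonpull's SIZE_FOR(i, size) does after patch 10. */
#define SIZE_FOR(i, size) ((size_t) ((((i) + 31) & ~31) * (size)))

/* Hypothetical helper: append an element to a growable pointer array,
 * checking the byte count (not the element count) for overflow before
 * growing, and exiting on overflow or allocation failure. */
static void append_ptr(void ***array, size_t *length, void *item) {
	if (SIZE_FOR(*length + 1, sizeof(void *)) != SIZE_FOR(*length, sizeof(void *))) {
		if (SIZE_FOR(*length + 1, sizeof(void *)) < SIZE_FOR(*length, sizeof(void *))) {
			fprintf(stderr, "Array size overflow\n");
			exit(EXIT_FAILURE);
		}
		void **grown = realloc(*array, SIZE_FOR(*length + 1, sizeof(void *)));
		if (grown == NULL) {
			perror("Out of memory");
			exit(EXIT_FAILURE);
		}
		*array = grown;
	}
	(*array)[(*length)++] = item;
}

int main(void) {
	void **items = NULL;
	size_t n = 0;
	append_ptr(&items, &n, "first");
	append_ptr(&items, &n, "second");
	printf("%zu items stored\n", n);
	free(items);
	return 0;
}

The point of the "Oops" in patch 10 is visible in the macro: the wraparound has to be detected on the multiplied byte count, because an element count that still fits in a size_t can overflow once it is scaled by sizeof(json_object *).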
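Patches 13 and 14 work together: getopt_long() returns the short-option character for long options that map onto an existing single-letter flag, but for entries whose flag field points at a variable (the --coalesce and --no-... family above) it stores the value itself and returns 0, which is why patch 14 adds an explicit case 0 to the option switch. The following sketch shows that behavior with made-up option names rather than tippecanoe's full table.

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static int quiet_flag = 0;	/* set directly by getopt_long through the flag pointer */

int main(int argc, char **argv) {
	static struct option long_options[] = {
		{"output", required_argument, 0, 'o'},	/* behaves like the short option -o */
		{"quiet", no_argument, &quiet_flag, 1},	/* sets quiet_flag and makes getopt_long return 0 */
		{0, 0, 0, 0},
	};

	int i;
	while ((i = getopt_long(argc, argv, "o:q", long_options, NULL)) != -1) {
		switch (i) {
		case 0:
			/* A flag-style long option was already handled by getopt_long itself */
			break;
		case 'o':
			printf("output file: %s\n", optarg);
			break;
		case 'q':
			quiet_flag = 1;
			break;
		default:
			fprintf(stderr, "Usage: %s [-q] [-o file]\n", argv[0]);
			exit(EXIT_FAILURE);
		}
	}

	printf("quiet: %d\n", quiet_flag);
	return 0;
}

Running the sketch as ./a.out --quiet --output out.mbtiles prints the output name and then quiet: 1; the --quiet option only ever reaches case 0. This is also why patch 13 moves prevent[] and additional[] out of main() and makes them int: the flag field of struct option is an int *, and the initializer of a static struct option array must be an address constant, which the address of a local automatic array element is not.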