Merge pull request #29 from mapbox/multilayer

Add multilayer support
Eric Fischer 2015-04-10 11:03:36 -07:00
commit 85919de490
6 changed files with 474 additions and 388 deletions

README.md

@ -34,9 +34,10 @@ The easiest way to install tippecanoe on OSX is with [Homebrew](http://brew.sh/)
Usage
-----
tippecanoe -o file.mbtiles [file.json]
tippecanoe -o file.mbtiles [file.json ...]
If the file is not specified, it reads GeoJSON from the standard input.
If no files are specified, it reads GeoJSON from the standard input.
If multiple files are specified, each is placed in its own layer.
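For example, with two hypothetical input files roads.json and buildings.json,
tippecanoe -o together.mbtiles roads.json buildings.json
writes a single tileset containing a "roads" layer and a "buildings" layer, each named after its source file.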
The GeoJSON features need not be wrapped in a FeatureCollection.
You can concatenate multiple GeoJSON features or files together,
@ -46,18 +47,33 @@ it encounters.
Options
-------
* -l <i>name</i>: Layer name (default "file" if source is file.json)
### Naming
* -l <i>name</i>: Layer name (default "file" if source is file.json or output is file.mbtiles). Only works if there is only one layer.
* -n <i>name</i>: Human-readable name (default file.json)
### File control
* -o <i>file</i>.mbtiles: Name the output file.
* -f: Delete the mbtiles file if it already exists instead of giving an error
### Zoom levels and resolution
* -z <i>zoom</i>: Base (maxzoom) zoom level (default 14)
* -Z <i>zoom</i>: Lowest (minzoom) zoom level (default 0)
* -d <i>detail</i>: Detail at base zoom level (default 26-basezoom, ~0.5m, for tile resolution of 4096 if -z14)
* -D <i>detail</i>: Detail at lower zoom levels (default 10, for tile resolution of 1024)
* -b <i>pixels</i>: Buffer size where features are duplicated from adjacent tiles (default 5)
### Properties
* -x <i>name</i>: Exclude the named properties from all features
* -y <i>name</i>: Include the named properties in all features, excluding all those not explicitly named
* -X: Exclude all properties and encode only geometries
* -f: Delete the mbtiles file if it already exists instead of giving an error
### Point simplification
* -r <i>rate</i>: Rate at which dots are dropped at lower zoom levels (default 2.5)
* -b <i>pixels</i>: Buffer size where features are duplicated from adjacent tiles (default 5)
* -g <i>gamma</i>: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
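For instance, with -g 2 a hypothetical cluster of 10,000 dots spaced less than a pixel apart would be thinned to about sqrt(10,000) = 100 dots.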
Example

geojson.c

@ -58,41 +58,41 @@ int mb_geometry[GEOM_TYPES] = {
VT_POLYGON,
};
size_t fwrite_check(const void *ptr, size_t size, size_t nitems, FILE *stream, const char *fname, json_pull *source) {
size_t fwrite_check(const void *ptr, size_t size, size_t nitems, FILE *stream, const char *fname) {
size_t w = fwrite(ptr, size, nitems, stream);
if (w != nitems) {
fprintf(stderr, "%s:%d: Write to temporary file failed: %s\n", fname, source->line, strerror(errno));
fprintf(stderr, "%s: Write to temporary file failed: %s\n", fname, strerror(errno));
exit(EXIT_FAILURE);
}
return w;
}
void serialize_int(FILE *out, int n, long long *fpos, const char *fname, json_pull *source) {
fwrite_check(&n, sizeof(int), 1, out, fname, source);
void serialize_int(FILE *out, int n, long long *fpos, const char *fname) {
fwrite_check(&n, sizeof(int), 1, out, fname);
*fpos += sizeof(int);
}
void serialize_long_long(FILE *out, long long n, long long *fpos, const char *fname, json_pull *source) {
fwrite_check(&n, sizeof(long long), 1, out, fname, source);
void serialize_long_long(FILE *out, long long n, long long *fpos, const char *fname) {
fwrite_check(&n, sizeof(long long), 1, out, fname);
*fpos += sizeof(long long);
}
void serialize_byte(FILE *out, signed char n, long long *fpos, const char *fname, json_pull *source) {
fwrite_check(&n, sizeof(signed char), 1, out, fname, source);
void serialize_byte(FILE *out, signed char n, long long *fpos, const char *fname) {
fwrite_check(&n, sizeof(signed char), 1, out, fname);
*fpos += sizeof(signed char);
}
void serialize_uint(FILE *out, unsigned n, long long *fpos, const char *fname, json_pull *source) {
fwrite_check(&n, sizeof(unsigned), 1, out, fname, source);
void serialize_uint(FILE *out, unsigned n, long long *fpos, const char *fname) {
fwrite_check(&n, sizeof(unsigned), 1, out, fname);
*fpos += sizeof(unsigned);
}
void serialize_string(FILE *out, const char *s, long long *fpos, const char *fname, json_pull *source) {
void serialize_string(FILE *out, const char *s, long long *fpos, const char *fname) {
int len = strlen(s);
serialize_int(out, len + 1, fpos, fname, source);
fwrite_check(s, sizeof(char), len, out, fname, source);
fwrite_check("", sizeof(char), 1, out, fname, source);
serialize_int(out, len + 1, fpos, fname);
fwrite_check(s, sizeof(char), len, out, fname);
fwrite_check("", sizeof(char), 1, out, fname);
*fpos += len + 1;
}
@ -148,9 +148,9 @@ void parse_geometry(int t, json_object *j, unsigned *bbox, long long *fpos, FILE
}
}
serialize_byte(out, op, fpos, fname, source);
serialize_uint(out, x, fpos, fname, source);
serialize_uint(out, y, fpos, fname, source);
serialize_byte(out, op, fpos, fname);
serialize_uint(out, x, fpos, fname);
serialize_uint(out, y, fpos, fname);
} else {
fprintf(stderr, "%s:%d: malformed point\n", fname, source->line);
}
@ -158,7 +158,7 @@ void parse_geometry(int t, json_object *j, unsigned *bbox, long long *fpos, FILE
if (t == GEOM_POLYGON) {
if (*fpos != began) {
serialize_byte(out, VT_CLOSEPATH, fpos, fname, source);
serialize_byte(out, VT_CLOSEPATH, fpos, fname);
}
}
}
@ -194,7 +194,7 @@ struct pool_val *deserialize_string(char **f, struct pool *p, int type) {
return ret;
}
int traverse_zooms(int geomfd[4], off_t geom_size[4], char *metabase, unsigned *file_bbox, struct pool *file_keys, unsigned *midx, unsigned *midy, const char *layername, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, struct json_pull *jp, const char *tmpdir, double gamma) {
int traverse_zooms(int geomfd[4], off_t geom_size[4], char *metabase, unsigned *file_bbox, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers) {
int i;
for (i = 0; i <= maxzoom; i++) {
long long most = 0;
@ -255,7 +255,7 @@ int traverse_zooms(int geomfd[4], off_t geom_size[4], char *metabase, unsigned *
// fprintf(stderr, "%d/%u/%u\n", z, x, y);
long long len = write_tile(&geom, metabase, file_bbox, z, x, y, z == maxzoom ? full_detail : low_detail, maxzoom, file_keys, layername, outdb, droprate, buffer, fname, jp, sub, minzoom, maxzoom, todo, geomstart, along, gamma);
long long len = write_tile(&geom, metabase, file_bbox, z, x, y, z == maxzoom ? full_detail : low_detail, maxzoom, file_keys, layernames, outdb, droprate, buffer, fname, sub, minzoom, maxzoom, todo, geomstart, along, gamma, nlayers);
if (len < 0) {
return i - 1;
@ -361,7 +361,7 @@ static void merge(struct merge *merges, int nmerges, unsigned char *map, FILE *f
}
}
int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, int minzoom, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma) {
int read_json(int argc, char **argv, char *fname, const char *layername, int maxzoom, int minzoom, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma) {
int ret = EXIT_SUCCESS;
char metaname[strlen(tmpdir) + strlen("/meta.XXXXXXXX") + 1];
@ -413,200 +413,227 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
unsigned file_bbox[] = { UINT_MAX, UINT_MAX, 0, 0 };
unsigned midx = 0, midy = 0;
json_pull *jp = json_begin_file(f);
long long seq = 0;
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "%s:%d: %s\n", fname, jp->line, jp->error);
}
json_free(jp->root);
break;
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING || strcmp(type->string, "Feature") != 0) {
continue;
}
json_object *geometry = json_hash_get(j, "geometry");
if (geometry == NULL) {
fprintf(stderr, "%s:%d: feature with no geometry\n", fname, jp->line);
json_free(j);
continue;
}
json_object *geometry_type = json_hash_get(geometry, "type");
if (geometry_type == NULL) {
static int warned = 0;
if (!warned) {
fprintf(stderr, "%s:%d: null geometry (additional not reported)\n", fname, jp->line);
warned = 1;
}
json_free(j);
continue;
}
if (geometry_type->type != JSON_STRING) {
fprintf(stderr, "%s:%d: geometry without type\n", fname, jp->line);
json_free(j);
continue;
}
json_object *properties = json_hash_get(j, "properties");
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
fprintf(stderr, "%s:%d: feature without properties hash\n", fname, jp->line);
json_free(j);
continue;
}
json_object *coordinates = json_hash_get(geometry, "coordinates");
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
fprintf(stderr, "%s:%d: feature without coordinates array\n", fname, jp->line);
json_free(j);
continue;
}
int t;
for (t = 0; t < GEOM_TYPES; t++) {
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
break;
}
}
if (t >= GEOM_TYPES) {
fprintf(stderr, "%s:%d: Can't handle geometry type %s\n", fname, jp->line, geometry_type->string);
json_free(j);
continue;
}
{
unsigned bbox[] = { UINT_MAX, UINT_MAX, 0, 0 };
int nprop = 0;
if (properties->type == JSON_HASH) {
nprop = properties->length;
}
long long metastart = metapos;
char *metakey[nprop];
char *metaval[nprop];
int metatype[nprop];
int m = 0;
int i;
for (i = 0; i < nprop; i++) {
if (properties->keys[i]->type == JSON_STRING) {
if (exclude_all) {
if (!is_pooled(include, properties->keys[i]->string, VT_STRING)) {
continue;
}
} else if (is_pooled(exclude, properties->keys[i]->string, VT_STRING)) {
continue;
}
metakey[m] = properties->keys[i]->string;
if (properties->values[i] != NULL && properties->values[i]->type == JSON_STRING) {
metatype[m] = VT_STRING;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && properties->values[i]->type == JSON_NUMBER) {
metatype[m] = VT_NUMBER;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_TRUE || properties->values[i]->type == JSON_FALSE)) {
metatype[m] = VT_BOOLEAN;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_NULL)) {
;
} else {
fprintf(stderr, "%s:%d: Unsupported property type for %s\n", fname, jp->line, properties->keys[i]->string);
json_free(j);
continue;
}
}
}
serialize_int(metafile, m, &metapos, fname, jp);
for (i = 0; i < m; i++) {
serialize_int(metafile, metatype[i], &metapos, fname, jp);
serialize_string(metafile, metakey[i], &metapos, fname, jp);
serialize_string(metafile, metaval[i], &metapos, fname, jp);
}
long long geomstart = geompos;
serialize_int(geomfile, mb_geometry[t], &geompos, fname, jp);
serialize_long_long(geomfile, metastart, &geompos, fname, jp);
parse_geometry(t, coordinates, bbox, &geompos, geomfile, VT_MOVETO, fname, jp);
serialize_byte(geomfile, VT_END, &geompos, fname, jp);
/*
* Note that minzoom for lines is the dimension
* of the geometry in world coordinates, but
* for points is the lowest zoom level (in tiles,
* not in pixels) at which it should be drawn.
*
* So a line that is too small for, say, z8
* will have minzoom of 18 (if tile detail is 10),
* not 8.
*/
int minzoom = 0;
if (mb_geometry[t] == VT_LINE) {
for (minzoom = 0; minzoom < 31; minzoom++) {
unsigned mask = 1 << (32 - (minzoom + 1));
if (((bbox[0] & mask) != (bbox[2] & mask)) ||
((bbox[1] & mask) != (bbox[3] & mask))) {
break;
}
}
} else if (mb_geometry[t] == VT_POINT) {
double r = ((double) rand()) / RAND_MAX;
if (r == 0) {
r = .00000001;
}
minzoom = maxzoom - floor(log(r) / - log(droprate));
}
serialize_byte(geomfile, minzoom, &geompos, fname, jp);
struct index index;
index.start = geomstart;
index.end = geompos;
index.index = encode(bbox[0] / 2 + bbox[2] / 2, bbox[1] / 2 + bbox[3] / 2);
fwrite_check(&index, sizeof(struct index), 1, indexfile, fname, jp);
indexpos += sizeof(struct index);
for (i = 0; i < 2; i++) {
if (bbox[i] < file_bbox[i]) {
file_bbox[i] = bbox[i];
}
}
for (i = 2; i < 4; i++) {
if (bbox[i] > file_bbox[i]) {
file_bbox[i] = bbox[i];
}
}
if (seq % 10000 == 0) {
fprintf(stderr, "Read %.2f million features\r", seq / 1000000.0);
}
seq++;
}
json_free(j);
/* XXX check for any non-features in the outer object */
int nlayers = argc;
if (nlayers == 0) {
nlayers = 1;
}
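/* One layer per input file; with no files named, a single layer is read from the standard input. */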
int n;
for (n = 0; n < nlayers; n++) {
json_pull *jp;
const char *reading;
FILE *fp;
if (n >= argc) {
reading = "standard input";
fp = stdin;
} else {
reading = argv[n];
fp = fopen(argv[n], "r");
if (fp == NULL) {
perror(argv[n]);
continue;
}
}
jp = json_begin_file(fp);
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "%s:%d: %s\n", reading, jp->line, jp->error);
}
json_free(jp->root);
break;
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING || strcmp(type->string, "Feature") != 0) {
continue;
}
json_object *geometry = json_hash_get(j, "geometry");
if (geometry == NULL) {
fprintf(stderr, "%s:%d: feature with no geometry\n", reading, jp->line);
json_free(j);
continue;
}
json_object *geometry_type = json_hash_get(geometry, "type");
if (geometry_type == NULL) {
static int warned = 0;
if (!warned) {
fprintf(stderr, "%s:%d: null geometry (additional not reported)\n", reading, jp->line);
warned = 1;
}
json_free(j);
continue;
}
if (geometry_type->type != JSON_STRING) {
fprintf(stderr, "%s:%d: geometry without type\n", reading, jp->line);
json_free(j);
continue;
}
json_object *properties = json_hash_get(j, "properties");
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
fprintf(stderr, "%s:%d: feature without properties hash\n", reading, jp->line);
json_free(j);
continue;
}
json_object *coordinates = json_hash_get(geometry, "coordinates");
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
fprintf(stderr, "%s:%d: feature without coordinates array\n", reading, jp->line);
json_free(j);
continue;
}
int t;
for (t = 0; t < GEOM_TYPES; t++) {
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
break;
}
}
if (t >= GEOM_TYPES) {
fprintf(stderr, "%s:%d: Can't handle geometry type %s\n", reading, jp->line, geometry_type->string);
json_free(j);
continue;
}
{
unsigned bbox[] = { UINT_MAX, UINT_MAX, 0, 0 };
int nprop = 0;
if (properties->type == JSON_HASH) {
nprop = properties->length;
}
long long metastart = metapos;
char *metakey[nprop];
char *metaval[nprop];
int metatype[nprop];
int m = 0;
int i;
for (i = 0; i < nprop; i++) {
if (properties->keys[i]->type == JSON_STRING) {
if (exclude_all) {
if (!is_pooled(include, properties->keys[i]->string, VT_STRING)) {
continue;
}
} else if (is_pooled(exclude, properties->keys[i]->string, VT_STRING)) {
continue;
}
metakey[m] = properties->keys[i]->string;
if (properties->values[i] != NULL && properties->values[i]->type == JSON_STRING) {
metatype[m] = VT_STRING;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && properties->values[i]->type == JSON_NUMBER) {
metatype[m] = VT_NUMBER;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_TRUE || properties->values[i]->type == JSON_FALSE)) {
metatype[m] = VT_BOOLEAN;
metaval[m] = properties->values[i]->string;
m++;
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_NULL)) {
;
} else {
fprintf(stderr, "%s:%d: Unsupported property type for %s\n", reading, jp->line, properties->keys[i]->string);
json_free(j);
continue;
}
}
}
serialize_int(metafile, m, &metapos, fname);
for (i = 0; i < m; i++) {
serialize_int(metafile, metatype[i], &metapos, fname);
serialize_string(metafile, metakey[i], &metapos, fname);
serialize_string(metafile, metaval[i], &metapos, fname);
}
long long geomstart = geompos;
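/*
 * Layout of each feature record in the temporary geometry file, as
 * serialized below: a geometry type byte, a layer byte (the index of
 * the input file the feature came from), the offset of the feature's
 * metadata in the metadata file, the geometry opcodes, VT_END, and
 * finally the feature's minzoom byte.
 */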
serialize_byte(geomfile, mb_geometry[t], &geompos, fname);
serialize_byte(geomfile, n, &geompos, fname);
serialize_long_long(geomfile, metastart, &geompos, fname);
parse_geometry(t, coordinates, bbox, &geompos, geomfile, VT_MOVETO, fname, jp);
serialize_byte(geomfile, VT_END, &geompos, fname);
/*
* Note that minzoom for lines is the dimension
* of the geometry in world coordinates, but
* for points is the lowest zoom level (in tiles,
* not in pixels) at which it should be drawn.
*
* So a line that is too small for, say, z8
* will have minzoom of 18 (if tile detail is 10),
* not 8.
*/
int minzoom = 0;
if (mb_geometry[t] == VT_LINE) {
for (minzoom = 0; minzoom < 31; minzoom++) {
unsigned mask = 1 << (32 - (minzoom + 1));
if (((bbox[0] & mask) != (bbox[2] & mask)) ||
((bbox[1] & mask) != (bbox[3] & mask))) {
break;
}
}
} else if (mb_geometry[t] == VT_POINT) {
double r = ((double) rand()) / RAND_MAX;
if (r == 0) {
r = .00000001;
}
minzoom = maxzoom - floor(log(r) / - log(droprate));
}
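/*
 * Worked example for the point case, with illustrative values only
 * (assuming the defaults -z 14 and -r 2.5, i.e. maxzoom 14, droprate 2.5):
 * r = 0.5 gives minzoom = 14 - floor(log(0.5) / -log(2.5)) = 14 - 0 = 14,
 * while r = 0.1 gives 14 - floor(2.51) = 12. On average 1 in 2.5 points
 * carries over into each successively lower zoom level.
 */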
serialize_byte(geomfile, minzoom, &geompos, fname);
struct index index;
index.start = geomstart;
index.end = geompos;
index.index = encode(bbox[0] / 2 + bbox[2] / 2, bbox[1] / 2 + bbox[3] / 2);
fwrite_check(&index, sizeof(struct index), 1, indexfile, fname);
indexpos += sizeof(struct index);
for (i = 0; i < 2; i++) {
if (bbox[i] < file_bbox[i]) {
file_bbox[i] = bbox[i];
}
}
for (i = 2; i < 4; i++) {
if (bbox[i] > file_bbox[i]) {
file_bbox[i] = bbox[i];
}
}
if (seq % 10000 == 0) {
fprintf(stderr, "Read %.2f million features\r", seq / 1000000.0);
}
seq++;
}
json_free(j);
/* XXX check for any non-features in the outer object */
}
json_end(jp);
fclose(fp);
}
json_end(jp);
fclose(metafile);
fclose(geomfile);
fclose(indexfile);
@ -624,7 +651,7 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
}
if (geomst.st_size == 0 || metast.st_size == 0) {
fprintf(stderr, "%s: did not read any valid geometries\n", fname);
fprintf(stderr, "did not read any valid geometries\n");
exit(EXIT_FAILURE);
}
@ -634,38 +661,53 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
exit(EXIT_FAILURE);
}
struct pool file_keys;
pool_init(&file_keys, 0);
struct pool file_keys1[nlayers];
struct pool *file_keys[nlayers];
int i;
for (i = 0; i < nlayers; i++) {
pool_init(&file_keys1[i], 0);
file_keys[i] = &file_keys1[i];
}
char trunc[strlen(fname) + 1];
if (layername == NULL) {
const char *ocp, *use = fname;
for (ocp = fname; *ocp; ocp++) {
if (*ocp == '/' && ocp[1] != '\0') {
use = ocp + 1;
char *layernames[nlayers];
for (i = 0; i < nlayers; i++) {
if (argc <= 1 && layername != NULL) {
layernames[i] = strdup(layername);
} else {
char *src = argv[i];
if (argc < 1) {
src = fname;
}
}
strcpy(trunc, use);
char *cp = strstr(trunc, ".json");
if (cp != NULL) {
*cp = '\0';
}
cp = strstr(trunc, ".mbtiles");
if (cp != NULL) {
*cp = '\0';
}
layername = trunc;
char *out = trunc;
for (cp = trunc; *cp; cp++) {
if (isalpha(*cp) || isdigit(*cp) || *cp == '_') {
*out++ = *cp;
char *trunc = layernames[i] = malloc(strlen(src) + 1);
const char *ocp, *use = src;
for (ocp = src; *ocp; ocp++) {
if (*ocp == '/' && ocp[1] != '\0') {
use = ocp + 1;
}
}
}
*out = '\0';
strcpy(trunc, use);
printf("using layer name %s\n", trunc);
char *cp = strstr(trunc, ".json");
if (cp != NULL) {
*cp = '\0';
}
cp = strstr(trunc, ".mbtiles");
if (cp != NULL) {
*cp = '\0';
}
layername = trunc;
char *out = trunc;
for (cp = trunc; *cp; cp++) {
if (isalpha(*cp) || isdigit(*cp) || *cp == '_') {
*out++ = *cp;
}
}
*out = '\0';
printf("using layer %d name %s\n", i, trunc);
}
}
/* Sort the index by geometry */
@ -783,15 +825,15 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
geompos = 0;
/* initial tile is 0/0/0 */
serialize_int(geomfile, 0, &geompos, fname, jp);
serialize_uint(geomfile, 0, &geompos, fname, jp);
serialize_uint(geomfile, 0, &geompos, fname, jp);
serialize_int(geomfile, 0, &geompos, fname);
serialize_uint(geomfile, 0, &geompos, fname);
serialize_uint(geomfile, 0, &geompos, fname);
long long i;
long long sum = 0;
long long progress = 0;
for (i = 0; i < indexpos / sizeof(struct index); i++) {
fwrite_check(geom_map + index_map[i].start, sizeof(char), index_map[i].end - index_map[i].start, geomfile, fname, jp);
fwrite_check(geom_map + index_map[i].start, sizeof(char), index_map[i].end - index_map[i].start, geomfile, fname);
sum += index_map[i].end - index_map[i].start;
long long p = 1000 * i / (indexpos / sizeof(struct index));
@ -802,7 +844,7 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
}
/* end of tile */
serialize_int(geomfile, -2, &geompos, fname, jp);
serialize_byte(geomfile, -2, &geompos, fname);
fclose(geomfile);
}
@ -843,7 +885,7 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
fprintf(stderr, "%lld features, %lld bytes of geometry, %lld bytes of metadata\n", seq, (long long) geomst.st_size, (long long) metast.st_size);
int written = traverse_zooms(fd, size, meta, file_bbox, &file_keys, &midx, &midy, layername, maxzoom, minzoom, outdb, droprate, buffer, fname, jp, tmpdir, gamma);
int written = traverse_zooms(fd, size, meta, file_bbox, file_keys, &midx, &midy, layernames, maxzoom, minzoom, outdb, droprate, buffer, fname, tmpdir, gamma, nlayers);
if (maxzoom != written) {
fprintf(stderr, "\n\n\n*** NOTE TILES ONLY COMPLETE THROUGH ZOOM %d ***\n\n\n", written);
@ -883,9 +925,12 @@ int read_json(FILE *f, const char *fname, const char *layername, int maxzoom, in
midlon = maxlon;
}
mbtiles_write_metadata(outdb, fname, layername, minzoom, maxzoom, minlat, minlon, maxlat, maxlon, midlat, midlon, &file_keys);
mbtiles_write_metadata(outdb, fname, layernames, minzoom, maxzoom, minlat, minlon, maxlat, maxlon, midlat, midlon, file_keys, nlayers); // XXX layers
pool_free_strings(&file_keys);
for (i = 0; i < nlayers; i++) {
pool_free_strings(&file_keys1[i]);
free(layernames[i]);
}
return ret;
}
@ -925,7 +970,7 @@ int main(int argc, char **argv) {
break;
case 'Z':
minzoom = atoi(optarg);
minzoom = atoi(optarg);
break;
case 'd':
@ -997,24 +1042,8 @@ int main(int argc, char **argv) {
sqlite3 *outdb = mbtiles_open(outdir, argv);
int ret = EXIT_SUCCESS;
if (argc == optind + 1) {
int i;
for (i = optind; i < argc; i++) {
FILE *f = fopen(argv[i], "r");
if (f == NULL) {
fprintf(stderr, "%s: %s: %s\n", argv[0], argv[i], strerror(errno));
} else {
ret = read_json(f, name ? name : argv[i], layer, maxzoom, minzoom, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma);
fclose(f);
}
}
} else if (argc > optind) {
fprintf(stderr, "%s: Only accepts one input file\n", argv[0]);
exit(EXIT_FAILURE);
} else {
ret = read_json(stdin, name ? name : outdir, layer, maxzoom, minzoom, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma);
}
ret = read_json(argc - optind, argv + optind, name ? name : outdir, layer, maxzoom, minzoom, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma);
mbtiles_close(outdb, argv);
return ret;

mbtiles.c

@ -107,7 +107,7 @@ static void aprintf(char **buf, const char *format, ...) {
free(tmp);
}
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, const char *layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool *fields) {
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, char **layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool **file_keys, int nlayers) {
char *sql, *err;
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('name', %Q);", fname);
@ -174,27 +174,38 @@ void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, const char *layer
sqlite3_free(sql);
char *buf = strdup("{");
aprintf(&buf, "\"vector_layers\": [ { \"id\": \"");
quote(&buf, layername);
aprintf(&buf, "\", \"description\": \"\", \"minzoom\": %d, \"maxzoom\": %d, \"fields\": {", minzoom, maxzoom);
aprintf(&buf, "\"vector_layers\": [ ");
struct pool_val *pv;
for (pv = fields->head; pv != NULL; pv = pv->next) {
aprintf(&buf, "\"");
quote(&buf, pv->s);
if (pv->type == VT_NUMBER) {
aprintf(&buf, "\": \"Number\"");
} else {
aprintf(&buf, "\": \"String\"");
}
if (pv->next != NULL) {
int i;
for (i = 0; i < nlayers; i++) {
if (i != 0) {
aprintf(&buf, ", ");
}
aprintf(&buf, "{ \"id\": \"");
quote(&buf, layername[i]);
aprintf(&buf, "\", \"description\": \"\", \"minzoom\": %d, \"maxzoom\": %d, \"fields\": {", minzoom, maxzoom);
struct pool_val *pv;
for (pv = file_keys[i]->head; pv != NULL; pv = pv->next) {
aprintf(&buf, "\"");
quote(&buf, pv->s);
if (pv->type == VT_NUMBER) {
aprintf(&buf, "\": \"Number\"");
} else {
aprintf(&buf, "\": \"String\"");
}
if (pv->next != NULL) {
aprintf(&buf, ", ");
}
}
aprintf(&buf, "} }");
}
aprintf(&buf, "} } ] }");
aprintf(&buf, " ] }");
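/*
 * For example, with two hypothetical layers "roads" and "buildings" this
 * produces a "json" metadata value along the lines of:
 * {"vector_layers": [ { "id": "roads", "description": "", "minzoom": 0,
 * "maxzoom": 14, "fields": {"name": "String", "lanes": "Number"} },
 * { "id": "buildings", "description": "", "minzoom": 0, "maxzoom": 14,
 * "fields": {"height": "Number"} } ] }
 */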
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('json', %Q);", buf);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {

mbtiles.h

@ -2,6 +2,6 @@ sqlite3 *mbtiles_open(char *dbname, char **argv);
void mbtiles_write_tile(sqlite3 *outdb, int z, int tx, int ty, const char *data, int size);
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, const char *layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool *fields);
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, char **layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool **file_keys, int nlayers);
void mbtiles_close(sqlite3 *outdb, char **argv);

tile.cc

@ -214,52 +214,56 @@ void decode_meta(char **meta, struct pool *keys, struct pool *values, struct poo
}
}
mapnik::vector::tile create_tile(const char *layername, int line_detail, std::vector<coalesce> &features, long long *count, struct pool *keys, struct pool *values) {
mapnik::vector::tile create_tile(char **layernames, int line_detail, std::vector<std::vector<coalesce> > &features, long long *count, struct pool **keys, struct pool **values, int nlayers) {
mapnik::vector::tile tile;
mapnik::vector::tile_layer *layer = tile.add_layers();
layer->set_name(layername);
layer->set_version(1);
layer->set_extent(1 << line_detail);
int i;
for (i = 0; i < nlayers; i++) {
mapnik::vector::tile_layer *layer = tile.add_layers();
unsigned x;
for (x = 0; x < features.size(); x++) {
if (features[x].type == VT_LINE || features[x].type == VT_POLYGON) {
features[x].geom = remove_noop(features[x].geom, features[x].type);
layer->set_name(layernames[i]);
layer->set_version(1);
layer->set_extent(1 << line_detail);
unsigned x;
for (x = 0; x < features[i].size(); x++) {
if (features[i][x].type == VT_LINE || features[i][x].type == VT_POLYGON) {
features[i][x].geom = remove_noop(features[i][x].geom, features[i][x].type);
}
mapnik::vector::tile_feature *feature = layer->add_features();
if (features[i][x].type == VT_POINT) {
feature->set_type(mapnik::vector::tile::Point);
} else if (features[i][x].type == VT_LINE) {
feature->set_type(mapnik::vector::tile::LineString);
} else if (features[i][x].type == VT_POLYGON) {
feature->set_type(mapnik::vector::tile::Polygon);
} else {
feature->set_type(mapnik::vector::tile::Unknown);
}
to_feature(features[i][x].geom, feature);
*count += features[i][x].geom.size();
unsigned y;
for (y = 0; y < features[i][x].meta.size(); y++) {
feature->add_tags(features[i][x].meta[y]);
}
}
mapnik::vector::tile_feature *feature = layer->add_features();
if (features[x].type == VT_POINT) {
feature->set_type(mapnik::vector::tile::Point);
} else if (features[x].type == VT_LINE) {
feature->set_type(mapnik::vector::tile::LineString);
} else if (features[x].type == VT_POLYGON) {
feature->set_type(mapnik::vector::tile::Polygon);
} else {
feature->set_type(mapnik::vector::tile::Unknown);
struct pool_val *pv;
for (pv = keys[i]->head; pv != NULL; pv = pv->next) {
layer->add_keys(pv->s, strlen(pv->s));
}
for (pv = values[i]->head; pv != NULL; pv = pv->next) {
mapnik::vector::tile_value *tv = layer->add_values();
to_feature(features[x].geom, feature);
*count += features[x].geom.size();
unsigned y;
for (y = 0; y < features[x].meta.size(); y++) {
feature->add_tags(features[x].meta[y]);
}
}
struct pool_val *pv;
for (pv = keys->head; pv != NULL; pv = pv->next) {
layer->add_keys(pv->s, strlen(pv->s));
}
for (pv = values->head; pv != NULL; pv = pv->next) {
mapnik::vector::tile_value *tv = layer->add_values();
if (pv->type == VT_NUMBER) {
tv->set_double_value(atof(pv->s));
} else {
tv->set_string_value(pv->s);
if (pv->type == VT_NUMBER) {
tv->set_double_value(atof(pv->s));
} else {
tv->set_string_value(pv->s);
}
}
}
@ -284,6 +288,7 @@ struct sll {
}
};
#if 0
void evaluate(std::vector<coalesce> &features, char *metabase, struct pool *file_keys, const char *layername, int line_detail, long long orig) {
std::vector<sll> options;
@ -302,7 +307,7 @@ void evaluate(std::vector<coalesce> &features, char *metabase, struct pool *file
}
std::vector<coalesce> empty;
mapnik::vector::tile tile = create_tile(layername, line_detail, empty, &count, &keys, &values);
mapnik::vector::tile tile = create_tile(layername, line_detail, empty, &count, &keys, &values, 1); // XXX layer
std::string s;
std::string compressed;
@ -329,7 +334,7 @@ void evaluate(std::vector<coalesce> &features, char *metabase, struct pool *file
long long count = 0;
std::vector<coalesce> empty;
mapnik::vector::tile tile = create_tile(layername, line_detail, features, &count, &keys, &values);
mapnik::vector::tile tile = create_tile(layername, line_detail, features, &count, &keys, &values, nlayers);
std::string s;
std::string compressed;
@ -341,8 +346,9 @@ void evaluate(std::vector<coalesce> &features, char *metabase, struct pool *file
pool_free(&values);
pool_free(&keys);
}
#endif
long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, unsigned tx, unsigned ty, int detail, int basezoom, struct pool *file_keys, const char *layername, sqlite3 *outdb, double droprate, int buffer, const char *fname, json_pull *jp, FILE *geomfile[4], int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma) {
long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, unsigned tx, unsigned ty, int detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE *geomfile[4], int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers) {
int line_detail;
static bool evaluated = false;
double oprogress = 0;
@ -352,10 +358,16 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
for (line_detail = detail; line_detail >= MIN_DETAIL || line_detail == detail; line_detail--) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
struct pool keys, values;
pool_init(&keys, 0);
pool_init(&values, 0);
std::set<long long> dup;
struct pool keys1[nlayers], values1[nlayers];
struct pool *keys[nlayers], *values[nlayers];
int i;
for (i = 0; i < nlayers; i++) {
pool_init(&keys1[i], 0);
pool_init(&values1[i], 0);
keys[i] = &keys1[i];
values[i] = &values1[i];
}
long long count = 0;
//long long along = 0;
@ -371,7 +383,10 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
double scale = (double) (1LL << (64 - 2 * (z + 8)));
double gap = 0;
std::vector<coalesce> features;
std::vector<std::vector<coalesce> > features;
for (i = 0; i < nlayers; i++) {
features.push_back(std::vector<coalesce>());
}
int within[4] = { 0 };
long long geompos[4] = { 0 };
@ -379,12 +394,15 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
*geoms = og;
while (1) {
int t;
deserialize_int(geoms, &t);
signed char t;
deserialize_byte(geoms, &t);
if (t < 0) {
break;
}
signed char layer;
deserialize_byte(geoms, &layer);
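// The layer byte written by read_json selects which per-layer feature
// list and key/value pools this feature will be added to below.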
long long metastart;
deserialize_long_long(geoms, &metastart);
char *meta = metabase + metastart;
@ -448,9 +466,9 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
int quick2 = quick_check(bbox2, z + 1, line_detail, buffer);
if (quick2 != 0) {
if (!within[j]) {
serialize_int(geomfile[j], z + 1, &geompos[j], fname, jp);
serialize_uint(geomfile[j], tx * 2 + xo, &geompos[j], fname, jp);
serialize_uint(geomfile[j], ty * 2 + yo, &geompos[j], fname, jp);
serialize_int(geomfile[j], z + 1, &geompos[j], fname);
serialize_uint(geomfile[j], tx * 2 + xo, &geompos[j], fname);
serialize_uint(geomfile[j], ty * 2 + yo, &geompos[j], fname);
within[j] = 1;
}
@ -462,20 +480,21 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
}
//printf("type %d, meta %lld\n", t, metastart);
serialize_int(geomfile[j], t, &geompos[j], fname, jp);
serialize_long_long(geomfile[j], metastart, &geompos[j], fname, jp);
serialize_byte(geomfile[j], t, &geompos[j], fname);
serialize_byte(geomfile[j], layer, &geompos[j], fname);
serialize_long_long(geomfile[j], metastart, &geompos[j], fname);
for (unsigned u = 0; u < geom.size(); u++) {
serialize_byte(geomfile[j], geom[u].op, &geompos[j], fname, jp);
serialize_byte(geomfile[j], geom[u].op, &geompos[j], fname);
if (geom[u].op != VT_CLOSEPATH) {
serialize_uint(geomfile[j], geom[u].x + sx, &geompos[j], fname, jp);
serialize_uint(geomfile[j], geom[u].y + sy, &geompos[j], fname, jp);
serialize_uint(geomfile[j], geom[u].x + sx, &geompos[j], fname);
serialize_uint(geomfile[j], geom[u].y + sy, &geompos[j], fname);
}
}
serialize_byte(geomfile[j], VT_END, &geompos[j], fname, jp);
serialize_byte(geomfile[j], feature_minzoom, &geompos[j], fname, jp);
serialize_byte(geomfile[j], VT_END, &geompos[j], fname);
serialize_byte(geomfile[j], feature_minzoom, &geompos[j], fname);
}
}
}
@ -576,60 +595,70 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
c.metasrc = meta;
c.coalesced = false;
decode_meta(&meta, &keys, &values, file_keys, &c.meta, NULL);
features.push_back(c);
decode_meta(&meta, keys[layer], values[layer], file_keys[layer], &c.meta, NULL);
features[layer].push_back(c);
}
}
int j;
for (j = 0; j < 4; j++) {
if (within[j]) {
serialize_int(geomfile[j], -2, &geompos[j], fname, jp);
serialize_byte(geomfile[j], -2, &geompos[j], fname);
within[j] = 0;
}
}
std::sort(features.begin(), features.end());
for (j = 0; j < nlayers; j++) {
std::sort(features[j].begin(), features[j].end());
std::vector<coalesce> out;
unsigned x;
for (x = 0; x < features.size(); x++) {
unsigned y = out.size() - 1;
std::vector<coalesce> out;
unsigned x;
for (x = 0; x < features[j].size(); x++) {
unsigned y = out.size() - 1;
if (out.size() > 0 && coalcmp(&features[x], &out[y]) < 0) {
fprintf(stderr, "\nfeature out of order\n");
}
if (out.size() > 0 && out[y].geom.size() + features[x].geom.size() < 20000 && coalcmp(&features[x], &out[y]) == 0 && features[x].type != VT_POINT) {
unsigned z;
for (z = 0; z < features[x].geom.size(); z++) {
out[y].geom.push_back(features[x].geom[z]);
if (out.size() > 0 && coalcmp(&features[j][x], &out[y]) < 0) {
fprintf(stderr, "\nfeature out of order\n");
}
out[y].coalesced = true;
} else {
out.push_back(features[x]);
}
}
features = out;
for (x = 0; x < features.size(); x++) {
if (features[x].coalesced && features[x].type == VT_LINE) {
features[x].geom = remove_noop(features[x].geom, features[x].type);
features[x].geom = simplify_lines(features[x].geom, 32, 0);
if (out.size() > 0 && out[y].geom.size() + features[j][x].geom.size() < 20000 && coalcmp(&features[j][x], &out[y]) == 0 && features[j][x].type != VT_POINT) {
unsigned z;
for (z = 0; z < features[j][x].geom.size(); z++) {
out[y].geom.push_back(features[j][x].geom[z]);
}
out[y].coalesced = true;
} else {
out.push_back(features[j][x]);
}
}
features[j] = out;
for (x = 0; x < features[j].size(); x++) {
if (features[j][x].coalesced && features[j][x].type == VT_LINE) {
features[j][x].geom = remove_noop(features[j][x].geom, features[j][x].type);
features[j][x].geom = simplify_lines(features[j][x].geom, 32, 0);
}
}
}
if (features.size() > 0) {
if (features.size() > 200000) {
fprintf(stderr, "tile %d/%u/%u has %lld features, >200000 \n", z, tx, ty, (long long) features.size());
long long totalsize = 0;
for (j = 0; j < nlayers; j++) {
totalsize += features[j].size();
}
if (totalsize > 0) {
if (totalsize > 200000) {
fprintf(stderr, "tile %d/%u/%u has %lld features, >200000 \n", z, tx, ty, totalsize);
fprintf(stderr, "Try using -z to set a higher base zoom level.\n");
return -1;
}
mapnik::vector::tile tile = create_tile(layername, line_detail, features, &count, &keys, &values);
mapnik::vector::tile tile = create_tile(layernames, line_detail, features, &count, keys, values, nlayers);
pool_free(&keys);
pool_free(&values);
int i;
for (i = 0; i < nlayers; i++) {
pool_free(&keys1[i]);
pool_free(&values1[i]);
}
std::string s;
std::string compressed;
@ -642,7 +671,9 @@ long long write_tile(char **geoms, char *metabase, unsigned *file_bbox, int z, u
if (line_detail == MIN_DETAIL || !evaluated) {
evaluated = true;
evaluate(features, metabase, file_keys, layername, line_detail, compressed.size());
#if 0
evaluate(features[0], metabase, file_keys[0], layername, line_detail, compressed.size()); // XXX layer
#endif
}
} else {
mbtiles_write_tile(outdb, z, tx, ty, compressed.data(), compressed.size());

tile.h

@ -12,13 +12,12 @@
#define VT_BOOLEAN 7
struct pool;
struct json_pull;
void serialize_int(FILE *out, int n, long long *fpos, const char *fname, struct json_pull *source);
void serialize_long_long(FILE *out, long long n, long long *fpos, const char *fname, struct json_pull *source);
void serialize_byte(FILE *out, signed char n, long long *fpos, const char *fname, struct json_pull *source);
void serialize_uint(FILE *out, unsigned n, long long *fpos, const char *fname, struct json_pull *source);
void serialize_string(FILE *out, const char *s, long long *fpos, const char *fname, struct json_pull *source);
void serialize_int(FILE *out, int n, long long *fpos, const char *fname);
void serialize_long_long(FILE *out, long long n, long long *fpos, const char *fname);
void serialize_byte(FILE *out, signed char n, long long *fpos, const char *fname);
void serialize_uint(FILE *out, unsigned n, long long *fpos, const char *fname);
void serialize_string(FILE *out, const char *s, long long *fpos, const char *fname);
void deserialize_int(char **f, int *n);
void deserialize_long_long(char **f, long long *n);
@ -26,4 +25,4 @@ void deserialize_uint(char **f, unsigned *n);
void deserialize_byte(char **f, signed char *n);
struct pool_val *deserialize_string(char **f, struct pool *p, int type);
long long write_tile(char **geom, char *metabase, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int basezoom, struct pool *file_keys, const char *layername, sqlite3 *outdb, double droprate, int buffer, const char *fname, struct json_pull *jp, FILE *geomfile[4], int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma);
long long write_tile(char **geom, char *metabase, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE *geomfile[4], int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers);