Mirror of https://github.com/mapbox/tippecanoe.git (synced 2025-03-31 16:00:36 +00:00)

Commit 601c092883: Merge branch 'master' into multithread

Conflicts: tile.cc

Makefile (7 lines changed)
@ -1,7 +1,9 @@
PREFIX ?= /usr/local
MANDIR ?= /usr/share/man/man1/

all: tippecanoe enumerate decode man/tippecanoe.1
all: tippecanoe enumerate decode tile-join

docs: man/tippecanoe.1

install: tippecanoe
mkdir -p $(PREFIX)/bin
@ -31,6 +33,9 @@ enumerate: enumerate.o
decode: decode.o vector_tile.pb.o projection.o
g++ $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lm -lz -lprotobuf-lite -lsqlite3

tile-join: tile-join.o vector_tile.pb.o projection.o pool.o mbtiles.o
g++ $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lm -lz -lprotobuf-lite -lsqlite3

libjsonpull.a: jsonpull.o
ar rc $@ $^
ranlib $@
README.md (36 lines changed)
@ -61,12 +61,13 @@ Options

* -o _file_.mbtiles: Name the output file.
* -f: Delete the mbtiles file if it already exists instead of giving an error
* -t _directory_: Put the temporary files in _directory_.

### Zoom levels and resolution

* -z _zoom_: Base (maxzoom) zoom level (default 14)
* -Z _zoom_: Lowest (minzoom) zoom level (default 0)
* -d _detail_: Detail at base zoom level (default 26-basezoom, ~0.5m, for tile resolution of 4096 if -z14)
* -d _detail_: Detail at base zoom level (default 12 at -z14 or higher, or 13 at -z13 or lower. Detail beyond 13 has rendering problems with Mapbox GL.)
* -D _detail_: Detail at lower zoom levels (default 10, for tile resolution of 1024)
* -m _detail_: Minimum detail that it will try if tiles are too big at regular detail (default 7)
* -b _pixels_: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"--1/256th of the tile width or height. (default 5)
@ -82,15 +83,18 @@ Options
* -r _rate_: Rate at which dots are dropped at lower zoom levels (default 2.5)
* -g _gamma_: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.

### Doing more

* -ac: Coalesce adjacent line and polygon features that have the same properties
* -ar: Try reversing the directions of lines to make them coalesce and compress better
* -ao: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce
* -al: Let "dot" dropping at lower zooms apply to lines too

### Doing less

* -ps: Don't simplify lines
* -pr: Don't reverse the direction of lines to make them coalesce better
* -pc: Don't coalesce features with the same properties
* -pf: Don't limit tiles to 200,000 features
* -pk: Don't limit tiles to 500K bytes
* -po: Don't reorder features to put the same properties in sequence
* -pl: Let "dot" simplification apply to lines too
* -pd: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries.
* -q: Work quietly instead of reporting progress

@ -143,7 +147,7 @@ I don't know why 2.5 is the appropriate number, but the densities of many differ
data sets fall off at about this same rate. You can use -r to specify a different rate.

You can use the gamma option to thin out especially dense clusters of points.
For any area that where dots are closer than one pixel together (at whatever zoom level),
For any area where dots are closer than one pixel together (at whatever zoom level),
a gamma of 3, for example, will reduce these clusters to the cube root of their original density.

For line features, it drops any features that are too small to draw at all.
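
The droprate and gamma behavior described above can be summarized numerically. The sketch below is an illustration only (not part of this commit; the program and its names are invented for the example): it computes roughly what fraction of points survive at each zoom below the base zoom for the default -r 2.5, and how -g thins a dense cluster.

    // Illustration only: how -r (droprate) and -g (gamma) scale point counts.
    #include <cmath>
    #include <cstdio>

    int main() {
        double droprate = 2.5;  // -r default
        int basezoom = 14;      // -z default

        // A point's randomly assigned minzoom keeps roughly
        // droprate^-(basezoom - z) of the points at zoom z below the base zoom.
        for (int z = basezoom; z >= basezoom - 4; z--) {
            double kept = pow(droprate, -(double) (basezoom - z));
            printf("z%d: about %.1f%% of points kept\n", z, kept * 100);
        }

        // Gamma thins a cluster of n sub-pixel-spaced dots to about n^(1/gamma):
        // gamma 2 keeps the square root, gamma 3 the cube root.
        double n = 10000, gamma = 2;
        printf("gamma %.0f: %.0f dots -> about %.0f dots\n", gamma, n, pow(n, 1 / gamma));
        return 0;
    }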

@ -165,10 +169,20 @@ lower resolutions before failing if it still doesn't fit.
Development
-----------

Requires protoc (`brew install protobuf` or
`apt-get install libprotobuf-dev` and `protobuf-compiler`),
`md2man` (`gem install md2man`), and sqlite3 (`apt-get install libsqlite3-dev`).
To build:
Requires protoc and sqlite3. Rebuilding the manpage
uses md2man (`gem install md2man`).

MacOS:

    brew install protobuf

Linux:

    sudo apt-get install libprotobuf-dev
    sudo apt-get install protobuf-compiler
    sudo apt-get install libsqlite3-dev

Then build:

    make

@ -184,4 +198,4 @@ Check out [some examples of maps made with tippecanoe](MADE_WITH.md)
Name
----

The name is [a joking reference](http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too) to making tiles.
The name is [a joking reference](http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too) to a "tiler" for making map tiles.
geojson.c (357 lines changed)
@ -1,3 +1,7 @@
|
||||
#ifdef MTRACE
|
||||
#include <mcheck.h>
|
||||
#endif
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <math.h>
|
||||
@ -405,7 +409,160 @@ long long addpool(struct memfile *poolfile, struct memfile *treefile, char *s, c
|
||||
return off;
|
||||
}
|
||||
|
||||
int read_json(int argc, char **argv, char *fname, const char *layername, int maxzoom, int minzoom, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, char *prevent) {
|
||||
int serialize_geometry(json_object *geometry, json_object *properties, const char *reading, json_pull *jp, long long *seq, long long *metapos, long long *geompos, long long *indexpos, struct pool *exclude, struct pool *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, const char *fname, int maxzoom, int n, double droprate, unsigned *file_bbox) {
|
||||
json_object *geometry_type = json_hash_get(geometry, "type");
|
||||
if (geometry_type == NULL) {
|
||||
static int warned = 0;
|
||||
if (!warned) {
|
||||
fprintf(stderr, "%s:%d: null geometry (additional not reported)\n", reading, jp->line);
|
||||
warned = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (geometry_type->type != JSON_STRING) {
|
||||
fprintf(stderr, "%s:%d: geometry without type\n", reading, jp->line);
|
||||
return 0;
|
||||
}
|
||||
|
||||
json_object *coordinates = json_hash_get(geometry, "coordinates");
|
||||
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
|
||||
fprintf(stderr, "%s:%d: feature without coordinates array\n", reading, jp->line);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int t;
|
||||
for (t = 0; t < GEOM_TYPES; t++) {
|
||||
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (t >= GEOM_TYPES) {
|
||||
fprintf(stderr, "%s:%d: Can't handle geometry type %s\n", reading, jp->line, geometry_type->string);
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned bbox[] = {UINT_MAX, UINT_MAX, 0, 0};
|
||||
|
||||
int nprop = 0;
|
||||
if (properties->type == JSON_HASH) {
|
||||
nprop = properties->length;
|
||||
}
|
||||
|
||||
long long metastart = *metapos;
|
||||
char *metakey[nprop];
|
||||
char *metaval[nprop];
|
||||
int metatype[nprop];
|
||||
int m = 0;
|
||||
|
||||
int i;
|
||||
for (i = 0; i < nprop; i++) {
|
||||
if (properties->keys[i]->type == JSON_STRING) {
|
||||
if (exclude_all) {
|
||||
if (!is_pooled(include, properties->keys[i]->string, VT_STRING)) {
|
||||
continue;
|
||||
}
|
||||
} else if (is_pooled(exclude, properties->keys[i]->string, VT_STRING)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
metakey[m] = properties->keys[i]->string;
|
||||
|
||||
if (properties->values[i] != NULL && properties->values[i]->type == JSON_STRING) {
|
||||
metatype[m] = VT_STRING;
|
||||
metaval[m] = properties->values[i]->string;
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && properties->values[i]->type == JSON_NUMBER) {
|
||||
metatype[m] = VT_NUMBER;
|
||||
metaval[m] = properties->values[i]->string;
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_TRUE || properties->values[i]->type == JSON_FALSE)) {
|
||||
metatype[m] = VT_BOOLEAN;
|
||||
metaval[m] = properties->values[i]->type == JSON_TRUE ? "true" : "false";
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_NULL)) {
|
||||
;
|
||||
} else {
|
||||
fprintf(stderr, "%s:%d: Unsupported property type for %s\n", reading, jp->line, properties->keys[i]->string);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
serialize_int(metafile, m, metapos, fname);
|
||||
for (i = 0; i < m; i++) {
|
||||
serialize_long_long(metafile, addpool(poolfile, treefile, metakey[i], VT_STRING), metapos, fname);
|
||||
serialize_long_long(metafile, addpool(poolfile, treefile, metaval[i], metatype[i]), metapos, fname);
|
||||
}
|
||||
|
||||
long long geomstart = *geompos;
|
||||
|
||||
serialize_byte(geomfile, mb_geometry[t], geompos, fname);
|
||||
serialize_long_long(geomfile, n, geompos, fname);
|
||||
serialize_long_long(geomfile, metastart, geompos, fname);
|
||||
long long wx = initial_x, wy = initial_y;
|
||||
parse_geometry(t, coordinates, bbox, geompos, geomfile, VT_MOVETO, fname, jp, &wx, &wy, &initialized);
|
||||
serialize_byte(geomfile, VT_END, geompos, fname);
|
||||
|
||||
/*
|
||||
* Note that minzoom for lines is the dimension
|
||||
* of the geometry in world coordinates, but
|
||||
* for points is the lowest zoom level (in tiles,
|
||||
* not in pixels) at which it should be drawn.
|
||||
*
|
||||
* So a line that is too small for, say, z8
|
||||
* will have minzoom of 18 (if tile detail is 10),
|
||||
* not 8.
|
||||
*/
|
||||
int minzoom = 0;
|
||||
if (mb_geometry[t] == VT_LINE) {
|
||||
for (minzoom = 0; minzoom < 31; minzoom++) {
|
||||
unsigned mask = 1 << (32 - (minzoom + 1));
|
||||
|
||||
if (((bbox[0] & mask) != (bbox[2] & mask)) || ((bbox[1] & mask) != (bbox[3] & mask))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (mb_geometry[t] == VT_POINT) {
|
||||
double r = ((double) rand()) / RAND_MAX;
|
||||
if (r == 0) {
|
||||
r = .00000001;
|
||||
}
|
||||
minzoom = maxzoom - floor(log(r) / -log(droprate));
|
||||
}
|
||||
|
||||
serialize_byte(geomfile, minzoom, geompos, fname);
|
||||
|
||||
struct index index;
|
||||
index.start = geomstart;
|
||||
index.end = *geompos;
|
||||
index.index = encode(bbox[0] / 2 + bbox[2] / 2, bbox[1] / 2 + bbox[3] / 2);
|
||||
fwrite_check(&index, sizeof(struct index), 1, indexfile, fname);
|
||||
*indexpos += sizeof(struct index);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (bbox[i] < file_bbox[i]) {
|
||||
file_bbox[i] = bbox[i];
|
||||
}
|
||||
}
|
||||
for (i = 2; i < 4; i++) {
|
||||
if (bbox[i] > file_bbox[i]) {
|
||||
file_bbox[i] = bbox[i];
|
||||
}
|
||||
}
|
||||
|
||||
if (*seq % 10000 == 0) {
|
||||
if (!quiet) {
|
||||
fprintf(stderr, "Read %.2f million features\r", *seq / 1000000.0);
|
||||
}
|
||||
}
|
||||
(*seq)++;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int read_json(int argc, char **argv, char *fname, const char *layername, int maxzoom, int minzoom, sqlite3 *outdb, struct pool *exclude, struct pool *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, char *prevent, char *additional) {
|
||||
int ret = EXIT_SUCCESS;
|
||||
|
||||
char metaname[strlen(tmpdir) + strlen("/meta.XXXXXXXX") + 1];
|
||||
@ -533,8 +690,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max
|
||||
found_hashes++;
|
||||
|
||||
if (found_hashes == 50 && found_features == 0) {
|
||||
fprintf(stderr, "%s:%d: Not finding any GeoJSON features in input. Is your file just bare geometries?\n", reading, jp->line);
|
||||
break;
|
||||
fprintf(stderr, "%s:%d: Not finding any GeoJSON features in input after 50 objects. Is your file just bare geometries?\n", reading, jp->line);
|
||||
}
|
||||
}
|
||||
|
||||
@ -552,24 +708,6 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max
|
||||
continue;
|
||||
}
|
||||
|
||||
json_object *geometry_type = json_hash_get(geometry, "type");
|
||||
if (geometry_type == NULL) {
|
||||
static int warned = 0;
|
||||
if (!warned) {
|
||||
fprintf(stderr, "%s:%d: null geometry (additional not reported)\n", reading, jp->line);
|
||||
warned = 1;
|
||||
}
|
||||
|
||||
json_free(j);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (geometry_type->type != JSON_STRING) {
|
||||
fprintf(stderr, "%s:%d: geometry without type\n", reading, jp->line);
|
||||
json_free(j);
|
||||
continue;
|
||||
}
|
||||
|
||||
json_object *properties = json_hash_get(j, "properties");
|
||||
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
|
||||
fprintf(stderr, "%s:%d: feature without properties hash\n", reading, jp->line);
|
||||
@ -577,142 +715,14 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max
|
||||
continue;
|
||||
}
|
||||
|
||||
json_object *coordinates = json_hash_get(geometry, "coordinates");
|
||||
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
|
||||
fprintf(stderr, "%s:%d: feature without coordinates array\n", reading, jp->line);
|
||||
json_free(j);
|
||||
continue;
|
||||
}
|
||||
|
||||
int t;
|
||||
for (t = 0; t < GEOM_TYPES; t++) {
|
||||
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
|
||||
break;
|
||||
json_object *geometries = json_hash_get(geometry, "geometries");
|
||||
if (geometries != NULL) {
|
||||
int g;
|
||||
for (g = 0; g < geometries->length; g++) {
|
||||
serialize_geometry(geometries->array[g], properties, reading, jp, &seq, &metapos, &geompos, &indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, maxzoom, n, droprate, file_bbox);
|
||||
}
|
||||
}
|
||||
if (t >= GEOM_TYPES) {
|
||||
fprintf(stderr, "%s:%d: Can't handle geometry type %s\n", reading, jp->line, geometry_type->string);
|
||||
json_free(j);
|
||||
continue;
|
||||
}
|
||||
|
||||
{
|
||||
unsigned bbox[] = {UINT_MAX, UINT_MAX, 0, 0};
|
||||
|
||||
int nprop = 0;
|
||||
if (properties->type == JSON_HASH) {
|
||||
nprop = properties->length;
|
||||
}
|
||||
|
||||
long long metastart = metapos;
|
||||
char *metakey[nprop];
|
||||
char *metaval[nprop];
|
||||
int metatype[nprop];
|
||||
int m = 0;
|
||||
|
||||
int i;
|
||||
for (i = 0; i < nprop; i++) {
|
||||
if (properties->keys[i]->type == JSON_STRING) {
|
||||
if (exclude_all) {
|
||||
if (!is_pooled(include, properties->keys[i]->string, VT_STRING)) {
|
||||
continue;
|
||||
}
|
||||
} else if (is_pooled(exclude, properties->keys[i]->string, VT_STRING)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
metakey[m] = properties->keys[i]->string;
|
||||
|
||||
if (properties->values[i] != NULL && properties->values[i]->type == JSON_STRING) {
|
||||
metatype[m] = VT_STRING;
|
||||
metaval[m] = properties->values[i]->string;
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && properties->values[i]->type == JSON_NUMBER) {
|
||||
metatype[m] = VT_NUMBER;
|
||||
metaval[m] = properties->values[i]->string;
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_TRUE || properties->values[i]->type == JSON_FALSE)) {
|
||||
metatype[m] = VT_BOOLEAN;
|
||||
metaval[m] = properties->values[i]->type == JSON_TRUE ? "true" : "false";
|
||||
m++;
|
||||
} else if (properties->values[i] != NULL && (properties->values[i]->type == JSON_NULL)) {
|
||||
;
|
||||
} else {
|
||||
fprintf(stderr, "%s:%d: Unsupported property type for %s\n", reading, jp->line, properties->keys[i]->string);
|
||||
json_free(j);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
serialize_int(metafile, m, &metapos, fname);
|
||||
for (i = 0; i < m; i++) {
|
||||
serialize_long_long(metafile, addpool(poolfile, treefile, metakey[i], VT_STRING), &metapos, fname);
|
||||
serialize_long_long(metafile, addpool(poolfile, treefile, metaval[i], metatype[i]), &metapos, fname);
|
||||
}
|
||||
|
||||
long long geomstart = geompos;
|
||||
|
||||
serialize_byte(geomfile, mb_geometry[t], &geompos, fname);
|
||||
serialize_byte(geomfile, n, &geompos, fname);
|
||||
serialize_long_long(geomfile, metastart, &geompos, fname);
|
||||
long long wx = initial_x, wy = initial_y;
|
||||
parse_geometry(t, coordinates, bbox, &geompos, geomfile, VT_MOVETO, fname, jp, &wx, &wy, &initialized);
|
||||
serialize_byte(geomfile, VT_END, &geompos, fname);
|
||||
|
||||
/*
|
||||
* Note that minzoom for lines is the dimension
|
||||
* of the geometry in world coordinates, but
|
||||
* for points is the lowest zoom level (in tiles,
|
||||
* not in pixels) at which it should be drawn.
|
||||
*
|
||||
* So a line that is too small for, say, z8
|
||||
* will have minzoom of 18 (if tile detail is 10),
|
||||
* not 8.
|
||||
*/
|
||||
int minzoom = 0;
|
||||
if (mb_geometry[t] == VT_LINE) {
|
||||
for (minzoom = 0; minzoom < 31; minzoom++) {
|
||||
unsigned mask = 1 << (32 - (minzoom + 1));
|
||||
|
||||
if (((bbox[0] & mask) != (bbox[2] & mask)) || ((bbox[1] & mask) != (bbox[3] & mask))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (mb_geometry[t] == VT_POINT) {
|
||||
double r = ((double) rand()) / RAND_MAX;
|
||||
if (r == 0) {
|
||||
r = .00000001;
|
||||
}
|
||||
minzoom = maxzoom - floor(log(r) / -log(droprate));
|
||||
}
|
||||
|
||||
serialize_byte(geomfile, minzoom, &geompos, fname);
|
||||
|
||||
struct index index;
|
||||
index.start = geomstart;
|
||||
index.end = geompos;
|
||||
index.index = encode(bbox[0] / 2 + bbox[2] / 2, bbox[1] / 2 + bbox[3] / 2);
|
||||
fwrite_check(&index, sizeof(struct index), 1, indexfile, fname);
|
||||
indexpos += sizeof(struct index);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (bbox[i] < file_bbox[i]) {
|
||||
file_bbox[i] = bbox[i];
|
||||
}
|
||||
}
|
||||
for (i = 2; i < 4; i++) {
|
||||
if (bbox[i] > file_bbox[i]) {
|
||||
file_bbox[i] = bbox[i];
|
||||
}
|
||||
}
|
||||
|
||||
if (seq % 10000 == 0) {
|
||||
if (!quiet) {
|
||||
fprintf(stderr, "Read %.2f million features\r", seq / 1000000.0);
|
||||
}
|
||||
}
|
||||
seq++;
|
||||
} else {
|
||||
serialize_geometry(geometry, properties, reading, jp, &seq, &metapos, &geompos, &indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, maxzoom, n, droprate, file_bbox);
|
||||
}
|
||||
|
||||
json_free(j);
|
||||
@ -991,7 +1001,7 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max
|
||||
fprintf(stderr, "%lld features, %lld bytes of geometry, %lld bytes of metadata, %lld bytes of string pool\n", seq, (long long) geomst.st_size, (long long) metast.st_size, poolfile->off);
|
||||
}
|
||||
|
||||
int written = traverse_zooms(fd, size, meta, stringpool, file_bbox, file_keys, &midx, &midy, layernames, maxzoom, minzoom, outdb, droprate, buffer, fname, tmpdir, gamma, nlayers, prevent, full_detail, low_detail, min_detail);
|
||||
int written = traverse_zooms(fd, size, meta, stringpool, file_bbox, file_keys, &midx, &midy, layernames, maxzoom, minzoom, outdb, droprate, buffer, fname, tmpdir, gamma, nlayers, prevent, additional, full_detail, low_detail, min_detail);
|
||||
|
||||
if (maxzoom != written) {
|
||||
fprintf(stderr, "\n\n\n*** NOTE TILES ONLY COMPLETE THROUGH ZOOM %d ***\n\n\n", written);
|
||||
@ -1044,6 +1054,10 @@ int read_json(int argc, char **argv, char *fname, const char *layername, int max
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
#ifdef MTRACE
|
||||
mtrace();
|
||||
#endif
|
||||
|
||||
extern int optind;
|
||||
extern char *optarg;
|
||||
int i;
|
||||
@ -1059,6 +1073,7 @@ int main(int argc, char **argv) {
|
||||
int buffer = 5;
|
||||
const char *tmpdir = "/tmp";
|
||||
char prevent[256];
|
||||
char additional[256];
|
||||
|
||||
struct pool exclude, include;
|
||||
pool_init(&exclude, 0);
|
||||
@ -1067,9 +1082,10 @@ int main(int argc, char **argv) {
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
prevent[i] = 0;
|
||||
additional[i] = 0;
|
||||
}
|
||||
|
||||
while ((i = getopt(argc, argv, "l:n:z:Z:d:D:m:o:x:y:r:b:fXt:g:p:vq")) != -1) {
|
||||
while ((i = getopt(argc, argv, "l:n:z:Z:d:D:m:o:x:y:r:b:fXt:g:p:vqa:")) != -1) {
|
||||
switch (i) {
|
||||
case 'n':
|
||||
name = optarg;
|
||||
@ -1147,12 +1163,19 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
} break;
|
||||
|
||||
case 'a': {
|
||||
char *cp;
|
||||
for (cp = optarg; *cp != '\0'; cp++) {
|
||||
additional[*cp & 0xFF] = 1;
|
||||
}
|
||||
} break;
|
||||
|
||||
case 'v':
|
||||
fprintf(stderr, VERSION);
|
||||
exit(EXIT_FAILURE);
|
||||
|
||||
default:
|
||||
fprintf(stderr, "Usage: %s -o out.mbtiles [-n name] [-l layername] [-z maxzoom] [-Z minzoom] [-d detail] [-D lower-detail] [-m min-detail] [-x excluded-field ...] [-y included-field ...] [-X] [-r droprate] [-b buffer] [-t tmpdir] [-p rcfs] [file.json ...]\n", argv[0]);
|
||||
fprintf(stderr, "Usage: %s -o out.mbtiles [-n name] [-l layername] [-z maxzoom] [-Z minzoom] [-d detail] [-D lower-detail] [-m min-detail] [-x excluded-field ...] [-y included-field ...] [-X] [-r droprate] [-b buffer] [-t tmpdir] [-a rco] [-p sfkld] [-q] [file.json ...]\n", argv[0]);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
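
The case 'a' handler above expands the -a flag string into the 256-entry additional[] lookup table, the same pattern already used for -p and prevent[], so later code can test a single option letter with one array index. A standalone sketch of the idea (illustration only, not code from the commit):

    // Sketch of the -a/-p flag-table pattern.
    #include <cstdio>

    int main() {
        char additional[256] = {0};
        const char *arg = "co";              // e.g. tippecanoe ... -a co

        for (const char *cp = arg; *cp != '\0'; cp++) {
            additional[*cp & 0xFF] = 1;      // mark each requested option letter
        }

        if (additional['c' & 0xFF]) {
            printf("coalescing enabled\n");  // corresponds to -ac
        }
        if (additional['o' & 0xFF]) {
            printf("reordering enabled\n");  // corresponds to -ao
        }
        return 0;
    }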
|
||||
@ -1163,8 +1186,13 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
|
||||
if (full_detail <= 0) {
|
||||
// ~0.5m accuracy at whatever zoom
|
||||
// 12 bits (4096 units) at z14
|
||||
if (maxzoom >= 14) {
|
||||
// ~0.5m accuracy at z14
|
||||
full_detail = 12;
|
||||
} else {
|
||||
// as good as we can get without breaking GL
|
||||
full_detail = 13;
|
||||
}
|
||||
|
||||
full_detail = 26 - maxzoom;
|
||||
}
|
||||
@ -1188,8 +1216,13 @@ int main(int argc, char **argv) {
|
||||
sqlite3 *outdb = mbtiles_open(outdir, argv);
|
||||
int ret = EXIT_SUCCESS;
|
||||
|
||||
ret = read_json(argc - optind, argv + optind, name ? name : outdir, layer, maxzoom, minzoom, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma, prevent);
|
||||
ret = read_json(argc - optind, argv + optind, name ? name : outdir, layer, maxzoom, minzoom, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma, prevent, additional);
|
||||
|
||||
mbtiles_close(outdb, argv);
|
||||
|
||||
#ifdef MTRACE
|
||||
muntrace();
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
man/tippecanoe.1
@ -64,6 +64,8 @@ it encounters.
|
||||
\-o \fIfile\fP\&.mbtiles: Name the output file.
|
||||
.IP \(bu 2
|
||||
\-f: Delete the mbtiles file if it already exists instead of giving an error
|
||||
.IP \(bu 2
|
||||
\-t \fIdirectory\fP: Put the temporary files in \fIdirectory\fP\&.
|
||||
.RE
|
||||
.SS Zoom levels and resolution
|
||||
.RS
|
||||
@ -72,7 +74,7 @@ it encounters.
|
||||
.IP \(bu 2
|
||||
\-Z \fIzoom\fP: Lowest (minzoom) zoom level (default 0)
|
||||
.IP \(bu 2
|
||||
\-d \fIdetail\fP: Detail at base zoom level (default 26\-basezoom, ~0.5m, for tile resolution of 4096 if \-z14)
|
||||
\-d \fIdetail\fP: Detail at base zoom level (default 12 at \-z14 or higher, or 13 at \-z13 or lower. Detail beyond 13 has rendering problems with Mapbox GL.)
|
||||
.IP \(bu 2
|
||||
\-D \fIdetail\fP: Detail at lower zoom levels (default 10, for tile resolution of 1024)
|
||||
.IP \(bu 2
|
||||
@ -96,23 +98,26 @@ it encounters.
|
||||
.IP \(bu 2
|
||||
\-g \fIgamma\fP: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
|
||||
.RE
|
||||
.SS Doing more
|
||||
.RS
|
||||
.IP \(bu 2
|
||||
\-ac: Coalesce adjacent line and polygon features that have the same properties
|
||||
.IP \(bu 2
|
||||
\-ar: Try reversing the directions of lines to make them coalesce and compress better
|
||||
.IP \(bu 2
|
||||
\-ao: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce
|
||||
.IP \(bu 2
|
||||
\-al: Let "dot" dropping at lower zooms apply to lines too
|
||||
.RE
|
||||
.SS Doing less
|
||||
.RS
|
||||
.IP \(bu 2
|
||||
\-ps: Don't simplify lines
|
||||
.IP \(bu 2
|
||||
\-pr: Don't reverse the direction of lines to make them coalesce better
|
||||
.IP \(bu 2
|
||||
\-pc: Don't coalesce features with the same properties
|
||||
.IP \(bu 2
|
||||
\-pf: Don't limit tiles to 200,000 features
|
||||
.IP \(bu 2
|
||||
\-pk: Don't limit tiles to 500K bytes
|
||||
.IP \(bu 2
|
||||
\-po: Don't reorder features to put the same properties in sequence
|
||||
.IP \(bu 2
|
||||
\-pl: Let "dot" simplification apply to lines too
|
||||
.IP \(bu 2
|
||||
\-pd: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries.
|
||||
.IP \(bu 2
|
||||
\-q: Work quietly instead of reporting progress
|
||||
@ -167,7 +172,7 @@ I don't know why 2.5 is the appropriate number, but the densities of many differ
|
||||
data sets fall off at about this same rate. You can use \-r to specify a different rate.
|
||||
.PP
|
||||
You can use the gamma option to thin out especially dense clusters of points.
|
||||
For any area that where dots are closer than one pixel together (at whatever zoom level),
|
||||
For any area where dots are closer than one pixel together (at whatever zoom level),
|
||||
a gamma of 3, for example, will reduce these clusters to the cube root of their original density.
|
||||
.PP
|
||||
For line features, it drops any features that are too small to draw at all.
|
||||
@ -187,10 +192,28 @@ If a tile is larger than 500K, it will try encoding that tile at progressively
|
||||
lower resolutions before failing if it still doesn't fit.
|
||||
.SH Development
|
||||
.PP
|
||||
Requires protoc (\fB\fCbrew install protobuf\fR or
|
||||
\fB\fCapt\-get install libprotobuf\-dev\fR and \fB\fCprotobuf\-compiler\fR),
|
||||
\fB\fCmd2man\fR (\fB\fCgem install md2man\fR), and sqlite3 (\fB\fCapt\-get install libsqlite3\-dev\fR).
|
||||
To build:
|
||||
Requires protoc and sqlite3. Rebuilding the manpage
|
||||
uses md2man (\fB\fCgem install md2man\fR).
|
||||
.PP
|
||||
MacOS:
|
||||
.PP
|
||||
.RS
|
||||
.nf
|
||||
brew install protobuf
|
||||
.fi
|
||||
.RE
|
||||
.PP
|
||||
Linux:
|
||||
.PP
|
||||
.RS
|
||||
.nf
|
||||
sudo apt\-get install libprotobuf\-dev
|
||||
sudo apt\-get install protobuf\-compiler
|
||||
sudo apt\-get install libsqlite3\-dev
|
||||
.fi
|
||||
.RE
|
||||
.PP
|
||||
Then build:
|
||||
.PP
|
||||
.RS
|
||||
.nf
|
||||
@ -212,4 +235,4 @@ Check out some examples of maps made with tippecanoe
|
||||
.SH Name
|
||||
.PP
|
||||
The name is a joking reference
|
||||
\[la]http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too\[ra] to making tiles.
|
||||
\[la]http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too\[ra] to a "tiler" for making map tiles.
|
||||
|
pool.c (17 lines changed)
@ -1,10 +1,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pool.h"

#define POOL_WIDTH 256

static int hash(char *s) {
static int hash(const char *s) {
int h = 0;
for (; *s; s++) {
h = h * 37 + *s;
@ -13,7 +14,7 @@ static int hash(char *s) {
return h;
}

struct pool_val *pool(struct pool *p, char *s, int type) {
struct pool_val *pool(struct pool *p, const char *s, int type) {
int h = hash(s);
struct pool_val **v = &(p->vals[h]);

@ -34,6 +35,10 @@ struct pool_val *pool(struct pool *p, char *s, int type) {
}

*v = malloc(sizeof(struct pool_val));
if (*v == NULL) {
fprintf(stderr, "out of memory making string pool\n");
exit(EXIT_FAILURE);
}
(*v)->left = NULL;
(*v)->right = NULL;
(*v)->next = NULL;
@ -52,7 +57,7 @@ struct pool_val *pool(struct pool *p, char *s, int type) {
return *v;
}

int is_pooled(struct pool *p, char *s, int type) {
int is_pooled(struct pool *p, const char *s, int type) {
int h = hash(s);
struct pool_val **v = &(p->vals[h]);

@ -78,7 +83,7 @@ int is_pooled(struct pool *p, char *s, int type) {
void pool_free1(struct pool *p, void (*func)(void *)) {
while (p->head != NULL) {
if (func != NULL) {
func(p->head->s);
func((void *) p->head->s);
}

struct pool_val *next = p->head->next;
@ -104,6 +109,10 @@ void pool_free_strings(struct pool *p) {
void pool_init(struct pool *p, int n) {
p->n = n;
p->vals = calloc(POOL_WIDTH, sizeof(struct pool_val *));
if (p->vals == NULL) {
fprintf(stderr, "out of memory creating string pool\n");
exit(EXIT_FAILURE);
}
p->head = NULL;
p->tail = NULL;
}
pool.h (6 lines changed)
@ -1,5 +1,5 @@
struct pool_val {
char *s;
const char *s;
int type;
int n;

@ -17,8 +17,8 @@ struct pool {
int n;
};

struct pool_val *pool(struct pool *p, char *s, int type);
struct pool_val *pool(struct pool *p, const char *s, int type);
void pool_free(struct pool *p);
void pool_free_strings(struct pool *p);
void pool_init(struct pool *p, int n);
int is_pooled(struct pool *p, char *s, int type);
int is_pooled(struct pool *p, const char *s, int type);
tile-join.cc (new file, 540 lines)
@ -0,0 +1,540 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <sqlite3.h>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <zlib.h>
|
||||
#include <math.h>
|
||||
#include "vector_tile.pb.h"
|
||||
#include "tile.h"
|
||||
|
||||
extern "C" {
|
||||
#include "projection.h"
|
||||
#include "pool.h"
|
||||
#include "mbtiles.h"
|
||||
}
|
||||
|
||||
std::string dequote(std::string s);
|
||||
|
||||
struct stats {
|
||||
int minzoom;
|
||||
int maxzoom;
|
||||
double midlat, midlon;
|
||||
double minlat, minlon, maxlat, maxlon;
|
||||
};
|
||||
|
||||
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
|
||||
inline bool is_compressed(std::string const &data) {
|
||||
return data.size() > 2 && (((uint8_t) data[0] == 0x78 && (uint8_t) data[1] == 0x9C) || ((uint8_t) data[0] == 0x1F && (uint8_t) data[1] == 0x8B));
|
||||
}
|
||||
|
||||
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
|
||||
inline int decompress(std::string const &input, std::string &output) {
|
||||
z_stream inflate_s;
|
||||
inflate_s.zalloc = Z_NULL;
|
||||
inflate_s.zfree = Z_NULL;
|
||||
inflate_s.opaque = Z_NULL;
|
||||
inflate_s.avail_in = 0;
|
||||
inflate_s.next_in = Z_NULL;
|
||||
if (inflateInit2(&inflate_s, 32 + 15) != Z_OK) {
|
||||
fprintf(stderr, "error: %s\n", inflate_s.msg);
|
||||
}
|
||||
inflate_s.next_in = (Bytef *) input.data();
|
||||
inflate_s.avail_in = input.size();
|
||||
size_t length = 0;
|
||||
do {
|
||||
output.resize(length + 2 * input.size());
|
||||
inflate_s.avail_out = 2 * input.size();
|
||||
inflate_s.next_out = (Bytef *) (output.data() + length);
|
||||
int ret = inflate(&inflate_s, Z_FINISH);
|
||||
if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
|
||||
fprintf(stderr, "error: %s\n", inflate_s.msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
length += (2 * input.size() - inflate_s.avail_out);
|
||||
} while (inflate_s.avail_out == 0);
|
||||
inflateEnd(&inflate_s);
|
||||
output.resize(length);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
|
||||
static inline int compress(std::string const &input, std::string &output) {
|
||||
z_stream deflate_s;
|
||||
deflate_s.zalloc = Z_NULL;
|
||||
deflate_s.zfree = Z_NULL;
|
||||
deflate_s.opaque = Z_NULL;
|
||||
deflate_s.avail_in = 0;
|
||||
deflate_s.next_in = Z_NULL;
|
||||
deflateInit2(&deflate_s, Z_BEST_COMPRESSION, Z_DEFLATED, 31, 8, Z_DEFAULT_STRATEGY);
|
||||
deflate_s.next_in = (Bytef *) input.data();
|
||||
deflate_s.avail_in = input.size();
|
||||
size_t length = 0;
|
||||
do {
|
||||
size_t increase = input.size() / 2 + 1024;
|
||||
output.resize(length + increase);
|
||||
deflate_s.avail_out = increase;
|
||||
deflate_s.next_out = (Bytef *) (output.data() + length);
|
||||
int ret = deflate(&deflate_s, Z_FINISH);
|
||||
if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
|
||||
return -1;
|
||||
}
|
||||
length += (increase - deflate_s.avail_out);
|
||||
} while (deflate_s.avail_out == 0);
|
||||
deflateEnd(&deflate_s);
|
||||
output.resize(length);
|
||||
return 0;
|
||||
}
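
is_compressed(), decompress(), and compress() above are small zlib wrappers borrowed from mapnik-vector-tile. A minimal round-trip sketch of how they fit together (illustration only, not part of the commit; it assumes it sits in the same file so the wrappers are visible):

    // Round-trip sketch for the zlib wrappers above.
    #include <cassert>
    #include <string>

    static void roundtrip_example() {
        std::string original(100000, 'a');         // highly compressible payload
        std::string packed, unpacked;

        if (compress(original, packed) == 0) {     // compress() returns 0 on success
            assert(is_compressed(packed));         // gzip magic bytes 0x1F 0x8B
            if (decompress(packed, unpacked)) {    // decompress() returns 1 on success
                assert(unpacked == original);
            }
        }
    }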
|
||||
|
||||
void handle(std::string message, int z, unsigned x, unsigned y, struct pool **file_keys, char ***layernames, int *nlayers, sqlite3 *outdb, std::vector<std::string> &header, std::map<std::string, std::vector<std::string> > &mapping, struct pool *exclude, int ifmatched) {
|
||||
GOOGLE_PROTOBUF_VERIFY_VERSION;
|
||||
|
||||
// https://github.com/mapbox/mapnik-vector-tile/blob/master/examples/c%2B%2B/tileinfo.cpp
|
||||
mapnik::vector::tile tile;
|
||||
mapnik::vector::tile outtile;
|
||||
|
||||
if (is_compressed(message)) {
|
||||
std::string uncompressed;
|
||||
decompress(message, uncompressed);
|
||||
if (!tile.ParseFromString(uncompressed)) {
|
||||
fprintf(stderr, "Couldn't decompress tile %d/%u/%u\n", z, x, y);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
} else if (!tile.ParseFromString(message)) {
|
||||
fprintf(stderr, "Couldn't parse tile %d/%u/%u\n", z, x, y);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
for (int l = 0; l < tile.layers_size(); l++) {
|
||||
mapnik::vector::tile_layer layer = tile.layers(l);
|
||||
mapnik::vector::tile_layer *outlayer = outtile.add_layers();
|
||||
|
||||
outlayer->set_name(layer.name());
|
||||
outlayer->set_version(layer.version());
|
||||
outlayer->set_extent(layer.extent());
|
||||
|
||||
const char *ln = layer.name().c_str();
|
||||
|
||||
int ll;
|
||||
for (ll = 0; ll < *nlayers; ll++) {
|
||||
if (strcmp((*layernames)[ll], ln) == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (ll == *nlayers) {
|
||||
*file_keys = (struct pool *) realloc(*file_keys, (ll + 1) * sizeof(struct pool));
|
||||
*layernames = (char **) realloc(*layernames, (ll + 1) * sizeof(char *));
|
||||
|
||||
if (*file_keys == NULL) {
|
||||
perror("realloc file_keys");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
if (*layernames == NULL) {
|
||||
perror("realloc layernames");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
pool_init(&((*file_keys)[ll]), 0);
|
||||
(*layernames)[ll] = strdup(ln);
|
||||
*nlayers = ll + 1;
|
||||
}
|
||||
|
||||
struct pool keys, values;
|
||||
pool_init(&keys, 0);
|
||||
pool_init(&values, 0);
|
||||
|
||||
for (int f = 0; f < layer.features_size(); f++) {
|
||||
mapnik::vector::tile_feature feat = layer.features(f);
|
||||
std::vector<int> feature_tags;
|
||||
int matched = 0;
|
||||
|
||||
for (int t = 0; t + 1 < feat.tags_size(); t += 2) {
|
||||
const char *key = layer.keys(feat.tags(t)).c_str();
|
||||
mapnik::vector::tile_value const &val = layer.values(feat.tags(t + 1));
|
||||
char *value;
|
||||
int type = -1;
|
||||
|
||||
if (val.has_string_value()) {
|
||||
value = strdup(val.string_value().c_str());
|
||||
type = VT_STRING;
|
||||
} else if (val.has_int_value()) {
|
||||
if (asprintf(&value, "%lld", (long long) val.int_value()) >= 0) {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
} else if (val.has_double_value()) {
|
||||
if (asprintf(&value, "%g", val.double_value()) >= 0) {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
} else if (val.has_float_value()) {
|
||||
if (asprintf(&value, "%g", val.float_value()) >= 0) {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
} else if (val.has_bool_value()) {
|
||||
if (asprintf(&value, "%s", val.bool_value() ? "true" : "false") >= 0) {
|
||||
type = VT_BOOLEAN;
|
||||
}
|
||||
} else if (val.has_sint_value()) {
|
||||
if (asprintf(&value, "%lld", (long long) val.sint_value()) >= 0) {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
} else if (val.has_uint_value()) {
|
||||
if (asprintf(&value, "%llu", (long long) val.uint_value()) >= 0) {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!is_pooled(exclude, key, VT_STRING)) {
|
||||
if (!is_pooled(&((*file_keys)[ll]), key, type)) {
|
||||
pool(&((*file_keys)[ll]), strdup(key), type);
|
||||
}
|
||||
|
||||
struct pool_val *k, *v;
|
||||
|
||||
if (is_pooled(&keys, key, VT_STRING)) {
|
||||
k = pool(&keys, key, VT_STRING);
|
||||
} else {
|
||||
k = pool(&keys, strdup(key), VT_STRING);
|
||||
}
|
||||
|
||||
if (is_pooled(&values, value, type)) {
|
||||
v = pool(&values, value, type);
|
||||
} else {
|
||||
v = pool(&values, strdup(value), type);
|
||||
}
|
||||
|
||||
feature_tags.push_back(k->n);
|
||||
feature_tags.push_back(v->n);
|
||||
}
|
||||
|
||||
if (strcmp(key, header[0].c_str()) == 0) {
|
||||
std::map<std::string, std::vector<std::string> >::iterator ii = mapping.find(std::string(value));
|
||||
|
||||
if (ii != mapping.end()) {
|
||||
std::vector<std::string> fields = ii->second;
|
||||
matched = 1;
|
||||
|
||||
for (unsigned i = 1; i < fields.size(); i++) {
|
||||
std::string joinkey = header[i];
|
||||
std::string joinval = fields[i];
|
||||
int type = VT_STRING;
|
||||
|
||||
if (joinval.size() > 0) {
|
||||
if (joinval[0] == '"') {
|
||||
joinval = dequote(joinval);
|
||||
} else if ((joinval[0] >= '0' && joinval[0] <= '9') || joinval[0] == '-') {
|
||||
type = VT_NUMBER;
|
||||
}
|
||||
}
|
||||
|
||||
const char *sjoinkey = joinkey.c_str();
|
||||
const char *sjoinval = joinval.c_str();
|
||||
|
||||
if (!is_pooled(exclude, sjoinkey, VT_STRING)) {
|
||||
if (!is_pooled(&((*file_keys)[ll]), sjoinkey, type)) {
|
||||
pool(&((*file_keys)[ll]), strdup(sjoinkey), type);
|
||||
}
|
||||
|
||||
struct pool_val *k, *v;
|
||||
|
||||
if (is_pooled(&keys, sjoinkey, VT_STRING)) {
|
||||
k = pool(&keys, sjoinkey, VT_STRING);
|
||||
} else {
|
||||
k = pool(&keys, strdup(sjoinkey), VT_STRING);
|
||||
}
|
||||
|
||||
if (is_pooled(&values, sjoinval, type)) {
|
||||
v = pool(&values, sjoinval, type);
|
||||
} else {
|
||||
v = pool(&values, strdup(sjoinval), type);
|
||||
}
|
||||
|
||||
feature_tags.push_back(k->n);
|
||||
feature_tags.push_back(v->n);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(value);
|
||||
}
|
||||
|
||||
if (matched || !ifmatched) {
|
||||
mapnik::vector::tile_feature *outfeature = outlayer->add_features();
|
||||
outfeature->set_type(feat.type());
|
||||
|
||||
for (int g = 0; g < feat.geometry_size(); g++) {
|
||||
outfeature->add_geometry(feat.geometry(g));
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < feature_tags.size(); i++) {
|
||||
outfeature->add_tags(feature_tags[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct pool_val *pv;
|
||||
for (pv = keys.head; pv != NULL; pv = pv->next) {
|
||||
outlayer->add_keys(pv->s, strlen(pv->s));
|
||||
}
|
||||
for (pv = values.head; pv != NULL; pv = pv->next) {
|
||||
mapnik::vector::tile_value *tv = outlayer->add_values();
|
||||
|
||||
if (pv->type == VT_NUMBER) {
|
||||
tv->set_double_value(atof(pv->s));
|
||||
} else if (pv->type == VT_BOOLEAN) {
|
||||
tv->set_bool_value(pv->s[0] == 't');
|
||||
} else {
|
||||
tv->set_string_value(pv->s);
|
||||
}
|
||||
}
|
||||
|
||||
pool_free_strings(&keys);
|
||||
pool_free_strings(&values);
|
||||
}
|
||||
|
||||
std::string s;
|
||||
std::string compressed;
|
||||
|
||||
outtile.SerializeToString(&s);
|
||||
compress(s, compressed);
|
||||
|
||||
if (compressed.size() > 500000) {
|
||||
fprintf(stderr, "Tile %d/%u/%u size is %lld, >500000. Skipping this tile\n.", z, x, y, (long long) compressed.size());
|
||||
return;
|
||||
}
|
||||
|
||||
mbtiles_write_tile(outdb, z, x, y, compressed.data(), compressed.size());
|
||||
}
|
||||
|
||||
void decode(char *fname, char *map, struct pool **file_keys, char ***layernames, int *nlayers, sqlite3 *outdb, struct stats *st, std::vector<std::string> &header, std::map<std::string, std::vector<std::string> > &mapping, struct pool *exclude, int ifmatched) {
|
||||
sqlite3 *db;
|
||||
|
||||
if (sqlite3_open(fname, &db) != SQLITE_OK) {
|
||||
fprintf(stderr, "%s: %s\n", fname, sqlite3_errmsg(db));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
const char *sql = "SELECT zoom_level, tile_column, tile_row, tile_data from tiles;";
|
||||
sqlite3_stmt *stmt;
|
||||
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
|
||||
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
while (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
long long zoom = sqlite3_column_int(stmt, 0);
|
||||
long long x = sqlite3_column_int(stmt, 1);
|
||||
long long y = sqlite3_column_int(stmt, 2);
|
||||
y = (1LL << zoom) - 1 - y;
|
||||
|
||||
int len = sqlite3_column_bytes(stmt, 3);
|
||||
const char *s = (const char *) sqlite3_column_blob(stmt, 3);
|
||||
|
||||
fprintf(stderr, "%lld/%lld/%lld \r", zoom, x, y);
|
||||
|
||||
handle(std::string(s, len), zoom, x, y, file_keys, layernames, nlayers, outdb, header, mapping, exclude, ifmatched);
|
||||
}
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
if (sqlite3_prepare_v2(db, "SELECT value from metadata where name = 'minzoom'", -1, &stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
st->minzoom = sqlite3_column_int(stmt, 0);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
if (sqlite3_prepare_v2(db, "SELECT value from metadata where name = 'maxzoom'", -1, &stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
st->maxzoom = sqlite3_column_int(stmt, 0);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
if (sqlite3_prepare_v2(db, "SELECT value from metadata where name = 'center'", -1, &stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
const unsigned char *s = sqlite3_column_text(stmt, 0);
|
||||
sscanf((char *) s, "%lf,%lf", &st->midlon, &st->midlat);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
if (sqlite3_prepare_v2(db, "SELECT value from metadata where name = 'bounds'", -1, &stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
const unsigned char *s = sqlite3_column_text(stmt, 0);
|
||||
sscanf((char *) s, "%lf,%lf,%lf,%lf", &st->minlon, &st->minlat, &st->maxlon, &st->maxlat);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
|
||||
if (sqlite3_close(db) != SQLITE_OK) {
|
||||
fprintf(stderr, "%s: could not close database: %s\n", fname, sqlite3_errmsg(db));
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
void usage(char **argv) {
|
||||
fprintf(stderr, "Usage: %s [-f] [-i] [-c joins.csv] [-x exclude ...] -o new.mbtiles source.mbtiles\n", argv[0]);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
#define MAXLINE 10000 /* XXX */
|
||||
|
||||
std::vector<std::string> split(char *s) {
|
||||
std::vector<std::string> ret;
|
||||
|
||||
while (*s && *s != '\n') {
|
||||
char *start = s;
|
||||
int within = 0;
|
||||
|
||||
for (; *s && *s != '\n'; s++) {
|
||||
if (*s == '"') {
|
||||
within = !within;
|
||||
}
|
||||
|
||||
if (*s == ',' && !within) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::string v = std::string(start, s - start);
|
||||
ret.push_back(v);
|
||||
|
||||
if (*s == ',') {
|
||||
s++;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string dequote(std::string s) {
|
||||
std::string out;
|
||||
unsigned i;
|
||||
for (i = 0; i < s.size(); i++) {
|
||||
if (s[i] == '"') {
|
||||
if (i + 1 < s.size() && s[i + 1] == '"') {
|
||||
out.push_back('"');
|
||||
}
|
||||
} else {
|
||||
out.push_back(s[i]);
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
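
split() and dequote() above do the minimal CSV handling that readcsv() relies on: split() breaks a line on commas that are not inside quotes, and dequote() strips the quotes and collapses doubled quotes. A small usage sketch (illustration only; the sample line is invented and the helpers are assumed visible in the same file):

    // Usage sketch for the CSV helpers above.
    #include <cstdio>
    #include <string>
    #include <vector>

    static void csv_example() {
        char line[] = "\"GEOID\",name,population\n";
        std::vector<std::string> fields = split(line);  // {"\"GEOID\"", "name", "population"}
        std::string key = dequote(fields[0]);           // GEOID, with the quotes removed

        printf("join key column: %s (%zu columns)\n", key.c_str(), fields.size());
    }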
|
||||
|
||||
void readcsv(char *fn, std::vector<std::string> &header, std::map<std::string, std::vector<std::string> > &mapping) {
|
||||
FILE *f = fopen(fn, "r");
|
||||
if (f == NULL) {
|
||||
perror(fn);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
char s[MAXLINE];
|
||||
if (fgets(s, MAXLINE, f)) {
|
||||
header = split(s);
|
||||
|
||||
for (unsigned i = 0; i < header.size(); i++) {
|
||||
header[i] = dequote(header[i]);
|
||||
}
|
||||
}
|
||||
while (fgets(s, MAXLINE, f)) {
|
||||
std::vector<std::string> line = split(s);
|
||||
if (line.size() > 0) {
|
||||
line[0] = dequote(line[0]);
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < line.size() && i < header.size(); i++) {
|
||||
// printf("putting %s\n", line[0].c_str());
|
||||
mapping.insert(std::pair<std::string, std::vector<std::string> >(line[0], line));
|
||||
}
|
||||
}
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
char *outfile = NULL;
|
||||
char *csv = NULL;
|
||||
int force = 0;
|
||||
int ifmatched = 0;
|
||||
|
||||
std::vector<std::string> header;
|
||||
std::map<std::string, std::vector<std::string> > mapping;
|
||||
|
||||
struct pool exclude;
|
||||
pool_init(&exclude, 0);
|
||||
|
||||
extern int optind;
|
||||
extern char *optarg;
|
||||
int i;
|
||||
|
||||
while ((i = getopt(argc, argv, "fo:c:x:i")) != -1) {
|
||||
switch (i) {
|
||||
case 'o':
|
||||
outfile = optarg;
|
||||
break;
|
||||
|
||||
case 'f':
|
||||
force = 1;
|
||||
break;
|
||||
|
||||
case 'i':
|
||||
ifmatched = 1;
|
||||
break;
|
||||
|
||||
case 'c':
|
||||
if (csv != NULL) {
|
||||
fprintf(stderr, "Only one -c for now\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
csv = optarg;
|
||||
readcsv(csv, header, mapping);
|
||||
break;
|
||||
|
||||
case 'x':
|
||||
pool(&exclude, optarg, VT_STRING);
|
||||
break;
|
||||
|
||||
default:
|
||||
usage(argv);
|
||||
}
|
||||
}
|
||||
|
||||
if (argc - optind != 1 || outfile == NULL) {
|
||||
usage(argv);
|
||||
}
|
||||
|
||||
if (force) {
|
||||
unlink(outfile);
|
||||
}
|
||||
|
||||
sqlite3 *outdb = mbtiles_open(outfile, argv);
|
||||
struct stats st;
|
||||
memset(&st, 0, sizeof(st));
|
||||
|
||||
struct pool *file_keys = NULL;
|
||||
char **layernames = NULL;
|
||||
int nlayers = 0;
|
||||
|
||||
for (i = optind; i < argc; i++) {
|
||||
decode(argv[i], csv, &file_keys, &layernames, &nlayers, outdb, &st, header, mapping, &exclude, ifmatched);
|
||||
}
|
||||
|
||||
struct pool *fk[nlayers];
|
||||
for (i = 0; i < nlayers; i++) {
|
||||
fk[i] = &(file_keys[i]);
|
||||
}
|
||||
|
||||
mbtiles_write_metadata(outdb, outfile, layernames, st.minzoom, st.maxzoom, st.minlat, st.minlon, st.maxlat, st.maxlon, st.midlat, st.midlon, fk, nlayers);
|
||||
mbtiles_close(outdb, argv);
|
||||
|
||||
return 0;
|
||||
}
|
tile.cc (36 lines changed)
@ -8,6 +8,7 @@
|
||||
#include <algorithm>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
#include <zlib.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
@ -27,6 +28,9 @@ extern "C" {
|
||||
|
||||
#define CMD_BITS 3
|
||||
|
||||
#define XSTRINGIFY(s) STRINGIFY(s)
|
||||
#define STRINGIFY(s) #s
|
||||
|
||||
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
|
||||
static inline int compress(std::string const &input, std::string &output) {
|
||||
z_stream deflate_s;
|
||||
@ -358,7 +362,7 @@ void evaluate(std::vector<coalesce> &features, char *metabase, struct pool *file
|
||||
}
|
||||
#endif
|
||||
|
||||
void rewrite(drawvec &geom, int z, int nextzoom, int file_maxzoom, long long *bbox, unsigned tx, unsigned ty, int buffer, int line_detail, int *within, long long *geompos, FILE **geomfile, const char *fname, signed char t, signed char layer, long long metastart, signed char feature_minzoom, int child_shards, int max_zoom_increment) {
|
||||
void rewrite(drawvec &geom, int z, int nextzoom, int file_maxzoom, long long *bbox, unsigned tx, unsigned ty, int buffer, int line_detail, int *within, long long *geompos, FILE **geomfile, const char *fname, signed char t, int layer, long long metastart, signed char feature_minzoom, int child_shards, int max_zoom_increment) {
|
||||
if (geom.size() > 0 && nextzoom <= file_maxzoom) {
|
||||
int xo, yo;
|
||||
int span = 1 << (nextzoom - z);
|
||||
@ -429,7 +433,7 @@ void rewrite(drawvec &geom, int z, int nextzoom, int file_maxzoom, long long *bb
|
||||
|
||||
// printf("type %d, meta %lld\n", t, metastart);
|
||||
serialize_byte(geomfile[j], t, &geompos[j], fname);
|
||||
serialize_byte(geomfile[j], layer, &geompos[j], fname);
|
||||
serialize_long_long(geomfile[j], layer, &geompos[j], fname);
|
||||
serialize_long_long(geomfile[j], metastart, &geompos[j], fname);
|
||||
long long wx = initial_x, wy = initial_y;
|
||||
|
||||
@ -452,7 +456,7 @@ void rewrite(drawvec &geom, int z, int nextzoom, int file_maxzoom, long long *bb
|
||||
}
|
||||
}
|
||||
|
||||
long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned tx, unsigned ty, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, char *prevent, int child_shards) {
|
||||
long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned tx, unsigned ty, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, char *prevent, char *additional, int child_shards) {
|
||||
int line_detail;
|
||||
static bool evaluated = false;
|
||||
double oprogress = 0;
|
||||
@ -532,8 +536,8 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
break;
|
||||
}
|
||||
|
||||
signed char layer;
|
||||
deserialize_byte(geoms, &layer);
|
||||
long long layer;
|
||||
deserialize_long_long(geoms, &layer);
|
||||
|
||||
long long metastart;
|
||||
deserialize_long_long(geoms, &metastart);
|
||||
@ -594,7 +598,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
continue;
|
||||
}
|
||||
|
||||
if (gamma >= 0 && (t == VT_POINT || (prevent['l' & 0xFF] && t == VT_LINE))) {
|
||||
if (gamma >= 0 && (t == VT_POINT || (additional['l' & 0xFF] && t == VT_LINE))) {
|
||||
seq++;
|
||||
if (seq >= 0) {
|
||||
seq -= interval;
|
||||
@ -659,7 +663,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
}
|
||||
#endif
|
||||
|
||||
if (t == VT_LINE && !prevent['r' & 0xFF]) {
|
||||
if (t == VT_LINE && additional['r' & 0xFF]) {
|
||||
geom = reorder_lines(geom);
|
||||
}
|
||||
|
||||
@ -701,7 +705,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
}
|
||||
|
||||
for (j = 0; j < nlayers; j++) {
|
||||
if (!prevent['o' & 0xFF]) {
|
||||
if (additional['o' & 0xFF]) {
|
||||
std::sort(features[j].begin(), features[j].end());
|
||||
}
|
||||
|
||||
@ -716,7 +720,7 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!prevent['c' & 0xFF] && out.size() > 0 && out[y].geom.size() + features[j][x].geom.size() < 20000 && coalcmp(&features[j][x], &out[y]) == 0 && features[j][x].type != VT_POINT) {
|
||||
if (additional['c' & 0xFF] && out.size() > 0 && out[y].geom.size() + features[j][x].geom.size() < 20000 && coalcmp(&features[j][x], &out[y]) == 0 && features[j][x].type != VT_POINT) {
|
||||
unsigned z;
|
||||
for (z = 0; z < features[j][x].geom.size(); z++) {
|
||||
out[y].geom.push_back(features[j][x].geom[z]);
|
||||
@ -794,6 +798,12 @@ long long write_tile(char **geoms, char *metabase, char *stringpool, unsigned *f
|
||||
return count;
|
||||
}
|
||||
} else {
|
||||
int i;
|
||||
for (i = 0; i < nlayers; i++) {
|
||||
pool_free(&keys1[i]);
|
||||
pool_free(&values1[i]);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
}
|
||||
@ -828,6 +838,7 @@ struct write_tile_args {
|
||||
double gamma;
|
||||
int nlayers;
|
||||
char *prevent;
|
||||
char *additional;
|
||||
int child_shards;
|
||||
int *geomfd;
|
||||
off_t *geom_size;
|
||||
@ -875,7 +886,7 @@ void run_thread(write_tile_args *arg) {
|
||||
|
||||
// fprintf(stderr, "%d/%u/%u\n", z, x, y);
|
||||
|
||||
long long len = write_tile(&geom, arg->metabase, arg->stringpool, arg->file_bbox, z, x, y, z == arg->maxzoom ? arg->full_detail : arg->low_detail, arg->min_detail, arg->maxzoom, arg->file_keys, arg->layernames, arg->outdb, arg->droprate, arg->buffer, arg->fname, arg->geomfile, arg->minzoom, arg->maxzoom, arg->todo, geomstart, *arg->along, arg->gamma, arg->nlayers, arg->prevent, arg->child_shards);
|
||||
long long len = write_tile(&geom, arg->metabase, arg->stringpool, arg->file_bbox, z, x, y, z == arg->maxzoom ? arg->full_detail : arg->low_detail, arg->min_detail, arg->maxzoom, arg->file_keys, arg->layernames, arg->outdb, arg->droprate, arg->buffer, arg->fname, arg->geomfile, arg->minzoom, arg->maxzoom, arg->todo, geomstart, *arg->along, arg->gamma, arg->nlayers, arg->prevent, arg->additional, arg->child_shards);
|
||||
|
||||
if (len < 0) {
|
||||
return; // XXX how to report errors from threads?
|
||||
@ -896,7 +907,7 @@ void run_thread(write_tile_args *arg) {
|
||||
}
|
||||
}
|
||||
|
||||
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *file_bbox, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, int full_detail, int low_detail, int min_detail) {
|
||||
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *file_bbox, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, char *additional, int full_detail, int low_detail, int min_detail) {
|
||||
int i;
|
||||
for (i = 0; i <= maxzoom; i++) {
|
||||
long long most = 0;
|
||||
@ -905,7 +916,7 @@ int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpo
|
||||
int subfd[TEMP_FILES];
|
||||
int j;
|
||||
for (j = 0; j < TEMP_FILES; j++) {
|
||||
char geomname[strlen(tmpdir) + strlen("/geom2.XXXXXXXX") + 1];
|
||||
char geomname[strlen(tmpdir) + strlen("/geom.XXXXXXXX" XSTRINGIFY(INT_MAX)) + 1];
|
||||
sprintf(geomname, "%s/geom%d.XXXXXXXX", tmpdir, j);
|
||||
subfd[j] = mkstemp(geomname);
|
||||
// printf("%s\n", geomname);
|
||||
@ -1013,6 +1024,7 @@ int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpo
|
||||
args.gamma = gamma;
|
||||
args.nlayers = nlayers;
|
||||
args.prevent = prevent;
|
||||
args.additional = additional;
|
||||
args.child_shards = TEMP_FILES / threads;
|
||||
|
||||
args.geomfd = geomfd;
|
||||
|
tile.h (4 lines changed)
@ -25,9 +25,9 @@ void deserialize_uint(char **f, unsigned *n);
void deserialize_byte(char **f, signed char *n);
struct pool_val *deserialize_string(char **f, struct pool *p, int type);

long long write_tile(char **geom, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, char *prevent);
long long write_tile(char **geom, char *metabase, char *stringpool, unsigned *file_bbox, int z, unsigned x, unsigned y, int detail, int min_detail, int basezoom, struct pool **file_keys, char **layernames, sqlite3 *outdb, double droprate, int buffer, const char *fname, FILE **geomfile, int file_minzoom, int file_maxzoom, double todo, char *geomstart, long long along, double gamma, int nlayers, char *prevent, char *additional);

int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *file_bbox, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, int full_detail, int low_detail, int min_detail);
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *file_bbox, struct pool **file_keys, unsigned *midx, unsigned *midy, char **layernames, int maxzoom, int minzoom, sqlite3 *outdb, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int nlayers, char *prevent, char *additional, int full_detail, int low_detail, int min_detail);

extern unsigned initial_x, initial_y;
extern int geometry_scale;