mirror of https://github.com/mapbox/tippecanoe.git (synced 2025-01-21 20:08:20 +00:00)
Clean up layer name handling to fix layer merging crash
This commit is contained in:
parent 638cfef2b6
commit 218a2fc75e
CHANGELOG.md

@@ -1,3 +1,7 @@
+## 1.16.9
+
+* Clean up layer name handling to fix layer merging crash
+
 ## 1.16.8
 
 * Fix some code that could sometimes try to divide by zero
72 main.cpp
@@ -416,7 +416,7 @@ void do_read_parallel(char *map, long long len, long long initial_offset, const
 		pja[i].treefile = reader[i].treefile;
 		pja[i].fname = fname;
 		pja[i].basezoom = basezoom;
-		pja[i].layer = source < nlayers ? source : 0;
+		pja[i].layer = source;
 		pja[i].droprate = droprate;
 		pja[i].file_bbox = reader[i].file_bbox;
 		pja[i].segment = i;
@@ -1004,7 +1004,7 @@ void choose_first_zoom(long long *file_bbox, struct reader *reader, unsigned *iz
 	}
 }
 
-int read_input(std::vector<source> &sources, char *fname, const char *layername, int maxzoom, int minzoom, int basezoom, double basezoom_marker_width, sqlite3 *outdb, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, int read_parallel, int forcetable, const char *attribution, bool uses_gamma, long long *file_bbox) {
+int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzoom, int basezoom, double basezoom_marker_width, sqlite3 *outdb, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, double droprate, int buffer, const char *tmpdir, double gamma, int read_parallel, int forcetable, const char *attribution, bool uses_gamma, long long *file_bbox) {
 	int ret = EXIT_SUCCESS;
 
 	struct reader reader[CPUS];
@@ -1111,26 +1111,12 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 		initialized[i] = initial_x[i] = initial_y[i] = 0;
 	}
 
-	size_t nlayers;
-	if (layername != NULL) {
-		nlayers = 1;
-	} else {
-		nlayers = sources.size();
-		if (nlayers == 0) {
-			nlayers = 1;
-		}
-	}
-
-	std::vector<std::string> layernames;
+	size_t nlayers = sources.size();
 	for (size_t l = 0; l < nlayers; l++) {
-		if (layername != NULL) {
-			layernames.push_back(std::string(layername));
-		} else {
+		if (sources[l].layer.size() == 0) {
 			const char *src;
-			if (sources.size() < 1) {
+			if (sources[l].file.size() == 0) {
 				src = fname;
-			} else if (sources[l].layer.size() != 0) {
-				src = sources[l].layer.c_str();
 			} else {
 				src = sources[l].file.c_str();
 			}
@@ -1162,7 +1148,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 					out.append(trunc, p, 1);
 				}
 			}
-			layernames.push_back(out);
+			sources[l].layer = out;
 
 			if (!quiet) {
 				fprintf(stderr, "For layer %d, using name \"%s\"\n", (int) l, out.c_str());
@@ -1172,25 +1158,21 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 
 	std::map<std::string, layermap_entry> layermap;
 	for (size_t l = 0; l < nlayers; l++) {
-		layermap.insert(std::pair<std::string, layermap_entry>(layernames[l], layermap_entry(l)));
+		layermap.insert(std::pair<std::string, layermap_entry>(sources[l].layer, layermap_entry(l)));
 	}
 	std::vector<std::map<std::string, layermap_entry> > layermaps;
 	for (size_t l = 0; l < CPUS; l++) {
 		layermaps.push_back(layermap);
 	}
 
-	size_t nsources = sources.size();
-	if (nsources == 0) {
-		nsources = 1;
-	}
-
 	long overall_offset = 0;
 
+	size_t nsources = sources.size();
 	for (size_t source = 0; source < nsources; source++) {
 		std::string reading;
 		int fd;
 
-		if (source >= sources.size()) {
+		if (sources[source].file.size() == 0) {
 			reading = "standard input";
 			fd = 0;
 		} else {
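Note on the hunk above: keying layermap by layer name is what lets several sources that share a name collapse into one layermap_entry, and therefore one layer id, instead of being indexed by position. A minimal standalone sketch of that dedup behavior, with layermap_entry reduced to a bare id for illustration (the real struct lives in tippecanoe's own headers):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Illustrative stand-in for tippecanoe's layermap_entry; only the id is sketched here.
struct layermap_entry {
	size_t id;
	layermap_entry(size_t _id) : id(_id) {
	}
};

int main() {
	// Two sources sharing the name "roads" should end up in the same layer.
	std::vector<std::string> names = {"roads", "buildings", "roads"};

	// std::map::insert is a no-op for a key that is already present, so the
	// duplicate "roads" maps to the entry (and id) created for the first one.
	std::map<std::string, layermap_entry> layermap;
	for (size_t l = 0; l < names.size(); l++) {
		layermap.insert(std::pair<std::string, layermap_entry>(names[l], layermap_entry(l)));
	}

	for (std::map<std::string, layermap_entry>::const_iterator it = layermap.begin(); it != layermap.end(); ++it) {
		printf("layer \"%s\" -> id %zu\n", it->first.c_str(), it->second.id);
	}
	return 0;
}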
@@ -1202,6 +1184,13 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 			}
 		}
 
+		auto a = layermap.find(sources[source].layer);
+		if (a == layermap.end()) {
+			fprintf(stderr, "Internal error: couldn't find layer %s", sources[source].layer.c_str());
+			exit(EXIT_FAILURE);
+		}
+		size_t layer = a->second.id;
+
 		struct stat st;
 		char *map = NULL;
 		off_t off = 0;
@@ -1220,7 +1209,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 		}
 
 		if (map != NULL && map != MAP_FAILED) {
-			do_read_parallel(map, st.st_size - off, overall_offset, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, &layermaps, droprate, initialized, initial_x, initial_y, maxzoom, layernames[source < nlayers ? source : 0], uses_gamma);
+			do_read_parallel(map, st.st_size - off, overall_offset, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, &layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, uses_gamma);
 			overall_offset += st.st_size - off;
 			checkdisk(reader, CPUS);
 
@@ -1231,7 +1220,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 		} else {
 			FILE *fp = fdopen(fd, "r");
 			if (fp == NULL) {
-				perror(sources[source].file.c_str());
+				perror(sources[layer].file.c_str());
 				if (close(fd) != 0) {
 					perror("close source file");
 					exit(EXIT_FAILURE);
@@ -1288,7 +1277,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 				}
 
 				fflush(readfp);
-				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, layernames[source < nlayers ? source : 0], gamma != 0);
+				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0);
 
 				initial_offset += ahead;
 				overall_offset += ahead;
@@ -1325,7 +1314,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 			fflush(readfp);
 
 			if (ahead > 0) {
-				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, source, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, layernames[source < nlayers ? source : 0], gamma != 0);
+				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0);
 
 				if (parser_created) {
 					if (pthread_join(parallel_parser, NULL) != 0) {
@@ -1342,7 +1331,7 @@ int read_input(std::vector<source> &sources, char *fname, const char *layername,
 
 			long long layer_seq = overall_offset;
 			json_pull *jp = json_begin_file(fp);
-			parse_json(jp, reading.c_str(), &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, basezoom, source < nlayers ? source : 0, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0], reader, maxzoom, &layermaps[0], layernames[source < nlayers ? source : 0], uses_gamma);
+			parse_json(jp, reading.c_str(), &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, basezoom, layer, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0], reader, maxzoom, &layermaps[0], sources[layer].layer, uses_gamma);
 			json_end(jp);
 			overall_offset = layer_seq;
 			checkdisk(reader, CPUS);
@@ -1880,7 +1869,7 @@ int main(int argc, char **argv) {
 	int i;
 
 	char *name = NULL;
-	char *layer = NULL;
+	char *layername = NULL;
 	char *outdir = NULL;
 	int maxzoom = 14;
 	int minzoom = 0;
@@ -1994,7 +1983,7 @@ int main(int argc, char **argv) {
 			break;
 
 		case 'l':
-			layer = optarg;
+			layername = optarg;
 			break;
 
 		case 'A':
@@ -2260,9 +2249,22 @@ int main(int argc, char **argv) {
 		sources.push_back(src);
 	}
 
+	if (sources.size() == 0) {
+		struct source src;
+		src.layer = "";
+		src.file = ""; // standard input
+		sources.push_back(src);
+	}
+
+	if (layername != NULL) {
+		for (size_t a = 0; a < sources.size(); a++) {
+			sources[a].layer = layername;
+		}
+	}
+
 	long long file_bbox[4] = {UINT_MAX, UINT_MAX, 0, 0};
 
-	ret = read_input(sources, name ? name : outdir, layer, maxzoom, minzoom, basezoom, basezoom_marker_width, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma, read_parallel, forcetable, attribution, gamma != 0, file_bbox);
+	ret = read_input(sources, name ? name : outdir, maxzoom, minzoom, basezoom, basezoom_marker_width, outdb, &exclude, &include, exclude_all, droprate, buffer, tmpdir, gamma, read_parallel, forcetable, attribution, gamma != 0, file_bbox);
 
 	mbtiles_close(outdb, argv);
 
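Taken together, the main() changes above mean every source carries its own layer name before read_input() is called: if no input files were named, a single standard-input source is synthesized, and a name given with -l is copied onto every source, which is how multiple inputs end up merged into one named layer. A condensed sketch of that defaulting step, using a simplified stand-in for tippecanoe's struct source:

#include <string>
#include <vector>

// Simplified stand-in for tippecanoe's struct source (the real one is declared in main.cpp).
struct source {
	std::string layer;
	std::string file;
};

// Sketch of the defaulting added at the end of main(): synthesize a standard-input
// source when no files were named, then let an explicit -l name override the layer
// of every source so they all land in the same layer.
void apply_layer_defaults(std::vector<source> &sources, const char *layername) {
	if (sources.size() == 0) {
		struct source src;
		src.layer = "";
		src.file = "";  // empty file name means standard input
		sources.push_back(src);
	}

	if (layername != NULL) {
		for (size_t a = 0; a < sources.size(); a++) {
			sources[a].layer = layername;
		}
	}
}

int main() {
	std::vector<source> sources;
	apply_layer_defaults(sources, "merged");
	return sources[0].layer == "merged" ? 0 : 1;
}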
6 tile.cpp
@@ -1619,6 +1619,12 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
 			}
 
 			auto l = layers.find(layername);
+			if (l == layers.end()) {
+				fprintf(stderr, "Internal error: couldn't find layer %s\n", layername.c_str());
+				fprintf(stderr, "segment %d\n", partials[i].segment);
+				fprintf(stderr, "layer %lld\n", partials[i].layer);
+				exit(EXIT_FAILURE);
+			}
 			l->second.push_back(c);
 		}
 	}
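The guard added to write_tile() is the consumer-side half of the fix: layers.find() returns layers.end() for a layer name that was never registered, and dereferencing that iterator via l->second.push_back(c) is undefined behavior. A small self-contained illustration of the failure mode and the added check, with the mapped type simplified to std::vector<int> (the real map in tile.cpp holds the tile's per-layer features):

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>
#include <vector>

int main() {
	// Simplified: in tile.cpp the mapped type is a vector of features, not ints.
	std::map<std::string, std::vector<int> > layers;
	layers.insert(std::pair<std::string, std::vector<int> >("roads", std::vector<int>()));

	std::string layername = "buildings";  // a name that was never registered

	// Without this check, using l->second for a missing key dereferences
	// layers.end(), which is undefined behavior -- the kind of crash this
	// commit guards against.
	std::map<std::string, std::vector<int> >::iterator l = layers.find(layername);
	if (l == layers.end()) {
		fprintf(stderr, "Internal error: couldn't find layer %s\n", layername.c_str());
		exit(EXIT_FAILURE);
	}
	l->second.push_back(1);
	return 0;
}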
@@ -1 +1 @@
-#define VERSION "tippecanoe v1.16.8\n"
+#define VERSION "tippecanoe v1.16.9\n"