Mirror of https://github.com/mapbox/tippecanoe.git (synced 2025-01-21 12:05:05 +00:00)

Merge branch 'master' into plugins

This commit is contained in:
commit 04d0cc6fa1
.dockerignore (new file, 4 lines)
@@ -0,0 +1,4 @@
+# Don't copy Dockerfile or git items
+.gitignore
+.git
+Dockerfile
CHANGELOG.md (21 additions)
@@ -1,3 +1,24 @@
+## 1.19.3
+
+* Upgrade protozero to version 1.5.2
+
+## 1.19.2
+
+* Ignore UTF-8 byte order mark if present
+
+## 1.19.1
+
+* Add an option to increase maxzoom if features are still being dropped
+
+## 1.19.0
+
+* Tile-join can merge and create directories, not only mbtiles
+* Maxzoom guessing (-zg) takes into account resolution within each feature
+
+## 1.18.2
+
+* Fix crash with very long (>128K) attribute values
+
 ## 1.18.1
 
 * Only warn once about invalid polygons in tippecanoe-decode
Dockerfile (new file, 24 lines)
@@ -0,0 +1,24 @@
+# Start from ubuntu
+FROM ubuntu:17.04
+
+# Update repos and install dependencies
+RUN apt-get update \
+  && apt-get -y upgrade \
+  && apt-get -y install build-essential libsqlite3-dev zlib1g-dev
+
+# Create a directory and copy in all files
+RUN mkdir -p /tmp/tippecanoe-src
+WORKDIR /tmp/tippecanoe-src
+COPY . /tmp/tippecanoe-src
+
+# Build tippecanoe
+RUN make \
+  && make install
+
+# Remove the temp directory and unneeded packages
+WORKDIR /
+RUN rm -rf /tmp/tippecanoe-src \
+  && apt-get -y remove --purge build-essential && apt-get -y autoremove
+
+# Run the default command to show usage
+CMD tippecanoe --help
Makefile (34 lines changed)
@@ -46,7 +46,7 @@ C = $(wildcard *.c) $(wildcard *.cpp)
 INCLUDES = -I/usr/local/include -I.
 LIBS = -L/usr/local/lib
 
-tippecanoe: geojson.o jsonpull/jsonpull.o tile.o pool.o mbtiles.o geometry.o projection.o memfile.o mvt.o serial.o main.o text.o rawtiles.o plugin.o read_json.o write_json.o
+tippecanoe: geojson.o jsonpull/jsonpull.o tile.o pool.o mbtiles.o geometry.o projection.o memfile.o mvt.o serial.o main.o text.o dirtiles.o plugin.o read_json.o write_json.o
 	$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
 
 tippecanoe-enumerate: enumerate.o
@@ -55,7 +55,7 @@ tippecanoe-enumerate: enumerate.o
 tippecanoe-decode: decode.o projection.o mvt.o write_json.o
 	$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3
 
-tile-join: tile-join.o projection.o pool.o mbtiles.o mvt.o memfile.o
+tile-join: tile-join.o projection.o pool.o mbtiles.o mvt.o memfile.o dirtiles.o jsonpull/jsonpull.o
 	$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
 
 unit: unit.o text.o
@@ -117,7 +117,7 @@ parallel-test:
 
 raw-tiles-test:
 	./tippecanoe -e tests/raw-tiles/raw-tiles tests/raw-tiles/hackspots.geojson -pC
-	diff -x '.*' -rq tests/raw-tiles/raw-tiles tests/raw-tiles/compare
+	diff -x '*.DS_Store' -rq tests/raw-tiles/raw-tiles tests/raw-tiles/compare
 	rm -rf tests/raw-tiles/raw-tiles
 
 decode-test:
@@ -165,8 +165,32 @@ join-test:
 	./tippecanoe-decode tests/join-population/just-macarthur.mbtiles > tests/join-population/just-macarthur.mbtiles.json.check
 	./tippecanoe-decode tests/join-population/no-macarthur.mbtiles > tests/join-population/no-macarthur.mbtiles.json.check
 	cmp tests/join-population/just-macarthur.mbtiles.json.check tests/join-population/just-macarthur.mbtiles.json
 	cmp tests/join-population/no-macarthur.mbtiles.json.check tests/join-population/no-macarthur.mbtiles.json
-	rm tests/join-population/tabblock_06001420.mbtiles tests/join-population/joined.mbtiles tests/join-population/joined-i.mbtiles tests/join-population/joined.mbtiles.json.check tests/join-population/joined-i.mbtiles.json.check tests/join-population/macarthur.mbtiles tests/join-population/merged.mbtiles tests/join-population/merged.mbtiles.json.check tests/join-population/macarthur2.mbtiles tests/join-population/windows.mbtiles tests/join-population/windows.mbtiles.json.check tests/join-population/just-macarthur.mbtiles tests/join-population/no-macarthur.mbtiles tests/join-population/just-macarthur.mbtiles.json.check tests/join-population/no-macarthur.mbtiles.json.check
+	./tile-join -pC -e tests/join-population/raw-merged-folder tests/join-population/tabblock_06001420.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
+	diff -x '*.DS_Store' -rq tests/join-population/raw-merged-folder tests/join-population/raw-merged-folder-compare
+	./tippecanoe -z12 -e tests/join-population/tabblock_06001420-folder tests/join-population/tabblock_06001420.json
+	./tippecanoe -Z5 -z10 -e tests/join-population/macarthur-folder -l macarthur tests/join-population/macarthur.json
+	./tippecanoe -d10 -D10 -Z9 -z11 -e tests/join-population/macarthur2-folder -l macarthur tests/join-population/macarthur2.json
+	./tile-join -f -o tests/join-population/merged-folder.mbtiles tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder
+	./tippecanoe-decode tests/join-population/merged-folder.mbtiles > tests/join-population/merged-folder.mbtiles.json.check
+	cmp tests/join-population/merged-folder.mbtiles.json.check tests/join-population/merged-folder.mbtiles.json
+	./tile-join -n "merged name" -N "merged description" -e tests/join-population/merged-mbtiles-to-folder tests/join-population/tabblock_06001420.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
+	./tile-join -n "merged name" -N "merged description" -e tests/join-population/merged-folders-to-folder tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder
+	diff -x '*.DS_Store' -rq tests/join-population/merged-mbtiles-to-folder tests/join-population/merged-folders-to-folder
+	./tile-join -f -c tests/join-population/windows.csv -o tests/join-population/windows-merged.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
+	./tile-join -c tests/join-population/windows.csv -e tests/join-population/windows-merged-folder tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
+	./tile-join -f -o tests/join-population/windows-merged2.mbtiles tests/join-population/windows-merged-folder
+	./tippecanoe-decode tests/join-population/windows-merged.mbtiles > tests/join-population/windows-merged.mbtiles.json.check
+	./tippecanoe-decode tests/join-population/windows-merged2.mbtiles > tests/join-population/windows-merged2.mbtiles.json.check
+	cmp tests/join-population/windows-merged.mbtiles.json.check tests/join-population/windows-merged2.mbtiles.json.check
+	./tile-join -f -o tests/join-population/macarthur-and-macarthur2-merged.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
+	./tile-join -e tests/join-population/macarthur-and-macarthur2-folder tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
+	./tile-join -f -o tests/join-population/macarthur-and-macarthur2-merged2.mbtiles tests/join-population/macarthur-and-macarthur2-folder
+	./tippecanoe-decode tests/join-population/macarthur-and-macarthur2-merged.mbtiles > tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check
+	./tippecanoe-decode tests/join-population/macarthur-and-macarthur2-merged2.mbtiles > tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
+	cmp tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
+	rm tests/join-population/tabblock_06001420.mbtiles tests/join-population/joined.mbtiles tests/join-population/joined-i.mbtiles tests/join-population/joined.mbtiles.json.check tests/join-population/joined-i.mbtiles.json.check tests/join-population/macarthur.mbtiles tests/join-population/merged.mbtiles tests/join-population/merged.mbtiles.json.check tests/join-population/merged-folder.mbtiles tests/join-population/macarthur2.mbtiles tests/join-population/windows.mbtiles tests/join-population/windows-merged.mbtiles tests/join-population/windows-merged2.mbtiles tests/join-population/windows.mbtiles.json.check tests/join-population/just-macarthur.mbtiles tests/join-population/no-macarthur.mbtiles tests/join-population/just-macarthur.mbtiles.json.check tests/join-population/no-macarthur.mbtiles.json.check tests/join-population/merged-folder.mbtiles.json.check tests/join-population/windows-merged.mbtiles.json.check tests/join-population/windows-merged2.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged.mbtiles tests/join-population/macarthur-and-macarthur2-merged2.mbtiles tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
+	rm -rf tests/join-population/raw-merged-folder tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder tests/join-population/merged-mbtiles-to-folder tests/join-population/merged-folders-to-folder tests/join-population/windows-merged-folder tests/join-population/macarthur-and-macarthur2-folder
 
 # Use this target to regenerate the standards that the tests are compared against
 # after making a change that legitimately changes their output
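As a quick way to exercise the new directory-tiles targets above from a checkout (assuming the test fixtures under tests/ are present, as in the repository):

```sh
# Build the binaries the tests invoke, then run the two affected targets
make tippecanoe tippecanoe-decode tile-join
make raw-tiles-test join-test
```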
README.md (44 lines changed)
@@ -52,6 +52,24 @@ You can concatenate multiple GeoJSON features or files together,
 You can concatenate multiple GeoJSON features or files together,
 and it will parse out the features and ignore whatever other objects
 it encounters.
 
+Docker Image
+------------
+
+A tippecanoe Docker image can be built from source and executed as a task to
+automatically install dependencies and allow tippecanoe to run on any system
+supported by Docker.
+
+```docker
+$ docker build -t tippecanoe:latest .
+$ docker run -it --rm \
+  -v /tiledata:/data \
+  tippecanoe:latest \
+  tippecanoe --output=/data/output.mbtiles /data/example.geojson
+```
+
+The commands above will build a Docker image from the source and compile the
+latest version. The image supports all tippecanoe flags and options.
+
 Options
 -------
 
@@ -117,6 +135,9 @@ than at all newlines.
 * `-z` _zoom_ or `--maximum-zoom=`_zoom_: Maxzoom: the highest zoom level for which tiles are generated (default 14)
 * `-zg` or `--maximum-zoom=g`: Guess what is probably a reasonable maxzoom based on the spacing of features.
 * `-Z` _zoom_ or `--minimum-zoom=`_zoom_: Minzoom: the lowest zoom level for which tiles are generated (default 0)
+* `-ae` or `--extend-zooms-if-still-dropping`: Increase the maxzoom if features are still being dropped at that zoom level.
+  The detail and simplification options that ordinarily apply only to the maximum zoom level will apply both to the originally
+  specified maximum zoom and to any levels added beyond that.
 
 ### Tile resolution
 
@@ -413,22 +434,29 @@ The name is [a joking reference](http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too) to a "tiler" for making map tiles.
 tile-join
 =========
 
-Tile-join is a tool for joining new attributes from a CSV file to features that
-have already been tiled with tippecanoe. It reads the tiles from an existing .mbtiles
-file, matches them against the records of the CSV, and writes out a new tileset.
+Tile-join is a tool for joining new attributes from a CSV file to features
+that have already been tiled with tippecanoe. It reads the tiles from an
+existing .mbtiles file or a directory of tiles, matches them against the
+records of the CSV, and writes out a new tileset.
 
-If you specify multiple source mbtiles files, they are all read and their combined
-contents are written to the new mbtiles output. If they define the same layers or
-the same tiles, the layers or tiles are merged.
+If you specify multiple source mbtiles files or source directories of tiles,
+all the sources are read and their combined contents are written to the new
+mbtiles output. If they define the same layers or the same tiles, the layers
+or tiles are merged.
+
+You can use the `-e` flag to output a directory of tiles rather than a
+.mbtiles file.
 
 The options are:
 
-* `-o` *out.mbtiles*: Write the new tiles to the specified .mbtiles file
-* `-f`: Remove *out.mbtiles* if it already exists
+* `-o` *out.mbtiles*: Write the new tiles to the specified .mbtiles file.
+* `-e` *directory*: Write the new tiles to the specified directory instead of to an mbtiles file.
+* `-f`: Remove *out.mbtiles* if it already exists.
 * `-c` *match*`.csv`: Use *match*`.csv` as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
 * `-x` *key*: Remove attributes of type *key* from the output. You can use this to remove the field you are matching against if you no longer need it after joining, or to remove any other attributes you don't want.
 * `-i`: Only include features that matched the CSV.
 * `-pk`: Don't skip tiles larger than 500K.
+* `-pC`: Don't compress the PBF vector tile data.
 * `-l` *layer*: Include the named layer in the output. You can specify multiple `-l` options to keep multiple layers. If you don't specify, they will all be retained.
 * `-L` *layer*: Remove the named layer from the output. You can specify multiple `-L` options to remove multiple layers.
 * `-A` *attribution*: Set the attribution string.
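A minimal sketch of the directory workflow the new `-e` flag enables, with hypothetical file names (the flags themselves are documented above):

```sh
# Join CSV attributes and write the result as a directory of tiles
tile-join -c fields.csv -e joined-tiles source.mbtiles

# A directory of tiles can also be repackaged into an .mbtiles file
tile-join -f -o joined.mbtiles joined-tiles
```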
dirtiles.cpp
@@ -1,10 +1,20 @@
 #include <iostream>
 #include <fstream>
+#include <sstream>
 #include <string>
 #include <sys/stat.h>
-#include "rawtiles.hpp"
+#include "dirtiles.hpp"
 
-void write_raw_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf) {
+std::string dir_read_tile(std::string pbfPath) {
+	std::ifstream pbfFile(pbfPath, std::ios::in | std::ios::binary);
+	std::ostringstream contents;
+	contents << pbfFile.rdbuf();
+	pbfFile.close();
+
+	return (contents.str());
+}
+
+void dir_write_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf) {
 	mkdir(outdir, S_IRWXU | S_IRWXG | S_IRWXO);
 	std::string curdir(outdir);
 	std::string slash("/");
dirtiles.hpp (new file, 3 lines)
@@ -0,0 +1,3 @@
+std::string dir_read_tile(std::string pbfPath);
+
+void dir_write_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf);
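A minimal sketch (not part of the commit) of how these two declarations pair up, assuming the z/x/y.pbf directory layout that the tile-join tests above exercise:

```cpp
#include <string>

#include "dirtiles.hpp"

int main() {
    std::string pbf = "example tile bytes";  // real tiles hold protobuf data
    // Write tile z=0, x=0, y=0 under tiles-out/ (layout assumed to be z/x/y.pbf)
    dir_write_tile("tiles-out", 0, 0, 0, pbf);
    // Read the same tile back by its path
    std::string back = dir_read_tile("tiles-out/0/0/0.pbf");
    return back == pbf ? 0 : 1;
}
```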
geojson.cpp (40 lines changed)
@@ -20,6 +20,7 @@
 #include <sys/resource.h>
 #include <pthread.h>
 #include <vector>
+#include <algorithm>
 #include <set>
 #include <map>
 #include <string>
@@ -98,7 +99,7 @@ static long long parse_geometry1(int t, json_object *j, long long *bbox, drawvec
 	return geom.size();
 }
 
-int serialize_geometry(json_object *geometry, json_object *properties, json_object *id, const char *reading, int line, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, const char *fname, int basezoom, int layer, double droprate, long long *file_bbox, json_object *tippecanoe, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, json_object *feature, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types) {
+int serialize_geometry(json_object *geometry, json_object *properties, json_object *id, const char *reading, int line, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, const char *fname, int basezoom, int layer, double droprate, long long *file_bbox, json_object *tippecanoe, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, json_object *feature, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, double *dist_sum, size_t *dist_count, bool want_dist) {
 	json_object *geometry_type = json_hash_get(geometry, "type");
 	if (geometry_type == NULL) {
 		static int warned = 0;
@@ -271,6 +272,33 @@ int serialize_geometry(json_object *geometry, json_object *properties, json_obje
 		dv = fix_polygon(dv);
 	}
 
+	if (want_dist) {
+		std::vector<unsigned long long> locs;
+		for (size_t i = 0; i < dv.size(); i++) {
+			if (dv[i].op == VT_MOVETO || dv[i].op == VT_LINETO) {
+				locs.push_back(encode(dv[i].x << geometry_scale, dv[i].y << geometry_scale));
+			}
+		}
+		std::sort(locs.begin(), locs.end());
+		size_t n = 0;
+		double sum = 0;
+		for (size_t i = 1; i < locs.size(); i++) {
+			if (locs[i - 1] != locs[i]) {
+				sum += log(locs[i] - locs[i - 1]);
+				n++;
+			}
+		}
+		if (n > 0) {
+			double avg = exp(sum / n);
+			// Convert approximately from tile units to feet
+			double dist_ft = sqrt(avg) / 33;
+
+			*dist_sum += log(dist_ft) * n;
+			*dist_count += n;
+		}
+		locs.clear();
+	}
+
 	bool inline_meta = true;
 	// Don't inline metadata for features that will span several tiles at maxzoom
 	if (g > 0 && (bbox[2] < bbox[0] || bbox[3] < bbox[1])) {
@@ -423,7 +451,7 @@ void check_crs(json_object *j, const char *reading) {
 	}
 }
 
-void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types) {
+void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, double *dist_sum, size_t *dist_count, bool want_dist) {
 	long long found_hashes = 0;
 	long long found_features = 0;
 	long long found_geometries = 0;
@@ -491,7 +519,7 @@ void parse_json(json_pull *jp, const char *reading, volatile long long *layer_se
 			}
 			found_geometries++;
 
-			serialize_geometry(j, NULL, NULL, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, NULL, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types);
+			serialize_geometry(j, NULL, NULL, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, NULL, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types, dist_sum, dist_count, want_dist);
 			json_free(j);
 			continue;
 		}
@@ -534,10 +562,10 @@ void parse_json(json_pull *jp, const char *reading, volatile long long *layer_se
 		if (geometries != NULL) {
 			size_t g;
 			for (g = 0; g < geometries->length; g++) {
-				serialize_geometry(geometries->array[g], properties, id, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types);
+				serialize_geometry(geometries->array[g], properties, id, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types, dist_sum, dist_count, want_dist);
 			}
 		} else {
-			serialize_geometry(geometry, properties, id, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types);
+			serialize_geometry(geometry, properties, id, reading, jp->line, layer_seq, progress_seq, metapos, geompos, indexpos, exclude, include, exclude_all, metafile, geomfile, indexfile, poolfile, treefile, fname, basezoom, layer, droprate, file_bbox, tippecanoe, segment, initialized, initial_x, initial_y, readers, maxzoom, j, layermap, layername, uses_gamma, attribute_types, dist_sum, dist_count, want_dist);
 		}
 
 		json_free(j);
@@ -549,7 +577,7 @@ void parse_json(json_pull *jp, const char *reading, volatile long long *layer_se
 void *run_parse_json(void *v) {
 	struct parse_json_args *pja = (struct parse_json_args *) v;
 
-	parse_json(pja->jp, pja->reading, pja->layer_seq, pja->progress_seq, pja->metapos, pja->geompos, pja->indexpos, pja->exclude, pja->include, pja->exclude_all, pja->metafile, pja->geomfile, pja->indexfile, pja->poolfile, pja->treefile, pja->fname, pja->basezoom, pja->layer, pja->droprate, pja->file_bbox, pja->segment, pja->initialized, pja->initial_x, pja->initial_y, pja->readers, pja->maxzoom, pja->layermap, *pja->layername, pja->uses_gamma, pja->attribute_types);
+	parse_json(pja->jp, pja->reading, pja->layer_seq, pja->progress_seq, pja->metapos, pja->geompos, pja->indexpos, pja->exclude, pja->include, pja->exclude_all, pja->metafile, pja->geomfile, pja->indexfile, pja->poolfile, pja->treefile, pja->fname, pja->basezoom, pja->layer, pja->droprate, pja->file_bbox, pja->segment, pja->initialized, pja->initial_x, pja->initial_y, pja->readers, pja->maxzoom, pja->layermap, *pja->layername, pja->uses_gamma, pja->attribute_types, pja->dist_sum, pja->dist_count, pja->want_dist);
 
 	return NULL;
 }
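For reference, the `want_dist` block above feeds a geometric mean: it sums the log of the gaps between successive distinct encoded locations, and main.cpp later exponentiates the average. A self-contained sketch of that statistic, with illustrative values rather than anything from the commit:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Hypothetical encoded positions along the space-filling curve
    std::vector<unsigned long long> locs = {10, 10, 40, 400, 1000};
    std::sort(locs.begin(), locs.end());
    double sum = 0;
    size_t n = 0;
    for (size_t i = 1; i < locs.size(); i++) {
        if (locs[i - 1] != locs[i]) {  // ignore zero-length gaps
            sum += log(locs[i] - locs[i - 1]);
            n++;
        }
    }
    if (n > 0) {
        // exp of the mean log is the geometric mean of the gaps:
        // cbrt(30 * 360 * 600) is about 186.5 here
        printf("geometric mean gap: %.1f\n", exp(sum / n));
    }
    return 0;
}
```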
geojson.hpp
@@ -29,10 +29,13 @@ struct parse_json_args {
 	std::string *layername;
 	bool uses_gamma;
 	std::map<std::string, int> const *attribute_types;
+	double *dist_sum;
+	size_t *dist_count;
+	bool want_dist;
 };
 
 struct json_pull *json_begin_map(char *map, long long len);
 void json_end_map(struct json_pull *jp);
 
-void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types);
+void parse_json(json_pull *jp, const char *reading, volatile long long *layer_seq, volatile long long *progress_seq, long long *metapos, long long *geompos, long long *indexpos, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, FILE *metafile, FILE *geomfile, FILE *indexfile, struct memfile *poolfile, struct memfile *treefile, char *fname, int basezoom, int layer, double droprate, long long *file_bbox, int segment, int *initialized, unsigned *initial_x, unsigned *initial_y, struct reader *readers, int maxzoom, std::map<std::string, layermap_entry> *layermap, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, double *dist_sum, size_t *dist_count, bool want_dist);
 void *run_parse_json(void *v);
jsonpull/jsonpull.c
@@ -36,14 +36,14 @@ json_pull *json_begin(ssize_t (*read)(struct json_pull *, char *buffer, size_t n
 
 static inline int peek(json_pull *j) {
 	if (j->buffer_head < j->buffer_tail) {
-		return j->buffer[j->buffer_head];
+		return (unsigned char) j->buffer[j->buffer_head];
 	} else {
 		j->buffer_head = 0;
 		j->buffer_tail = j->read(j, j->buffer, BUFFER);
 		if (j->buffer_head >= j->buffer_tail) {
 			return EOF;
 		}
-		return j->buffer[j->buffer_head];
+		return (unsigned char) j->buffer[j->buffer_head];
 	}
 }
 
@@ -295,6 +295,22 @@ again:
 
 			return NULL;
 		}
+
+		// Byte-order mark
+		if (c == 0xEF) {
+			int c2 = peek(j);
+			if (c2 == 0xBB) {
+				c2 = read_wrap(j);
+				c2 = peek(j);
+				if (c2 == 0xBF) {
+					c2 = read_wrap(j);
+					c = ' ';
+					continue;
+				}
+			}
+			j->error = "Corrupt byte-order mark found";
+			return NULL;
+		}
 	} while (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == 0x1E);
 
 	/////////////////////////// Arrays
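The `(unsigned char)` casts matter here: with a signed char buffer, 0xEF would come back negative and never match the BOM test. With this change, a BOM-prefixed file should parse; a hedged shell sketch with hypothetical file names (octal escapes for the three BOM bytes EF BB BF):

```sh
printf '\357\273\277{"type":"Feature","geometry":{"type":"Point","coordinates":[0,0]},"properties":{}}' > bom.json
tippecanoe -f -o bom.mbtiles bom.json
```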
main.cpp (65 lines changed)
@@ -80,8 +80,6 @@
 long long MAX_FILES;
 static long long diskfree;
 
-#define MAX_ZOOM 24
-
 struct reader {
 	int metafd;
 	int poolfd;
@@ -377,7 +375,7 @@ void *run_sort(void *v) {
 	return NULL;
 }
 
-void do_read_parallel(char *map, long long len, long long initial_offset, const char *reading, struct reader *reader, volatile long long *progress_seq, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, std::vector<std::map<std::string, layermap_entry> > *layermaps, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y, int maxzoom, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, int separator) {
+void do_read_parallel(char *map, long long len, long long initial_offset, const char *reading, struct reader *reader, volatile long long *progress_seq, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, std::vector<std::map<std::string, layermap_entry> > *layermaps, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y, int maxzoom, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, int separator, double *dist_sum, size_t *dist_count, bool want_dist) {
 	long long segs[CPUS + 1];
 	segs[0] = 0;
 	segs[CPUS] = len;
@@ -390,11 +388,15 @@ void do_read_parallel(char *map, long long len, long long initial_offset, const
 		}
 	}
 
+	double dist_sums[CPUS];
+	size_t dist_counts[CPUS];
+
 	volatile long long layer_seq[CPUS];
 	for (size_t i = 0; i < CPUS; i++) {
 		// To preserve feature ordering, unique id for each segment
 		// begins with that segment's offset into the input
 		layer_seq[i] = segs[i] + initial_offset;
+		dist_sums[i] = dist_counts[i] = 0;
 	}
 
 	struct parse_json_args pja[CPUS];
@@ -436,6 +438,9 @@ void do_read_parallel(char *map, long long len, long long initial_offset, const
 		pja[i].layername = &layername;
 		pja[i].uses_gamma = uses_gamma;
 		pja[i].attribute_types = attribute_types;
+		pja[i].dist_sum = &(dist_sums[i]);
+		pja[i].dist_count = &(dist_counts[i]);
+		pja[i].want_dist = want_dist;
 
 		if (pthread_create(&pthreads[i], NULL, run_parse_json, &pja[i]) != 0) {
 			perror("pthread_create");
@@ -450,6 +455,9 @@ void do_read_parallel(char *map, long long len, long long initial_offset, const
 			perror("pthread_join 370");
 		}
 
+		*dist_sum += dist_sums[i];
+		*dist_count += dist_counts[i];
+
 		json_end_map(pja[i].jp);
 	}
 }
@@ -481,6 +489,9 @@ struct read_parallel_arg {
 	std::string layername;
 	bool uses_gamma;
 	std::map<std::string, int> const *attribute_types;
+	double *dist_sum;
+	size_t *dist_count;
+	bool want_dist;
 };
 
 void *run_read_parallel(void *v) {
@@ -502,7 +513,7 @@ void *run_read_parallel(void *v) {
 	}
 	madvise(map, rpa->len, MADV_RANDOM);  // sequential, but from several pointers at once
 
-	do_read_parallel(map, rpa->len, rpa->offset, rpa->reading, rpa->reader, rpa->progress_seq, rpa->exclude, rpa->include, rpa->exclude_all, rpa->fname, rpa->basezoom, rpa->source, rpa->nlayers, rpa->layermaps, rpa->droprate, rpa->initialized, rpa->initial_x, rpa->initial_y, rpa->maxzoom, rpa->layername, rpa->uses_gamma, rpa->attribute_types, rpa->separator);
+	do_read_parallel(map, rpa->len, rpa->offset, rpa->reading, rpa->reader, rpa->progress_seq, rpa->exclude, rpa->include, rpa->exclude_all, rpa->fname, rpa->basezoom, rpa->source, rpa->nlayers, rpa->layermaps, rpa->droprate, rpa->initialized, rpa->initial_x, rpa->initial_y, rpa->maxzoom, rpa->layername, rpa->uses_gamma, rpa->attribute_types, rpa->separator, rpa->dist_sum, rpa->dist_count, rpa->want_dist);
 
 	madvise(map, rpa->len, MADV_DONTNEED);
 	if (munmap(map, rpa->len) != 0) {
@@ -519,7 +530,7 @@ void *run_read_parallel(void *v) {
 	return NULL;
 }
 
-void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile int *is_parsing, pthread_t *parallel_parser, bool &parser_created, const char *reading, struct reader *reader, volatile long long *progress_seq, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, std::vector<std::map<std::string, layermap_entry> > &layermaps, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y, int maxzoom, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, int separator) {
+void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile int *is_parsing, pthread_t *parallel_parser, bool &parser_created, const char *reading, struct reader *reader, volatile long long *progress_seq, std::set<std::string> *exclude, std::set<std::string> *include, int exclude_all, char *fname, int basezoom, int source, int nlayers, std::vector<std::map<std::string, layermap_entry> > &layermaps, double droprate, int *initialized, unsigned *initial_x, unsigned *initial_y, int maxzoom, std::string layername, bool uses_gamma, std::map<std::string, int> const *attribute_types, int separator, double *dist_sum, size_t *dist_count, bool want_dist) {
 	// This has to kick off an intermediate thread to start the parser threads,
 	// so the main thread can get back to reading the next input stage while
 	// the intermediate thread waits for the completion of the parser threads.
@@ -558,6 +569,9 @@ void start_parsing(int fd, FILE *fp, long long offset, long long len, volatile i
 	rpa->layername = layername;
 	rpa->uses_gamma = uses_gamma;
 	rpa->attribute_types = attribute_types;
+	rpa->dist_sum = dist_sum;
+	rpa->dist_count = dist_count;
+	rpa->want_dist = want_dist;
 
 	if (pthread_create(parallel_parser, NULL, run_read_parallel, rpa) != 0) {
 		perror("pthread_create");
@@ -1179,6 +1193,8 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 	}
 
 	long overall_offset = 0;
+	double dist_sum = 0;
+	size_t dist_count = 0;
 
 	size_t nsources = sources.size();
 	for (size_t source = 0; source < nsources; source++) {
@@ -1241,7 +1257,7 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 		}
 
 		if (map != NULL && map != MAP_FAILED && read_parallel_this) {
-			do_read_parallel(map, st.st_size - off, overall_offset, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, &layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, uses_gamma, attribute_types, read_parallel_this);
+			do_read_parallel(map, st.st_size - off, overall_offset, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, &layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, uses_gamma, attribute_types, read_parallel_this, &dist_sum, &dist_count, guess_maxzoom);
 			overall_offset += st.st_size - off;
 			checkdisk(reader, CPUS);
 
@@ -1317,7 +1333,7 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 				}
 
 				fflush(readfp);
-				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0, attribute_types, read_parallel_this);
+				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0, attribute_types, read_parallel_this, &dist_sum, &dist_count, guess_maxzoom);
 
 				initial_offset += ahead;
 				overall_offset += ahead;
@@ -1354,7 +1370,7 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 			fflush(readfp);
 
 			if (ahead > 0) {
-				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0, attribute_types, read_parallel_this);
+				start_parsing(readfd, readfp, initial_offset, ahead, &is_parsing, &parallel_parser, parser_created, reading.c_str(), reader, &progress_seq, exclude, include, exclude_all, fname, basezoom, layer, nlayers, layermaps, droprate, initialized, initial_x, initial_y, maxzoom, sources[layer].layer, gamma != 0, attribute_types, read_parallel_this, &dist_sum, &dist_count, guess_maxzoom);
 
 				if (parser_created) {
 					if (pthread_join(parallel_parser, NULL) != 0) {
@@ -1371,7 +1387,7 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 
 			long long layer_seq = overall_offset;
 			json_pull *jp = json_begin_file(fp);
-			parse_json(jp, reading.c_str(), &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, basezoom, layer, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0], reader, maxzoom, &layermaps[0], sources[layer].layer, uses_gamma, attribute_types);
+			parse_json(jp, reading.c_str(), &layer_seq, &progress_seq, &reader[0].metapos, &reader[0].geompos, &reader[0].indexpos, exclude, include, exclude_all, reader[0].metafile, reader[0].geomfile, reader[0].indexfile, reader[0].poolfile, reader[0].treefile, fname, basezoom, layer, droprate, reader[0].file_bbox, 0, &initialized[0], &initial_x[0], &initial_y[0], reader, maxzoom, &layermaps[0], sources[layer].layer, uses_gamma, attribute_types, &dist_sum, &dist_count, guess_maxzoom);
 			json_end(jp);
 			overall_offset = layer_seq;
 			checkdisk(reader, CPUS);
@@ -1628,6 +1644,11 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 		}
 	}
 
+	if (count == 0 && dist_count == 0) {
+		fprintf(stderr, "Can't guess maxzoom (-zg) without at least two distinct feature locations\n");
+		exit(EXIT_FAILURE);
+	}
+
 	if (count > 0) {
 		// Geometric mean is appropriate because distances between features
 		// are typically lognormally distributed
@@ -1635,7 +1656,8 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 
 		// Convert approximately from tile units to feet
 		double dist_ft = sqrt(avg) / 33;
-		double want = dist_ft / 250;
+		// Factor of 8 (3 zooms) beyond minimum required to distinguish features
+		double want = dist_ft / 8;
 
 		maxzoom = ceil(log(360 / (.00000274 * want)) / log(2) - full_detail);
 		if (maxzoom < 0) {
@@ -1648,9 +1670,25 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
 		if (!quiet) {
 			fprintf(stderr, "Choosing a maxzoom of -z%d for features about %d feet apart\n", maxzoom, (int) ceil(dist_ft));
 		}
-	} else {
-		fprintf(stderr, "Can't guess maxzoom (-zg) without at least two distinct feature locations\n");
-		exit(EXIT_FAILURE);
 	}
 
+	if (dist_count != 0) {
+		double want2 = exp(dist_sum / dist_count) / 8;
+		int mz = ceil(log(360 / (.00000274 * want2)) / log(2) - full_detail);
+
+		if (mz < 0) {
+			mz = 0;
+		}
+		if (mz > MAX_ZOOM) {
+			mz = MAX_ZOOM;
+		}
+
+		if (mz > maxzoom || count <= 0) {
+			if (!quiet) {
+				fprintf(stderr, "Choosing a maxzoom of -z%d for resolution of about %d feet within features\n", mz, (int) exp(dist_sum / dist_count));
+			}
+			maxzoom = mz;
+		}
+	}
+
 	if (maxzoom < minzoom) {
@@ -2062,6 +2100,7 @@ int main(int argc, char **argv) {
 		{"Zoom levels", 0, 0, 0},
 		{"maximum-zoom", required_argument, 0, 'z'},
 		{"minimum-zoom", required_argument, 0, 'Z'},
+		{"extend-zooms-if-still-dropping", no_argument, &additional[A_EXTEND_ZOOMS], 1},
 
 		{"Tile resolution", 0, 0, 0},
 		{"full-detail", required_argument, 0, 'd'},
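A quick reading of the guessing formula above: the constant 0.00000274° is roughly one foot of latitude (about 1/365,000 of a degree), and at maxzoom z with detail d a single tile unit spans 360°/2^(z+d) of the world. Requiring one tile unit to be no larger than `want` feet and solving for z gives

$$\frac{360}{2^{\,z+\mathrm{full\_detail}}} \le 2.74\times10^{-6}\cdot\mathrm{want} \;\Longrightarrow\; z = \left\lceil \log_2\frac{360}{2.74\times10^{-6}\cdot\mathrm{want}} - \mathrm{full\_detail} \right\rceil$$

which is exactly `ceil(log(360 / (.00000274 * want)) / log(2) - full_detail)`, clamped to the range [0, MAX_ZOOM].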
main.hpp (2 additions)
@@ -19,3 +19,5 @@ extern size_t max_tile_size;
 
 int mkstemp_cloexec(char *name);
 FILE *fopen_oflag(const char *name, const char *mode, int oflag);
+
+#define MAX_ZOOM 24
man/tippecanoe.1
@@ -48,6 +48,24 @@ The GeoJSON features need not be wrapped in a FeatureCollection.
 You can concatenate multiple GeoJSON features or files together,
 and it will parse out the features and ignore whatever other objects
 it encounters.
+.SH Docker Image
+.PP
+A tippecanoe Docker image can be built from source and executed as a task to
+automatically install dependencies and allow tippecanoe to run on any system
+supported by Docker.
+.PP
+.RS
+.nf
+$ docker build \-t tippecanoe:latest .
+$ docker run \-it \-\-rm \\
+  \-v /tiledata:/data \\
+  tippecanoe:latest \\
+  tippecanoe \-\-output=/data/output.mbtiles /data/example.geojson
+.fi
+.RE
+.PP
+The commands above will build a Docker image from the source and compile the
+latest version. The image supports all tippecanoe flags and options.
 .SH Options
 .PP
 There are a lot of options. A lot of the time you won't want to use any of them
@@ -126,6 +144,10 @@ than at all newlines.
 \fB\fC\-zg\fR or \fB\fC\-\-maximum\-zoom=g\fR: Guess what is probably a reasonable maxzoom based on the spacing of features.
 .IP \(bu 2
 \fB\fC\-Z\fR \fIzoom\fP or \fB\fC\-\-minimum\-zoom=\fR\fIzoom\fP: Minzoom: the lowest zoom level for which tiles are generated (default 0)
+.IP \(bu 2
+\fB\fC\-ae\fR or \fB\fC\-\-extend\-zooms\-if\-still\-dropping\fR: Increase the maxzoom if features are still being dropped at that zoom level.
+The detail and simplification options that ordinarily apply only to the maximum zoom level will apply both to the originally
+specified maximum zoom and to any levels added beyond that.
 .RE
 .SS Tile resolution
 .RS
@@ -479,20 +501,27 @@ Check out some examples of maps made with tippecanoe \[la]MADE_WITH.md\[ra]
 The name is a joking reference \[la]http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too\[ra] to a "tiler" for making map tiles.
 .SH tile\-join
 .PP
-Tile\-join is a tool for joining new attributes from a CSV file to features that
-have already been tiled with tippecanoe. It reads the tiles from an existing .mbtiles
-file, matches them against the records of the CSV, and writes out a new tileset.
+Tile\-join is a tool for joining new attributes from a CSV file to features
+that have already been tiled with tippecanoe. It reads the tiles from an
+existing .mbtiles file or a directory of tiles, matches them against the
+records of the CSV, and writes out a new tileset.
 .PP
-If you specify multiple source mbtiles files, they are all read and their combined
-contents are written to the new mbtiles output. If they define the same layers or
-the same tiles, the layers or tiles are merged.
+If you specify multiple source mbtiles files or source directories of tiles,
+all the sources are read and their combined contents are written to the new
+mbtiles output. If they define the same layers or the same tiles, the layers
+or tiles are merged.
+.PP
+You can use the \fB\fC\-e\fR flag to output a directory of tiles rather than a
+\&.mbtiles file.
 .PP
 The options are:
 .RS
 .IP \(bu 2
-\fB\fC\-o\fR \fIout.mbtiles\fP: Write the new tiles to the specified .mbtiles file
+\fB\fC\-o\fR \fIout.mbtiles\fP: Write the new tiles to the specified .mbtiles file.
 .IP \(bu 2
-\fB\fC\-f\fR: Remove \fIout.mbtiles\fP if it already exists
+\fB\fC\-e\fR \fIdirectory\fP: Write the new tiles to the specified directory instead of to an mbtiles file.
+.IP \(bu 2
+\fB\fC\-f\fR: Remove \fIout.mbtiles\fP if it already exists.
 .IP \(bu 2
 \fB\fC\-c\fR \fImatch\fP\fB\fC\&.csv\fR: Use \fImatch\fP\fB\fC\&.csv\fR as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
 .IP \(bu 2
@@ -502,6 +531,8 @@ The options are:
 .IP \(bu 2
 \fB\fC\-pk\fR: Don't skip tiles larger than 500K.
 .IP \(bu 2
+\fB\fC\-pC\fR: Don't compress the PBF vector tile data.
+.IP \(bu 2
 \fB\fC\-l\fR \fIlayer\fP: Include the named layer in the output. You can specify multiple \fB\fC\-l\fR options to keep multiple layers. If you don't specify, they will all be retained.
 .IP \(bu 2
 \fB\fC\-L\fR \fIlayer\fP: Remove the named layer from the output. You can specify multiple \fB\fC\-L\fR options to remove multiple layers.
memfile.cpp
@@ -53,7 +53,7 @@ int memfile_write(struct memfile *file, void *s, long long len) {
 		return -1;
 	}
 
-	file->len += INCREMENT;
+	file->len += (len + INCREMENT + 1) / INCREMENT * INCREMENT;
 
 	if (ftruncate(file->fd, file->len) != 0) {
 		return -1;
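This pairs with the 1.18.2 changelog entry above ("Fix crash with very long (>128K) attribute values"): growing by a fixed INCREMENT could leave the file smaller than a single oversized write. Assuming INCREMENT is 128K (131072, matching ">128K"), a 300,000-byte write previously grew the file by only 131,072 bytes; the new expression rounds the needed growth up to a multiple of INCREMENT with headroom, (300000 + 131072 + 1) / 131072 * 131072 = 393,216 bytes, which covers the write.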
options.hpp
@@ -14,6 +14,7 @@
 #define A_DROP_SMALLEST_AS_NEEDED ((int) 'n')
 #define A_GRID_LOW_ZOOMS ((int) 'L')
 #define A_DETECT_WRAPAROUND ((int) 'w')
+#define A_EXTEND_ZOOMS ((int) 'e')
 
 #define P_SIMPLIFY ((int) 's')
 #define P_SIMPLIFY_LOW ((int) 'S')
protozero/byteswap.hpp
@@ -16,56 +16,69 @@ documentation.
 * @brief Contains functions to swap bytes in values (for different endianness).
 */
 
+#include <cassert>
 #include <cstdint>
 
 #include <protozero/config.hpp>
 
 namespace protozero {
 namespace detail {
 
-/**
- * Swap N byte value between endianness formats. This template function must
- * be specialized to actually work.
- */
-template <int N>
-inline void byteswap(const char* /*data*/, char* /*result*/) {
-    static_assert(N == 1, "Can only swap 4 or 8 byte values");
-}
-
 /**
  * Swap 4 byte value (int32_t, uint32_t, float) between endianness formats.
  */
-template <>
-inline void byteswap<4>(const char* data, char* result) {
+inline uint32_t byteswap_impl(uint32_t value) noexcept {
 #ifdef PROTOZERO_USE_BUILTIN_BSWAP
-    *reinterpret_cast<uint32_t*>(result) = __builtin_bswap32(*reinterpret_cast<const uint32_t*>(data));
+    return __builtin_bswap32(value);
 #else
-    result[3] = data[0];
-    result[2] = data[1];
-    result[1] = data[2];
-    result[0] = data[3];
+    return ((value & 0xff000000) >> 24) |
+           ((value & 0x00ff0000) >>  8) |
+           ((value & 0x0000ff00) <<  8) |
+           ((value & 0x000000ff) << 24);
 #endif
 }
 
 /**
  * Swap 8 byte value (int64_t, uint64_t, double) between endianness formats.
  */
-template <>
-inline void byteswap<8>(const char* data, char* result) {
+inline uint64_t byteswap_impl(uint64_t value) noexcept {
 #ifdef PROTOZERO_USE_BUILTIN_BSWAP
-    *reinterpret_cast<uint64_t*>(result) = __builtin_bswap64(*reinterpret_cast<const uint64_t*>(data));
+    return __builtin_bswap64(value);
 #else
-    result[7] = data[0];
-    result[6] = data[1];
-    result[5] = data[2];
-    result[4] = data[3];
-    result[3] = data[4];
-    result[2] = data[5];
-    result[1] = data[6];
-    result[0] = data[7];
+    return ((value & 0xff00000000000000ULL) >> 56) |
+           ((value & 0x00ff000000000000ULL) >> 40) |
+           ((value & 0x0000ff0000000000ULL) >> 24) |
+           ((value & 0x000000ff00000000ULL) >>  8) |
+           ((value & 0x00000000ff000000ULL) <<  8) |
+           ((value & 0x0000000000ff0000ULL) << 24) |
+           ((value & 0x000000000000ff00ULL) << 40) |
+           ((value & 0x00000000000000ffULL) << 56);
 #endif
 }
 
+inline void byteswap_inplace(uint32_t* ptr) noexcept {
+    *ptr = byteswap_impl(*ptr);
+}
+
+inline void byteswap_inplace(uint64_t* ptr) noexcept {
+    *ptr = byteswap_impl(*ptr);
+}
+
+inline void byteswap_inplace(int32_t* ptr) noexcept {
+    auto bptr = reinterpret_cast<uint32_t*>(ptr);
+    *bptr = byteswap_impl(*bptr);
+}
+
+inline void byteswap_inplace(int64_t* ptr) noexcept {
+    auto bptr = reinterpret_cast<uint64_t*>(ptr);
+    *bptr = byteswap_impl(*bptr);
+}
+
+inline void byteswap_inplace(float* ptr) noexcept {
+    auto bptr = reinterpret_cast<uint32_t*>(ptr);
+    *bptr = byteswap_impl(*bptr);
+}
+
+inline void byteswap_inplace(double* ptr) noexcept {
+    auto bptr = reinterpret_cast<uint64_t*>(ptr);
+    *bptr = byteswap_impl(*bptr);
+}
 
 } // end namespace detail
 } // end namespace protozero
 
 #endif // PROTOZERO_BYTESWAP_HPP
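The fallback paths above are conventional mask-and-shift swaps. A standalone sanity check, mirroring the 32-bit fallback locally rather than calling the vendored header:

```cpp
#include <cassert>
#include <cstdint>

// Local copy of the 32-bit fallback logic from byteswap_impl, for illustration
static uint32_t swap32(uint32_t value) {
    return ((value & 0xff000000) >> 24) |
           ((value & 0x00ff0000) >> 8) |
           ((value & 0x0000ff00) << 8) |
           ((value & 0x000000ff) << 24);
}

int main() {
    assert(swap32(0x12345678) == 0x78563412);
    assert(swap32(swap32(0xdeadbeef)) == 0xdeadbeef);  // swapping twice is the identity
    return 0;
}
```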
protozero/config.hpp
@@ -35,17 +35,6 @@ documentation.
 # define PROTOZERO_BYTE_ORDER PROTOZERO_LITTLE_ENDIAN
 #endif
 
-// On some ARM machines and depending on compiler settings access to unaligned
-// floating point values will result in a SIGBUS. Do not use the bare pointers
-// in this case.
-#if PROTOZERO_BYTE_ORDER == PROTOZERO_LITTLE_ENDIAN
-# if !defined(__arm__) && !defined(_M_ARM)
-#  ifndef PROTOZERO_DO_NOT_USE_BARE_POINTER
-#   define PROTOZERO_USE_BARE_POINTER_FOR_PACKED_FIXED
-#  endif
-# endif
-#endif
-
 // Check whether __builtin_bswap is available
 #if defined(__GNUC__) || defined(__clang__)
 # define PROTOZERO_USE_BUILTIN_BSWAP
protozero/exception.hpp
@@ -29,7 +29,7 @@ namespace protozero {
 */
 struct exception : std::exception {
     /// Returns the explanatory string.
-    const char *what() const noexcept override { return "pbf exception"; }
+    const char* what() const noexcept override { return "pbf exception"; }
 };
 
 /**
@@ -38,7 +38,7 @@ struct exception : std::exception {
 */
 struct varint_too_long_exception : exception {
     /// Returns the explanatory string.
-    const char *what() const noexcept override { return "varint too long exception"; }
+    const char* what() const noexcept override { return "varint too long exception"; }
 };
 
 /**
@@ -47,7 +47,7 @@ struct varint_too_long_exception : exception {
 */
 struct unknown_pbf_wire_type_exception : exception {
     /// Returns the explanatory string.
-    const char *what() const noexcept override { return "unknown pbf field type exception"; }
+    const char* what() const noexcept override { return "unknown pbf field type exception"; }
 };
 
 /**
@@ -60,7 +60,7 @@ struct unknown_pbf_wire_type_exception : exception {
 */
 struct end_of_buffer_exception : exception {
     /// Returns the explanatory string.
-    const char *what() const noexcept override { return "end of buffer exception"; }
+    const char* what() const noexcept override { return "end of buffer exception"; }
 };
 
 } // end namespace protozero
328
protozero/iterators.hpp
Normal file
328
protozero/iterators.hpp
Normal file
@ -0,0 +1,328 @@
|
||||
#ifndef PROTOZERO_ITERATORS_HPP
|
||||
#define PROTOZERO_ITERATORS_HPP
|
||||
|
||||
/*****************************************************************************
|
||||
|
||||
protozero - Minimalistic protocol buffer decoder and encoder in C++.
|
||||
|
||||
This file is from https://github.com/mapbox/protozero where you can find more
|
||||
documentation.
|
||||
|
||||
*****************************************************************************/
|
||||
|
||||
/**
|
||||
* @file iterators.hpp
|
||||
*
|
||||
* @brief Contains the iterators for access to packed repeated fields.
|
||||
*/
|
||||
|
||||
#include <cstring>
|
||||
#include <iterator>
|
||||
#include <utility>
|
||||
|
||||
#include <protozero/config.hpp>
|
||||
#include <protozero/varint.hpp>
|
||||
|
||||
#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN
|
||||
# include <protozero/byteswap.hpp>
|
||||
#endif
|
||||
|
||||
namespace protozero {
|
||||
|
||||
/**
|
||||
* A range of iterators based on std::pair. Created from beginning and
|
||||
* end iterators. Used as a return type from some pbf_reader methods
|
||||
* that is easy to use with range-based for loops.
|
||||
*/
|
||||
template <typename T, typename P = std::pair<T, T>>
|
||||
class iterator_range :
|
||||
#ifdef PROTOZERO_STRICT_API
|
||||
protected
|
||||
#else
|
||||
public
|
||||
#endif
|
||||
P {
|
||||
|
||||
public:
|
||||
|
||||
/// The type of the iterators in this range.
|
||||
using iterator = T;
|
||||
|
||||
/// The value type of the underlying iterator.
|
||||
using value_type = typename std::iterator_traits<T>::value_type;
|
||||
|
||||
/**
|
||||
* Default constructor. Create empty iterator_range.
|
||||
*/
|
||||
constexpr iterator_range() :
|
||||
P(iterator{}, iterator{}) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Create iterator range from two iterators.
|
||||
*
|
||||
* @param first_iterator Iterator to beginning or range.
|
||||
* @param last_iterator Iterator to end or range.
|
||||
*/
|
||||
constexpr iterator_range(iterator&& first_iterator, iterator&& last_iterator) :
|
||||
P(std::forward<iterator>(first_iterator),
|
||||
std::forward<iterator>(last_iterator)) {
|
||||
}
|
||||
|
||||
/// Return iterator to beginning of range.
|
||||
constexpr iterator begin() const noexcept {
|
||||
return this->first;
|
||||
}
|
||||
|
||||
/// Return iterator to end of range.
|
||||
constexpr iterator end() const noexcept {
|
||||
return this->second;
|
||||
}
|
||||
|
||||
/// Return iterator to beginning of range.
|
||||
constexpr iterator cbegin() const noexcept {
|
||||
return this->first;
|
||||
}
|
||||
|
||||
/// Return iterator to end of range.
|
||||
constexpr iterator cend() const noexcept {
|
||||
return this->second;
|
||||
}
|
||||
|
||||
/// Return true if this range is empty.
|
||||
constexpr std::size_t empty() const noexcept {
|
||||
return begin() == end();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get element at the beginning of the range.
|
||||
*
|
||||
* @pre Range must not be empty.
|
||||
*/
|
||||
value_type front() const {
|
||||
protozero_assert(!empty());
|
||||
return *(this->first);
|
||||
}
|
||||
|
||||
/**
|
||||
* Advance beginning of range by one.
|
||||
*
|
||||
* @pre Range must not be empty.
|
||||
*/
|
||||
void drop_front() {
|
||||
protozero_assert(!empty());
|
||||
++this->first;
|
||||
}
|
||||
|
||||
/**
|
||||
* Swap the contents of this range with the other.
|
||||
*
|
||||
* @param other Other range to swap data with.
|
||||
*/
|
||||
void swap(iterator_range& other) noexcept {
|
||||
using std::swap;
|
||||
swap(this->first, other.first);
|
||||
swap(this->second, other.second);
|
||||
}
|
||||
|
||||
}; // struct iterator_range
|
||||
|
||||
/**
|
||||
* Swap two iterator_ranges.
|
||||
*
|
||||
* @param lhs First range.
|
||||
* @param rhs Second range.
|
||||
*/
|
||||
template <typename T>
|
||||
inline void swap(iterator_range<T>& lhs, iterator_range<T>& rhs) noexcept {
|
||||
lhs.swap(rhs);
|
||||
}
|
||||
|
||||
/**
 * A forward iterator used for accessing packed repeated fields of fixed
 * length (fixed32, sfixed32, float, double).
 */
template <typename T>
class const_fixed_iterator {

    /// Pointer to current iterator position
    const char* m_data;

    /// Pointer to end iterator position
    const char* m_end;

public:

    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    const_fixed_iterator() noexcept :
        m_data(nullptr),
        m_end(nullptr) {
    }

    const_fixed_iterator(const char* data, const char* end) noexcept :
        m_data(data),
        m_end(end) {
    }

    const_fixed_iterator(const const_fixed_iterator&) noexcept = default;
    const_fixed_iterator(const_fixed_iterator&&) noexcept = default;

    const_fixed_iterator& operator=(const const_fixed_iterator&) noexcept = default;
    const_fixed_iterator& operator=(const_fixed_iterator&&) noexcept = default;

    ~const_fixed_iterator() noexcept = default;

    value_type operator*() const {
        value_type result;
        std::memcpy(&result, m_data, sizeof(value_type));
#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN
        detail::byteswap_inplace(&result);
#endif
        return result;
    }

    const_fixed_iterator& operator++() {
        m_data += sizeof(value_type);
        return *this;
    }

    const_fixed_iterator operator++(int) {
        const const_fixed_iterator tmp(*this);
        ++(*this);
        return tmp;
    }

    bool operator==(const const_fixed_iterator& rhs) const noexcept {
        return m_data == rhs.m_data && m_end == rhs.m_end;
    }

    bool operator!=(const const_fixed_iterator& rhs) const noexcept {
        return !(*this == rhs);
    }

}; // class const_fixed_iterator
/**
 * A forward iterator used for accessing packed repeated varint fields
 * (int32, uint32, int64, uint64, bool, enum).
 */
template <typename T>
class const_varint_iterator {

protected:

    /// Pointer to current iterator position
    const char* m_data;

    /// Pointer to end iterator position
    const char* m_end;

public:

    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    const_varint_iterator() noexcept :
        m_data(nullptr),
        m_end(nullptr) {
    }

    const_varint_iterator(const char* data, const char* end) noexcept :
        m_data(data),
        m_end(end) {
    }

    const_varint_iterator(const const_varint_iterator&) noexcept = default;
    const_varint_iterator(const_varint_iterator&&) noexcept = default;

    const_varint_iterator& operator=(const const_varint_iterator&) noexcept = default;
    const_varint_iterator& operator=(const_varint_iterator&&) noexcept = default;

    ~const_varint_iterator() noexcept = default;

    value_type operator*() const {
        const char* d = m_data; // will be thrown away
        return static_cast<value_type>(decode_varint(&d, m_end));
    }

    const_varint_iterator& operator++() {
        skip_varint(&m_data, m_end);
        return *this;
    }

    const_varint_iterator operator++(int) {
        const const_varint_iterator tmp(*this);
        ++(*this);
        return tmp;
    }

    bool operator==(const const_varint_iterator& rhs) const noexcept {
        return m_data == rhs.m_data && m_end == rhs.m_end;
    }

    bool operator!=(const const_varint_iterator& rhs) const noexcept {
        return !(*this == rhs);
    }

}; // class const_varint_iterator
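Note that operator*() deliberately decodes from a throwaway copy of m_data, so dereferencing never advances the iterator, while operator++() skips the varint bytes without decoding them. A small sketch of what that means for manual iteration (the buffer contents are an assumption: the two varints 1 and 300):

#include <protozero/iterators.hpp>
#include <cassert>
#include <cstdint>

inline void varint_iterator_demo() {
    // Wire bytes for the varints 1 and 300 (0xAC 0x02 encodes 300).
    static const char buffer[] = {0x01, '\xac', 0x02};
    protozero::const_varint_iterator<std::uint32_t> it{buffer, buffer + sizeof(buffer)};
    protozero::const_varint_iterator<std::uint32_t> end{buffer + sizeof(buffer), buffer + sizeof(buffer)};

    assert(*it == 1);   // decode without advancing
    assert(*it == 1);   // still 1: operator* has no side effect
    ++it;               // skip_varint(): advances one byte
    assert(*it == 300);
    ++it;               // skip_varint(): advances two bytes
    assert(it == end);
}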
/**
 * A forward iterator used for accessing packed repeated svarint fields
 * (sint32, sint64).
 */
template <typename T>
class const_svarint_iterator : public const_varint_iterator<T> {

public:

    using iterator_category = std::forward_iterator_tag;
    using value_type = T;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    const_svarint_iterator() noexcept :
        const_varint_iterator<T>() {
    }

    const_svarint_iterator(const char* data, const char* end) noexcept :
        const_varint_iterator<T>(data, end) {
    }

    const_svarint_iterator(const const_svarint_iterator&) = default;
    const_svarint_iterator(const_svarint_iterator&&) = default;

    const_svarint_iterator& operator=(const const_svarint_iterator&) = default;
    const_svarint_iterator& operator=(const_svarint_iterator&&) = default;

    ~const_svarint_iterator() = default;

    value_type operator*() const {
        const char* d = this->m_data; // will be thrown away
        return static_cast<value_type>(decode_zigzag64(decode_varint(&d, this->m_end)));
    }

    const_svarint_iterator& operator++() {
        skip_varint(&this->m_data, this->m_end);
        return *this;
    }

    const_svarint_iterator operator++(int) {
        const const_svarint_iterator tmp(*this);
        ++(*this);
        return tmp;
    }

}; // class const_svarint_iterator

} // end namespace protozero

#endif // PROTOZERO_ITERATORS_HPP
@ -18,8 +18,8 @@ documentation.

#include <type_traits>

#include <protozero/types.hpp>
#include <protozero/pbf_writer.hpp>
#include <protozero/types.hpp>

namespace protozero {

@ -46,7 +46,7 @@ public:

    using enum_type = T;

    pbf_builder(std::string& data) noexcept :
    explicit pbf_builder(std::string& data) noexcept :
        pbf_writer(data) {
    }

@ -57,7 +57,7 @@ public:

    /// @cond INTERNAL
#define PROTOZERO_WRITER_WRAP_ADD_SCALAR(name, type) \
    inline void add_##name(T tag, type value) { \
    void add_##name(T tag, type value) { \
        pbf_writer::add_##name(pbf_tag_type(tag), value); \
    }

@ -79,38 +79,59 @@ public:
#undef PROTOZERO_WRITER_WRAP_ADD_SCALAR
    /// @endcond

    inline void add_bytes(T tag, const char* value, std::size_t size) {
    void add_bytes(T tag, const char* value, std::size_t size) {
        pbf_writer::add_bytes(pbf_tag_type(tag), value, size);
    }

    inline void add_bytes(T tag, const std::string& value) {
    void add_bytes(T tag, const data_view& value) {
        pbf_writer::add_bytes(pbf_tag_type(tag), value);
    }

    inline void add_string(T tag, const char* value, std::size_t size) {
    void add_bytes(T tag, const std::string& value) {
        pbf_writer::add_bytes(pbf_tag_type(tag), value);
    }

    void add_bytes(T tag, const char* value) {
        pbf_writer::add_bytes(pbf_tag_type(tag), value);
    }

    template <typename... Ts>
    void add_bytes_vectored(T tag, Ts&&... values) {
        pbf_writer::add_bytes_vectored(pbf_tag_type(tag), std::forward<Ts>(values)...);
    }

    void add_string(T tag, const char* value, std::size_t size) {
        pbf_writer::add_string(pbf_tag_type(tag), value, size);
    }

    inline void add_string(T tag, const std::string& value) {
    void add_string(T tag, const data_view& value) {
        pbf_writer::add_string(pbf_tag_type(tag), value);
    }

    inline void add_string(T tag, const char* value) {
    void add_string(T tag, const std::string& value) {
        pbf_writer::add_string(pbf_tag_type(tag), value);
    }

    inline void add_message(T tag, const char* value, std::size_t size) {
    void add_string(T tag, const char* value) {
        pbf_writer::add_string(pbf_tag_type(tag), value);
    }

    void add_message(T tag, const char* value, std::size_t size) {
        pbf_writer::add_message(pbf_tag_type(tag), value, size);
    }

    inline void add_message(T tag, const std::string& value) {
    void add_message(T tag, const data_view& value) {
        pbf_writer::add_message(pbf_tag_type(tag), value);
    }

    void add_message(T tag, const std::string& value) {
        pbf_writer::add_message(pbf_tag_type(tag), value);
    }

    /// @cond INTERNAL
#define PROTOZERO_WRITER_WRAP_ADD_PACKED(name) \
    template <typename InputIterator> \
    inline void add_packed_##name(T tag, InputIterator first, InputIterator last) { \
    void add_packed_##name(T tag, InputIterator first, InputIterator last) { \
        pbf_writer::add_packed_##name(pbf_tag_type(tag), first, last); \
    }

@ -132,7 +153,7 @@ public:
#undef PROTOZERO_WRITER_WRAP_ADD_PACKED
    /// @endcond

};
}; // class pbf_builder

} // end namespace protozero
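pbf_builder is a thin wrapper that casts a strongly typed tag enum to pbf_tag_type before forwarding to pbf_writer, so field numbers get compile-time names at no runtime cost. A minimal sketch of the intended pattern (the Example message and its field numbers are assumptions for illustration):

#include <protozero/pbf_builder.hpp>
#include <string>

// Hypothetical message: field 1 = id (uint64), field 2 = name (string).
enum class ExampleTag : protozero::pbf_tag_type {
    id   = 1,
    name = 2
};

inline std::string build_example() {
    std::string buffer;
    protozero::pbf_builder<ExampleTag> builder{buffer};
    builder.add_uint64(ExampleTag::id, 17);       // no raw field numbers
    builder.add_string(ExampleTag::name, "foo");  // tag type checked at compile time
    return buffer;
}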
@ -13,7 +13,7 @@ documentation.
/**
 * @file pbf_message.hpp
 *
 * @brief Contains the pbf_message class.
 * @brief Contains the pbf_message template class.
 */

#include <type_traits>

@ -75,19 +75,23 @@ public:
        pbf_reader(std::forward<Args>(args)...) {
    }

    inline bool next() {
    bool next() {
        return pbf_reader::next();
    }

    inline bool next(T tag) {
        return pbf_reader::next(pbf_tag_type(tag));
    bool next(T next_tag) {
        return pbf_reader::next(pbf_tag_type(next_tag));
    }

    inline T tag() const noexcept {
    bool next(T next_tag, pbf_wire_type type) {
        return pbf_reader::next(pbf_tag_type(next_tag), type);
    }

    T tag() const noexcept {
        return T(pbf_reader::tag());
    }

};
}; // class pbf_message

} // end namespace protozero
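pbf_message is the reading-side counterpart of pbf_builder: next() and tag() work in terms of the same tag enum. A sketch of decoding the message produced above (same assumed ExampleTag layout):

#include <protozero/pbf_message.hpp>
#include <cstdint>
#include <string>

inline void read_example(const std::string& buffer) {
    protozero::pbf_message<ExampleTag> message{buffer};
    std::uint64_t id = 0;
    std::string name;
    while (message.next()) {
        switch (message.tag()) {
            case ExampleTag::id:
                id = message.get_uint64();
                break;
            case ExampleTag::name:
                name = message.get_string();
                break;
            default:
                message.skip(); // ignore unknown fields
        }
    }
    (void)id;
    (void)name;
}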
File diff suppressed because it is too large
@ -22,6 +22,7 @@ documentation.
#include <iterator>
#include <limits>
#include <string>
#include <utility>

#include <protozero/config.hpp>
#include <protozero/types.hpp>
@ -68,38 +69,35 @@ class pbf_writer {
    // parent to the position where the data of the submessage is written to.
    std::size_t m_pos = 0;

    inline void add_varint(uint64_t value) {
    void add_varint(uint64_t value) {
        protozero_assert(m_pos == 0 && "you can't add fields to a parent pbf_writer if there is an existing pbf_writer for a submessage");
        protozero_assert(m_data);
        write_varint(std::back_inserter(*m_data), value);
    }

    inline void add_field(pbf_tag_type tag, pbf_wire_type type) {
    void add_field(pbf_tag_type tag, pbf_wire_type type) {
        protozero_assert(((tag > 0 && tag < 19000) || (tag > 19999 && tag <= ((1 << 29) - 1))) && "tag out of range");
        uint32_t b = (tag << 3) | uint32_t(type);
        const uint32_t b = (tag << 3) | uint32_t(type);
        add_varint(b);
    }

    inline void add_tagged_varint(pbf_tag_type tag, uint64_t value) {
    void add_tagged_varint(pbf_tag_type tag, uint64_t value) {
        add_field(tag, pbf_wire_type::varint);
        add_varint(value);
    }

    template <typename T>
    inline void add_fixed(T value) {
    void add_fixed(T value) {
        protozero_assert(m_pos == 0 && "you can't add fields to a parent pbf_writer if there is an existing pbf_writer for a submessage");
        protozero_assert(m_data);
#if PROTOZERO_BYTE_ORDER == PROTOZERO_LITTLE_ENDIAN
        m_data->append(reinterpret_cast<const char*>(&value), sizeof(T));
#else
        auto size = m_data->size();
        m_data->resize(size + sizeof(T));
        byteswap<sizeof(T)>(reinterpret_cast<const char*>(&value), const_cast<char*>(m_data->data() + size));
#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN
        detail::byteswap_inplace(&value);
#endif
        m_data->append(reinterpret_cast<const char*>(&value), sizeof(T));
    }

    template <typename T, typename It>
    inline void add_packed_fixed(pbf_tag_type tag, It first, It last, std::input_iterator_tag) {
    void add_packed_fixed(pbf_tag_type tag, It first, It last, std::input_iterator_tag) {
        if (first == last) {
            return;
        }
@ -112,12 +110,12 @@ class pbf_writer {
    }

    template <typename T, typename It>
    inline void add_packed_fixed(pbf_tag_type tag, It first, It last, std::forward_iterator_tag) {
    void add_packed_fixed(pbf_tag_type tag, It first, It last, std::forward_iterator_tag) {
        if (first == last) {
            return;
        }

        auto length = std::distance(first, last);
        const auto length = std::distance(first, last);
        add_length_varint(tag, sizeof(T) * pbf_length_type(length));
        reserve(sizeof(T) * std::size_t(length));

@ -127,7 +125,7 @@ class pbf_writer {
    }

    template <typename It>
    inline void add_packed_varint(pbf_tag_type tag, It first, It last) {
    void add_packed_varint(pbf_tag_type tag, It first, It last) {
        if (first == last) {
            return;
        }
@ -140,7 +138,7 @@ class pbf_writer {
    }

    template <typename It>
    inline void add_packed_svarint(pbf_tag_type tag, It first, It last) {
    void add_packed_svarint(pbf_tag_type tag, It first, It last) {
        if (first == last) {
            return;
        }
@ -155,14 +153,18 @@ class pbf_writer {
    // The number of bytes to reserve for the varint holding the length of
    // a length-delimited field. The length has to fit into pbf_length_type,
    // and a varint needs 8 bit for every 7 bit.
    static const int reserve_bytes = sizeof(pbf_length_type) * 8 / 7 + 1;
    enum constant_reserve_bytes : int {
        reserve_bytes = sizeof(pbf_length_type) * 8 / 7 + 1
    };

    // If m_rollback_pos is set to this special value, it means that when
    // the submessage is closed, nothing needs to be done, because the length
    // of the submessage has already been written correctly.
    static const std::size_t size_is_known = std::numeric_limits<std::size_t>::max();
    enum constant_size_is_known : std::size_t {
        size_is_known = std::numeric_limits<std::size_t>::max()
    };

    inline void open_submessage(pbf_tag_type tag, std::size_t size) {
    void open_submessage(pbf_tag_type tag, std::size_t size) {
        protozero_assert(m_pos == 0);
        protozero_assert(m_data);
        if (size == 0) {
@ -177,7 +179,7 @@ class pbf_writer {
        m_pos = m_data->size();
    }

    inline void rollback_submessage() {
    void rollback_submessage() {
        protozero_assert(m_pos != 0);
        protozero_assert(m_rollback_pos != size_is_known);
        protozero_assert(m_data);
@ -185,20 +187,20 @@ class pbf_writer {
        m_pos = 0;
    }

    inline void commit_submessage() {
    void commit_submessage() {
        protozero_assert(m_pos != 0);
        protozero_assert(m_rollback_pos != size_is_known);
        protozero_assert(m_data);
        auto length = pbf_length_type(m_data->size() - m_pos);
        const auto length = pbf_length_type(m_data->size() - m_pos);

        protozero_assert(m_data->size() >= m_pos - reserve_bytes);
        auto n = write_varint(m_data->begin() + long(m_pos) - reserve_bytes, length);
        const auto n = write_varint(m_data->begin() + long(m_pos) - reserve_bytes, length);

        m_data->erase(m_data->begin() + long(m_pos) - reserve_bytes + n, m_data->begin() + long(m_pos));
        m_pos = 0;
    }

    inline void close_submessage() {
    void close_submessage() {
        protozero_assert(m_data);
        if (m_pos == 0 || m_rollback_pos == size_is_known) {
            return;
@ -210,7 +212,7 @@ class pbf_writer {
        }
    }

    inline void add_length_varint(pbf_tag_type tag, pbf_length_type length) {
    void add_length_varint(pbf_tag_type tag, pbf_length_type length) {
        add_field(tag, pbf_wire_type::length_delimited);
        add_varint(length);
    }
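The commit path explains the reserve_bytes arithmetic: with pbf_length_type being a 32 bit type, reserve_bytes is 4 * 8 / 7 + 1 = 5, so open_submessage() leaves five placeholder bytes, commit_submessage() writes the real length varint (n bytes) over the front of that gap, and the erase() call removes the 5 - n bytes that were not needed. Seen from the outside, nesting is driven entirely by the submessage constructor; a minimal sketch (the field numbers are assumptions):

#include <protozero/pbf_writer.hpp>
#include <string>

inline void write_nested() {
    std::string buffer;
    protozero::pbf_writer outer{buffer};
    outer.add_uint32(1, 42);              // scalar field on the outer message
    {
        // Opens a submessage in field 2; the length placeholder is
        // patched and trimmed when `inner` goes out of scope.
        protozero::pbf_writer inner{outer, 2};
        inner.add_string(1, "nested");
    }
    outer.add_uint32(3, 7);               // safe again after inner is closed
}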
@ -222,20 +224,18 @@ public:
     * stores a reference to that string and adds all data to it. The string
     * doesn't have to be empty. The pbf_writer will just append data.
     */
    inline explicit pbf_writer(std::string& data) noexcept :
    explicit pbf_writer(std::string& data) noexcept :
        m_data(&data),
        m_parent_writer(nullptr),
        m_pos(0) {
        m_parent_writer(nullptr) {
    }

    /**
     * Create a writer without a data store. In this form the writer can not
     * be used!
     */
    inline pbf_writer() noexcept :
    pbf_writer() noexcept :
        m_data(nullptr),
        m_parent_writer(nullptr),
        m_pos(0) {
        m_parent_writer(nullptr) {
    }

    /**
@ -248,10 +248,9 @@ public:
     * Setting this allows some optimizations but is only possible in
     * a few very specific cases.
     */
    inline pbf_writer(pbf_writer& parent_writer, pbf_tag_type tag, std::size_t size=0) :
    pbf_writer(pbf_writer& parent_writer, pbf_tag_type tag, std::size_t size=0) :
        m_data(parent_writer.m_data),
        m_parent_writer(&parent_writer),
        m_pos(0) {
        m_parent_writer(&parent_writer) {
        m_parent_writer->open_submessage(tag, size);
    }

@ -262,17 +261,30 @@ public:
    pbf_writer& operator=(const pbf_writer&) noexcept = default;

    /// A pbf_writer object can be moved
    inline pbf_writer(pbf_writer&&) noexcept = default;
    pbf_writer(pbf_writer&&) noexcept = default;

    /// A pbf_writer object can be moved
    inline pbf_writer& operator=(pbf_writer&&) noexcept = default;
    pbf_writer& operator=(pbf_writer&&) noexcept = default;

    inline ~pbf_writer() {
    ~pbf_writer() {
        if (m_parent_writer) {
            m_parent_writer->close_submessage();
        }
    }

    /**
     * Swap the contents of this object with the other.
     *
     * @param other Other object to swap data with.
     */
    void swap(pbf_writer& other) noexcept {
        using std::swap;
        swap(m_data, other.m_data);
        swap(m_parent_writer, other.m_parent_writer);
        swap(m_rollback_pos, other.m_rollback_pos);
        swap(m_pos, other.m_pos);
    }

    /**
     * Reserve size bytes in the underlying message store in addition to
     * whatever the message store already holds. So unlike
@ -286,7 +298,14 @@ public:
        m_data->reserve(m_data->size() + size);
    }

    inline void rollback() {
    /**
     * Cancel writing of this submessage. The complete submessage will be
     * removed as if it was never created and no fields were added.
     *
     * @pre Must be a pbf_writer of a submessage, ie one opened with the
     * pbf_writer constructor taking a parent message.
     */
    void rollback() {
        protozero_assert(m_parent_writer && "you can't call rollback() on a pbf_writer without a parent");
        protozero_assert(m_pos == 0 && "you can't call rollback() on a pbf_writer that has an open nested submessage");
        m_parent_writer->rollback_submessage();
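rollback() is the escape hatch for conditional encoding: start a submessage optimistically, then discard it without a trace if it turns out to be unwanted. A sketch of that pattern (the validity predicate and field numbers are assumptions):

#include <protozero/pbf_writer.hpp>

// Append the point as a submessage in field 2 only if it is valid;
// otherwise remove every byte the submessage wrote, including its tag.
inline void maybe_add_point(protozero::pbf_writer& parent, double x, double y) {
    protozero::pbf_writer point{parent, 2};
    point.add_double(1, x);
    point.add_double(2, y);
    if (!(x >= 0 && y >= 0)) {   // hypothetical validity check
        point.rollback();        // as if the submessage never existed
    }
}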
@ -304,7 +323,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_bool(pbf_tag_type tag, bool value) {
    void add_bool(pbf_tag_type tag, bool value) {
        add_field(tag, pbf_wire_type::varint);
        protozero_assert(m_pos == 0 && "you can't add fields to a parent pbf_writer if there is an existing pbf_writer for a submessage");
        protozero_assert(m_data);
@ -317,7 +336,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_enum(pbf_tag_type tag, int32_t value) {
    void add_enum(pbf_tag_type tag, int32_t value) {
        add_tagged_varint(tag, uint64_t(value));
    }

@ -327,7 +346,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_int32(pbf_tag_type tag, int32_t value) {
    void add_int32(pbf_tag_type tag, int32_t value) {
        add_tagged_varint(tag, uint64_t(value));
    }

@ -337,7 +356,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_sint32(pbf_tag_type tag, int32_t value) {
    void add_sint32(pbf_tag_type tag, int32_t value) {
        add_tagged_varint(tag, encode_zigzag32(value));
    }

@ -347,7 +366,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_uint32(pbf_tag_type tag, uint32_t value) {
    void add_uint32(pbf_tag_type tag, uint32_t value) {
        add_tagged_varint(tag, value);
    }

@ -357,7 +376,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_int64(pbf_tag_type tag, int64_t value) {
    void add_int64(pbf_tag_type tag, int64_t value) {
        add_tagged_varint(tag, uint64_t(value));
    }

@ -367,7 +386,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_sint64(pbf_tag_type tag, int64_t value) {
    void add_sint64(pbf_tag_type tag, int64_t value) {
        add_tagged_varint(tag, encode_zigzag64(value));
    }

@ -377,7 +396,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_uint64(pbf_tag_type tag, uint64_t value) {
    void add_uint64(pbf_tag_type tag, uint64_t value) {
        add_tagged_varint(tag, value);
    }

@ -387,7 +406,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_fixed32(pbf_tag_type tag, uint32_t value) {
    void add_fixed32(pbf_tag_type tag, uint32_t value) {
        add_field(tag, pbf_wire_type::fixed32);
        add_fixed<uint32_t>(value);
    }
@ -398,7 +417,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_sfixed32(pbf_tag_type tag, int32_t value) {
    void add_sfixed32(pbf_tag_type tag, int32_t value) {
        add_field(tag, pbf_wire_type::fixed32);
        add_fixed<int32_t>(value);
    }
@ -409,7 +428,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_fixed64(pbf_tag_type tag, uint64_t value) {
    void add_fixed64(pbf_tag_type tag, uint64_t value) {
        add_field(tag, pbf_wire_type::fixed64);
        add_fixed<uint64_t>(value);
    }
@ -420,7 +439,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_sfixed64(pbf_tag_type tag, int64_t value) {
    void add_sfixed64(pbf_tag_type tag, int64_t value) {
        add_field(tag, pbf_wire_type::fixed64);
        add_fixed<int64_t>(value);
    }
@ -431,7 +450,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_float(pbf_tag_type tag, float value) {
    void add_float(pbf_tag_type tag, float value) {
        add_field(tag, pbf_wire_type::fixed32);
        add_fixed<float>(value);
    }
@ -442,7 +461,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_double(pbf_tag_type tag, double value) {
    void add_double(pbf_tag_type tag, double value) {
        add_field(tag, pbf_wire_type::fixed64);
        add_fixed<double>(value);
    }
@ -454,7 +473,7 @@ public:
     * @param value Pointer to value to be written
     * @param size Number of bytes to be written
     */
    inline void add_bytes(pbf_tag_type tag, const char* value, std::size_t size) {
    void add_bytes(pbf_tag_type tag, const char* value, std::size_t size) {
        protozero_assert(m_pos == 0 && "you can't add fields to a parent pbf_writer if there is an existing pbf_writer for a submessage");
        protozero_assert(m_data);
        protozero_assert(size <= std::numeric_limits<pbf_length_type>::max());
@ -468,10 +487,62 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_bytes(pbf_tag_type tag, const std::string& value) {
    void add_bytes(pbf_tag_type tag, const data_view& value) {
        add_bytes(tag, value.data(), value.size());
    }

    /**
     * Add "bytes" field to data.
     *
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    void add_bytes(pbf_tag_type tag, const std::string& value) {
        add_bytes(tag, value.data(), value.size());
    }

    /**
     * Add "bytes" field to data. Bytes from the value are written until
     * a null byte is encountered. The null byte is not added.
     *
     * @param tag Tag (field number) of the field
     * @param value Pointer to zero-delimited value to be written
     */
    void add_bytes(pbf_tag_type tag, const char* value) {
        add_bytes(tag, value, std::strlen(value));
    }

    /**
     * Add "bytes" field to data using vectored input. All the data in the
     * 2nd and further arguments is "concatenated" with only a single copy
     * into the final buffer.
     *
     * This will work with objects of any type supporting the data() and
     * size() methods like std::string or protozero::data_view.
     *
     * Example:
     * @code
     * std::string data1 = "abc";
     * std::string data2 = "xyz";
     * writer.add_bytes_vectored(1, data1, data2);
     * @endcode
     *
     * @tparam Ts List of types supporting data() and size() methods.
     * @param tag Tag (field number) of the field
     * @param values List of objects of types Ts with data to be appended.
     */
    template <typename... Ts>
    void add_bytes_vectored(pbf_tag_type tag, Ts&&... values) {
        protozero_assert(m_pos == 0 && "you can't add fields to a parent pbf_writer if there is an existing pbf_writer for a submessage");
        protozero_assert(m_data);
        size_t sum_size = 0;
        (void)std::initializer_list<size_t>{sum_size += values.size()...};
        protozero_assert(sum_size <= std::numeric_limits<pbf_length_type>::max());
        add_length_varint(tag, pbf_length_type(sum_size));
        m_data->reserve(m_data->size() + sum_size);
        (void)std::initializer_list<int>{(m_data->append(values.data(), values.size()), 0)...};
    }
    /**
     * Add "string" field to data.
     *
@ -479,7 +550,7 @@ public:
     * @param value Pointer to value to be written
     * @param size Number of bytes to be written
     */
    inline void add_string(pbf_tag_type tag, const char* value, std::size_t size) {
    void add_string(pbf_tag_type tag, const char* value, std::size_t size) {
        add_bytes(tag, value, size);
    }

@ -489,7 +560,17 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    inline void add_string(pbf_tag_type tag, const std::string& value) {
    void add_string(pbf_tag_type tag, const data_view& value) {
        add_bytes(tag, value.data(), value.size());
    }

    /**
     * Add "string" field to data.
     *
     * @param tag Tag (field number) of the field
     * @param value Value to be written
     */
    void add_string(pbf_tag_type tag, const std::string& value) {
        add_bytes(tag, value.data(), value.size());
    }

@ -500,7 +581,7 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Pointer to value to be written
     */
    inline void add_string(pbf_tag_type tag, const char* value) {
    void add_string(pbf_tag_type tag, const char* value) {
        add_bytes(tag, value, std::strlen(value));
    }

@ -511,7 +592,7 @@ public:
     * @param value Pointer to message to be written
     * @param size Length of the message
     */
    inline void add_message(pbf_tag_type tag, const char* value, std::size_t size) {
    void add_message(pbf_tag_type tag, const char* value, std::size_t size) {
        add_bytes(tag, value, size);
    }

@ -521,7 +602,17 @@ public:
     * @param tag Tag (field number) of the field
     * @param value Value to be written. The value must be a complete message.
     */
    inline void add_message(pbf_tag_type tag, const std::string& value) {
    void add_message(pbf_tag_type tag, const data_view& value) {
        add_bytes(tag, value.data(), value.size());
    }

    /**
     * Add "message" field to data.
     *
     * @param tag Tag (field number) of the field
     * @param value Value to be written. The value must be a complete message.
     */
    void add_message(pbf_tag_type tag, const std::string& value) {
        add_bytes(tag, value.data(), value.size());
    }
@ -535,126 +626,126 @@ public:
    /**
     * Add "repeated packed bool" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to bool.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_bool(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_bool(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed enum" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_enum(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_enum(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed int32" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_int32(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_int32(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed sint32" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_sint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_sint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_svarint(tag, first, last);
    }

    /**
     * Add "repeated packed uint32" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to uint32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_uint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_uint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed int64" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int64_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_int64(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_int64(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed sint64" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int64_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_sint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_sint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_svarint(tag, first, last);
    }

    /**
     * Add "repeated packed uint64" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to uint64_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_uint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_uint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_varint(tag, first, last);
    }

    /**
     * Add "repeated packed fixed32" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to uint32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_fixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_fixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<uint32_t, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -662,14 +753,14 @@ public:
    /**
     * Add "repeated packed sfixed32" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int32_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_sfixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_sfixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<int32_t, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -677,14 +768,14 @@ public:
    /**
     * Add "repeated packed fixed64" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to uint64_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_fixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_fixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<uint64_t, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -692,14 +783,14 @@ public:
    /**
     * Add "repeated packed sfixed64" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to int64_t.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_sfixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_sfixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<int64_t, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -707,14 +798,14 @@ public:
    /**
     * Add "repeated packed float" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to float.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_float(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_float(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<float, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -722,14 +813,14 @@ public:
    /**
     * Add "repeated packed double" field to data.
     *
     * @tparam InputIterator An type satisfying the InputIterator concept.
     * @tparam InputIterator A type satisfying the InputIterator concept.
     *         Dereferencing the iterator must yield a type assignable to double.
     * @param tag Tag (field number) of the field
     * @param first Iterator pointing to the beginning of the data
     * @param last Iterator pointing one past the end of data
     */
    template <typename InputIterator>
    inline void add_packed_double(pbf_tag_type tag, InputIterator first, InputIterator last) {
    void add_packed_double(pbf_tag_type tag, InputIterator first, InputIterator last) {
        add_packed_fixed<double, InputIterator>(tag, first, last,
            typename std::iterator_traits<InputIterator>::iterator_category());
    }
@ -742,6 +833,16 @@ public:
}; // class pbf_writer

/**
 * Swap two pbf_writer objects.
 *
 * @param lhs First object.
 * @param rhs Second object.
 */
inline void swap(pbf_writer& lhs, pbf_writer& rhs) noexcept {
    lhs.swap(rhs);
}

namespace detail {

    class packed_field {
@ -752,6 +853,12 @@ namespace detail {

    public:

        packed_field(const packed_field&) = delete;
        packed_field& operator=(const packed_field&) = delete;

        packed_field(packed_field&&) = default;
        packed_field& operator=(packed_field&&) = default;

        packed_field(pbf_writer& parent_writer, pbf_tag_type tag) :
            m_writer(parent_writer, tag) {
        }
@ -771,12 +878,14 @@ namespace detail {

    public:

        packed_field_fixed(pbf_writer& parent_writer, pbf_tag_type tag) :
            packed_field(parent_writer, tag) {
        template <typename P>
        packed_field_fixed(pbf_writer& parent_writer, P tag) :
            packed_field(parent_writer, static_cast<pbf_tag_type>(tag)) {
        }

        packed_field_fixed(pbf_writer& parent_writer, pbf_tag_type tag, std::size_t size) :
            packed_field(parent_writer, tag, size * sizeof(T)) {
        template <typename P>
        packed_field_fixed(pbf_writer& parent_writer, P tag, std::size_t size) :
            packed_field(parent_writer, static_cast<pbf_tag_type>(tag), size * sizeof(T)) {
        }

        void add_element(T value) {
@ -790,8 +899,9 @@ namespace detail {

    public:

        packed_field_varint(pbf_writer& parent_writer, pbf_tag_type tag) :
            packed_field(parent_writer, tag) {
        template <typename P>
        packed_field_varint(pbf_writer& parent_writer, P tag) :
            packed_field(parent_writer, static_cast<pbf_tag_type>(tag)) {
        }

        void add_element(T value) {
@ -805,8 +915,9 @@ namespace detail {

    public:

        packed_field_svarint(pbf_writer& parent_writer, pbf_tag_type tag) :
            packed_field(parent_writer, tag) {
        template <typename P>
        packed_field_svarint(pbf_writer& parent_writer, P tag) :
            packed_field(parent_writer, static_cast<pbf_tag_type>(tag)) {
        }

        void add_element(T value) {
@ -817,19 +928,46 @@ namespace detail {

} // end namespace detail

/// Class for generating packed repeated bool fields.
using packed_field_bool = detail::packed_field_varint<bool>;

/// Class for generating packed repeated enum fields.
using packed_field_enum = detail::packed_field_varint<int32_t>;

/// Class for generating packed repeated int32 fields.
using packed_field_int32 = detail::packed_field_varint<int32_t>;

/// Class for generating packed repeated sint32 fields.
using packed_field_sint32 = detail::packed_field_svarint<int32_t>;

/// Class for generating packed repeated uint32 fields.
using packed_field_uint32 = detail::packed_field_varint<uint32_t>;

/// Class for generating packed repeated int64 fields.
using packed_field_int64 = detail::packed_field_varint<int64_t>;

/// Class for generating packed repeated sint64 fields.
using packed_field_sint64 = detail::packed_field_svarint<int64_t>;

/// Class for generating packed repeated uint64 fields.
using packed_field_uint64 = detail::packed_field_varint<uint64_t>;

/// Class for generating packed repeated fixed32 fields.
using packed_field_fixed32 = detail::packed_field_fixed<uint32_t>;

/// Class for generating packed repeated sfixed32 fields.
using packed_field_sfixed32 = detail::packed_field_fixed<int32_t>;

/// Class for generating packed repeated fixed64 fields.
using packed_field_fixed64 = detail::packed_field_fixed<uint64_t>;

/// Class for generating packed repeated sfixed64 fields.
using packed_field_sfixed64 = detail::packed_field_fixed<int64_t>;

/// Class for generating packed repeated float fields.
using packed_field_float = detail::packed_field_fixed<float>;

/// Class for generating packed repeated double fields.
using packed_field_double = detail::packed_field_fixed<double>;

} // end namespace protozero
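The packed_field_* helpers stream elements into a packed field one at a time, without needing an iterator pair up front; the field is opened as a submessage of the writer and its length is patched when the helper goes out of scope. A sketch (field number 4 is an assumption):

#include <protozero/pbf_writer.hpp>
#include <cstdint>
#include <string>
#include <vector>

inline void write_packed(const std::vector<std::uint32_t>& values) {
    std::string buffer;
    protozero::pbf_writer writer{buffer};
    {
        // Opened here, closed (length patched) when `field` is destroyed.
        protozero::packed_field_uint32 field{writer, 4};
        for (auto v : values) {
            field.add_element(v);
        }
    }
}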
@ -16,33 +16,190 @@ documentation.
 * @brief Contains the declaration of low-level types used in the pbf format.
 */

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>

#include <protozero/config.hpp>

namespace protozero {

/**
 * The type used for field tags (field numbers).
 */
typedef uint32_t pbf_tag_type;
/**
 * The type used for field tags (field numbers).
 */
using pbf_tag_type = uint32_t;

/**
 * The type used to encode type information.
 * See the table on
 * https://developers.google.com/protocol-buffers/docs/encoding
 */
enum class pbf_wire_type : uint32_t {
    varint = 0,           // int32/64, uint32/64, sint32/64, bool, enum
    fixed64 = 1,          // fixed64, sfixed64, double
    length_delimited = 2, // string, bytes, embedded messages,
                          // packed repeated fields
    fixed32 = 5,          // fixed32, sfixed32, float
    unknown = 99          // used for default setting in this library
};

/**
 * Get the tag and wire type of the current field in one integer suitable
 * for comparison with a switch statement.
 *
 * See pbf_reader.tag_and_type() for an example how to use this.
 */
template <typename T>
constexpr inline uint32_t tag_and_type(T tag, pbf_wire_type wire_type) noexcept {
    return (static_cast<uint32_t>(static_cast<pbf_tag_type>(tag)) << 3) | static_cast<uint32_t>(wire_type);
}
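tag_and_type() packs the field number and wire type the same way the wire format itself does, as (tag << 3) | wire_type, which makes it usable as a case label when a reader must dispatch on both at once via the pbf_reader::tag_and_type() accessor referenced in the comment above. A sketch against the assumed ExampleTag layout from the earlier sketches:

#include <protozero/pbf_reader.hpp>

inline void dispatch(protozero::pbf_reader& reader) {
    while (reader.next()) {
        switch (reader.tag_and_type()) {
            case protozero::tag_and_type(1, protozero::pbf_wire_type::varint):
                reader.get_uint64();
                break;
            case protozero::tag_and_type(2, protozero::pbf_wire_type::length_delimited):
                reader.get_string();
                break;
            default:
                reader.skip();  // unknown field or unexpected wire type
        }
    }
}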
/**
 * The type used for length values, such as the length of a field.
 */
using pbf_length_type = uint32_t;

#ifdef PROTOZERO_USE_VIEW
using data_view = PROTOZERO_USE_VIEW;
#else

/**
 * Holds a pointer to some data and a length.
 *
 * This class is supposed to be compatible with the std::string_view
 * that will be available in C++17.
 */
class data_view {

    const char* m_data;
    std::size_t m_size;

public:

    /**
     * The type used to encode type information.
     * See the table on
     * https://developers.google.com/protocol-buffers/docs/encoding
     * Default constructor. Construct an empty data_view.
     */
    enum class pbf_wire_type : uint32_t {
        varint = 0,           // int32/64, uint32/64, sint32/64, bool, enum
        fixed64 = 1,          // fixed64, sfixed64, double
        length_delimited = 2, // string, bytes, embedded messages,
                              // packed repeated fields
        fixed32 = 5,          // fixed32, sfixed32, float
        unknown = 99          // used for default setting in this library
    };
    constexpr data_view() noexcept
        : m_data(nullptr),
          m_size(0) {
    }

    /**
     * The type used for length values, such as the length of a field.
     * Create data_view from pointer and size.
     *
     * @param ptr Pointer to the data.
     * @param length Length of the data.
     */
    typedef uint32_t pbf_length_type;
    constexpr data_view(const char* ptr, std::size_t length) noexcept
        : m_data(ptr),
          m_size(length) {
    }

    /**
     * Create data_view from string.
     *
     * @param str String with the data.
     */
    data_view(const std::string& str) noexcept
        : m_data(str.data()),
          m_size(str.size()) {
    }

    /**
     * Create data_view from zero-terminated string.
     *
     * @param ptr Pointer to the data.
     */
    data_view(const char* ptr) noexcept
        : m_data(ptr),
          m_size(std::strlen(ptr)) {
    }

    /**
     * Swap the contents of this object with the other.
     *
     * @param other Other object to swap data with.
     */
    void swap(data_view& other) noexcept {
        using std::swap;
        swap(m_data, other.m_data);
        swap(m_size, other.m_size);
    }

    /// Return pointer to data.
    constexpr const char* data() const noexcept {
        return m_data;
    }

    /// Return length of data in bytes.
    constexpr std::size_t size() const noexcept {
        return m_size;
    }

    /// Returns true if size is 0.
    constexpr bool empty() const noexcept {
        return m_size == 0;
    }

    /**
     * Convert data view to string.
     *
     * @pre Must not be default constructed data_view.
     */
    std::string to_string() const {
        protozero_assert(m_data);
        return std::string{m_data, m_size};
    }

    /**
     * Convert data view to string.
     *
     * @pre Must not be default constructed data_view.
     */
    explicit operator std::string() const {
        protozero_assert(m_data);
        return std::string{m_data, m_size};
    }

}; // class data_view

/**
 * Swap two data_view objects.
 *
 * @param lhs First object.
 * @param rhs Second object.
 */
inline void swap(data_view& lhs, data_view& rhs) noexcept {
    lhs.swap(rhs);
}

/**
 * Two data_view instances are equal if they have the same size and the
 * same content.
 *
 * @param lhs First object.
 * @param rhs Second object.
 */
inline bool operator==(const data_view& lhs, const data_view& rhs) noexcept {
    return lhs.size() == rhs.size() && std::equal(lhs.data(), lhs.data() + lhs.size(), rhs.data());
}

/**
 * Two data_view instances are not equal if they have different sizes or the
 * content differs.
 *
 * @param lhs First object.
 * @param rhs Second object.
 */
inline bool operator!=(const data_view& lhs, const data_view& rhs) noexcept {
    return !(lhs == rhs);
}

#endif


} // end namespace protozero
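data_view only points at memory it does not own, so the usual string_view caveats apply: the viewed buffer must outlive the view. A small sketch:

#include <protozero/types.hpp>
#include <cassert>
#include <string>

inline void data_view_demo() {
    const std::string owner = "hello";
    protozero::data_view view{owner};                // points into `owner`
    assert(view.size() == 5);
    assert(view == protozero::data_view{"hello"});   // compares size and content
    std::string copy = view.to_string();             // the only copying step
    assert(copy == owner);
}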
@ -23,13 +23,54 @@ documentation.
namespace protozero {

/**
 * The maximum length of a 64bit varint.
 * The maximum length of a 64 bit varint.
 */
constexpr const int8_t max_varint_length = sizeof(uint64_t) * 8 / 7 + 1;

// from https://github.com/facebook/folly/blob/master/folly/Varint.h
namespace detail {

    // from https://github.com/facebook/folly/blob/master/folly/Varint.h
    inline uint64_t decode_varint_impl(const char** data, const char* end) {
        const int8_t* begin = reinterpret_cast<const int8_t*>(*data);
        const int8_t* iend = reinterpret_cast<const int8_t*>(end);
        const int8_t* p = begin;
        uint64_t val = 0;

        if (iend - begin >= max_varint_length) {  // fast path
            do {
                int64_t b;
                b = *p++; val  = uint64_t((b & 0x7f)      ); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) <<  7); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 14); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 21); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 28); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 35); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 42); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 49); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 56); if (b >= 0) break;
                b = *p++; val |= uint64_t((b & 0x7f) << 63); if (b >= 0) break;
                throw varint_too_long_exception();
            } while (false);
        } else {
            int shift = 0;
            while (p != iend && *p < 0) {
                val |= uint64_t(*p++ & 0x7f) << shift;
                shift += 7;
            }
            if (p == iend) {
                throw end_of_buffer_exception();
            }
            val |= uint64_t(*p++) << shift;
        }

        *data = reinterpret_cast<const char*>(p);
        return val;
    }

} // end namespace detail

/**
 * Decode a 64bit varint.
 * Decode a 64 bit varint.
 *
 * Strong exception guarantee: if there is an exception the data pointer will
 * not be changed.
@ -39,55 +80,69 @@ constexpr const int8_t max_varint_length = sizeof(uint64_t) * 8 / 7 + 1;
 * @param[in] end Pointer one past the end of the input data.
 * @returns The decoded integer
 * @throws varint_too_long_exception if the varint is longer then the maximum
 *         length that would fit in a 64bit int. Usually this means your data
 *         length that would fit in a 64 bit int. Usually this means your data
 *         is corrupted or you are trying to read something as a varint that
 *         isn't.
 * @throws end_of_buffer_exception if the *end* of the buffer was reached
 *         before the end of the varint.
 */
inline uint64_t decode_varint(const char** data, const char* end) {
    const int8_t* begin = reinterpret_cast<const int8_t*>(*data);
    const int8_t* iend = reinterpret_cast<const int8_t*>(end);
    const int8_t* p = begin;
    uint64_t val = 0;

    if (iend - begin >= max_varint_length) {  // fast path
        do {
            int64_t b;
            b = *p++; val  = uint64_t((b & 0x7f)      ); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) <<  7); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 14); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 21); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 28); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 35); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 42); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 49); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 56); if (b >= 0) break;
            b = *p++; val |= uint64_t((b & 0x7f) << 63); if (b >= 0) break;
            throw varint_too_long_exception();
        } while (false);
    } else {
        int shift = 0;
        while (p != iend && *p < 0) {
            val |= uint64_t(*p++ & 0x7f) << shift;
            shift += 7;
        }
        if (p == iend) {
            throw end_of_buffer_exception();
        }
        val |= uint64_t(*p++) << shift;
    // If this is a one-byte varint, decode it here.
    if (end != *data && ((**data & 0x80) == 0)) {
        uint64_t val = uint64_t(**data);
        ++(*data);
        return val;
    }

    *data = reinterpret_cast<const char*>(p);
    return val;
    // If this varint is more than one byte, defer to complete implementation.
    return detail::decode_varint_impl(data, end);
}
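The split into an inlined one-byte fast path plus the out-of-line detail::decode_varint_impl() reflects how skewed real data is: most field tags and small values fit in one byte, recognizable by a clear high bit. As a worked example of the encoding itself, 300 = 0b100101100 is sent as 0xAC 0x02: 0xAC = 0b10101100 carries the low seven bits with the continuation bit set, and 0x02 carries the remaining bits. A sketch exercising both paths:

#include <protozero/varint.hpp>
#include <cassert>

inline void decode_demo() {
    static const char buffer[] = {0x05, '\xac', 0x02};
    const char* p = buffer;
    const char* end = buffer + sizeof(buffer);

    assert(protozero::decode_varint(&p, end) == 5);    // one-byte fast path
    assert(protozero::decode_varint(&p, end) == 300);  // multi-byte slow path
    assert(p == end);
}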
/**
|
||||
* Varint-encode a 64bit integer.
|
||||
* Skip over a varint.
|
||||
*
|
||||
* Strong exception guarantee: if there is an exception the data pointer will
|
||||
* not be changed.
|
||||
*
|
||||
* @param[in,out] data Pointer to pointer to the input data. After the function
|
||||
* returns this will point to the next data to be read.
|
||||
* @param[in] end Pointer one past the end of the input data.
|
||||
* @throws end_of_buffer_exception if the *end* of the buffer was reached
|
||||
* before the end of the varint.
|
||||
*/
|
||||
template <typename OutputIterator>
inline int write_varint(OutputIterator data, uint64_t value) {
int n=1;
inline void skip_varint(const char** data, const char* end) {
const int8_t* begin = reinterpret_cast<const int8_t*>(*data);
const int8_t* iend = reinterpret_cast<const int8_t*>(end);
const int8_t* p = begin;

while (p != iend && *p < 0) {
++p;
}

if (p >= begin + max_varint_length) {
throw varint_too_long_exception();
}

if (p == iend) {
throw end_of_buffer_exception();
}

++p;

*data = reinterpret_cast<const char*>(p);
}

/**
 * Varint encode a 64 bit integer.
 *
 * @tparam T An output iterator type.
 * @param data Output iterator the varint encoded value will be written to
 * byte by byte.
 * @param value The integer that will be encoded.
 * @throws Any exception thrown by increment or dereference operator on data.
 */
template <typename T>
inline int write_varint(T data, uint64_t value) {
int n = 1;

while (value >= 0x80) {
*data++ = char((value & 0x7f) | 0x80);
@@ -102,29 +157,29 @@ inline int write_varint(OutputIterator data, uint64_t value) {
/**
 * ZigZag encodes a 32 bit integer.
 */
inline uint32_t encode_zigzag32(int32_t value) noexcept {
inline constexpr uint32_t encode_zigzag32(int32_t value) noexcept {
return (static_cast<uint32_t>(value) << 1) ^ (static_cast<uint32_t>(value >> 31));
}

/**
 * ZigZag encodes a 64 bit integer.
 */
inline uint64_t encode_zigzag64(int64_t value) noexcept {
inline constexpr uint64_t encode_zigzag64(int64_t value) noexcept {
return (static_cast<uint64_t>(value) << 1) ^ (static_cast<uint64_t>(value >> 63));
}

/**
 * Decodes a 32 bit ZigZag-encoded integer.
 */
inline int32_t decode_zigzag32(uint32_t value) noexcept {
return int32_t(value >> 1) ^ -int32_t(value & 1);
inline constexpr int32_t decode_zigzag32(uint32_t value) noexcept {
return static_cast<int32_t>(value >> 1) ^ -static_cast<int32_t>(value & 1);
}

/**
 * Decodes a 64 bit ZigZag-encoded integer.
 */
inline int64_t decode_zigzag64(uint64_t value) noexcept {
return int64_t(value >> 1) ^ -int64_t(value & 1);
inline constexpr int64_t decode_zigzag64(uint64_t value) noexcept {
return static_cast<int64_t>(value >> 1) ^ -static_cast<int64_t>(value & 1);
}
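
As a quick sanity check (illustrative only, not part of the diff), the zigzag pair round-trips signed values through the unsigned encoding that the varint functions expect:

#include <protozero/varint.hpp>
#include <cassert>

int main() {
	// zigzag interleaves signed values: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
	assert(protozero::encode_zigzag32(-1) == 1u);
	assert(protozero::encode_zigzag32(1) == 2u);
	assert(protozero::decode_zigzag32(protozero::encode_zigzag32(-123)) == -123);
	assert(protozero::decode_zigzag64(protozero::encode_zigzag64(-5000000000LL)) == -5000000000LL);
	return 0;
}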

} // end namespace protozero

@@ -10,13 +10,25 @@ documentation.

*****************************************************************************/

#define PROTOZERO_VERSION_MAJOR 1
#define PROTOZERO_VERSION_MINOR 3
#define PROTOZERO_VERSION_PATCH 0
/**
 * @file version.hpp
 *
 * @brief Contains macros defining the protozero version.
 */

/// The major version number
#define PROTOZERO_VERSION_MAJOR 1

/// The minor version number
#define PROTOZERO_VERSION_MINOR 5

/// The patch number
#define PROTOZERO_VERSION_PATCH 2

/// The complete version number
#define PROTOZERO_VERSION_CODE (PROTOZERO_VERSION_MAJOR * 10000 + PROTOZERO_VERSION_MINOR * 100 + PROTOZERO_VERSION_PATCH)
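
For example, version 1.5.2 yields 1 * 10000 + 5 * 100 + 2 = 10502, so dependent code can gate on the upgrade at compile time:

#if PROTOZERO_VERSION_CODE >= 10502
// code that relies on protozero 1.5.2 or later
#endif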

#define PROTOZERO_VERSION_STRING "1.3.0"

/// Version number as string
#define PROTOZERO_VERSION_STRING "1.5.2"

#endif // PROTOZERO_VERSION_HPP
@@ -1 +0,0 @@
void write_raw_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf);
File diff suppressed because one or more lines are too long

3398	tests/join-population/merged-folder.mbtiles.json	Normal file
File diff suppressed because it is too large

BIN	tests/join-population/raw-merged-folder-compare/10/164/395.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/10/164/396.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/11/328/790.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/11/328/791.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/11/329/791.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/12/656/1581.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/3/1/3.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/4/2/6.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/5/5/12.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/6/10/24.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/7/20/49.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/8/40/98.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/8/41/98.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/8/41/99.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/9/81/197.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/9/82/197.pbf	Normal file	(binary file not shown)
BIN	tests/join-population/raw-merged-folder-compare/9/82/198.pbf	Normal file	(binary file not shown)
@@ -0,0 +1,12 @@
{
"name": "tests/join-population/macarthur.mbtiles + tests/join-population/macarthur2.mbtiles + tests/join-population/tabblock_06001420.mbtiles",
"description": "tests/join-population/tabblock_06001420.mbtiles",
"version": "2",
"minzoom": "0",
"maxzoom": "12",
"center": "-122.299805,37.892187,12",
"bounds": "-122.343750,37.695438,-122.104097,37.926868",
"type": "overlay",
"format": "pbf",
"json": "{\"vector_layers\": [ { \"id\": \"macarthur\", \"description\": \"\", \"minzoom\": 5, \"maxzoom\": 11, \"fields\": {\"FULLNAME\": \"String\", \"LINEARID\": \"String\", \"MTFCC\": \"String\", \"RTTYP\": \"String\"} }, { \"id\": \"tabblock_06001420\", \"description\": \"\", \"minzoom\": 3, \"maxzoom\": 12, \"fields\": {\"ALAND10\": \"Number\", \"AWATER10\": \"Number\", \"BLOCKCE10\": \"String\", \"COUNTYFP10\": \"String\", \"FUNCSTAT10\": \"String\", \"GEOID10\": \"String\", \"INTPTLAT10\": \"String\", \"INTPTLON10\": \"String\", \"MTFCC10\": \"String\", \"NAME10\": \"String\", \"STATEFP10\": \"String\", \"TRACTCE10\": \"String\", \"UACE10\": \"String\", \"UATYP10\": \"String\", \"UR10\": \"String\"} } ] }"
}
53	tests/knox/in.json	Normal file
File diff suppressed because one or more lines are too long

1144	tests/knox/out/-zg.json	Normal file
File diff suppressed because it is too large

1144	tests/knox/out/-zg_-P.json	Normal file
File diff suppressed because it is too large

18	tests/longattr/out/-z0.json	Normal file
File diff suppressed because one or more lines are too long

5	tests/longattr/sherlock.json	Normal file
File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long

521	tile-join.cpp
@@ -1,3 +1,7 @@
#define _DEFAULT_SOURCE
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -16,10 +20,20 @@
#include "pool.hpp"
#include "mbtiles.hpp"
#include "geometry.hpp"
#include "dirtiles.hpp"
#include <fstream>
#include <sstream>
#include <algorithm>
#include <functional>

extern "C" {
#include "jsonpull/jsonpull.h"
}

std::string dequote(std::string s);

bool pk = false;
bool pC = false;
size_t CPUS;

struct stats {
@@ -260,8 +274,12 @@ struct reader {
long long x;
long long sorty;
long long y;
int pbf_count;
int z_flag;

std::string data;
std::vector<std::string> pbf_path;
std::vector<std::string> large_zoom;

sqlite3 *db;
sqlite3_stmt *stmt;
@@ -297,38 +315,198 @@ struct reader {
}
};

struct reader *begin_reading(char *fname) {
sqlite3 *db;
std::vector<std::string> split_slash(std::string pbf_path) {
std::vector<std::string> path_parts;
std::string path(pbf_path);
std::istringstream iss(path);
std::string token;

if (sqlite3_open(fname, &db) != SQLITE_OK) {
fprintf(stderr, "%s: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
while (std::getline(iss, token, '/')) {
path_parts.push_back(token);
}

const char *sql = "SELECT zoom_level, tile_column, tile_row, tile_data from tiles order by zoom_level, tile_column, tile_row;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
return path_parts;
}

struct reader *r = new reader;
r->db = db;
r->stmt = stmt;
r->next = NULL;

if (sqlite3_step(stmt) == SQLITE_ROW) {
r->zoom = sqlite3_column_int(stmt, 0);
r->x = sqlite3_column_int(stmt, 1);
r->sorty = sqlite3_column_int(stmt, 2);
r->y = (1LL << r->zoom) - 1 - r->sorty;

const char *data = (const char *) sqlite3_column_blob(stmt, 3);
size_t len = sqlite3_column_bytes(stmt, 3);

r->data = std::string(data, len);
int filter(const struct dirent *dir) {
if (strcmp(dir->d_name, ".") == 0 || strcmp(dir->d_name, "..") == 0 || strcmp(dir->d_name, ".DS_Store") == 0 || strcmp(dir->d_name, "metadata.json") == 0) {
return 0;
} else {
r->zoom = 32;
return 1;
}
}

// Recursively walk through the specified directory and its subdirectories,
// using alphasort and the zoom_range flag to visit the input in numerical order.
// Store the paths of all the .pbf files in the pbf_path vector of the reader struct,
// using the large_zoom vector and the pbf_count and z_flag members to keep
// the order of tiles in pbf_path the same as it would be in an mbtiles file.
struct reader *read_dir(struct reader *readers, const char *name, int level, int zoom_range) {
struct dirent **namelist;
struct stat buf;
std::string path;
int i = 0;
int n = scandir(name, &namelist, filter, alphasort);
std::vector<std::string> path_parts1, path_parts2;
readers->pbf_count = 0;

if (n > 0) {
while (i < n) {
path = std::string(name) + "/" + std::string(namelist[i]->d_name);

if (stat(path.c_str(), &buf) == 0 && S_ISDIR(buf.st_mode)) {
if (level == 0) {
if (std::stoi(namelist[i]->d_name) <= 9) {
zoom_range = 0;
} else {
zoom_range = 1;
}

if (readers->pbf_count > 0) {
if (readers->z_flag == 0) {
std::sort(readers->pbf_path.end() - (readers->pbf_count + 1), readers->pbf_path.end(), std::greater<std::string>());
} else {
std::sort(readers->large_zoom.end() - (readers->pbf_count + 1), readers->large_zoom.end(), std::greater<std::string>());
}
readers->pbf_count = 0;
}
}

if (level == 1 && readers->pbf_count > 0) {
if (zoom_range == 0) {
std::sort(readers->pbf_path.end() - (readers->pbf_count + 1), readers->pbf_path.end(), std::greater<std::string>());
} else {
std::sort(readers->large_zoom.end() - (readers->pbf_count + 1), readers->large_zoom.end(), std::greater<std::string>());
}
readers->pbf_count = 0;
}

read_dir(readers, path.c_str(), level + 1, zoom_range);
} else {
if (level == 0) {
fprintf(stderr, "ERROR: Directory structure in '%s' should be zoom/x/y\n", name);
exit(EXIT_FAILURE);
}

if (level == 1) {
fprintf(stderr, "ERROR: Directory structure in '%s' should be zoom/x/y\n", split_slash(name)[0].c_str());
exit(EXIT_FAILURE);
}

if (zoom_range == 0) {
readers->pbf_path.push_back(path);

if (readers->pbf_path.size() > 1) {
path_parts1 = split_slash(readers->pbf_path[readers->pbf_path.size() - 1]);
path_parts2 = split_slash(readers->pbf_path[readers->pbf_path.size() - 2]);
int p1 = path_parts1.size();
int p2 = path_parts2.size();

if (std::stoll(path_parts1[p1 - 3]) == std::stoll(path_parts2[p2 - 3]) && std::stoll(path_parts1[p1 - 2]) == std::stoll(path_parts2[p2 - 2])) {
readers->z_flag = 0;
readers->pbf_count++;
}

path_parts1.clear();
path_parts2.clear();
}
} else {
readers->large_zoom.push_back(path);

if (readers->large_zoom.size() > 1) {
path_parts1 = split_slash(readers->large_zoom[readers->large_zoom.size() - 1]);
path_parts2 = split_slash(readers->large_zoom[readers->large_zoom.size() - 2]);
int p1 = path_parts1.size();
int p2 = path_parts2.size();

if (std::stoll(path_parts1[p1 - 3]) == std::stoll(path_parts2[p2 - 3]) && std::stoll(path_parts1[p1 - 2]) == std::stoll(path_parts2[p2 - 2])) {
readers->z_flag = 1;
readers->pbf_count++;
}

path_parts1.clear();
path_parts2.clear();
}
}
}

free(namelist[i]);
i++;
}

if (level == 0) {
if (readers->pbf_count > 0) {
std::sort(readers->pbf_path.end() - (readers->pbf_count + 1), readers->pbf_path.end(), std::greater<std::string>());
}

readers->pbf_path.insert(std::end(readers->pbf_path), std::begin(readers->large_zoom), std::end(readers->large_zoom));
}

free(namelist);
} else if (n == 0) {
fprintf(stderr, "ERROR: Empty directory '%s'\n", name);
exit(EXIT_FAILURE);
} else {
perror("scandir");
}

return readers;
}
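
For reference, the zoom/x/y layout that read_dir() expects looks like this (hypothetical paths, for illustration only):

tiles/metadata.json
tiles/0/0/0.pbf
tiles/9/82/197.pbf
tiles/10/164/395.pbf

Because alphasort would place "10" before "2", single-digit zoom directories are collected in pbf_path and larger zooms in large_zoom, which is appended afterward, so zoom 10 still sorts after zoom 9.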

struct reader *begin_reading(char *fname) {
DIR *dir;
struct reader *r = new reader;
if ((dir = opendir(fname)) != NULL) {
r = read_dir(r, fname, 0, 0);

std::vector<std::string> path_parts;
path_parts = split_slash(r->pbf_path[0]);
int p = path_parts.size();

r->db = NULL;
r->stmt = NULL;
r->next = NULL;
r->pbf_count = 0;
r->zoom = std::stoll(path_parts[p - 3]);
r->x = std::stoll(path_parts[p - 2]);
r->y = std::stoll(path_parts[p - 1].substr(0, path_parts[p - 1].find_last_of(".")));
r->sorty = (1LL << r->zoom) - 1 - r->y;
r->data = dir_read_tile(r->pbf_path[0]);
path_parts.clear();
closedir(dir);
} else {
sqlite3 *db;

if (sqlite3_open(fname, &db) != SQLITE_OK) {
fprintf(stderr, "%s: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}

const char *sql = "SELECT zoom_level, tile_column, tile_row, tile_data from tiles order by zoom_level, tile_column, tile_row;";
sqlite3_stmt *stmt;

if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}

r->db = db;
r->stmt = stmt;
r->next = NULL;

if (sqlite3_step(stmt) == SQLITE_ROW) {
r->zoom = sqlite3_column_int(stmt, 0);
r->x = sqlite3_column_int(stmt, 1);
r->sorty = sqlite3_column_int(stmt, 2);
r->y = (1LL << r->zoom) - 1 - r->sorty;

const char *data = (const char *) sqlite3_column_blob(stmt, 3);
size_t len = sqlite3_column_bytes(stmt, 3);

r->data = std::string(data, len);
} else {
r->zoom = 32;
}
}

return r;
@@ -405,7 +583,12 @@ void *join_worker(void *v) {
if (anything) {
std::string pbf = tile.encode();
std::string compressed;
compress(pbf, compressed);

if (!pC) {
compress(pbf, compressed);
} else {
compressed = pbf;
}
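// With -pC, the tile data is passed through uncompressed instead of being compressed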

if (!pk && compressed.size() > 500000) {
fprintf(stderr, "Tile %lld/%lld/%lld size is %lld, >500000. Skipping this tile.\n", ai->first.z, ai->first.x, ai->first.y, (long long) compressed.size());
@@ -418,7 +601,7 @@ void *join_worker(void *v) {
return NULL;
}

void handle_tasks(std::map<zxy, std::vector<std::string>> &tasks, std::vector<std::map<std::string, layermap_entry>> &layermaps, sqlite3 *outdb, std::vector<std::string> &header, std::map<std::string, std::vector<std::string>> &mapping, std::set<std::string> &exclude, int ifmatched, std::set<std::string> &keep_layers, std::set<std::string> &remove_layers) {
void handle_tasks(std::map<zxy, std::vector<std::string>> &tasks, std::vector<std::map<std::string, layermap_entry>> &layermaps, sqlite3 *outdb, const char *outdir, std::vector<std::string> &header, std::map<std::string, std::vector<std::string>> &mapping, std::set<std::string> &exclude, int ifmatched, std::set<std::string> &keep_layers, std::set<std::string> &remove_layers) {
pthread_t pthreads[CPUS];
std::vector<arg> args;

@@ -462,19 +645,22 @@ void handle_tasks(std::map<zxy, std::vector<std::string>> &tasks, std::vector<st
}

for (auto ai = args[i].outputs.begin(); ai != args[i].outputs.end(); ++ai) {
mbtiles_write_tile(outdb, ai->first.z, ai->first.x, ai->first.y, ai->second.data(), ai->second.size());
if (outdb != NULL) {
mbtiles_write_tile(outdb, ai->first.z, ai->first.x, ai->first.y, ai->second.data(), ai->second.size());
} else if (outdir != NULL) {
dir_write_tile(outdir, ai->first.z, ai->first.x, ai->first.y, ai->second);
}
}
}
}

void decode(struct reader *readers, char *map, std::map<std::string, layermap_entry> &layermap, sqlite3 *outdb, struct stats *st, std::vector<std::string> &header, std::map<std::string, std::vector<std::string>> &mapping, std::set<std::string> &exclude, int ifmatched, std::string &attribution, std::string &description, std::set<std::string> &keep_layers, std::set<std::string> &remove_layers, std::string &name) {
void decode(struct reader *readers, char *map, std::map<std::string, layermap_entry> &layermap, sqlite3 *outdb, const char *outdir, struct stats *st, std::vector<std::string> &header, std::map<std::string, std::vector<std::string>> &mapping, std::set<std::string> &exclude, int ifmatched, std::string &attribution, std::string &description, std::set<std::string> &keep_layers, std::set<std::string> &remove_layers, std::string &name) {
std::vector<std::map<std::string, layermap_entry>> layermaps;
for (size_t i = 0; i < CPUS; i++) {
layermaps.push_back(std::map<std::string, layermap_entry>());
}

std::map<zxy, std::vector<std::string>> tasks;

double minlat = INT_MAX;
double minlon = INT_MAX;
double maxlat = INT_MIN;
@@ -485,7 +671,6 @@ void decode(struct reader *readers, char *map, std::map<std::string, layermap_en
reader *r = readers;
readers = readers->next;
r->next = NULL;

if (r->zoom != zoom_for_bbox) {
// Only use highest zoom for bbox calculation
// to avoid z0 always covering the world
@@ -512,23 +697,40 @@ void decode(struct reader *readers, char *map, std::map<std::string, layermap_en

if (readers == NULL || readers->zoom != r->zoom || readers->x != r->x || readers->y != r->y) {
if (tasks.size() > 100 * CPUS) {
handle_tasks(tasks, layermaps, outdb, header, mapping, exclude, ifmatched, keep_layers, remove_layers);
handle_tasks(tasks, layermaps, outdb, outdir, header, mapping, exclude, ifmatched, keep_layers, remove_layers);
tasks.clear();
}
}

if (sqlite3_step(r->stmt) == SQLITE_ROW) {
r->zoom = sqlite3_column_int(r->stmt, 0);
r->x = sqlite3_column_int(r->stmt, 1);
r->sorty = sqlite3_column_int(r->stmt, 2);
r->y = (1LL << r->zoom) - 1 - r->sorty;
if (r->db != NULL) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
r->zoom = sqlite3_column_int(r->stmt, 0);
r->x = sqlite3_column_int(r->stmt, 1);
r->sorty = sqlite3_column_int(r->stmt, 2);
r->y = (1LL << r->zoom) - 1 - r->sorty;
const char *data = (const char *) sqlite3_column_blob(r->stmt, 3);
size_t len = sqlite3_column_bytes(r->stmt, 3);

const char *data = (const char *) sqlite3_column_blob(r->stmt, 3);
size_t len = sqlite3_column_bytes(r->stmt, 3);

r->data = std::string(data, len);
r->data = std::string(data, len);
} else {
r->zoom = 32;
}
} else {
r->zoom = 32;
r->pbf_count++;

if (r->pbf_count != static_cast<int>(r->pbf_path.size())) {
std::vector<std::string> path_parts;
path_parts = split_slash(r->pbf_path[r->pbf_count]);
int p = path_parts.size();
r->zoom = std::stoll(path_parts[p - 3]);
r->x = std::stoll(path_parts[p - 2]);
r->y = std::stoll(path_parts[p - 1].substr(0, path_parts[p - 1].find_last_of(".")));
r->sorty = (1LL << r->zoom) - 1 - r->y;
r->data = dir_read_tile(r->pbf_path[r->pbf_count]);
path_parts.clear();
} else {
r->zoom = 32;
}
}

struct reader **rr;
@@ -548,73 +750,151 @@ void decode(struct reader *readers, char *map, std::map<std::string, layermap_en
st->minlat = min(minlat, st->minlat);
st->maxlat = max(maxlat, st->maxlat);

handle_tasks(tasks, layermaps, outdb, header, mapping, exclude, ifmatched, keep_layers, remove_layers);
handle_tasks(tasks, layermaps, outdb, outdir, header, mapping, exclude, ifmatched, keep_layers, remove_layers);
layermap = merge_layermaps(layermaps);

struct reader *next;
for (struct reader *r = readers; r != NULL; r = next) {
next = r->next;
sqlite3_finalize(r->stmt);

if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'minzoom'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
int minzoom = sqlite3_column_int(r->stmt, 0);
st->minzoom = min(st->minzoom, minzoom);
}
if (r->db != NULL) {
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'maxzoom'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
int maxzoom = sqlite3_column_int(r->stmt, 0);
st->maxzoom = max(st->maxzoom, maxzoom);

if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'minzoom'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
int minzoom = sqlite3_column_int(r->stmt, 0);
st->minzoom = min(st->minzoom, minzoom);
}
sqlite3_finalize(r->stmt);
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'center'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
const unsigned char *s = sqlite3_column_text(r->stmt, 0);
sscanf((char *) s, "%lf,%lf", &st->midlon, &st->midlat);
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'maxzoom'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
int maxzoom = sqlite3_column_int(r->stmt, 0);
st->maxzoom = max(st->maxzoom, maxzoom);
}
sqlite3_finalize(r->stmt);
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'attribution'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
attribution = std::string((char *) sqlite3_column_text(r->stmt, 0));
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'center'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
const unsigned char *s = sqlite3_column_text(r->stmt, 0);
sscanf((char *) s, "%lf,%lf", &st->midlon, &st->midlat);
}
sqlite3_finalize(r->stmt);
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'description'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
description = std::string((char *) sqlite3_column_text(r->stmt, 0));
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'attribution'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
attribution = std::string((char *) sqlite3_column_text(r->stmt, 0));
}
sqlite3_finalize(r->stmt);
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'name'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
if (name.size() == 0) {
name = std::string((char *) sqlite3_column_text(r->stmt, 0));
} else {
name += " + " + std::string((char *) sqlite3_column_text(r->stmt, 0));
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'description'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
description = std::string((char *) sqlite3_column_text(r->stmt, 0));
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'name'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
if (name.size() == 0) {
name = std::string((char *) sqlite3_column_text(r->stmt, 0));
} else {
name += " + " + std::string((char *) sqlite3_column_text(r->stmt, 0));
}
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'bounds'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
const unsigned char *s = sqlite3_column_text(r->stmt, 0);
if (sscanf((char *) s, "%lf,%lf,%lf,%lf", &minlon, &minlat, &maxlon, &maxlat) == 4) {
st->minlon = min(minlon, st->minlon);
st->maxlon = max(maxlon, st->maxlon);
st->minlat = min(minlat, st->minlat);
st->maxlat = max(maxlat, st->maxlat);
}
}
sqlite3_finalize(r->stmt);
}

if (sqlite3_close(r->db) != SQLITE_OK) {
fprintf(stderr, "Could not close database: %s\n", sqlite3_errmsg(r->db));
exit(EXIT_FAILURE);
}

} else {
std::vector<std::string> path_parts;
path_parts = split_slash(r->pbf_path[0]);
std::string metadata_path = path_parts[0];

for (int i = 1; i < static_cast<int>(path_parts.size()) - 3; i++) {
metadata_path = metadata_path + "/" + path_parts[i];
}

metadata_path += "/metadata.json";

path_parts.clear();
FILE *f = fopen(metadata_path.c_str(), "r");

if (f == NULL) {
perror(metadata_path.c_str());
exit(EXIT_FAILURE);
}

json_pull *jp = json_begin_file(f);
json_object *j, *k;

while ((j = json_read(jp)) != NULL) {
if (j->type == JSON_HASH) {
if ((k = json_hash_get(j, "minzoom")) != NULL) {
const std::string minzoom_tmp = k->string;
int minzoom = std::stoi(minzoom_tmp);
st->minzoom = min(st->minzoom, minzoom);
}

if ((k = json_hash_get(j, "maxzoom")) != NULL) {
const std::string maxzoom_tmp = k->string;
int maxzoom = std::stoi(maxzoom_tmp);
st->maxzoom = max(st->maxzoom, maxzoom);
}

if ((k = json_hash_get(j, "center")) != NULL) {
const std::string center = k->string;
const unsigned char *s = (const unsigned char *) center.c_str();
sscanf((char *) s, "%lf,%lf", &st->midlon, &st->midlat);
}

if ((k = json_hash_get(j, "attribution")) != NULL) {
attribution = k->string;
}

if ((k = json_hash_get(j, "description")) != NULL) {
description = k->string;
}

if ((k = json_hash_get(j, "name")) != NULL) {
const std::string name_tmp = k->string;
if (name.size() == 0) {
name = name_tmp;
} else {
name += " + " + name_tmp;
}
}

if ((k = json_hash_get(j, "bounds")) != NULL) {
const std::string bounds = k->string;
const unsigned char *s = (const unsigned char *) bounds.c_str();
if (sscanf((char *) s, "%lf,%lf,%lf,%lf", &minlon, &minlat, &maxlon, &maxlat) == 4) {
st->minlon = min(minlon, st->minlon);
st->maxlon = max(maxlon, st->maxlon);
st->minlat = min(minlat, st->minlat);
st->maxlat = max(maxlat, st->maxlat);
}
}
}
}
sqlite3_finalize(r->stmt);
}
if (sqlite3_prepare_v2(r->db, "SELECT value from metadata where name = 'bounds'", -1, &r->stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(r->stmt) == SQLITE_ROW) {
const unsigned char *s = sqlite3_column_text(r->stmt, 0);
if (sscanf((char *) s, "%lf,%lf,%lf,%lf", &minlon, &minlat, &maxlon, &maxlat) == 4) {
st->minlon = min(minlon, st->minlon);
st->maxlon = max(maxlon, st->maxlon);
st->minlat = min(minlat, st->minlat);
st->maxlat = max(maxlat, st->maxlat);
}
}
sqlite3_finalize(r->stmt);
}

if (sqlite3_close(r->db) != SQLITE_OK) {
fprintf(stderr, "Could not close database: %s\n", sqlite3_errmsg(r->db));
exit(EXIT_FAILURE);
json_free(j);
json_end(jp);
fclose(f);
}

delete r;
@@ -622,7 +902,7 @@ void decode(struct reader *readers, char *map, std::map<std::string, layermap_en
}

void usage(char **argv) {
fprintf(stderr, "Usage: %s [-f] [-i] [-pk] [-c joins.csv] [-x exclude ...] -o new.mbtiles source.mbtiles ...\n", argv[0]);
fprintf(stderr, "Usage: %s [-f] [-i] [-pk] [-pC] [-c joins.csv] [-x exclude ...] -o new.mbtiles source.mbtiles ...\n", argv[0]);
exit(EXIT_FAILURE);
}
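
Illustrative invocations (file names are examples only): -o joins the sources into a new mbtiles file, while the new -e option writes the joined tiles into a directory:

tile-join -o merged.mbtiles macarthur.mbtiles macarthur2.mbtiles
tile-join -e merged-folder macarthur.mbtiles macarthur2.mbtiles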

@@ -708,7 +988,9 @@ void readcsv(char *fn, std::vector<std::string> &header, std::map<std::string, s
}

int main(int argc, char **argv) {
char *outfile = NULL;
char *out_mbtiles = NULL;
char *out_dir = NULL;
sqlite3 *outdb = NULL;
char *csv = NULL;
int force = 0;
int ifmatched = 0;
@@ -731,10 +1013,14 @@ int main(int argc, char **argv) {
extern char *optarg;
int i;

while ((i = getopt(argc, argv, "fo:c:x:ip:l:L:A:N:n:")) != -1) {
while ((i = getopt(argc, argv, "fo:e:c:x:ip:l:L:A:N:n:")) != -1) {
switch (i) {
case 'o':
outfile = optarg;
out_mbtiles = optarg;
break;

case 'e':
out_dir = optarg;
break;

case 'f':
@@ -760,6 +1046,8 @@
case 'p':
if (strcmp(optarg, "k") == 0) {
pk = true;
} else if (strcmp(optarg, "C") == 0) {
pC = true;
} else {
fprintf(stderr, "%s: Unknown option for -p%s\n", argv[0], optarg);
exit(EXIT_FAILURE);
@@ -793,15 +1081,27 @@ int main(int argc, char **argv) {
}
}

if (argc - optind < 1 || outfile == NULL) {
if (argc - optind < 1) {
usage(argv);
}

if (force) {
unlink(outfile);
if (out_mbtiles == NULL && out_dir == NULL) {
fprintf(stderr, "%s: must specify -o out.mbtiles or -e directory\n", argv[0]);
usage(argv);
}

if (out_mbtiles != NULL && out_dir != NULL) {
fprintf(stderr, "%s: Options -o and -e cannot be used together\n", argv[0]);
usage(argv);
}

if (out_mbtiles != NULL) {
if (force) {
unlink(out_mbtiles);
}
outdb = mbtiles_open(out_mbtiles, argv, 0);
}

sqlite3 *outdb = mbtiles_open(outfile, argv, 0);
struct stats st;
memset(&st, 0, sizeof(st));
st.minzoom = st.minlat = st.minlon = INT_MAX;
@@ -828,7 +1128,7 @@ int main(int argc, char **argv) {
*rr = r;
}

decode(readers, csv, layermap, outdb, &st, header, mapping, exclude, ifmatched, attribution, description, keep_layers, remove_layers, name);
decode(readers, csv, layermap, outdb, out_dir, &st, header, mapping, exclude, ifmatched, attribution, description, keep_layers, remove_layers, name);

if (set_attribution.size() != 0) {
attribution = set_attribution;
@@ -840,8 +1140,11 @@ int main(int argc, char **argv) {
name = set_name;
}

mbtiles_write_metadata(outdb, NULL, name.c_str(), st.minzoom, st.maxzoom, st.minlat, st.minlon, st.maxlat, st.maxlon, st.midlat, st.midlon, 0, attribution.size() != 0 ? attribution.c_str() : NULL, layermap, true, description.c_str());
mbtiles_close(outdb, argv);
mbtiles_write_metadata(outdb, out_dir, name.c_str(), st.minzoom, st.maxzoom, st.minlat, st.minlon, st.maxlat, st.maxlon, st.midlat, st.midlon, 0, attribution.size() != 0 ? attribution.c_str() : NULL, layermap, true, description.c_str());

if (outdb != NULL) {
mbtiles_close(outdb, argv);
}

return 0;
}
46	tile.cpp
@@ -28,7 +28,7 @@
#include <sys/wait.h>
#include "mvt.hpp"
#include "mbtiles.hpp"
#include "rawtiles.hpp"
#include "dirtiles.hpp"
#include "geometry.hpp"
#include "tile.hpp"
#include "pool.hpp"
@@ -206,7 +206,7 @@ int metacmp(int m1, const std::vector<long long> &keys1, const std::vector<long
}

void rewrite(drawvec &geom, int z, int nextzoom, int maxzoom, long long *bbox, unsigned tx, unsigned ty, int buffer, int line_detail, int *within, long long *geompos, FILE **geomfile, const char *fname, signed char t, int layer, long long metastart, signed char feature_minzoom, int child_shards, int max_zoom_increment, long long seq, int tippecanoe_minzoom, int tippecanoe_maxzoom, int segment, unsigned *initial_x, unsigned *initial_y, int m, std::vector<long long> &metakeys, std::vector<long long> &metavals, bool has_id, unsigned long long id, unsigned long long index, long long extent) {
if (geom.size() > 0 && nextzoom <= maxzoom) {
if (geom.size() > 0 && (nextzoom <= maxzoom || additional[A_EXTEND_ZOOMS])) {
int xo, yo;
int span = 1 << (nextzoom - z);

@@ -1176,6 +1176,8 @@ struct write_tile_args {
double fraction_out;
const char *prefilter;
const char *postfilter;
bool still_dropping;
int wrote_zoom;
size_t tiling_seg;
};

@@ -1337,7 +1339,6 @@ struct run_prefilter_args {
std::vector<std::vector<std::string>> *layer_unmaps;
char *stringpool;
long long *pool_off;

FILE *prefilter_fp;
};

@@ -1887,6 +1888,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s

if (gamma > arg->gamma_out) {
arg->gamma_out = gamma;
arg->still_dropping = true;
}

if (!quiet) {
@@ -1896,9 +1898,14 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
continue;
} else if (additional[A_DROP_DENSEST_AS_NEEDED]) {
mingap_fraction = mingap_fraction * 200000.0 / totalsize * 0.90;
mingap = choose_mingap(indices, mingap_fraction);
unsigned long long mg = choose_mingap(indices, mingap_fraction);
if (mg <= mingap) {
mg = mingap * 1.5;
}
mingap = mg;
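// Force the gap threshold to grow so that successive attempts make progress instead of repeating the same mingap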
if (mingap > arg->mingap_out) {
arg->mingap_out = mingap;
arg->still_dropping = true;
}
if (!quiet) {
fprintf(stderr, "Going to try keeping the sparsest %0.2f%% of the features to make it fit\n", mingap_fraction * 100.0);
@@ -1912,6 +1919,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
minextent = m;
if (minextent > arg->minextent_out) {
arg->minextent_out = minextent;
arg->still_dropping = true;
}
if (!quiet) {
fprintf(stderr, "Going to try keeping the biggest %0.2f%% of the features to make it fit\n", minextent_fraction * 100.0);
@@ -1926,11 +1934,12 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
}
if (additional[A_DROP_FRACTION_AS_NEEDED] && fraction < arg->fraction_out) {
arg->fraction_out = fraction;
arg->still_dropping = true;
}
line_detail++; // to keep it the same when the loop decrements it
continue;
} else {
fprintf(stderr, "Try using -B (and --drop-lines or --drop-polygons if needed) to set a higher base zoom level.\n");
fprintf(stderr, "Try using --drop-fraction-as-needed or --drop-densest-as-needed.\n");
return -1;
}
}
@@ -1964,6 +1973,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s

if (gamma > arg->gamma_out) {
arg->gamma_out = gamma;
arg->still_dropping = true;
}

if (!quiet) {
@@ -1972,9 +1982,14 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
line_detail++; // to keep it the same when the loop decrements it
} else if (additional[A_DROP_DENSEST_AS_NEEDED]) {
mingap_fraction = mingap_fraction * max_tile_size / compressed.size() * 0.90;
mingap = choose_mingap(indices, mingap_fraction);
unsigned long long mg = choose_mingap(indices, mingap_fraction);
if (mg <= mingap) {
mg = mingap * 1.5;
}
mingap = mg;
if (mingap > arg->mingap_out) {
arg->mingap_out = mingap;
arg->still_dropping = true;
}
if (!quiet) {
fprintf(stderr, "Going to try keeping the sparsest %0.2f%% of the features to make it fit\n", mingap_fraction * 100.0);
@@ -1987,6 +2002,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
minextent = m;
if (minextent > arg->minextent_out) {
arg->minextent_out = minextent;
arg->still_dropping = true;
}
if (!quiet) {
fprintf(stderr, "Going to try keeping the biggest %0.2f%% of the features to make it fit\n", minextent_fraction * 100.0);
@@ -2004,6 +2020,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
}
if (additional[A_DROP_FRACTION_AS_NEEDED] && fraction < arg->fraction_out) {
arg->fraction_out = fraction;
arg->still_dropping = true;
}
line_detail++; // to keep it the same when the loop decrements it
}
@@ -2017,7 +2034,7 @@ long long write_tile(FILE *geoms, long long *geompos_in, char *metabase, char *s
if (outdb != NULL) {
mbtiles_write_tile(outdb, z, tx, ty, compressed.data(), compressed.size());
} else if (outdir != NULL) {
write_raw_tile(outdir, z, tx, ty, compressed);
dir_write_tile(outdir, z, tx, ty, compressed);
}

if (pthread_mutex_unlock(&db_lock) != 0) {
@@ -2078,6 +2095,8 @@ void *run_thread(void *vargs) {
deserialize_uint_io(geom, &x, &geompos);
deserialize_uint_io(geom, &y, &geompos);

arg->wrote_zoom = z;

// fprintf(stderr, "%d/%u/%u\n", z, x, y);

long long len = write_tile(geom, &geompos, arg->metabase, arg->stringpool, z, x, y, z == arg->maxzoom ? arg->full_detail : arg->low_detail, arg->min_detail, arg->basezoom, arg->outdb, arg->outdir, arg->droprate, arg->buffer, arg->fname, arg->geomfile, arg->minzoom, arg->maxzoom, arg->todo, arg->along, geompos, arg->gamma, arg->child_shards, arg->meta_off, arg->pool_off, arg->initial_x, arg->initial_y, arg->running, arg->simplification, arg->layermaps, arg->layer_unmaps, arg->tiling_seg, arg->pass, arg->passes, arg->mingap, arg->minextent, arg->fraction, arg->prefilter, arg->postfilter, arg);
@@ -2145,7 +2164,7 @@ void *run_thread(void *vargs) {
return NULL;
}

int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *midx, unsigned *midy, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, const char *outdir, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, double simplification, std::vector<std::map<std::string, layermap_entry>> &layermaps, const char *prefilter, const char *postfilter) {
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *midx, unsigned *midy, int &maxzoom, int minzoom, int basezoom, sqlite3 *outdb, const char *outdir, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, double simplification, std::vector<std::map<std::string, layermap_entry>> &layermaps, const char *prefilter, const char *postfilter) {
// The existing layermaps are one table per input thread.
// We need to add another one per *tiling* thread so that it can be
// safely changed during tiling.
@@ -2329,6 +2348,8 @@ int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpo
args[thread].running = &running;
args[thread].pass = pass;
args[thread].passes = 2 - start;
args[thread].wrote_zoom = -1;
args[thread].still_dropping = false;

if (pthread_create(&pthreads[thread], NULL, run_thread, &args[thread]) != 0) {
perror("pthread_create");
@@ -2359,6 +2380,15 @@ int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpo
if (args[thread].fraction_out < zoom_fraction) {
zoom_fraction = args[thread].fraction_out;
}

// Zoom counter might be lower than reality if zooms are being skipped
if (args[thread].wrote_zoom > i) {
i = args[thread].wrote_zoom;
}

if (additional[A_EXTEND_ZOOMS] && i == maxzoom && args[thread].still_dropping && maxzoom < MAX_ZOOM) {
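// With the extend-zooms option (A_EXTEND_ZOOMS), raise maxzoom so tiling continues while features are still being dropped at the old maxzoom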
maxzoom++;
}
}
}

2	tile.hpp
@@ -1,3 +1,3 @@
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *midx, unsigned *midy, int maxzoom, int minzoom, int basezoom, sqlite3 *outdb, const char *outdir, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, double simplification, std::vector<std::map<std::string, layermap_entry> > &layermap, const char *prefilter, const char *postfilter);
int traverse_zooms(int *geomfd, off_t *geom_size, char *metabase, char *stringpool, unsigned *midx, unsigned *midy, int &maxzoom, int minzoom, int basezoom, sqlite3 *outdb, const char *outdir, double droprate, int buffer, const char *fname, const char *tmpdir, double gamma, int full_detail, int low_detail, int min_detail, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, double simplification, std::vector<std::map<std::string, layermap_entry> > &layermap, const char *prefilter, const char *postfilter);

int manage_gap(unsigned long long index, unsigned long long *previndex, double scale, double gamma, double *gap);
@@ -1 +1 @@
#define VERSION "tippecanoe v1.18.1\n"
#define VERSION "tippecanoe v1.19.3\n"
|
Loading…
Reference in New Issue
Block a user