Compare commits


951 Commits

SHA1 Message Date
dc42c7ccc2 Merge pull request #481 from mapbox/crashers
Be more careful about corrupt mbtiles files
2017-10-30 15:16:07 -07:00
faf40658a6 Bump version number 2017-10-30 13:53:54 -07:00
93a325605c Guard against impossible tile coordinates when decoding 2017-10-30 13:24:31 -07:00
e2b9b96ba8 Detect impossible zoom levels in mbtiles when decoding 2017-10-30 12:55:22 -07:00
a3e95db0c3 Guard against decoding tiles with an impossible extent 2017-10-30 12:48:55 -07:00
dfbb13e7db Guard against impossible zoom level 2017-10-27 17:40:39 -07:00
d13e08c9b5 Guard against null keys and values in tileset metadata 2017-10-27 17:38:07 -07:00
4c7de922ce Merge pull request #480 from mapbox/join-ints
Make sure to encode tile-joined integers as ints, not doubles
2017-10-27 17:31:00 -07:00
197d36bdc3 Make sure to encode tile-joined integers as ints, not doubles 2017-10-27 12:43:23 -07:00
68e4f312f8 Merge pull request #479 from mapbox/rename-layer
Add tile-join option to rename layers
2017-10-27 12:21:52 -07:00
18a5300e87 Add tile-join option to rename layers 2017-10-27 11:04:06 -07:00
cd47a398fa Merge pull request #477 from mapbox/empty-attribute-keys
Fix error when parsing attributes with empty-string keys
2017-10-13 12:32:32 -07:00
ee6da93494 Fix error when parsing attributes with empty-string keys 2017-10-13 11:22:15 -07:00
42a56b1ad3 Merge pull request #471 from mapbox/coalesce-smallest
Experiment with coalescing features to reduce tile size
2017-10-10 11:33:29 -07:00
91e469aac2 Merge any leftover feature remnants onto some feature.
Add a test for coalescing small features onto large ones.
2017-10-09 17:37:21 -07:00
748ef3b1d5 Add a tippecanoe-decode option to report tile size and feature count 2017-10-09 17:05:29 -07:00
ecadd779c9 Don't try to coalesce features of different types 2017-10-09 16:26:55 -07:00
8ae1ec5379 Distribute coalescing error more evenly 2017-10-06 18:06:37 -07:00
be922702ce Experiment with coalescing features to reduce tile size 2017-10-06 18:01:08 -07:00
4c2d80ea17 Corrected Link to "Visualizing Month of Lightning" 2017-09-18 14:31:07 -04:00
e000bcc261 Merge pull request #463 from mapbox/stringpool
Limit the depth of the search in the string pool.
2017-09-08 10:16:19 -07:00
a2d12f178f Reorder and reword documentation 2017-09-07 15:34:55 -07:00
84a6aa6d73 Use std::isinf() and std::isnan() instead of the C versions
According to https://github.com/mapbox/tippecanoe/issues/464 this
is necessary for g++-5 on Linux
2017-09-07 14:40:50 -07:00
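A minimal standalone sketch of the distinction, assuming a C++11 compiler (this is not tippecanoe's code): only the std::-qualified forms declared in <cmath> are guaranteed to be usable as ordinary functions with every standard library, which is why the commit switches to them.

```cpp
// Hedged illustration, not from the tippecanoe source: use the C++ <cmath>
// classification functions rather than the C macros.
#include <cmath>
#include <cstdio>

int main() {
    double not_a_number = std::nan("");  // quiet NaN
    double infinite = HUGE_VAL;          // positive infinity on IEEE systems

    std::printf("isnan: %d, isinf: %d\n",
                (int) std::isnan(not_a_number),
                (int) std::isinf(infinite));
    return 0;
}
```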
c3d23675d1 Also fix arithmetic overflow when reading a Geobuf GeometryCollection 2017-09-07 13:48:09 -07:00
c77c2a2b1e Fix compiler complaints about arithmetic overflow in hashing 2017-09-07 12:04:32 -07:00
93920d06e1 Slightly lower average traversal using a hash for comparison 2017-09-06 17:10:10 -07:00
764a8ac17c Use 32-bit offsets in the string pool search tree to reduce its size 2017-09-06 15:26:19 -07:00
0f8b32c69f Limit the depth of the search in the string pool.
Use a better comparison function. (Attributes often have a common
prefix but rarely have a common suffix.)
2017-09-06 13:06:08 -07:00
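The parenthetical explains the design choice: attribute names tend to share long prefixes, so a comparison that examines the strings from the end finds a differing character sooner. A hedged sketch of that idea (the function name and signature here are assumptions, not the string pool's actual interface):

```cpp
// Illustrative only: lexicographic comparison of the strings as if reversed,
// so a shared prefix (e.g. "building_height" vs. "building_levels") is
// skipped and the differing tail is examined first.
#include <string>

int compare_from_end(const std::string &a, const std::string &b) {
    size_t i = a.size(), j = b.size();
    while (i > 0 && j > 0) {
        unsigned char ca = a[--i], cb = b[--j];
        if (ca != cb) {
            return ca < cb ? -1 : 1;
        }
    }
    if (i == j) {
        return 0;            // same length, all characters matched
    }
    return i == 0 ? -1 : 1;  // the shorter string sorts first
}
```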
2518f238d4 Use tippecanoe instead of tile-join in the filter example 2017-09-06 11:49:55 -07:00
7ce882c035 Merge pull request #462 from mapbox/gl-filters
Add feature attribute filters (as in the GL style spec) to tile-join
2017-09-06 11:20:32 -07:00
c9e4275217 Do fewer tests in the address sanitizer build 2017-09-05 14:02:43 -07:00
bdd95af8b2 Be careful not to generate empty layers in tile-join.
Add a test of filtering during tile-join.
Bump version number.
2017-09-05 13:33:10 -07:00
d349b7700d Add the tests for filtering for feature type 2017-09-05 12:17:23 -07:00
7b71f41e43 Fix compiler warnings 2017-09-01 17:36:43 -07:00
86ff221663 I think this should fix the access to already-freed memory
Also drop the reference to the JSON parser when disconnecting
a JSON object from the parse tree.
2017-09-01 17:22:05 -07:00
681907e88d Add the feature ID filter tests 2017-09-01 16:53:19 -07:00
8d502eb805 Fix memory leak 2017-09-01 16:41:01 -07:00
c232e565db Working on testing against Mapbox GL JS filter tests 2017-09-01 16:27:27 -07:00
0fd4454129 Allow filter expressions during tippecanoe as well as during tile-join 2017-09-01 11:51:12 -07:00
dbb789dadc Merge branch 'master' into gl-filters 2017-09-01 10:29:08 -07:00
dd07511193 Merge pull request #460 from mapbox/geobuf
Add input support for Geobuf format
2017-09-01 10:28:22 -07:00
9a21c04f06 Bump version number; fix node version that I accidentally reverted 2017-08-31 14:17:18 -07:00
223d837736 Multithreaded geobuf feature parsing 2017-08-31 13:54:04 -07:00
12d744e961 Try specifying a newer version of Node 2017-08-30 16:03:00 -07:00
6384b6a49a Fix #include path and json2geobuf path 2017-08-30 15:45:37 -07:00
a5b1378d1a Minimize external dependencies for Geobuf testing 2017-08-30 15:32:36 -07:00
076dfcdfeb Work around differences of opinion about the range of feature IDs
Geobuf uses signed 64-bit ints
Vector tile spec uses unsigned 64-bit ints
Geobuf silently corrupts IDs through use of floating point
2017-08-30 14:34:59 -07:00
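A self-contained sketch of why those ranges can't be reconciled exactly (an assumed illustration, not the workaround the commit applies): signed and unsigned 64-bit IDs each cover values the other cannot, and any ID above 2^53 loses precision if it passes through a double.

```cpp
// Demonstration only: signed 64-bit, unsigned 64-bit, and double-typed IDs
// cover different ranges, and doubles lose integer precision above 2^53.
#include <cstdint>
#include <cstdio>

int main() {
    int64_t geobuf_style = -1;                     // legal signed ID, no unsigned equivalent
    uint64_t tile_style = 9223372036854775808ULL;  // INT64_MAX + 1, no signed equivalent
    uint64_t big = (1ULL << 53) + 1;               // smallest integer a double cannot hold
    double through_double = (double) big;

    std::printf("signed: %lld  unsigned: %llu\n",
                (long long) geobuf_style, (unsigned long long) tile_style);
    std::printf("%llu via double becomes %llu\n",
                (unsigned long long) big, (unsigned long long) through_double);
    return 0;
}
```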
f65faaf3da Support GeometryCollections in geobuf 2017-08-30 14:10:35 -07:00
5c5fbe2617 Support top-level geometries 2017-08-30 14:01:09 -07:00
5665d08745 Support per-feature minzoom and maxzoom in geobuf 2017-08-30 11:44:57 -07:00
607ea6c643 Lower the precision of all test input coordinates to 6 decimal digits 2017-08-30 10:31:51 -07:00
15a48e67d6 Canonicalize numbers in stringified attributes so geobuf can match 2017-08-29 17:40:57 -07:00
caac717b17 Add another missing #include 2017-08-29 17:25:36 -07:00
8b6a51e20b Fix wildcarding for geobuf test invocation 2017-08-29 16:15:22 -07:00
4d6ebaa088 Fix nulls and negative integers in geobuf parsing 2017-08-29 16:04:50 -07:00
c65a388597 Add missing #include 2017-08-29 15:58:02 -07:00
12784de211 Produce "nan" and "inf" strings instead of aborting 2017-08-29 15:54:25 -07:00
228567364f Remove the memmove I accidentally left in, corrupting small numbers 2017-08-29 15:43:16 -07:00
cef6b022dd Fix the typo I made in the conversion to std::string 2017-08-29 14:58:16 -07:00
0d56d1bf38 Add missing #include 2017-08-29 14:49:24 -07:00
f9a007e8c3 Use milo dtoa for consistent string representation of numbers 2017-08-29 14:44:34 -07:00
3840176d5c Convert to output to std::string 2017-08-29 14:26:20 -07:00
5f09ccae88 Add namespace; fix warning 2017-08-29 13:31:59 -07:00
18b2a2c39c Milo dtoa from https://github.com/miloyip/dtoa-benchmark 2017-08-29 12:51:59 -07:00
4b66aa828d Geobuf testing exposes a bug in type coercion of 0.0 to boolean 2017-08-29 11:43:39 -07:00
1581b79a3e Forgot to implement "in" and "!in" 2017-08-28 14:35:07 -07:00
754cbdc634 Add short circuits; warnings for mismatched types, wrong array lengths 2017-08-28 14:26:36 -07:00
4e1611eec9 Fix boolean comparisons 2017-08-28 14:08:05 -07:00
8d7f8af1d9 Make indent 2017-08-28 14:01:57 -07:00
b7df68c164 Remember how to keep a parse tree around after the parser is closed 2017-08-28 13:56:52 -07:00
5878213516 Simplify the filter format, and actually run the filter. 2017-08-28 13:42:14 -07:00
7be21f6046 First (untested) pass at handling GL Style Spec filters 2017-08-28 13:26:11 -07:00
7c031a9796 Memory-map geobuf input instead of reading it into a temporary string 2017-08-28 11:30:30 -07:00
5943c82457 Move file-format-neutral code out of JSON-specific source file 2017-08-28 11:10:57 -07:00
8d09f0769e Fix multipolygon and feature ID parsing 2017-08-25 17:40:08 -07:00
ad89fb88fb Most of the way through geometry decoding 2017-08-25 16:36:53 -07:00
b98bf6e8c7 Get attribute value decoding working 2017-08-25 15:46:32 -07:00
75be013059 Start on geobuf parsing 2017-08-25 14:56:03 -07:00
e7ee83f27b Move attribute type coercion out of parsing and into serialization 2017-08-24 17:27:30 -07:00
f4818ffb07 Move attribute include/exclude logic into serialization 2017-08-24 17:10:15 -07:00
34b1b215f4 Move tilestats management out of parsing and into serialization 2017-08-24 16:30:01 -07:00
ed8fbd0236 Split more serialization details out from being parsing parameters 2017-08-24 15:57:33 -07:00
b114e22d39 Factor out geometry fixups 2017-08-23 12:48:45 -07:00
6caf20b9c8 Put the pieces back together 2017-08-23 11:43:48 -07:00
6cea2d5db6 Progress on factoring out serialization state into a single object 2017-08-22 18:10:52 -07:00
235dbf57af More progress on splitting apart parsing and serialization 2017-08-22 16:51:11 -07:00
f0b32cf710 Merge branch 'master' into geobuf 2017-08-22 16:21:03 -07:00
e7f264fa51 Merge pull request #458 from mapbox/nan-infinity
Add better diagnostics for NaN or Infinity in input JSON
2017-08-22 15:49:45 -07:00
0b3e731f0b Add better diagnostics for NaN or Infinity in input JSON 2017-08-21 10:44:04 -07:00
071b4efdab Merge pull request #451 from mapbox/no-tile-stats
Add an option not to produce tilestats
2017-08-16 15:23:29 -07:00
4d1ddc5a03 Add tile-join options to select zoom levels to copy 2017-08-16 13:49:16 -07:00
e6fc22187a Tilestats metadata no longer needs to be limited to 60K 2017-08-16 12:53:10 -07:00
8f02aa40c4 Improve tile-join documentation 2017-08-15 15:55:53 -07:00
205c28bb64 Fix tilestats with truncated strings. Trim tilestats if TileJSON is huge 2017-08-15 11:23:03 -07:00
5fc261020c Add tests for not producing tilestats 2017-08-15 10:41:00 -07:00
372194cee9 Add an option not to produce tilestats 2017-08-15 10:35:41 -07:00
b1620f6c59 Work on factoring out serial_feature 2017-08-15 10:25:11 -07:00
23a4ed8754 Merge branch 'master' into geobuf 2017-08-10 10:28:17 -07:00
f5111857d2 Merge pull request #332 from mapbox/plugins
Add plugin support
2017-08-09 14:13:27 -07:00
e453e32321 Fix test flakiness for filtered tile-join, and some other warnings. 2017-08-09 11:30:16 -07:00
915b1481ad Forgot to check in this file 2017-08-09 10:27:21 -07:00
200f6777ba Fix layer list in tilestats when filtering and tile-joining.
(The feature count when filtering will be the sum of features
across tiles instead of features from the original input, since
the filter reader doesn't know what the original input feature
set was.)
2017-08-08 16:41:20 -07:00
635429cd87 Fix dangling pointer. Defer tilestats generation until tiling if filtering. 2017-08-08 13:38:48 -07:00
eebc8f7639 Remember to close polygons before sending them to the prefilter 2017-08-08 11:58:51 -07:00
c79f19e3ca Merge branch 'master' into plugins 2017-08-08 11:08:10 -07:00
fa0e38da2b First stages of geobuf support 2017-08-08 10:31:26 -07:00
e982b2f6a7 Merge pull request #436 from mapbox/tilestats
Generate layer, feature, and attribute statistics as part of tileset metadata
2017-07-27 14:11:42 -07:00
8280c3f99d Bump version number 2017-07-27 13:44:51 -07:00
cc28bbab3a Move the tilestats into the "json" row of the metadata 2017-07-26 09:54:49 -07:00
5a09fcc35e Some basic unit tests for string truncation 2017-07-21 14:27:30 -07:00
a373c2516a Remove debugging code 2017-07-21 13:17:21 -07:00
9ec52f2c90 Merge branch 'master' into tilestats 2017-07-21 13:08:33 -07:00
7e6d10c2fa In tilestats, truncate and merge layers and attributes with long names 2017-07-21 12:56:30 -07:00
62f135a97d Exclude long attribute values from tilestats output, as specified 2017-07-21 12:28:14 -07:00
00aed42c0e Add missing #include 2017-07-20 23:10:45 -07:00
518c8db790 Straighten out the extra tilestat attributes in some joined tilesets
The problem was in the case where the CSV provided a new value for
an attribute that replaced an existing value. Both the old and new
value were being included in the tilestats instead of just the new.
2017-07-20 17:39:41 -07:00
54cbc570f1 Don't write to a directory that contains an existing tileset.
The -f option will clear the directory now, as with mbtiles.
2017-07-20 14:17:09 -07:00
855e344552 Don't keep tilestats for features that are filtered out in tile-join 2017-07-19 16:53:53 -07:00
97d0b2a1b5 Retain attributes in alphabetical order, for consistency 2017-07-19 16:40:06 -07:00
3cafef89f1 Factor out duplicated tilestats code 2017-07-19 14:45:15 -07:00
7b03e1ee87 Include tilestats in the metadata table. (Not all tests are passing) 2017-07-19 13:09:47 -07:00
9078098511 Only insert a new tilestats record if there isn't already one 2017-07-18 17:28:30 -07:00
b3078ddeec Also make tilestats in tile-join 2017-07-18 16:53:06 -07:00
81e3f09024 Merge pull request #440 from mapbox/no-geoms
Close mbtiles properly when exiting because there are no features
2017-07-18 14:21:11 -07:00
9304bc5ca1 Generate the rest of the tilestats 2017-07-18 14:20:33 -07:00
ba82ef8274 Complain if the output file or directory is specified more than once 2017-07-18 14:11:48 -07:00
3eb1237b0a Progress on generating tilestats as JSON 2017-07-18 13:27:37 -07:00
6ac9ca3709 Bump version number 2017-07-18 10:50:25 -07:00
18f9a8dbb7 Close mbtiles properly when exiting because there are no features 2017-07-18 09:54:59 -07:00
71f57793fd Derive tilejson attribute types from sample attribute values 2017-07-17 17:31:46 -07:00
77bf14bfb0 Track sample values for each attribute in each layer 2017-07-17 13:28:46 -07:00
b1771a3365 Make layer attributes a map to sample values 2017-07-17 11:58:37 -07:00
7445feb845 Work in the direction of tracking representative attribute values 2017-07-14 17:59:24 -07:00
62ee53992b Track how many features of each type are in each layer 2017-07-14 17:23:41 -07:00
65c095cc2b Clean up #includes and add fields for counting attributes 2017-07-14 16:56:23 -07:00
6a505cdba7 Add an example of a zoom level filter 2017-07-07 17:41:06 -07:00
24a182772f Merge pull request #433 from mapbox/getopt
Add long options to tippecanoe-decode and tile-join. Add --quiet to tile-join.
2017-07-07 14:44:28 -07:00
c164a3e69b Add long options to tippecanoe-decode and tile-join. Add --quiet to tile-join. 2017-07-07 12:51:49 -07:00
04d0cc6fa1 Merge branch 'master' into plugins 2017-07-06 22:28:35 -07:00
412be8e6fc Merge pull request #431 from mapbox/protozero-1.5.2
Upgrade protozero to version 1.5.2
2017-06-30 15:37:35 -07:00
3ba45c7277 Upgrade protozero to version 1.5.2 2017-06-30 11:20:26 -07:00
bd845ac57d Merge pull request #429 from mapbox/bom2
Ignore UTF-8 byte order mark if present
2017-06-29 15:25:31 -07:00
dd0a135b01 Ignore UTF-8 byte order mark if present 2017-06-29 14:42:42 -07:00
240ccbd219 Merge pull request #426 from mapbox/extend-zooms
Add an option to increase maxzoom if features are still being dropped
2017-05-31 14:07:57 -07:00
dba24959ba Avoid potential infinite loop from choosing the same min density again 2017-05-31 11:02:02 -07:00
cf3a0800b8 Add an option to increase maxzoom if features are still being dropped 2017-05-31 10:47:34 -07:00
b80a1d7621 Merge pull request #425 from mapbox/better-maxzoom-guessing
Better maxzoom guessing by considering resolution within features
2017-05-30 17:22:40 -07:00
c7d146ea48 Don't do work for guessing maxzoom unless needed. Add a better test. 2017-05-30 16:58:56 -07:00
8776f17980 Bump version number, give better warning message 2017-05-30 16:02:08 -07:00
977effc96d Merge pull request #424 from joykuotw/master
Add tile-join directory support
2017-05-30 15:43:56 -07:00
ab8ecb7e00 Add missing #include 2017-05-30 13:41:24 -07:00
331707f88a Better maxzoom guessing by considering resolution within features 2017-05-30 13:28:25 -07:00
a5db055c50 Change fixed-size buffer to variable-length string 2017-05-30 20:11:13 +01:00
e8e949fac9 Fix memory leak 2017-05-30 18:40:22 +01:00
52ceaaddb6 Add tile-join source to directories of tiles 2017-05-30 14:53:55 +01:00
ef8af63ab4 Add tile-join options for no tile compression and output to a directory 2017-05-25 11:25:57 +01:00
9c3fb0f669 Change the function name rawtiles to dirtiles 2017-05-24 16:32:44 +01:00
5db7b504e8 Merge pull request #421 from mapbox/long-attributes
Fix crash with very long (>128K) attribute values
2017-05-18 15:11:42 -07:00
eb8ab1dd58 Fix crash with very long (>128K) attribute values 2017-05-18 14:48:18 -07:00
d0980e29d0 Merge pull request #419 from Burke9077/master
Add basic support for Docker
2017-05-18 11:47:21 -07:00
1bb31882fa Added Docker information to the README 2017-05-18 09:48:37 -04:00
4016876670 Added Dockerfile 2017-05-17 17:02:26 -04:00
b301512860 Merge branch 'master' into plugins 2017-05-12 17:20:30 -07:00
27dadc8ade Merge pull request #418 from mapbox/no-compression-test
There should be a test for the no-compression mbtiles output format
2017-05-12 17:09:41 -07:00
2f7b5d8afd There should be a test for the no-compression mbtiles output format 2017-05-12 16:21:25 -07:00
733092abf6 Merge pull request #417 from mapbox/only-warn-once
Only warn once about invalid polygon encoding in tippecanoe-decode
2017-05-12 11:24:02 -07:00
9632c14551 Only warn once about invalid polygon encoding in tippecanoe-decode 2017-05-12 11:15:27 -07:00
7fe3de9cfa Merge pull request #416 from mapbox/tile-join-compression
Tile-join had not been compressing its tiles.
2017-05-11 12:51:14 -07:00
fb6551c59e Calculate the tileset bounding box in tile-join from the tile boundaries 2017-05-11 12:36:35 -07:00
30d54ff50d Tile-join had not been compressing its tiles. 2017-05-11 12:08:47 -07:00
685e1a4360 Merge pull request #412 from mapbox/polygon-closure
Enforce polygon winding and closure rules in tippecanoe-decode
2017-05-05 11:47:54 -07:00
922bef72c8 Fix array bounds error found by -fsanitize=address 2017-05-05 11:22:40 -07:00
9eb3a7f7ec Enforce polygon winding and closure rules in tippecanoe-decode 2017-05-05 10:56:50 -07:00
eab593fea5 Merge pull request #411 from mapbox/tile-join-name
Add tile-join options to set tileset name, description, attribution
2017-05-04 15:27:40 -07:00
f87a9d1660 Add tile-join options to set tileset name, description, attribution 2017-05-04 15:08:31 -07:00
92deeb58f9 Merge pull request #410 from mapbox/tile-join-name
Preserve the tileset names from the source mbtiles in tile-join
2017-05-04 14:17:52 -07:00
6ec453229c Preserve the tileset names from the source mbtiles in tile-join 2017-05-04 13:31:30 -07:00
24bde0be03 Fix formatting 2017-05-04 10:39:48 -07:00
263a1b9e5f Add sample filter to limit tiles to a bounding box 2017-05-04 10:25:53 -07:00
f9f57ebb3f Merge branch 'master' into plugins 2017-05-04 10:19:32 -07:00
ad0eba7ccd Merge pull request #405 from mapbox/too-much-splitting
Fix RFC 8142 support: Don't try to split *all* memory mapped files
2017-05-01 15:40:04 -07:00
acdb5b72db Fix RFC 8142 support: Don't try to split *all* memory mapped files 2017-05-01 15:27:55 -07:00
a0234923b7 Merge branch 'master' into plugins 2017-05-01 11:43:05 -07:00
1669d96510 Merge pull request #404 from mapbox/rfc8142
Support RFC 8142 GeoJSON text sequences
2017-04-28 16:56:40 -07:00
63e0c89c4b Support RFC 8142 GeoJSON text sequences 2017-04-28 16:39:16 -07:00
dc01d33402 Merge pull request #402 from mapbox/gitignore
Put everything normally generated during compilation in .gitignore
2017-04-24 08:29:56 -07:00
0ecc6fa1c5 Put everything normally generated during compilation in .gitignore 2017-04-23 15:31:57 -07:00
5a8bfa23a4 Merge branch 'master' into plugins 2017-04-21 16:18:00 -07:00
6173180cee Merge pull request #401 from mapbox/usage
Organize usage output the same way as in the README
2017-04-21 14:49:26 -07:00
569fd97475 Also generate the string of option letters instead of writing it again 2017-04-21 13:56:40 -07:00
6c7a52147b Organize usage output the same way as in the README 2017-04-21 13:48:39 -07:00
d2a8761483 Merge pull request #399 from mapbox/projection-warning
Try to be clearer in the warning message about projections
2017-04-18 14:02:07 -07:00
d19680e392 Try to be clearer in the warning message about projections 2017-04-18 13:24:16 -07:00
7dc586bc84 Merge branch 'master' into plugins 2017-04-17 16:01:12 -07:00
8b5168cb2b Merge pull request #398 from mapbox/attribute-types
Add -T option to coerce the types of feature attributes
2017-04-17 15:46:13 -07:00
178b5d0054 Add -T option to coerce the types of feature attributes 2017-04-17 15:20:03 -07:00
65ee2cf0a5 Merge pull request #397 from mapbox/guess-maxzoom
Add -zg option to automatically choose an appropriate maxzoom
2017-04-17 08:51:48 -07:00
242a657fc1 Fix array bounds error caught by -fsanitize=address 2017-04-13 14:23:43 -07:00
510247c2fb Add -zg option to automatically choose an appropriate maxzoom 2017-04-13 14:17:15 -07:00
e75bb4a16a Merge pull request #395 from mapbox/close-featurecollections
Clean up JSON parsing at the end of each FeatureCollection
2017-04-12 13:19:19 -07:00
198a5a06d4 Clean up JSON parsing at the end of each FeatureCollection 2017-04-12 12:45:35 -07:00
a0831cd71e Merge pull request #391 from mapbox/reformat-readme
Clean up documentation
2017-04-12 12:36:58 -07:00
f4677d5de1 Reorganize input and layer options 2017-04-12 10:16:43 -07:00
a394c9e93d Merge branch 'master' into reformat-readme 2017-04-11 17:31:22 -07:00
79b73e6317 Call out the most useful options at the start 2017-04-11 17:24:15 -07:00
ab300c2080 Forgot to format a few more options 2017-04-11 17:06:17 -07:00
5ec6ce1743 Continue to reorder and clarify options 2017-04-11 17:03:32 -07:00
63ea7bdc4e Try to reorder options into some more readable organization 2017-04-11 16:51:56 -07:00
bb365bde44 Reformatting README 2017-04-11 16:32:32 -07:00
7f918bd79b Merge pull request #390 from mapbox/layer-filter
Add tile-join options to include or exclude layers by name
2017-04-10 16:00:27 -07:00
a7be83381a Add tile-join options to include or exclude layers by name 2017-04-10 15:22:30 -07:00
94a15eaf85 Merge branch 'master' into plugins 2017-04-07 14:28:58 -07:00
da92b93b62 Merge pull request #389 from mapbox/joykuotw-master
Add --output-to-directory and --no-tile-compression options
2017-04-07 14:19:10 -07:00
f558b78380 Include tileset metadata when writing either to directory or mbtiles 2017-04-07 13:25:05 -07:00
1c1ae6a8be Update documentation and reduce use of global variables 2017-04-07 12:36:34 -07:00
b66ab10252 Add algorithms library 2017-04-07 17:03:30 +01:00
682a272f91 Add condition to prevent options -o and -e being used together 2017-04-07 16:24:29 +01:00
a320248857 Do make indent; add flag --output-to-directory or -e to write pbf files to a directory; also add flag --no-tile-compression or -pC to get raw protobuf 2017-04-07 13:33:01 +01:00
9fb1208b79 Fix indentation for rawtiles.cpp 2017-04-05 16:05:50 +01:00
29fa3d8541 Update the readme to add the flag 2017-04-05 16:02:57 +01:00
f03b89656c Separated out into a method and added a test for --raw-tiles 2017-04-05 15:59:37 +01:00
aee12ac1fe Add flag --raw-tiles or -pC to get raw protobuf 2017-04-05 00:34:54 +01:00
ae4f03d92b Add missing #include 2017-03-28 17:15:41 -07:00
7d47226444 Merge branch 'master' into plugins 2017-03-28 16:42:22 -07:00
834ba19277 Add option for newline-delimited output format to tippecanoe-decode 2017-03-28 16:25:40 -07:00
8777bdf5d3 Merge pull request #382 from mapbox/reconcile
Add --description option
2017-03-22 15:42:20 -07:00
1362ac448f Add --description option to set mbtiles description 2017-03-21 11:07:15 -07:00
fa56adc530 Clean up utility functions that are also used in other projects 2017-03-16 15:06:58 -07:00
eaff7b93c5 Merge branch 'master' into plugins 2017-03-15 16:19:33 -07:00
8c6f6250b1 Merge pull request #381 from mapbox/detect-wraparound
Add --detect-longitude-wraparound option
2017-03-15 16:10:20 -07:00
ea77e3db6f Add --detect-longitude-wraparound option 2017-03-15 13:37:50 -07:00
bbf9716361 Merge pull request #380 from mapbox/short-circuit-maxzoom
Stop processing zooms when a feature reaches its explicit maxzoom tag
2017-03-15 09:51:02 -07:00
9c53268a2b Stop processing zooms when a feature reaches its explicit maxzoom tag 2017-03-14 16:14:54 -07:00
860189a577 Merge pull request #378 from mapbox/no-polygon-splitting
Remove polygon splitting, since polygon cleaning is now fast enough
2017-03-02 15:03:52 -08:00
c46f4cfc75 Remove polygon splitting, since polygon cleaning is now fast enough 2017-03-02 12:08:08 -08:00
9cbbc4c68d Merge branch 'master' into plugins-merge 2017-03-02 10:45:25 -08:00
cef4d2cd47 Merge pull request #376 from mapbox/decode-layer
Add an option to decode only specified layers
2017-03-02 10:26:21 -08:00
3a1f074c96 Add test and documentation for tippecanoe-decode -l 2017-03-01 16:01:57 -08:00
835fafe30a Add an option to decode only specified layers 2017-02-27 10:10:28 -08:00
30a987d9ba Merge pull request #374 from mapbox/multilayer-crash
Clean up layer name handling to fix layer merging crash
2017-02-20 17:24:24 -08:00
218a2fc75e Clean up layer name handling to fix layer merging crash 2017-02-20 16:19:34 -08:00
638cfef2b6 Merge pull request #368 from mapbox/division-by-0
Fix division by 0 by using more general and robust polygon-checking code
2017-02-20 14:09:42 -08:00
5bdbb8a911 Bump version number 2017-02-20 11:47:01 -08:00
55e7e20a73 Add a check for environmental variable TIPPECANOE_MAX_THREADS 2017-02-17 16:47:21 -08:00
9c1f2b2123 Guard against potentially rounding to 0 when choosing sorting unit 2017-02-17 14:14:55 -08:00
1b68dcdc22 Fix division by 0 by using more general and robust polygon-checking code 2017-02-17 13:25:56 -08:00
1cc65d61ba Merge pull request #363 from mapbox/polygon-placeholder
Fix area of placeholders for degenerate multipolygons
2017-02-08 11:52:27 -08:00
62e1018c87 Fix area of placeholders for degenerate multipolygons 2017-02-06 14:14:34 -08:00
4517bd6e90 Test with faulty replacement of polygons with big squares 2017-02-06 14:10:50 -08:00
a1fe858151 Merge pull request #358 from mapbox/wagyu-030
Upgrade to Wagyu 0.3.0; downgrade C++ requirement to C++ 11
2017-01-26 15:26:20 -08:00
31f254ac99 Upgrade to Wagyu 0.3.0; downgrade C++ requirement to C++ 11 2017-01-26 13:28:17 -08:00
5c2106c0dc Merge branch 'master' into plugins 2017-01-25 16:38:03 -08:00
28b8f1c326 Merge pull request #355 from mapbox/decode-zoomlevels
Add minzoom and maxzoom options to tippecanoe-decode
2017-01-25 10:47:06 -08:00
c74a9a8325 Add minzoom and maxzoom options to tippecanoe-decode 2017-01-24 14:15:40 -08:00
47cb2434e6 Merge branch 'master' into plugins 2017-01-19 15:48:39 -08:00
c67b5f33bd Merge pull request #352 from mapbox/warn-id
Only warn once about non-numeric/non-integer/negative feature IDs
2017-01-19 10:04:11 -08:00
5a8f9f11c0 Only warn once about non-numeric/non-integer/negative feature IDs 2017-01-18 14:26:17 -08:00
b3847c1da1 Merge pull request #350 from mapbox/c++-readme
Add instructions for upgrading g++ on Linux
2017-01-17 15:06:32 -08:00
801c5d6574 Add instructions for upgrading g++ on Linux 2017-01-17 10:38:40 -08:00
7b5069f2f6 Start writing some documentation 2017-01-13 15:57:52 -08:00
150e3663e1 Pass the index, sequence, and extent through the prefilter when needed 2017-01-13 14:59:11 -08:00
aa75f4a4c4 Merge branch 'master' into plugins 2017-01-12 16:03:55 -08:00
a00903ecc6 Merge pull request #347 from mapbox/wagyu-quick-clip
Use Wagyu's quick_clip
2017-01-12 14:20:32 -08:00
72086b7e92 Upgrade Wagyu to cfc895 and use its Sutherland-Hodgman implementation 2017-01-11 15:43:07 -08:00
2fdec7d2e4 Try using Wagyu's quick_clip 2017-01-06 16:24:09 -08:00
3549aa35e8 Merge branch 'master' into plugins 2017-01-05 15:02:32 -08:00
859202165d Merge pull request #345 from andrewharvey/patch-2
Include build-essential in Linux prerequisites
2017-01-05 10:15:16 -08:00
ece768bfe4 include build-essential in Linux prerequisites 2017-01-05 14:49:11 +11:00
c1ec437756 Merge pull request #344 from mapbox/wagyu-bfbf2893
Upgrade Wagyu to bfbf2893
2017-01-04 17:16:17 -08:00
bdac4d4e80 Upgrade Wagyu to bfbf2893 2017-01-04 16:33:43 -08:00
bf619a315c Merge branch 'master' into plugins 2017-01-03 13:25:05 -08:00
e53c8ed7e6 Merge pull request #343 from mapbox/attributes-for-named-layers
Associate attributes with the right layer when explicitly tagged (#342)
2017-01-03 13:24:37 -08:00
c87d6eadfc Associate attributes with the right layer when explicitly tagged (#342) 2017-01-03 12:20:52 -08:00
8e17c3aa09 Update the layer list outside of the property loop 2017-01-02 16:33:44 -08:00
cf6a2d3a67 Provide layer names to postfilter and read layer names back in 2016-12-21 11:47:49 -08:00
71ac6596af Warn about broken pipes in filters instead of exiting abruptly 2016-12-21 10:10:22 -08:00
6a5461763c Fix reordering of attributes and failure to update layer name table 2016-12-20 16:41:23 -08:00
a2060299c9 Fix arithmetic overflow that was breaking some prefilter polygons 2016-12-20 15:59:10 -08:00
854dc2bca5 Merge branch 'master' into plugins 2016-12-20 14:15:06 -08:00
8ab5bb4809 Merge pull request #341 from mapbox/choose-first-tile
Choose a deeper initial tile than 0/0/0 if one contains all the features
2016-12-16 14:15:42 -08:00
6a2e80769e Choose a deeper initial tile than 0/0/0 if one contains all the features 2016-12-16 12:22:38 -08:00
78d91b3fde Merge pull request #333 from mapbox/wagyu-clean
Switch polygon topology correction from Clipper to Wagyu
2016-12-14 16:13:31 -08:00
275d25739d Update to wagyu-0.2.1 (eec53a6) 2016-12-14 14:14:55 -08:00
4fc671f1d1 Merge branch 'master' into plugins 2016-12-14 11:29:58 -08:00
b3c116b989 Merge branch 'master' into wagyu-clean 2016-12-14 11:12:07 -08:00
5549789da5 Merge pull request #338 from mapbox/warnings
Fix warnings identified by g++
2016-12-14 11:11:29 -08:00
56e1f55bb6 Fix warnings identified by g++ 2016-12-13 17:54:55 -08:00
3e4fcd22ec Merge pull request #337 from mapbox/sanitize-integer
Fix integer overflow identified by -fsanitize=integer
2016-12-13 17:28:27 -08:00
d7d5bed781 Remove duplicate vertices before calling wagyu 2016-12-13 16:55:33 -08:00
af3d48e5b3 Fix integer overflow identified by -fsanitize=integer 2016-12-13 16:19:38 -08:00
ce3ffee8ff Merge pull request #336 from mapbox/tile-join-fixes
Don't allow two attributes with the same name, and strip \r from CSV.
2016-12-13 15:24:15 -08:00
f928133993 Don't allow two attributes with the same name, and strip \r from CSV. 2016-12-13 14:35:51 -08:00
c2fa8e3633 Switch the segment and layer ID based on prefilter output 2016-12-12 17:00:45 -08:00
ad4060eced Fix coordinate rounding error in the prefilter 2016-12-12 16:12:22 -08:00
9c0e2cdfa7 Fill out layermaps when reading the output of the prefilter 2016-12-12 16:08:08 -08:00
5e7f718afc Fill out layermaps when reading the output of the postfilter 2016-12-12 15:21:05 -08:00
57ff54e683 Fix coordinate overflow by increasing integer size 2016-12-09 16:35:41 -08:00
5dc773ffae Carry attribute keys and values through from the prefilter 2016-12-09 15:54:47 -08:00
0e5b513637 Start getting features (just geometry so far) back from the prefilter 2016-12-09 15:35:57 -08:00
a338f5390f Fix where I was closing the prefilter pipe in the wrong thread 2016-12-09 14:15:17 -08:00
c8a8915064 Push prefilter writing into a thread (but something is crashing) 2016-12-09 14:01:07 -08:00
9f10f48bfb Pull feature deserialization and rewriting out of the loop 2016-12-09 11:53:50 -08:00
5194a39c16 Factor out clipping to tile boundaries; test random attributes & layers 2016-12-09 10:47:03 -08:00
daf1941ba9 Add missing #include 2016-12-08 17:22:07 -08:00
569825324a Factor out feature deserialization 2016-12-08 17:11:37 -08:00
16df86c26e Set up and tear down the prefilter pipeline 2016-12-08 16:13:02 -08:00
d940eb1cef Factor out filter setup from the reading and writing loops 2016-12-08 15:46:12 -08:00
d1dc310bbc The GeoJSON-producing part of prefiltering 2016-12-08 15:13:38 -08:00
e3823c966c Use more idiomatic C++ to quote JSON strings 2016-12-08 14:02:06 -08:00
5960a15fcd Add magic #defines to avoid default small Mac stdio limits 2016-12-08 12:33:02 -08:00
6530e155eb Don't put a comma between features in filter output 2016-12-08 11:14:06 -08:00
8cf81483b1 Add missing #include 2016-12-08 10:31:27 -08:00
3f14a0dd55 Factor out conversion from JSON types to vector tile attribute types 2016-12-07 16:17:17 -08:00
a4f5406cfb Switch polygon topology correction from Clipper to Wagyu 2016-12-07 14:29:09 -08:00
22cb5186d7 Copy of wagyu-0.2.0 (b3fa127) 2016-12-07 14:14:56 -08:00
c0d5171f1d Copy of geometry.hpp-0.9.0 (e3acceb) 2016-12-07 14:11:53 -08:00
89bfd27a8b Copy of variant-1.1.4 (02bd1ac) 2016-12-07 14:09:51 -08:00
5554b9cbba Add the command-line option to specify the filter 2016-12-07 12:15:57 -08:00
a114a890d8 Keep from leaking other pipe file descriptors to unrelated children 2016-12-07 11:26:03 -08:00
58e268777c Missed a file for the close-on-exec flag 2016-12-07 11:19:29 -08:00
3d1ceac96a Lock around setup of pipeline and filter process 2016-12-07 11:16:34 -08:00
87d86ecfc9 Set close-on-exec flag for most file descriptors 2016-12-07 10:57:56 -08:00
7514797c4c Add missing #includes 2016-12-06 17:25:23 -08:00
d8fe69a99e Round coordinates instead of truncating to avoid projection error 2016-12-06 17:19:22 -08:00
679189e5a2 Parse JSON coming back in and turn it back into features 2016-12-06 17:19:22 -08:00
c82e3e98c3 Factor out parsing the geometry coordinate array 2016-12-06 17:19:22 -08:00
4256473283 More reorganization to reuse JSON parsing 2016-12-06 17:19:22 -08:00
112f451c66 Move JSON-writing again to keep it from requiring all plugin code 2016-12-06 17:19:22 -08:00
72478ae13e Be more consistent about checking for errors from close() 2016-12-06 17:19:22 -08:00
94bebbd276 Write GeoJSON to the filter and read (but don't parse) what comes back 2016-12-06 17:19:22 -08:00
adfceed554 Factor out conversion from vector tile to GeoJSON 2016-12-06 17:19:22 -08:00
bdd2632fb7 clean all binary programs 2016-12-06 16:52:37 -08:00
785d341cde Merge pull request #331 from mapbox/no-dropping-if-zoom-tagged
Dot-dropping doesn't apply if there is a per-feature minzoom tag
2016-12-06 16:03:00 -08:00
e8c5759f70 Dot-dropping doesn't apply if there is a per-feature minzoom tag 2016-12-06 15:36:10 -08:00
afa782fb7f Merge pull request #328 from mapbox/grid-snap-rounding
Round coordinates in low-zoom grid instead of truncating
2016-11-30 14:31:23 -08:00
f6d5d1803b Round coordinates in low-zoom grid instead of truncating
To avoid rounding error if features are already tile-grid-aligned
2016-11-29 15:18:44 -08:00
ab9eafa0b7 Merge pull request #325 from mapbox/gridded
Option to snap low zooms to a stairstep grid
2016-11-29 13:59:15 -08:00
f1ff7301c1 Add a test for grid snapping 2016-11-28 16:32:54 -08:00
0db8d9ed8b Option to snap low zooms to a stairstep grid 2016-11-28 15:45:38 -08:00
c68d553e94 Merge pull request #323 from mapbox/point-area
Stop --drop-smallest-as-needed from always dropping all points
2016-11-23 15:10:25 -08:00
c867ce5f32 Stop --drop-smallest-as-needed from always dropping all points 2016-11-23 13:57:32 -08:00
2d022c6c57 Merge pull request #312 from mapbox/dynamic-gamma
Add an option to dynamically increase gamma until tiles are small enough
2016-11-21 14:33:23 -08:00
bedace67e7 Remove --merge-polygons-as-needed until it works well. Bump version. 2016-11-21 13:36:31 -08:00
e9aa8c1b7d Add an option to prevent tiny polygon reduction 2016-11-21 11:26:37 -08:00
dc9e68b128 Make feature-dropping option names a little more consistent 2016-11-17 12:40:11 -08:00
5479e59aa9 Save another byte in features that have no metadata 2016-11-17 10:11:59 -08:00
439b544c8c Don't serialize the feature sequence number unless needed for -pi 2016-11-16 17:35:25 -08:00
a69a087b08 Merge pull request #319 from jingsam/patch-1
Create .gitignore
2016-11-16 10:25:26 -08:00
eb1c724fc4 Create .gitignore 2016-11-16 16:10:41 +08:00
9ea29601ab Don't let line simplification reduce a polygon ring to below 3 points 2016-11-14 11:22:21 -08:00
32aa653082 Don't spend geometry space on index or extent unless it is needed 2016-11-11 17:37:46 -08:00
013e6512b4 Add an option to drop the smallest features to make tiles small enough 2016-11-09 17:09:05 -08:00
38ce49d2d4 Another option for plain fractional dropping, but across the whole zoom 2016-11-04 12:26:13 -07:00
ee48be26e0 Rename -k to -M before I regret that the unit for it is not kilobytes 2016-11-04 11:15:53 -07:00
700ca489c7 Fix flakiness: don't decrease the global feature gap 2016-11-03 17:49:32 -07:00
a8bdbe8012 Fix the progress indicator when doing two passes over each zoom level 2016-11-03 17:13:11 -07:00
689f2ef7e9 Make better guesses about what gap will make a tile small enough 2016-11-02 16:57:35 -07:00
2e3ba8f374 Retain original feature index rather than recalculating
For better density calculation of clipped features
2016-11-02 15:11:22 -07:00
d0db3323fb Binary search to find gap that leaves the desired fraction of features 2016-11-02 14:25:04 -07:00
faa5720e3b Remove dead code 2016-11-01 16:31:45 -07:00
2e026f9b3b Use float instead of double in tiles if the value comes out the same 2016-10-27 14:37:46 -07:00
0834626d63 More careful JSON parsing thanks to http://seriot.ch/parsing_json.html 2016-10-27 11:39:50 -07:00
80e5159144 Remove dead code 2016-10-26 16:00:59 -07:00
7e6aa19d42 Make sure memfile growth gets tested 2016-10-26 15:57:24 -07:00
8296190487 Remove leftovers from early versions of --detect-shared-borders 2016-10-26 15:47:26 -07:00
93d3c40593 Remove dead code 2016-10-26 15:43:46 -07:00
9fbc7b9a55 Add an ignored third coordinate to improve code coverage 2016-10-26 15:33:46 -07:00
7727b3a92c Start each tile's gamma/mingap at the same point to help flaky tests 2016-10-26 10:40:53 -07:00
565b5dc6b4 Add a test of discovering the minimum workable feature spacing 2016-10-25 16:39:41 -07:00
10fc9254d1 Add an option to discover the minimum workable gap between features 2016-10-25 16:28:32 -07:00
db859e8801 Reorder which tile-shrinking strategy to try first 2016-10-25 15:21:00 -07:00
667e8f7a29 Use the same discovered gamma across all tiles of a zoom level 2016-10-25 14:13:55 -07:00
feb8ac0165 Merge pull request #313 from andrewharvey/patch-1
update clean target to remove all built files
2016-10-25 10:03:43 -07:00
4b3fc4aebe Track the new identity of merged polygons so they can be merged further 2016-10-25 10:01:18 -07:00
1594a09eb5 update clean target to remove all built files 2016-10-25 16:17:49 +11:00
057d7b759c Iterate over arcs, not polygons, when merging 2016-10-24 17:58:26 -07:00
2798bf7b6f Add an option to merge adjacent polygons together to reduce tile size 2016-10-24 17:22:07 -07:00
f32916c472 Tests of the three current strategies for reducing tile size 2016-10-24 15:33:14 -07:00
7cb7fc4913 Fix where I was inserting the copied ring in backwards order 2016-10-24 15:06:57 -07:00
3cc95231ec Work on merging together adjacent polygons to reduce tile size 2016-10-24 15:06:49 -07:00
83e73e8840 Add an option to dynamically increase gamma until tiles are small enough 2016-10-24 12:29:36 -07:00
1cfc58267e Merge pull request #310 from mapbox/consistent-dropping
Make feature dropping consistent across tile boundaries and zoom levels
2016-10-18 15:28:31 -07:00
e1655941cc Add a test where the base zoom is beyond the max zoom 2016-10-17 13:53:52 -07:00
17cd74d7e4 Remove warning flag that gcc doesn't support 2016-10-17 13:29:39 -07:00
92cc08a554 Fix some compiler warnings about signed comparisons 2016-10-14 17:11:57 -07:00
eb1c64db27 Fix use of 32-bit zigzag encoding/decoding for 64-bit integers 2016-10-14 15:48:35 -07:00
1f38e85f30 Bump version number and fix out-of-date documentation 2016-10-14 15:42:01 -07:00
82377944ee Merge branch 'master' into consistent-dropping 2016-10-14 15:35:35 -07:00
2a856b49bd Merge pull request #302 from mapbox/simplify-polygons-together
Find edges shared between polygons and simplify them individually
2016-10-14 15:34:25 -07:00
71bf20b205 Remove debug output 2016-10-14 12:30:54 -07:00
feb9b4481a Stabilize edge list order by also comparing ring IDs 2016-10-14 12:27:24 -07:00
9160e6add5 Guard against uninitialized variables 2016-10-14 12:19:56 -07:00
a5d803aa9a Lots of debug output to try to track down Mac/Linux differences 2016-10-14 12:15:23 -07:00
6aee0d39df Merge pull request #309 from mapbox/enumerate-test
There should be a test of tippecanoe-enumerate
2016-10-13 10:25:00 -07:00
bbfc7c677e There should be a test of tippecanoe-enumerate 2016-10-12 16:27:05 -07:00
abac4f2b85 Merge pull request #308 from mapbox/deps
Autogenerate header dependencies
2016-10-12 16:09:19 -07:00
b26e6a5a4e This should have been linking as C++, not C 2016-10-12 14:07:41 -07:00
c4ee5d3e69 Autogenerate header dependencies
Following http://scottmcpeak.com/autodepend/autodepend.html
2016-10-12 13:21:29 -07:00
2e9971c6d5 Remake test standards after inspection 2016-10-12 10:41:01 -07:00
081e330845 Reset counter with each tile, to match old low-zoom feature density 2016-10-12 09:49:25 -07:00
81d8fe21f8 Fix structure packing to fix polygon dropping 2016-10-11 17:47:53 -07:00
948ea138bb Do a fixup pass on the feature minzooms after base/rate calculation 2016-10-11 17:24:22 -07:00
9d37bd104a Keep start/end in the merged index pointing to final geometry offsets 2016-10-11 15:13:27 -07:00
08310d9564 Forgot this other place where final geometry can be written out 2016-10-11 12:42:20 -07:00
d381f5a9e1 Make the feature counter global, not reset with each merge phase 2016-10-11 12:05:50 -07:00
5ab41417fc Calculate feature-dropping (except gamma) during geometry reordering 2016-10-10 17:15:33 -07:00
c8a1b082e0 Don't serialize the per-feature minzoom until geometry merging time 2016-10-10 15:31:09 -07:00
896c9d8398 Rename option to --detect-shared-borders and add a test 2016-10-06 16:16:51 -07:00
7258643d5a A sorted list is smaller and faster than a map of sets 2016-10-05 17:16:18 -07:00
4c1b135848 Save a little time by only looking up each segment once 2016-10-05 17:16:18 -07:00
46b634ce46 Remember to close the polygon 2016-10-05 17:16:18 -07:00
6455b6633e Remember to honor --no-line-simplification if it was requested 2016-10-05 17:16:18 -07:00
938274aa91 Remove more debugging code 2016-10-05 17:16:18 -07:00
2b4280695f Extract common edges, simplify, then reassemble polygon rings 2016-10-05 17:16:18 -07:00
d35dc4936a If a point divides one border, it divides any that touch there 2016-10-05 17:16:18 -07:00
57cc343855 Most of the way toward TopoJSON-style factoring out of edges 2016-10-05 17:16:18 -07:00
91bfc2ca89 Choose a consistent starting point for rings with no shared edges 2016-10-05 17:16:18 -07:00
59619fb6cd Crunch out zero-length linetos to improve the match 2016-10-05 17:16:18 -07:00
6b4076684c Find a common-edge transition if it's the first point of the ring 2016-10-05 17:16:18 -07:00
2bc9e15975 Roll rings around so they start at an intersection transition, if any 2016-10-05 17:16:18 -07:00
f7daa05515 Add a critical point where the set of rings using a polygon edge changes 2016-10-05 17:16:18 -07:00
04157e7728 Merge pull request #304 from mapbox/utf8-check
Enforce that string feature attributes must be encoded as UTF-8
2016-10-05 15:54:58 -07:00
d4d966893c Forgot to test the emoji case 2016-10-05 15:01:47 -07:00
9806db3c0a Make UTF-8 checking into a unit test with Catch 2016-10-05 14:55:32 -07:00
ef38318a6d Enforce that string feature attributes must be encoded as UTF-8 2016-10-04 16:43:31 -07:00
dc86eb6b5a Merge pull request #299 from mapbox/tile-join-whitespace
Trim whitespace after commas in tile-join .csv input
2016-09-21 13:30:28 -07:00
32f32e45b6 Trim whitespace after commas in tile-join .csv input 2016-09-21 12:32:03 -07:00
4912f4ad08 Merge pull request #298 from mapbox/tile-join-merge
Give tile-join the ability to merge multiple tilesets
2016-09-20 16:53:46 -07:00
083a280659 Fix build errors on Linux (pthread library, signed comparison) 2016-09-20 16:01:10 -07:00
4fb54eaeeb A test of layer merging, tile merging, and extent scaling 2016-09-20 15:53:10 -07:00
4ba98062d6 Recover memory from the pre-joined tiles after joining 2016-09-20 14:19:40 -07:00
87e4a338f6 Essentials of multithreaded tile-joining 2016-09-20 12:59:04 -07:00
021d792d33 Getting ready for multithreaded tile-joining 2016-09-20 11:04:24 -07:00
6c74f4a1cd Keep tile data in a string instead of a pointer into the query 2016-09-20 10:17:02 -07:00
437152e02b Track the minzoom and maxzoom for each layer separately 2016-09-19 17:53:31 -07:00
d7037f3d3a Add tile-join -pk option not to care about byte limit. Update docs. 2016-09-19 17:20:44 -07:00
232056c0da Make the global per-layer list of attribute types 2016-09-19 16:53:41 -07:00
470c0e2b5c Remove debug output 2016-09-19 16:36:38 -07:00
7a30aeaa6b Fix memory leak 2016-09-19 16:34:06 -07:00
8d57f031ee Rescale geometry if layer extents don't match 2016-09-19 16:25:30 -07:00
a37fc361c2 Merge tiles and layers. Differing extents not handled yet. 2016-09-19 16:02:14 -07:00
47288ec05f The query-merge part of multi-source tile joining 2016-09-19 15:29:13 -07:00
c20eab972b Merge pull request #296 from esamelson/patch-1
Fix typo in README.md for --drop-rate option
2016-09-06 12:53:14 -07:00
d130ca5d55 Fix typo in README.md for --drop-rate option 2016-09-06 12:00:35 -07:00
b84b2b066d Merge pull request #294 from mapbox/named-layer
Add the ability to specify layer names within the GeoJSON input
2016-08-30 16:56:19 -07:00
965f4c225c Bump version number 2016-08-30 15:36:22 -07:00
4dd3411c64 Merge branch 'master' into named-layer 2016-08-30 15:33:51 -07:00
76739fd27b Fix a typo so it actually works, and add a test and documentation 2016-08-30 15:32:09 -07:00
d8ba9db386 Remove unused layer count and layer name list 2016-08-30 15:09:18 -07:00
1bed572350 Rename variables for clarity 2016-08-30 15:05:33 -07:00
1f53491009 Tile by layer names instead of by layer numbers 2016-08-30 14:59:53 -07:00
3e881a428c Make a reverse-mapping table from layer IDs to names 2016-08-30 14:38:30 -07:00
d490d8475e Remove unused layer count and layer name list 2016-08-30 14:17:28 -07:00
d4e1ee0627 Replace malloc/free with new/delete to fix parallel-reading crash 2016-08-30 14:02:51 -07:00
31d2a3738a Get rid of the old file_keys 2016-08-30 13:46:37 -07:00
531c238c5b Pass the layer maps through into tiling 2016-08-29 17:42:46 -07:00
7f49ce5caa Merge per-thread layer names and file keys 2016-08-29 16:38:57 -07:00
c26fa23564 Per-thread layermap will contain the file keys, not just a layer ID 2016-08-29 14:59:28 -07:00
5a8a7216cb Merge pull request #293 from mapbox/simplification-readme
Fix spelling of --simplification in documentation
2016-08-29 12:12:32 -07:00
7f4ef43113 Fix spelling of --simplification in documentation 2016-08-29 11:56:30 -07:00
6a7a139170 Merge pull request #288 from mapbox/bad-polygon-revival
Don't try to revive a placeholder for a degenerate polygon with negative area
2016-08-25 13:45:12 -07:00
0d1931319c Use simpler calculation to intersect polygon edges with tile edges.
Add the polygon that produced a bad tile with the previous version
as a test.
2016-08-24 15:32:48 -07:00
9161b74d99 Don't try to revive a placeholder for a degenerate polygon with negative area 2016-08-24 12:34:28 -07:00
f7e64dca5f Work in progress on being able to specify per-feature layer names 2016-08-23 15:33:53 -07:00
85fd49f28c Merge pull request #284 from mapbox/join-id
Pass feature IDs through in tile-join
2016-08-16 17:07:14 -07:00
c85303aada Pass feature IDs through in tile-join 2016-08-16 13:21:15 -07:00
938e6a9eea Merge pull request #280 from mapbox/fragment-geometry
Move polygon winding fixup out of tiling and into parsing
2016-08-15 14:04:06 -07:00
b2aa6de898 Provide some JSON context when reporting parsing errors 2016-08-15 13:11:35 -07:00
be6e3c88d4 Merge pull request #281 from mapbox/dataset
Don't say "dataset"
2016-08-10 14:17:36 -07:00
9ef9954d20 Don't say "dataset" 2016-08-10 12:08:15 -07:00
475ce9dd23 Fix g++ compiler warnings 2016-08-08 17:14:48 -07:00
8b339abd40 Use the same serialization in both places, and add sanity checks 2016-08-08 17:08:36 -07:00
bf571571a9 Factor out (initial) feature serialization 2016-08-08 15:36:49 -07:00
6de7920c9e Separate data gathering from serialization a little better 2016-08-02 14:53:30 -07:00
84370c59b8 Get rid of some more explicit memory management 2016-08-02 14:07:56 -07:00
67272bfe4a Move polygon winding fixup out of tiling and into parsing 2016-08-01 17:35:37 -07:00
6d6c1abc64 Parse GeoJSON geometry into memory instead of straight to a file 2016-08-01 14:29:30 -07:00
6f58d31bc8 Merge pull request #279 from mapbox/fix-preserve
Fix the spelling of the --preserve-input-order option
2016-08-01 11:04:08 -07:00
e760521382 Fix the spelling of the --preserve-input-order option 2016-08-01 11:03:35 -07:00
1b1b745419 Merge pull request #276 from mapbox/feature-id
Fix wrongly-nested parentheses
2016-07-15 15:40:46 -07:00
10b9af81d3 Fix wrongly-nested parentheses 2016-07-15 15:32:37 -07:00
7c664dad09 Merge pull request #275 from mapbox/feature-id
Encode the "id" field of GeoJSON objects as vector tile feature ID
2016-07-15 15:25:42 -07:00
488dff0efb Encode the "id" field of GeoJSON objects as vector tile feature ID 2016-07-15 15:00:56 -07:00
2bc1b9bd91 Support feature IDs for decoding 2016-07-15 13:58:15 -07:00
a1f8564631 Merge pull request #273 from mapbox/empty-parallel
Fix error checking when reading empty files with parallel input
2016-07-13 16:00:45 -07:00
f9609302a9 Bump version number 2016-07-13 13:50:45 -07:00
92e323435e Fix error checking when reading empty files with parallel input 2016-07-13 13:00:38 -07:00
26e21b0bee Merge pull request #271 from mapbox/empty-features
Test that an empty feature is no longer generated (here in tile 11/328/791)
2016-07-12 17:48:43 -07:00
9908db5e56 Add an option to vary the level of line and polygon simplification 2016-07-12 16:51:56 -07:00
cb45f1c3bd That's still not right. Don't generate a tile with no layers. 2016-07-12 15:56:57 -07:00
666729e344 Test that an empty feature is no longer generated (here in tile 11/328/791) 2016-07-12 15:52:02 -07:00
09ab013461 Merge pull request #270 from mapbox/empty-features
Be even more careful not to produce features with empty geometry
2016-07-11 20:46:24 -07:00
d127c43566 Be even more careful not to produce features with empty geometry 2016-07-11 17:45:12 -07:00
85b27d3a49 Merge pull request #269 from mapbox/fix-progress
Fix double-counted progress in the progress indicator
2016-07-08 16:13:19 -07:00
40f2f61d98 Fix double-counted progress in the progress indicator 2016-07-08 15:49:59 -07:00
6bc9c5c18a Merge pull request #265 from mapbox/decode-projection
Add the ability to tippecanoe-decode into EPSG:3857 instead of WGS84
2016-06-28 16:32:10 -07:00
32fed3b78a Add the CRS to the tippecanoe-decode output if nonstandard 2016-06-28 15:29:37 -07:00
5d06f01e28 Add the ability to tippecanoe-decode into EPSG:3857 instead of WGS84 2016-06-28 15:18:27 -07:00
e48e2152cc Merge pull request #263 from andrewharvey/master
add new line to end of Usage information
2016-06-20 10:00:15 -07:00
e31908bfb4 add new line to end of Usage information 2016-06-20 16:52:12 +10:00
d352cd0fee Merge pull request #262 from mapbox/tile-join-version
Fix the tile layer version number in tile-join output
2016-06-16 13:12:33 -07:00
f552e6951d Missed one test 2016-06-16 13:00:55 -07:00
ae50abefc4 Add layer version and extent to tippecanoe-decode output and test standards 2016-06-16 12:50:08 -07:00
0975c91670 Rebuild documentation 2016-06-16 12:37:46 -07:00
850a36b2f6 Update clang-format options for clang-format 3.9.0 2016-06-16 12:33:38 -07:00
c40ec6c194 Fix the tile layer version number in tile-join output 2016-06-16 12:31:48 -07:00
d170ebc312 Merge pull request #259 from mapbox/tile-join-x
Fix a tile-join bug that would retain fields that were supposed to be…
2016-06-10 16:13:12 -07:00
8cae9971e8 Forgot to fix this test output 2016-06-10 15:59:37 -07:00
3d023f34d3 Fix a tile-join bug that would retain fields that were supposed to be excluded 2016-06-10 15:53:59 -07:00
864440e9a9 Merge pull request #255 from mapbox/clang-mason
Try to use Mason builds for clang
2016-06-06 17:35:19 -07:00
d18f93df4c Add libstdc++ packages 2016-06-06 17:27:32 -07:00
cf4a51c819 Try to use Mason builds for clang 2016-06-06 17:21:29 -07:00
7c0c8e0434 Merge pull request #253 from mapbox/epsg-3857
Add minimal support for alternate input projections
2016-06-01 17:20:19 -07:00
de46cfa798 Do the more common check first. Forgot to swap these max/min pairs. 2016-06-01 17:09:20 -07:00
a504840bd5 Warn if the GeoJSON specifies a different projection 2016-06-01 16:55:52 -07:00
2578781a37 Forgot to check in the input file for the test 2016-06-01 16:08:51 -07:00
e1427ab9e4 Fix the check for an unsupported projection 2016-06-01 15:51:55 -07:00
af412e2038 Add minimal support for alternate input projections 2016-06-01 15:49:41 -07:00
5da636ba23 Merge pull request #252 from mapbox/glow-map2
Add an option to calculate feature density as a feature attribute
2016-05-27 17:34:27 -07:00
20b0fe1a52 Revert unneeded change 2016-05-27 16:35:46 -07:00
9d0a41521f Forgot to add the test 2016-05-27 16:33:56 -07:00
692112ec3a Add an option to calculate feature density as a feature attribute 2016-05-27 16:25:40 -07:00
4030cc7c58 Merge pull request #245 from mapbox/inline-meta2
Add the ability to inline metadata with geometry
2016-05-25 11:53:57 -07:00
b5c5d9dad6 Merge pull request #249 from mapbox/decode-check-error
Fail gracefully if input to tippecanoe-decode isn't a vector tile
2016-05-25 11:53:41 -07:00
1d636e5c0d Consistently treat "prevent" and "additional" options as globals 2016-05-25 11:38:52 -07:00
e253bbfe1d Round upward to catch narrow-but-tall or wide-but-short features 2016-05-25 11:08:04 -07:00
fa7a52d032 Bump version number 2016-05-23 17:44:41 -07:00
4638c6f273 Merge branch 'master' into inline-meta2
Conflicts:
	tile.cpp
2016-05-23 15:57:28 -07:00
0f02e9fa95 Fix uninitialized variable 2016-05-23 15:45:55 -07:00
bba1c13b07 Warn if a feature that won't be clipped covers several tiles 2016-05-20 17:50:20 -07:00
f03fbdb5c1 Fail gracefully if input to tippecanoe-decode isn't a vector tile 2016-05-17 15:43:42 -07:00
b80081ec38 Merge pull request #247 from mapbox/less-memory
Pack structures tighter to use a little less memory
2016-05-13 15:59:07 -07:00
331deca4b4 Use bitfields to further reduce data structure size 2016-05-13 15:45:33 -07:00
f0e90620e4 A little more structure packing 2016-05-11 14:47:23 -07:00
c0edefa721 Pack structures tighter to use a little less memory 2016-05-11 14:23:39 -07:00
cc4a2736ea Merge pull request #243 from mapbox/z0-extent
Fix clipping extent at z0, and area calculation in tippecanoe-decode
2016-05-10 16:55:39 -07:00
3655a54d22 Add the ability to inline metadata with geometry 2016-05-10 16:46:45 -07:00
167f3c59ef Missed updating this test output 2016-05-10 16:39:54 -07:00
2a5e2091f3 Mention decoding fix in changelog 2016-05-10 13:29:14 -07:00
24327e195f Fix area calculation for polygon rings in tippecanoe-decode 2016-05-10 12:13:03 -07:00
4f01b13fe1 Let zoom level 0 have just as much extent and buffer as any other zoom 2016-05-09 16:01:10 -07:00
f920c05c75 Merge pull request #241 from mapbox/save-polygons
Don't let polygons with nonzero area disappear during cleaning
2016-05-05 14:55:52 -07:00
5cc6d97d9f Don't let polygons with nonzero area disappear during cleaning
If they collapse, turn them into placeholder squares with the
appropriate area so that there aren't visible coverage gaps.
2016-05-05 13:42:32 -07:00
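A hedged sketch of the placeholder idea described above (the point type, naming, and centering choice are assumptions, not the project's code): replace a collapsed ring with a closed, axis-aligned square whose area matches the original, so the covered area doesn't visibly disappear.

```cpp
// Assumed illustration: build a closed square ring of roughly the given area,
// centered on a representative point of the collapsed polygon.
#include <cmath>
#include <vector>

struct Point {
    long long x;
    long long y;
};

std::vector<Point> placeholder_square(Point center, double area) {
    long long half = (long long) std::llround(std::sqrt(area) / 2.0);
    if (half < 1) {
        half = 1;  // keep at least a minimal visible extent
    }
    return {
        {center.x - half, center.y - half},
        {center.x + half, center.y - half},
        {center.x + half, center.y + half},
        {center.x - half, center.y + half},
        {center.x - half, center.y - half},  // repeat the first point to close the ring
    };
}
```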
fa523bff2b Merge pull request #235 from mapbox/malloc
Remove more uses of malloc; fix more warnings
2016-05-03 17:20:39 -07:00
0bd06c6d82 Merge branch 'master' into malloc 2016-05-03 17:06:52 -07:00
808de5378b Bump version number 2016-05-03 16:47:29 -07:00
1ce3f950b9 Merge pull request #236 from mapbox/enable-sanitizers
Add linux jobs that run address and integer sanitizers
2016-05-03 16:43:02 -07:00
0f1d2e4220 Fix additional g++ warnings 2016-05-03 16:39:26 -07:00
43ffd6fe11 Fix the warnings about the unused array of option names 2016-05-03 16:34:19 -07:00
ab3835d249 [travis] add linux jobs that run address and integer sanitizers 2016-05-03 16:18:44 -07:00
f1b3f6d231 Fix warnings about shadowed variables 2016-05-03 15:48:42 -07:00
5775d088eb Get rid of the strdup for attribution 2016-05-03 11:40:36 -07:00
9fcd079084 Add a test from the tile-join example 2016-05-03 11:30:53 -07:00
d712edcdc9 Stop using strdup for tile-join matching 2016-05-03 11:14:09 -07:00
271ec3d154 Stop using malloc for layer names 2016-05-03 10:52:49 -07:00
68c3bafab0 Merge pull request #234 from mapbox/clipper-update
Update clipper to 9edc2924e39:
2016-05-02 16:31:10 -07:00
1aac686670 Update clipper to 9edc2924e39:
commit 9edc2924e39266d70774b0ed0e07329a95e76f10
Author: Blake Thompson <flippmoke@gmail.com>
Date:   Mon May 2 14:45:21 2016 -0400

	Updated to prevent a segfault in the case where a specific iterator was being deleted
	and therefore the second element of the range no longer existed.
2016-05-02 16:23:08 -07:00
364450ad4c Merge pull request #233 from mapbox/overflow
Make better use of the C++ standard library
2016-04-28 16:41:18 -07:00
023ce03672 Fix indentation and bump version number 2016-04-28 15:11:57 -07:00
adc70341ad Use std::set to track the layer-wide feature attribute types.
Track them during parsing, not tiling.  Remove the old string pool code.
2016-04-28 14:59:58 -07:00
87b90a5033 Use std::set to track included and excluded feature properties 2016-04-28 12:57:03 -07:00
f75d9e0dd4 Avoid needlessly constructing a temporary string 2016-04-28 12:46:40 -07:00
444de1f086 Get rid of malloc around layer names 2016-04-28 12:20:51 -07:00
3f0904cce8 Use std::string instead of malloc strings to make mbtiles metadata 2016-04-28 11:56:30 -07:00
40a6b7b37a Another attempt to ensure that string hashing can't overflow 2016-04-28 11:52:16 -07:00
5490f3e15f Fix numeric overflow 2016-04-28 11:52:09 -07:00
666565e820 Merge pull request #232 from mapbox/cplusplus
Convert everything to C++
2016-04-27 15:52:05 -07:00
744915025d Restore dependency on headers in subdirectories 2016-04-27 15:41:40 -07:00
2b393ad8e5 Move jsonpull into a subdirectory like other included libraries 2016-04-27 15:33:30 -07:00
48a82039ce Oops, I didn't check main.cpp in. 2016-04-27 15:14:09 -07:00
24db559f0b Fix complaint about duplicate #define 2016-04-27 15:12:03 -07:00
c4274303ea Fix indentation 2016-04-27 15:10:26 -07:00
ee97e6c307 Whittle down tile.hpp to the things that actually relate to tile.cpp 2016-04-27 15:09:06 -07:00
3662f1a66b Split main program functions apart from GeoJSON parsing 2016-04-27 14:59:20 -07:00
a52733eb07 Use protozero functions for zigzag encoding and decoding 2016-04-27 14:22:44 -07:00
f3b9e15267 Move serialization code to its own file 2016-04-27 14:19:10 -07:00
65253cba50 Drag header files into C++ 2016-04-27 14:00:14 -07:00
a57c247508 Don't try to reindent Clipper and Protozero 2016-04-27 13:55:28 -07:00
7b0bb9a443 Drag the main function and GeoJSON parsing into C++ 2016-04-27 13:54:00 -07:00
3f3a341c0a Drag constant pool handling into C++ 2016-04-27 12:44:46 -07:00
3d56a56464 Drag memory-mapped file handling into C++ 2016-04-27 12:41:49 -07:00
fb9f3b6068 Drag tileset enumeration into C++ 2016-04-27 12:40:19 -07:00
575072bb2f Drag projection math into C++ 2016-04-27 12:39:21 -07:00
94db232a89 Drag mbtiles handling into C++ 2016-04-27 12:38:04 -07:00
d32d4bb35f No need for line clipping to have its own source file 2016-04-27 12:36:51 -07:00
b12413eddb Rename everything from .cc to .cpp to match other projects 2016-04-27 12:22:47 -07:00
8c7ac58ba1 Fix memory leaks and questionable arithmetic
Fix memory leaks and questionable arithmetic
2016-04-27 12:11:24 -07:00
737ae44dd7 Fix warnings about questionable numeric operations 2016-04-27 11:57:46 -07:00
d4504da2f5 Rationalize the highest allowed maxzoom in terms of detail 2016-04-27 11:46:24 -07:00
3182930f35 fix 'negation of 1 cannot be represented in type unsigned int' error - refs #227 2016-04-27 11:46:16 -07:00
d35ef72a99 fix 'runtime error: left shift of negative value -12' error - refs #227 2016-04-27 11:46:05 -07:00
87fea8082f Make indent, and finish writing a comment that I started 2016-04-27 10:51:54 -07:00
5cfd7cf68f Don't leak layer names 2016-04-27 10:46:01 -07:00
1e16eb9294 Don't leak whatever is left of one parse tree when starting another 2016-04-27 10:45:55 -07:00
39c180a673 Don't leak the non-GeoJSON objects in the JSON parse tree 2016-04-27 10:45:41 -07:00
60318e664e Fix leak of pools for -x and -y options 2016-04-27 10:45:33 -07:00
6a37ea4e54 Fix leaks of input source lists and bounding boxes 2016-04-27 10:45:25 -07:00
0205086886 Fix memory leak of temporary filenames 2016-04-27 10:45:17 -07:00
235939ea23 Merge pull request #226 from mapbox/c++-math
use std::fabs instead of clib fabs
2016-04-26 17:03:00 -07:00
65964c93f5 more std:: usage in c++ files 2016-04-26 16:56:24 -07:00
d0b5ba3862 use std::fabs instead of clib fabs 2016-04-26 16:51:12 -07:00
b2063108b2 Merge pull request #222 from mapbox/debug-builds
Explicit Release/Debug modes to build
2016-04-26 15:52:45 -07:00
86e6cd717a Add OSX Debug build 2016-04-26 15:46:58 -07:00
ef3b9bea7c Explicit Release/Debug modes to build 2016-04-26 15:39:42 -07:00
ce64565b38 [build] use AR variable in Makefile 2016-04-26 15:26:15 -07:00
bd5dc4ad18 Merge pull request #187 from mapbox/earcut-polygon
Upgrade Clipper to 07b828b1 to fix self-intersecting polygons
2016-04-26 14:26:10 -07:00
77bc24d743 Bump version number 2016-04-26 14:16:19 -07:00
bebb0dda90 Add missing #include 2016-04-26 14:01:59 -07:00
49d29ad368 But no, I still left a few more dependencies 2016-04-26 13:54:08 -07:00
33f9d91a4c Remove protobuf prerequisites again 2016-04-26 13:50:08 -07:00
a9a14b33e0 Merge remote-tracking branch 'origin/master' into earcut-polygon 2016-04-26 13:48:03 -07:00
535b328425 add libprotobuf back to packages (not sources) 2016-04-26 12:51:48 -07:00
3570d93434 whoops, libprotobuf is still required a bit (for now) 2016-04-26 12:48:32 -07:00
c4a13fc6e0 fix clang++ build by upgrading to 3.5 2016-04-26 12:41:27 -07:00
8c82ee4c5f Merge branch 'master' into earcut-polygon
Conflicts:
	tile.cc
2016-04-26 12:09:56 -07:00
d72639b8f7 Merge pull request #219 from mapbox/protozero
Use protozero for reading and writing vector tile protocol buffers
2016-04-25 17:00:32 -07:00
aaec2c98b6 Clearer names 2016-04-25 16:52:20 -07:00
52f2804122 Make lexical feature ordering work again, and add a test for it 2016-04-25 16:47:30 -07:00
e10a71e152 Use the mvt constant pool when tiling 2016-04-25 16:19:52 -07:00
7bb4c7dbe9 Add a helper method to manage the tile layer's key-value constant pool 2016-04-25 14:20:21 -07:00
2dea1d1564 Fix hardwired layer version number 2016-04-25 12:23:40 -07:00
23934166b1 Methods instead of functions 2016-04-25 12:13:52 -07:00
2afd0bf31b Try with clang++ instead 2016-04-25 10:22:14 -07:00
4e71c40f54 Try backing off to older C++ standard for older g++ 2016-04-25 09:58:36 -07:00
03d5c89742 These are version 2 tiles 2016-04-25 09:24:36 -07:00
9e9afb06d2 Fix signedness warnings, missing #includes, and code formatting. 2016-04-22 23:32:02 -07:00
b15956b476 Oops, was still linking to -lprotobuf-lite 2016-04-22 19:30:37 -07:00
4cee508e95 Add missing #include 2016-04-22 17:51:35 -07:00
135aea8527 Use protozero for writing tiles 2016-04-22 17:45:24 -07:00
5ec41d7bbb Use protozero for tile decoding 2016-04-22 17:10:33 -07:00
f9c4fb8374 Factor out tile reading and writing from tile-join 2016-04-22 15:47:06 -07:00
f837577b38 Rename pb_ prefixes to mvt_ 2016-04-22 15:10:16 -07:00
358f019372 Factor out vector tile writing 2016-04-22 15:06:26 -07:00
f902721dab Abstraction of tile decoding 2016-04-22 13:27:03 -07:00
b91e8f6d3e Start factoring out protocol buffer handling code 2016-04-22 12:00:19 -07:00
2607a76c63 Merge pull request #215 from mapbox/no-clipping
Add an option not to clip features if they appear in the tile at all
2016-04-21 10:36:51 -07:00
ea638914ce Add an option not to clip features and to include each exactly once per zoom 2016-04-20 15:06:43 -07:00
41099ed731 Don't add extra points at tile boundaries if not clipping
The extra points kept the features from being exactly the same
in each tile.
2016-04-19 16:13:02 -07:00
56d8178a7c Fix a mistake in the formatting of the Usage message 2016-04-19 15:35:58 -07:00
24f401da52 Add an option not to clip features if they appear in the tile at all 2016-04-19 15:32:58 -07:00
62a74afbdc Remember to do floating point division in pnpoly 2016-04-19 12:28:49 -07:00
694fa8ee97 Merge pull request #213 from mapbox/polygon-coalesce
Clean up polygon geometry again after coalescing
2016-04-18 16:15:13 -07:00
45cddc57bb Simplify coordinates to avoid cross-platform rounding errors 2016-04-18 16:10:08 -07:00
86b8567201 Clean up polygon geometry again after coalescing 2016-04-18 15:53:04 -07:00
04c56320d2 Merge branch 'master' into earcut-polygon
Conflicts:
	geometry.cc
2016-04-14 16:21:56 -07:00
2a6e41126f Merge pull request #212 from mapbox/limitfiles
Speculatively open files to avoid overrunning the system limits
2016-04-14 10:32:19 -07:00
81517a0cb4 Fix tracking of number of available file descriptors 2016-04-14 10:20:28 -07:00
732a51d684 Fix the option for testing radix sort
Add a check to make sure I don't make the same mistake again
2016-04-14 10:06:01 -07:00
222735004d Add a sanity check to make sure I remember to close all open files 2016-04-13 20:33:01 -07:00
137bb46db5 Clean up the maximum-files-open check a little 2016-04-13 20:20:25 -07:00
cae20bb5e6 Speculatively open files to avoid overrunning the system limits 2016-04-13 18:05:24 -07:00
7c9bd5da2f Merge pull request #210 from mapbox/block-input
Don't let the input buffer for parallel streaming input get too big.
2016-04-13 13:31:51 -07:00
fbcb00fee9 Add an option to set the tileset's attribution 2016-04-13 12:49:41 -07:00
4dba8b3f70 Update clipper to 381c817fd13e8 2016-04-13 10:18:32 -07:00
cb4a83776e Fix log formatting 2016-04-12 16:52:46 -07:00
e5461b1863 Additional debug output from polygon checking 2016-04-12 16:40:03 -07:00
3a46c05b46 Fix the progress indicator 2016-04-12 10:59:06 -07:00
968c94a409 Quiet warning about potentially uninitialized variable. 2016-04-11 16:43:46 -07:00
efe66dcafe Use stdio instead of mmap for geometry while tiling to reduce thrashing 2016-04-11 15:59:02 -07:00
c6d2988485 Logic may be clearer this way 2016-04-11 14:51:04 -07:00
e846b11ce7 Don't let the input buffer for parallel streaming input get too big. 2016-04-11 13:51:28 -07:00
9d48d6a93d Merge pull request #208 from mapbox/simplify-crash
Fix a line simplification crash when a segment degenerates to a single point
2016-04-07 16:50:01 -07:00
acb97361a0 Fix a line simplification crash when a segment degenerates to a single point 2016-04-07 16:45:45 -07:00
64faa1d79e Upgrade Clipper to 68c49e9a9a 2016-04-07 13:52:39 -07:00
1e6332bae8 Merge branch 'master' into earcut-polygon
Conflicts:
	geojson.c
	tile.h
2016-04-07 13:50:04 -07:00
1792a13e1a Merge pull request #207 from mapbox/checkspace
Keep an eye on free disk space and warn if tippecanoe is using it all
2016-04-07 12:23:05 -07:00
41ae22164c Mention the default location for temporary files 2016-04-07 11:39:32 -07:00
73fcdba02b Linux and Mac want different headers for file system stats 2016-04-07 11:23:33 -07:00
1eb0537302 Warn if temporary directory doesn't begin with / 2016-04-07 10:47:46 -07:00
39285c8102 Keep an eye on free disk space and warn if tippecanoe is using it all 2016-04-07 10:35:36 -07:00
22ede9ac8c Merge pull request #206 from mapbox/chatty-polygon
Add an option to drop a fraction of polygons by zoom.
2016-04-06 14:33:48 -07:00
574a2b79c8 Add an option to drop a fraction of polygons by zoom.
Only warn once about polygon cleaning failures.
2016-04-06 14:17:44 -07:00
1e31edbfb3 Merge branch 'master' into earcut-polygon
Conflicts:
	geojson.c
	tile.h
2016-04-05 15:22:43 -07:00
d1456c0f66 Merge pull request #202 from mapbox/radix
Restructure geometry reordering to try to reduce virtual memory thrashing
2016-04-05 15:13:44 -07:00
4a572b810b Close some file descriptors that were left dangling before 2016-04-05 14:07:24 -07:00
b10b436ac9 Add a way to test recursive radix sorting. Bump version number. 2016-04-05 13:32:44 -07:00
e394501faa Check for errors when closing files 2016-04-05 11:13:31 -07:00
c0a0aef060 Use large sort chunks, since that part can be parallelized 2016-04-05 10:08:46 -07:00
2a4be4f6d4 Parallel reading is effectively random order, not sequential 2016-04-04 17:00:11 -07:00
c62762078a Can't tell Linux to free pages, so just say we don't need them 2016-04-04 16:18:55 -07:00
72e485f285 Also advise when unmapping pages 2016-04-04 16:14:26 -07:00
b1d3849889 Sprinkle madvise calls to hint about how files should be paged in 2016-04-04 16:03:13 -07:00
2b0ac890c5 Fix the status message about how much geometry there is 2016-04-04 15:33:25 -07:00
d4b9f79d3c Fix the progress indicator when there is an inner radix sort 2016-04-04 14:49:41 -07:00
31c4d4850c Remove dead code 2016-04-04 13:22:23 -07:00
2fe841c6dc Remove extra newline output from old progress format 2016-04-04 11:42:40 -07:00
7b6cd2d0f4 Fix the any-valid-geometries check again 2016-04-04 11:25:21 -07:00
c9e542e688 Keep meta count inline to avoid thrash if no attributes 2016-04-04 11:18:37 -07:00
986719f2ff Rework sorting/merging progress indicator 2016-04-04 10:53:53 -07:00
e2b36a8ee9 Restore mergesort for the intermediate sort. Tests pass. 2016-04-01 18:15:31 -07:00
55f93963be Stop recursing when the indices are exactly the same 2016-04-01 17:41:53 -07:00
cf806baca2 Slow, but works for some cases now 2016-04-01 17:36:09 -07:00
60139532c8 Provide top-level output files for geometry and index 2016-04-01 17:01:10 -07:00
9793be1517 Split indices and geometries up by radix 2016-04-01 16:25:10 -07:00
851d7b576b Merge branch 'master' into radix 2016-04-01 15:32:15 -07:00
1e8c030b03 Make indent 2016-04-01 15:31:59 -07:00
560add2ccd More calculation about how many fds are available 2016-04-01 15:31:34 -07:00
24deae5a43 Reunify the string pool and metadata earlier to free up file descriptors 2016-04-01 12:38:32 -07:00
9acecdf93e OS-specific code to determine memory size 2016-03-31 15:29:43 -07:00
8cc844c9dd Merge pull request #201 from mapbox/llong_max
Fix where I used the wrong names for the max/min long long constants
2016-03-31 14:33:11 -07:00
389cdf2aa4 Bump version number 2016-03-31 11:44:04 -07:00
e5157ec66b Fix where I used the wrong names for the max/min long long constants 2016-03-31 11:39:12 -07:00
4da5a1996b Merge branch 'master' into earcut-polygon
Conflicts:
	Makefile
2016-03-29 15:18:01 -07:00
5d701913ab Merge pull request #199 from mapbox/named-layers
Add an option to give specific layer names to specific input files
2016-03-29 15:08:06 -07:00
62fac4d6f4 Add a test for layer names 2016-03-29 15:01:03 -07:00
bd3b9a5136 Add an option to give specific layer names to specific input files 2016-03-29 13:13:39 -07:00
10cd9c8875 Check the ring order manually and reorder them if they don't nest right 2016-03-29 11:05:39 -07:00
c404082421 Factor out copied-and-pasted polygon area calculations 2016-03-28 17:01:17 -07:00
6f5199adc7 Check whether polygon inner rings are inside the outer ring 2016-03-28 16:52:33 -07:00
c6051d876f Merge branch 'master' into earcut-polygon
Conflicts:
	geometry.cc
2016-03-28 15:45:16 -07:00
72b7dc977a Merge pull request #196 from mapbox/stray-geom
Remove temporary files that were accidentally left in place
2016-03-28 15:31:31 -07:00
448d1a124e Handle case of options that aren't processed individually 2016-03-28 15:10:04 -07:00
edce0f088d Add GNU-style long options 2016-03-28 15:03:28 -07:00
b47653e2e6 Remove temporary files that were accidentally left in place 2016-03-28 14:00:01 -07:00
9f2e221338 Merge pull request #193 from mapbox/malloc
Check return values after allocating memory
2016-03-28 13:46:45 -07:00
5a2a1b793a Exit cleanly if there was no valid input instead of giving an mmap error 2016-03-28 13:22:03 -07:00
cf2abf67d2 Oops: need to check array size in bytes, not number of objects 2016-03-28 13:08:54 -07:00
356575d0e0 Check for JSON array and hash overflows 2016-03-28 12:25:33 -07:00
7d602987e7 Merge pull request #194 from stevage/patch-2
Warn of spurious errors (as per #191)
2016-03-28 12:07:41 -07:00
21a635fb7a Check for string length overflow 2016-03-28 12:00:55 -07:00
a92bdce12e Upgrade clipper to f9c1344a09 2016-03-28 10:31:17 -07:00
8c9aa53bb5 Warn of spurious errors (as per #191) 2016-03-26 12:28:23 +11:00
1e5d420b66 Fix warnings about unused arguments 2016-03-25 13:45:28 -07:00
12be3e5a32 This one really is an int upstream 2016-03-25 13:21:32 -07:00
52dbed8132 Fix a few warnings about globals 2016-03-25 13:03:57 -07:00
c2231318bd Many places where I used unsigned array indices but meant size_t 2016-03-25 12:20:32 -07:00
eee596d5f5 Check return values from memory allocation 2016-03-25 11:57:33 -07:00
bc5a7b251f Check return values after allocating memory 2016-03-25 11:20:56 -07:00
91ffb084a7 Install libc++-dev in another attempt to fix the build 2016-03-24 17:26:29 -07:00
052c0a55df Try forcing libc++ on Linux to fix the build 2016-03-24 17:17:11 -07:00
76ab63c1d0 Another attempt at fixing Travis on Linux 2016-03-24 15:31:44 -07:00
a03b461aa5 Try to fix Travis on Linux 2016-03-24 15:18:51 -07:00
64c98b38b4 Add Clipper header dependency 2016-03-24 14:39:38 -07:00
05ff656aba Upgrade to Clipper 9dfe779ed7a 2016-03-24 14:36:36 -07:00
f37eccafe6 Merge branch 'master' into earcut-polygon 2016-03-24 14:32:49 -07:00
d4776c8bfb Warn when splitting up a 700-sided polygon for the first time 2016-03-23 17:35:19 -07:00
705a58d255 Reverse polygon rings if they come out of Clipper with the wrong polarity 2016-03-23 17:10:10 -07:00
54c7cfff4c Upgrade to Clipper 296c37e3206bfa 2016-03-23 16:46:55 -07:00
eebba7b75b Merge pull request #189 from mapbox/decode-pbf
Add the ability to tippecanoe-decode a standalone .pbf tile
2016-03-22 17:35:42 -07:00
962f82d44e Add a test for decoding standalone PBFs 2016-03-22 17:12:09 -07:00
d960963623 Add the ability to tippecanoe-decode a standalone .pbf tile 2016-03-22 16:51:29 -07:00
4fc6ca3c3b Keep from getting stuck in an infinite loop 2016-03-21 17:23:34 -07:00
a989611515 Upgrade to Clipper 3eb6a85910aff 2016-03-21 16:13:47 -07:00
707036fe79 Upgrade Clipper to 07b828b1 to fix self-intersecting polygons 2016-03-18 16:35:29 -07:00
7140a3dd91 Merge pull request #186 from mapbox/resolution-doc
Clarify maximum resolution in README and warning
2016-03-17 11:54:24 -07:00
16ca5cfeec Clarify maximum resolution in README and warning 2016-03-17 11:23:54 -07:00
c66fd315e1 Merge pull request #185 from mapbox/line-polygons
Silently drop zero-area polygons instead of showing Clipper errors
2016-03-10 17:31:47 -08:00
7896c3c2c9 Don't get stuck in a loop if subdividing a complex polygon fails 2016-03-10 17:07:22 -08:00
7eccc7a758 Silently drop zero-area polygons instead of showing Clipper errors 2016-03-10 16:03:02 -08:00
652abc9a62 Put all the apt-get instructions on one line 2016-03-10 12:15:49 -08:00
c387af48cc Merge pull request #182 from mapbox/thrashing
Reorder geometry as part of index merging, to reduce thrashing in low memory
2016-03-08 13:41:47 -08:00
cf5082122a Close original geometry temp files as soon as they are no longer needed 2016-03-07 16:45:35 -08:00
bf585a5849 Add newline to clean up progress messages 2016-03-07 16:38:21 -08:00
11e737ff29 Reorder geometry as part of the merge 2016-03-07 16:34:12 -08:00
310887e019 Fix duplicated variable name 2016-03-07 13:40:32 -08:00
81d2a8bfed Encourage -B, not -z, to set the base zoom level 2016-03-07 13:21:53 -08:00
b7f3fcfc7f Merge pull request #180 from mapbox/coverage
Add a gamma-reduction test and base zoom/drop rate guessing tests
2016-03-03 17:34:56 -08:00
f17cec5c44 Factor out gap logic instead of duplicating it 2016-03-03 17:03:47 -08:00
d38b5a999e Make base zoom and drop rate guessing more testable, and test them 2016-03-03 17:03:03 -08:00
e4ab47b3d6 Add a gamma-reduction test 2016-03-03 15:44:07 -08:00
b3aa32b9a3 Merge pull request #179 from mapbox/coverage
More tests for higher code coverage
2016-03-02 17:25:54 -08:00
c9fdd62c91 Try to add zoom skipping to code coverage 2016-03-02 16:37:55 -08:00
739f2dbc75 Test stability of feature ordering 2016-03-02 16:19:38 -08:00
fb9525cf4f Add a MultiPoint test 2016-03-02 15:23:22 -08:00
ae827af8dc Try to avoid another cross-platform rounding error 2016-03-02 15:19:16 -08:00
7e5db337c6 Test bare geometry and GeometryCollection 2016-03-02 15:06:27 -08:00
a847db8c5f Remove dead code 2016-03-02 14:58:07 -08:00
0fcd2d9a79 Test for crossing the date line, property encodings, zoom level filters 2016-03-02 14:57:03 -08:00
3c85d3243a Merge pull request #176 from mapbox/decode-limit
Increase maximum tile size for decoding
2016-02-19 16:56:44 -08:00
1549328e4e Bump version number 2016-02-19 16:50:40 -08:00
b87d3de5d1 Increase maximum tile size for decoding 2016-02-19 16:33:48 -08:00
7dec577eb2 Merge pull request #173 from mapbox/clipper-upgrade
Incorporate Mapnik's Clipper upgrades for consistent results between Mac and Linux
2016-02-17 13:43:09 -08:00
ce0ac700d7 Bump version number 2016-02-17 13:30:53 -08:00
a2b0bb01b9 Don't use std::int64_t, for the sake of older compilers 2016-02-17 13:27:45 -08:00
a4498d34b7 New test standards for the flaky country polygons 2016-02-17 13:23:09 -08:00
88e1de62b6 Revert "Temporarily remove flaky polygons from test"
This reverts commit 886b2c3350.
2016-02-17 13:22:14 -08:00
d25e537d20 Bring back Somerset county test 2016-02-17 13:21:58 -08:00
0f5a32b442 Additional Clipper changes through https://github.com/mapnik/clipper/commit/7484da1 2016-02-17 13:10:13 -08:00
51fffb7710 Incorporate Clipper fork from https://github.com/mapnik/clipper 2016-02-17 13:02:56 -08:00
eecdf7747a Merge pull request #170 from mapbox/sane-polygon
Split complex polygons into multiple features
2016-02-11 14:29:33 -08:00
84a2a1bd9d Use a test polygon that comes out the same on Mac and Linux 2016-02-11 14:18:09 -08:00
4f419ae9b2 Make indent 2016-02-11 12:22:22 -08:00
0fa305432b Bump version number 2016-02-11 12:16:46 -08:00
ea304d3a98 Split complex polygons into multiple features 2016-02-11 12:14:32 -08:00
ca4faec9f6 Bump version number in all test standards 2016-02-11 11:34:33 -08:00
ef646e8414 Refactor simple polygon clipping for arbitrary bounding boxes 2016-02-11 11:31:53 -08:00
c6ad34427a Some work toward letting features have sub-geometries 2016-02-11 11:09:05 -08:00
a1da515dc9 Bump version number 2016-02-10 12:01:29 -08:00
f53ce812d3 Merge pull request #168 from mapbox/enforce-maxzoom
Limit maxzoom to the max that actually works
2016-02-03 17:35:19 -08:00
01731e21ee Guard against missing source file in test invocation 2016-02-03 17:30:27 -08:00
cb68984052 Forgot to add source JSON 2016-02-03 17:28:47 -08:00
bcdb40b41e Limit maxzoom to the max that actually works 2016-02-03 17:12:17 -08:00
6239809632 Merge pull request #167 from mapbox/fix-z22
Even if the maxzoom is very high, don't use more bits of precision than exist
2016-02-03 16:59:22 -08:00
e7f571c9c3 Even if the maxzoom is very high, don't use more bits of precision than exist 2016-02-03 16:52:23 -08:00
c1963dc645 Merge pull request #165 from mapbox/input-sequence
Preserve input sequence across multiple input files
2016-02-02 15:57:19 -08:00
b649e99b32 Preserve input sequence across multiple input files 2016-02-02 15:43:27 -08:00
49324a3dc6 Merge pull request #164 from mapbox/parallel-test
Spell out a fuzz test for the four input-reading code paths
2016-02-02 14:14:26 -08:00
59c5d51e25 Don't allow the initial coordinate pair to be out of bounds 2016-02-02 13:33:49 -08:00
54d9e20e30 Add the input-paths test to the standard test target 2016-02-02 12:23:30 -08:00
dfaba5a3e6 Spell out a fuzz test for the four input-reading code paths 2016-02-02 12:21:19 -08:00
f5ea5974ee Merge pull request #158 from mapbox/ignore-error
Add an option to ignore sqlite errors on existing mbtiles files
2016-02-01 11:00:36 -08:00
2015ba21bf Merge branch 'master' into ignore-error
Conflicts:
	mbtiles.c
2016-02-01 10:10:05 -08:00
f9a0a55578 Merge pull request #160 from mapbox/layer-name-message
Better message about layer name, and put it on stderr with the rest.
2016-01-29 11:09:20 -08:00
6ec211ea4c Better message about layer name, and put it on stderr with the rest. 2016-01-29 11:05:51 -08:00
29355c1afd Merge pull request #138 from mapbox/travis-improvements
Add code coverage
2016-01-29 10:40:36 -08:00
bbd233c0ec Merge branch 'master' into travis-improvements
Conflicts:
	.travis.yml
2016-01-29 10:30:19 -08:00
4aa0ddd54a Merge pull request #157 from mapbox/testing
Start adding tests for Tippecanoe
2016-01-28 17:07:25 -08:00
ec7d72925a Bump version number 2016-01-28 17:01:13 -08:00
886b2c3350 Temporarily remove flaky polygons from test 2016-01-28 16:52:46 -08:00
9f484e028d And a basic polygon test, with Natural Earth countries 2016-01-28 16:06:30 -08:00
2aa1510de8 Stabilize metadata field order, which seems to be causing test flakiness 2016-01-28 15:46:58 -08:00
e786379358 Fix warning and let the autoformatter do its thing 2016-01-28 15:35:22 -08:00
9de13d7ea9 Natural Earth place names for testing basic dot-dropping 2016-01-28 15:27:04 -08:00
bc14bcdde2 Test merging of multiple input layers 2016-01-28 15:04:57 -08:00
b17f8fc663 Stabilize map center and mbtiles filename to fix test flakiness 2016-01-28 14:38:10 -08:00
a84061cf87 Add an option to ignore sqlite errors on existing mbtiles files 2016-01-28 14:18:31 -08:00
7c021035a6 Include metadata in tippecanoe-decode. Allow multiple inputs for tests. 2016-01-28 14:06:51 -08:00
6d38eff2a2 Use Natural Earth state boundaries to test some flag combinations 2016-01-28 12:34:52 -08:00
2a94d8084d No need to have these parts of the test run in the same subshell 2016-01-28 10:59:32 -08:00
528bca6629 Use tests in Travis configuration 2016-01-28 10:54:51 -08:00
794048c049 Add a test 2016-01-28 10:52:43 -08:00
7835f52829 Merge pull request #156 from mapbox/arrayprop
Stringify feature properties that are arrays or hashes
2016-01-27 14:50:52 -08:00
0e59a4f774 Retain original form of numbers when stringifying 2016-01-27 14:41:06 -08:00
167beae307 Stringify feature properties that are arrays or hashes 2016-01-27 14:29:34 -08:00
baf576f257 Merge pull request #152 from mapbox/clip-buffer
Make clipping behavior with no buffer consistent with Mapnik
2016-01-20 14:57:38 -08:00
1602574647 Bump version number 2016-01-20 14:53:58 -08:00
7aa27b7f4c Make calculation of child tile overlap match clipping behavior.
Coordinates that are right on the tile boundary appear in both
adjacent tiles.

Make tippecanoe-decode also use exact edge coordinates instead of
half-pixel locations.
2016-01-20 13:58:17 -08:00
45043922e7 Merge pull request #150 from mapbox/stream-parallel
Chunked parallel reading of input streams
2016-01-12 14:29:39 -08:00
872df4bd9f Bump version number 2016-01-12 14:27:05 -08:00
83322d8e35 Guard against unlikely overflow 2016-01-12 14:16:17 -08:00
ecae14e2d4 Stabilize feature order between the different reading methods 2016-01-12 14:12:56 -08:00
ca97c5ec6d Update docs 2016-01-12 12:36:12 -08:00
e4afaa7a27 Renaming in the hope of clarity 2016-01-12 12:31:17 -08:00
0680236e46 Fix warning 2016-01-12 12:18:05 -08:00
9d6ece5bbc Buffered reading makes it faster than the single-threaded version 2016-01-12 11:47:46 -08:00
333956ce42 Fix crashes 2016-01-11 17:29:06 -08:00
2d16577945 Starts but crashes 2016-01-11 16:52:45 -08:00
132b7ecd12 Factor out parallel reading; start to set up semi-parallel reading 2016-01-11 16:06:55 -08:00
29db0e8988 Merge pull request #149 from mapbox/fix-progress
Progress indicator improvements
2016-01-11 13:27:36 -08:00
ffbddef756 Accurate feature count, more chatty base zoom determination, less chatty tiling 2016-01-11 13:11:21 -08:00
20bd661693 Merge pull request #148 from mapbox/option-check
Give an error if -p or -a is used with an undefined option letter
2016-01-11 12:25:30 -08:00
a597733a01 Fix the build 2016-01-11 11:00:23 -08:00
404bf69847 Give an error if -p or -a is used with an undefined option letter 2016-01-11 10:47:06 -08:00
502d269143 Merge pull request #145 from mapbox/fix-layer-merge
Fix the layer number for layers being merged together
2016-01-08 13:28:31 -08:00
43c9bb705d Fix the layer number for layers being merged together 2016-01-08 13:26:56 -08:00
581728f157 Merge pull request #143 from mapbox/limit-simplify
Add an option to do line simplification only at zooms below maxzoom
2016-01-08 11:55:23 -08:00
b05478787b Bump version number 2016-01-08 11:54:14 -08:00
93d0767829 Add an option to do line simplification only at zooms below maxzoom 2016-01-08 11:31:10 -08:00
5e02bdd90a Merge pull request #142 from mapbox/consistent-clip
Simplify lines consistently on opposite sides of tile boundaries
2016-01-07 11:51:58 -08:00
1f8b6faec8 Simplify lines consistently on opposite sides of tile boundaries 2016-01-07 11:35:11 -08:00
e1e028b865 Merge pull request #141 from mapbox/multithread-low-zooms
Multithread line simplification and polygon cleaning at low zooms
2016-01-06 14:41:00 -08:00
22293ca6e8 Bump version number 2016-01-06 13:29:59 -08:00
c8573634e1 Track how many threads are active to calculate how many sub-threads to use 2016-01-05 13:56:36 -08:00
977533e449 Use multiple threads within a single tile for geometric simplification 2016-01-05 12:29:40 -08:00
f43d18eb73 Bring back the old simple polygon clipping algorithm 2016-01-04 17:31:33 -08:00
32111af79d Merge pull request #136 from mapbox/multithread-input2
Multithreaded input
2016-01-04 17:29:25 -08:00
1d77261509 Bump version number 2016-01-04 17:20:38 -08:00
2b378ceb9f Use multiple threads to sort the features 2016-01-04 14:45:34 -08:00
88d02915d7 remove vector_tile.pb.o 2015-12-22 17:23:56 -08:00
2460b5712f add coverage badge and a few excludes 2015-12-22 17:20:59 -08:00
71463c1325 avoid travis overwriting CC 2015-12-22 17:16:55 -08:00
ee43ab5fc8 fix syntax error 2015-12-22 17:12:58 -08:00
1f971bdfbd travis: run coverage first, build in debug 2015-12-22 17:08:41 -08:00
893604ca94 fix hardcoded cc 2015-12-22 17:07:59 -08:00
b39a1714c0 Merge remote-tracking branch 'origin/master' into multithread-input2 2015-12-22 17:04:32 -08:00
28b83cbe47 ensure we use upgraded gcc as well as g++ 2015-12-22 17:02:49 -08:00
228a4d6bb9 Make the geometric origin a per-reader property for thread safety 2015-12-22 16:58:27 -08:00
01caa22bfc enable coverage 2015-12-22 16:55:21 -08:00
2e22034a77 Merge branch 'master' of github.com:mapbox/tippecanoe into travis-improvements 2015-12-22 16:33:30 -08:00
76cf1f8951 Merge pull request #135 from mapbox/build-inherit
Inherit CXX,CC,CFLAGS,CXXFLAGS,LDFLAGS from env
2015-12-22 16:33:22 -08:00
f1ce176e75 also build on OS X 2015-12-22 16:30:28 -08:00
2fd00413cf Clean up progress indicator and feature sequence 2015-12-22 16:29:45 -08:00
8e3f8b8dc3 inherit CXX,CC,CFLAGS,CXXFLAGS,LDFLAGS from env 2015-12-22 16:26:24 -08:00
02cf4d46ad Close files that were being left open 2015-12-22 15:42:51 -08:00
fde3aa45de Make parallel reading a command-line option 2015-12-22 14:58:19 -08:00
ca4d1beb7c Guard against trying to map an empty string pool into memory 2015-12-22 14:27:38 -08:00
a8b39aa2ff Merge branch 'master' into multithread-input2 2015-12-22 14:10:21 -08:00
40ec317c36 Launch a separate thread to read each segment 2015-12-22 14:02:31 -08:00
f290794f22 Bump version number 2015-12-22 10:40:13 -08:00
a9ad1d99df Merge pull request #134 from mapbox/fixshards
Round the number of threads to use down to a power of 2
2015-12-22 10:35:22 -08:00
edd325e338 Round the number of threads to use down to a power of 2
Since other calculations depend on this being true
2015-12-22 10:33:08 -08:00
bd081c864e It tiles again! 2015-12-21 18:00:07 -08:00
aaf9e87bce Reassemble the combined string pool and metadata files 2015-12-21 17:46:04 -08:00
de60545da8 Include the segment number in the serialized geometry 2015-12-21 17:21:18 -08:00
1f335eec44 Get rid of a global I had forgotten about 2015-12-21 17:00:07 -08:00
2dc99698d2 Stub out enough to get it to compile 2015-12-21 16:17:47 -08:00
fee18cc31e Add errno.h - refs #131 2015-12-20 15:13:33 -08:00
ee35b9f7c9 fix matrix 2015-12-20 15:11:15 -08:00
255960b48d Add travis 2015-12-20 15:09:35 -08:00
f6dfe0ace0 WIP on splitting geometry-reading state into per-thread structures 2015-12-18 17:10:20 -08:00
2159d464d0 Segment the file into input chunks. Allow commas at the top level. 2015-12-18 15:59:07 -08:00
ed90c7b53a Use memory-mapped I/O for GeoJSON reading if possible
Conflicts:
	geojson.c
2015-12-18 15:20:43 -08:00
dd2a4b0fc8 Pass the input line number around instead of the JSON parser itself 2015-12-18 15:12:27 -08:00
c6dfae26cb Add a function to disconnect a JSON object from the parse tree 2015-12-18 12:05:22 -08:00
dfcdd03b19 Factor out the JSON token reading loop
Conflicts:
	geojson.c
2015-12-18 12:04:52 -08:00
08a6607b0b Merge pull request #127 from mapbox/maxzoom
Separate the concept of basezoom for dot-dropping from maxzoom for tiling
2015-12-18 11:29:00 -08:00
fd682b66a8 Mention base zoom and drop rate calculation in changelog 2015-12-18 11:28:00 -08:00
4fe1a950f5 Calculate effective basezoom and droprate, taking gamma into account 2015-12-17 17:16:04 -08:00
acd4444439 Allow specifying a marker width for automatic basezoom guessing 2015-12-17 12:33:52 -08:00
f9d1e87360 Heuristics for basezoom and droprate if no tiles are naturally small enough 2015-12-16 16:18:16 -08:00
38fe00c7ab Replicate the dot-gamma logic in the base/rate guessing code 2015-12-16 13:09:31 -08:00
b7a411769a Don't allow <0 gamma (randomized) if basezoom or droprate not known 2015-12-16 12:33:07 -08:00
6681817243 Add an option to guess the drop rate 2015-12-16 12:13:24 -08:00
eb906dfc72 Write spaces over the reading progress message at the end 2015-12-15 16:21:07 -08:00
966bee8919 Fix bounding box calculation 2015-12-15 15:55:54 -08:00
7f8eb4de83 Add an option to guess an appropriately dense base zoom level 2015-12-15 15:52:08 -08:00
8e2c20265f Try to clarify documentation 2015-12-15 12:22:19 -08:00
fee8b33bc8 Fix a place where it was still passing around maxzoom as basezoom 2015-12-15 12:08:07 -08:00
76de0c82d2 Further cleanup of minzoom- and maxzoom-related variable names 2015-12-15 12:00:05 -08:00
7372a2c4bc Add basezoom parameter for dot-dropping, independent of maxzoom 2015-12-15 11:56:49 -08:00
3b8a5d42dd Merge pull request #125 from mapbox/integers
Encode numeric attributes as integers instead of floating point if possible
2015-12-14 16:17:21 -08:00
9391c82216 Print floating point attributes as integers if they are exact integers 2015-12-14 16:10:34 -08:00
a60a9238e5 Use integers instead of floating point for numeric attributes if possible 2015-12-14 15:45:57 -08:00
9de2ddd23c Merge pull request #124 from mapbox/pointclip
Don't simplify "unused" movetos from points. Don't write empty geometries.
2015-12-11 12:39:33 -08:00
cf279940da Don't simplify "unused" movetos from points. Don't write empty geometries. 2015-12-11 12:34:55 -08:00
25e261aa35 Merge pull request #122 from mapbox/dateline2
More fixes for large polygons and clipping at the antimeridian
2015-12-09 15:25:12 -08:00
a6be746163 Unrelated code formatting correction 2015-12-09 15:24:07 -08:00
f04c5e153a Avoid arithmetic overflow in area calculation 2015-12-09 15:02:59 -08:00
4bde17f8ff Remove unnecessary coordinate offsetting. Negative coordinates are OK. 2015-12-08 17:21:59 -08:00
256139b385 Clipping is faster with only one duplicate/shifted geometry copy 2015-12-08 16:57:04 -08:00
dace304182 Offset coordinates for Clipper to keep them positive.
Limit very high or low latitudes and longitudes.
2015-12-08 16:24:17 -08:00
bbcf9e1b3c Merge pull request #120 from mapbox/dateline
Handle features that cross the antimeridian
2015-12-03 15:21:19 -08:00
5506288273 Bump version number 2015-12-03 15:19:54 -08:00
9faa625e75 Calculate bounding boxes better for features that cross the antimeridian 2015-12-03 14:42:59 -08:00
c832b34160 Better handling of features that cross the antimeridian 2015-12-03 13:12:34 -08:00
53471521f3 Merge pull request #119 from wilsaj/make-install-dependencies
include all CLI tools as dependencies for `make install`
2015-11-30 11:19:59 -08:00
7c693c8cda include all CLI tools as dependencies for make install 2015-11-30 13:10:19 -06:00
024c685759 Merge pull request #118 from mapbox/tile-join-multi
Let tile-join combine non-conflicting tilesets together
2015-11-24 13:40:12 -08:00
368916b7a1 Bump version number 2015-11-11 17:05:41 -08:00
c854fd5481 Merge pull request #112 from mapbox/checkcpu
Check how many CPUs and filehandles there are instead of guessing
2015-11-11 16:16:27 -08:00
e1f4bdd36c Check how many CPUs and filehandles there are instead of guessing 2015-11-11 16:10:39 -08:00
666a56d84a Merge pull request #110 from mapbox/more-docs
Document and install tippecanoe-enumerate and tippecanoe-decode
2015-11-03 14:11:39 -08:00
539979811d Spell out what tippecanoe-enumerate does 2015-11-03 14:10:02 -08:00
2e155b3bb4 Document and install tippecanoe-enumerate and tippecanoe-decode 2015-11-03 14:04:28 -08:00
71e00fc8fe Calculate the min and max lat, lon, and zoom when merging tilesets 2015-11-02 14:34:01 -08:00
321c12da5e Merge pull request #108 from mapbox/layer-name
Merge input files into one layer if a layer name is specified
2015-11-02 14:07:20 -08:00
7f24cd6767 Don't produce layers with no features. Say "layer" instead of "name" in decode. 2015-11-02 14:03:45 -08:00
84173e24a5 Produce JSON with proper nesting even if polygon rings go the wrong way 2015-11-02 13:52:52 -08:00
36f4f77640 Another polygon fix: if the outer ring was dropped, drop its inner rings too 2015-11-02 11:43:44 -08:00
4c4ba693b1 Remove debugging output accidentally left in 2015-11-02 10:46:13 -08:00
4df95df637 Fix tile-join crash when no CSV is specified. Accept multiple input files. 2015-10-30 17:59:15 -07:00
d13f60e9c4 Merge files into one layer if a name is specified with -l 2015-10-30 17:30:18 -07:00
0b6f6436a6 Factor out the JSON token reading loop 2015-10-30 15:53:31 -07:00
fd5ced6902 Pay attention to $PREFIX when installing the man page 2015-10-29 16:46:57 -07:00
9a09e92357 Update man page and put it in /usr/local 2015-10-29 11:30:12 -07:00
0ff0bf1ad9 Merge pull request #105 from mapbox/polygon-close
Use closepath for each polygon ring to match the vector tile spec
2015-10-28 14:38:32 -07:00
358e9cad5b Use closepath for each polygon ring to match the vector tile spec 2015-10-28 14:34:57 -07:00
b07a40ed5a Merge branch 'master' of https://github.com/mapbox/tippecanoe 2015-10-27 12:56:08 -07:00
c9d661597d Add tile resolution change to CHANGELOG 2015-10-27 12:55:42 -07:00
8993f01db1 Merge pull request #104 from mapbox/always-4096
Use 4096-unit tiles as much as possible to keep GL happy
2015-10-27 12:23:12 -07:00
0058360cde Use 4096-unit tiles as much as possible to keep GL happy 2015-10-27 12:22:18 -07:00
316 changed files with 390862 additions and 10570 deletions

4
.dockerignore Normal file

@@ -0,0 +1,4 @@
# Don't copy Dockerfile or git items
.gitignore
.git
Dockerfile

43
.gitignore vendored Normal file

@@ -0,0 +1,43 @@
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Autogenerated dependencies
*.d
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
tippecanoe
tile-join
tippecanoe-decode
tippecanoe-enumerate
unit
# Vim
*.swp
# Mac
.DS_Store

82
.travis.yml Normal file

@@ -0,0 +1,82 @@
language: node_js
node_js:
- "6"
sudo: false
matrix:
include:
# debug+integer-sanitizer build
- os: linux
compiler: clang
env: CLANG_VERSION='3.8.0' BUILDTYPE=Debug CC="clang-3.8" CXX="clang++-3.8" CXXFLAGS="-fsanitize=integer" CFLAGS="-fsanitize=integer" LDFLAGS="-fsanitize=integer"
addons:
apt:
sources: ['ubuntu-toolchain-r-test' ]
packages: [ 'libstdc++6','libstdc++-5-dev' ]
# debug+leak+address-sanitizer build
- os: linux
compiler: clang
env: CLANG_VERSION='3.8.0' BUILDTYPE=Debug ASAN_OPTIONS=detect_leaks=1 CC="clang-3.8" CXX="clang++-3.8" CXXFLAGS="-fsanitize=address" CFLAGS="-fsanitize=address" LDFLAGS="-fsanitize=address" FEWER=true
addons:
apt:
sources: ['ubuntu-toolchain-r-test' ]
packages: [ 'libstdc++6','libstdc++-5-dev' ]
# coverage+debug build
- os: linux
compiler: clang
env: CLANG_VERSION='3.8.0' BUILDTYPE=Debug CC="clang-3.8" CXX="clang++-3.8" CXXFLAGS="--coverage" CFLAGS="--coverage" LDFLAGS="--coverage"
addons:
apt:
sources: ['ubuntu-toolchain-r-test' ]
packages: [ 'libstdc++6','libstdc++-5-dev' ]
# release+linux+g++
- os: linux
compiler: gcc
env: BUILDTYPE=Release CC="gcc-4.9" CXX="g++-4.9"
addons:
apt:
sources: ['ubuntu-toolchain-r-test']
packages: [ 'g++-4.9' ]
# release+linux+clang++
- os: linux
compiler: clang
env: CLANG_VERSION='3.8.0' BUILDTYPE=Release CC="clang-3.8" CXX="clang++-3.8"
addons:
apt:
sources: ['ubuntu-toolchain-r-test' ]
packages: [ 'libstdc++6','libstdc++-5-dev' ]
# release+osx
- os: osx
compiler: clang
env: BUILDTYPE=Release
# debug+osx
- os: osx
compiler: clang
env: BUILDTYPE=Debug
before_install:
- DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
- export PATH=${DEPS_DIR}/bin:${PATH} && mkdir -p ${DEPS_DIR}
- |
if [[ ${CLANG_VERSION:-false} != false ]]; then
export CCOMPILER='clang'
export CXXCOMPILER='clang++'
CLANG_URL="https://mason-binaries.s3.amazonaws.com/${TRAVIS_OS_NAME}-x86_64/clang/${CLANG_VERSION}.tar.gz"
travis_retry wget --quiet -O - ${CLANG_URL} | tar --strip-components=1 -xz -C ${DEPS_DIR}
fi
install:
- BUILDTYPE=${BUILDTYPE} make -j
script:
- npm install geobuf
- if [ -n "${FEWER}" ]; then
BUILDTYPE=${BUILDTYPE} make fewer-tests; else
BUILDTYPE=${BUILDTYPE} make test geobuf-test;
fi
- if [ -n "${COVERAGE}" ]; then
/usr/bin/llvm-cov-3.5 -lp *.o;
pip install --user cpp-coveralls;
~/.local/bin/coveralls --no-gcov -i ./ --exclude clipper;
fi

CHANGELOG.md

@@ -1,3 +1,495 @@
## 1.26.3
* Guard against impossible coordinates when decoding tilesets
## 1.26.2
* Make sure to encode tile-joined integers as ints, not doubles
## 1.26.1
* Add tile-join option to rename layers
## 1.26.0
* Fix error when parsing attributes with empty-string keys
## 1.25.0
* Add --coalesce-smallest-as-needed strategy for reducing tile sizes
* Add --stats option to tippecanoe-decode
## 1.24.1
* Limit the size and depth of the string pool for better performance
## 1.24.0
* Add feature filters using the Mapbox GL Style Specification filter syntax
## 1.23.0
* Add input support for Geobuf file format
## 1.22.2
* Add better diagnostics for NaN or Infinity in input JSON
## 1.22.1
* Fix tilestats generation when long string attribute values are elided
* Add option not to produce tilestats
* Add tile-join options to select zoom levels to copy
## 1.22.0
* Add options to filter each tile's contents through a shell pipeline
## 1.21.0
* Generate layer, feature, and attribute statistics as part of tileset metadata
## 1.20.1
* Close mbtiles file properly when there are no valid features in the input
## 1.20.0
* Add long options to tippecanoe-decode and tile-join. Add --quiet to tile-join.
## 1.19.3
* Upgrade protozero to version 1.5.2
## 1.19.2
* Ignore UTF-8 byte order mark if present
## 1.19.1
* Add an option to increase maxzoom if features are still being dropped
## 1.19.0
* Tile-join can merge and create directories, not only mbtiles
* Maxzoom guessing (-zg) takes into account resolution within each feature
## 1.18.2
* Fix crash with very long (>128K) attribute values
## 1.18.1
* Only warn once about invalid polygons in tippecanoe-decode
## 1.18.0
* Fix compression of tiles in tile-join
* Calculate the tileset bounding box in tile-join from the tile boundaries
## 1.17.7
* Enforce polygon winding and closure rules in tippecanoe-decode
## 1.17.6
* Add tile-join options to set name, attribution, description
## 1.17.5
* Preserve the tileset names from the source mbtiles in tile-join
## 1.17.4
* Fix RFC 8142 support: Don't try to split *all* memory mapped files
## 1.17.3
* Support RFC 8142 GeoJSON text sequences
## 1.17.2
* Organize usage output the same way as in the README
## 1.17.1
* Add -T option to coerce the types of feature attributes
## 1.17.0
* Add -zg option to guess an appropriate maxzoom
## 1.16.17
* Clean up JSON parsing at the end of each FeatureCollection
to avoid running out of memory
## 1.16.16
* Add tile-join options to include or exclude specific layers
## 1.16.15
* Add --output-to-directory and --no-tile-compression options
## 1.16.14
* Add --description option for mbtiles metadata
* Clean up some utility functions
## 1.16.13
* Add --detect-longitude-wraparound option
## 1.16.12
* Stop processing higher zooms when a feature reaches its explicit maxzoom tag
## 1.16.11
* Remove polygon splitting, since polygon cleaning is now fast enough
## 1.16.10
* Add a tippecanoe-decode option to specify layer names
## 1.16.9
* Clean up layer name handling to fix layer merging crash
## 1.16.8
* Fix some code that could sometimes try to divide by zero
* Add check for $TIPPECANOE_MAX_THREADS environmental variable
## 1.16.7
* Fix area of placeholders for degenerate multipolygons
## 1.16.6
* Upgrade Wagyu to 0.3.0; downgrade C++ requirement to C++ 11
## 1.16.5
* Add -z and -Z options to tippecanoe-decode
## 1.16.4
* Use Wagyu's quick_lr_clip() instead of a separate implementation
## 1.16.3
* Upgrade Wagyu to bfbf2893
## 1.16.2
* Associate attributes with the right layer when explicitly tagged
## 1.16.1
* Choose a deeper starting tile than 0/0/0 if there is one that contains
all the features
## 1.16.0
* Switch from Clipper to Wagyu for polygon topology correction
## 1.15.4
* Dot-dropping with -r/-B doesn't apply if there is a per-feature minzoom tag
## 1.15.3
* Round coordinates in low-zoom grid math instead of truncating
## 1.15.2
* Add --grid-low-zooms option to snap low-zoom features to the tile grid
## 1.15.1
* Stop --drop-smallest-as-needed from always dropping all points
## 1.15.0
* New strategies for making tiles smaller, with uniform behavior across
the whole zoom level: --increase-gamma-as-needed,
--drop-densest-as-needed, --drop-fraction-as-needed,
--drop-smallest-as-needed.
* Option to specify the maximum tile size in bytes
* Option to turn off tiny polygon reduction
* Better error checking in JSON parsing
## 1.14.4
* Make -B/-r feature-dropping consistent between tiles and zoom levels
## 1.14.3
* Add --detect-shared-borders option for better polygon simplification
## 1.14.2
* Enforce that string feature attributes must be encoded as UTF-8
## 1.14.1
* Whitespace after commas in tile-join .csv input is no longer significant
## 1.14.0
* Tile-join is multithreaded and can merge multiple vector mbtiles files together
## 1.13.0
* Add the ability to specify layer names within the GeoJSON input
## 1.12.11
* Don't try to revive a placeholder for a degenerate polygon that had negative area
## 1.12.10
* Pass feature IDs through in tile-join
## 1.12.9
* Clean up parsing and serialization. Provide some context with parsing errors.
## 1.12.8
* Fix the spelling of the --preserve-input-order option
## 1.12.7
* Support the "id" field of GeoJSON objects and vector tile features
## 1.12.6
* Fix error reports when reading from an empty file with parallel input
## 1.12.5
* Add an option to vary the level of line and polygon simplification
* Be careful not to produce an empty tile if there was a feature with
empty geometry.
## 1.12.4
* Be even more careful not to produce features with empty geometry
## 1.12.3
* Fix double-counted progress in the progress indicator
## 1.12.2
* Add ability to specify a projection to tippecanoe-decode
## 1.12.1
* Fix incorrect tile layer version numbers in tile-join output
## 1.12.0
* Fix a tile-join bug that would retain fields that were supposed to be excluded
## 1.11.9
* Add minimal support for alternate input projections (EPSG:3857).
## 1.11.8
* Add an option to calculate the density of features as a feature attribute
## 1.11.7
* Keep metadata together with geometry for features that don't span many tiles,
to avoid extra memory load from indexing into a separate metadata file
## 1.11.6
* Reduce the size of critical data structures to reduce dynamic memory use
## 1.11.5
* Let zoom level 0 have just as much extent and buffer as any other zoom
* Fix tippecanoe-decode bug that would sometimes show outer rings as inner
## 1.11.4
* Don't let polygons with nonzero area disappear during cleaning
## 1.11.3
* Internal code cleanup
## 1.11.2
* Update Clipper to fix potential crash
## 1.11.1
* Make better use of C++ standard libraries
## 1.11.0
* Convert C source files to C++
## 1.10.0
* Upgrade Clipper to fix potential crashes and improve polygon topology
## 1.9.16
* Switch to protozero as the library for reading and writing protocol buffers
## 1.9.15
* Add option not to clip features
## 1.9.14
* Clean up polygons after coalescing, if necessary
## 1.9.13
* Don't trust the OS so much about how many files can be open
## 1.9.12
* Limit the size of the parallel parsing streaming input buffer
* Add an option to set the tileset's attribution
## 1.9.11
* Fix a line simplification crash when a segment degenerates to a single point
## 1.9.10
* Warn if temporary disk space starts to run low
## 1.9.9
* Add --drop-polygons to drop a fraction of polygons by zoom level
* Only complain once about failing to clean polygons
## 1.9.8
* Use an on-disk radix sort for the index to control virtual memory thrashing
when the geometry and index are too large to fit in memory
## 1.9.7
* Fix build problem (wrong spelling of long long max/min constants)
## 1.9.6
* Add an option to give specific layer names to specific input files
## 1.9.5
* Remove temporary files that were accidentally left behind
* Be more careful about checking memory allocations and array bounds
* Add GNU-style long options
## 1.9.4
* Tippecanoe-decode can decode .pbf files that aren't in an .mbtiles container
## 1.9.3
* Don't get stuck in a loop trying to split up very small, very complicated polygons
## 1.9.2
* Increase maximum tile size for tippecanoe-decode
## 1.9.1
* Incorporate Mapnik's Clipper upgrades for consistent results between Mac and Linux
## 1.9.0
* Claim vector tile version 2 in mbtiles
* Split too-complex polygons into multiple features
## 1.8.1
* Bug fixes to maxzoom, and more tests
## 1.8.0
* There are tests that can be run with "make test".
## 1.7.2
* Feature properties that are arrays or hashes get stringified
rather than being left out with a warning.
## 1.7.1
* Make clipping behavior with no buffer consistent with Mapnik.
Features that are exactly on a tile boundary appear in both tiles.
## 1.7.0
* Parallel processing of input with -P works with streamed input too
* Error handling if unsupported options given to -p or -a
## 1.6.4
* Fix crashing bug when layers are being merged with -l
## 1.6.3
* Add an option to do line simplification only at zooms below maxzoom
## 1.6.2
* Make sure line simplification matches on opposite sides of a tile boundary
## 1.6.1
* Use multiple threads for line simplification and polygon cleaning
## 1.6.0
* Add option of parallelized input when reading from a line-delimited file
## 1.5.1
* Fix internal error when number of CPUs is not a power of 2
* Add missing #include
## 1.5.0
* Base zoom for dot-dropping can be specified independently of
maxzoom for tiling.
* Tippecanoe can calculate a base zoom and drop rate for you.
## 1.4.3
* Encode numeric attributes as integers instead of floating point if possible
## 1.4.2
* Bug fix for problem that would occasionally produce empty point geometries
* More bug fixes for polygon generation
## 1.4.1
* Features that cross the antimeridian are split into two parts instead
of being partially lost off the edge
## 1.4.0
* More polygon correctness
* Query the system for the number of available CPUs instead of guessing
* Merge input files into one layer if a layer name is specified
* Document and install tippecanoe-enumerate and tippecanoe-decode
## 1.3.0
* Tile generation is multithreaded to take advantage of multiple CPUs
@@ -10,6 +502,7 @@
* Output of `decode` utility is now in GeoJSON format
* Tile generation with a minzoom spends less time on unused lower zoom levels
* Bare geometries without a Feature wrapper are accepted
* Default tile resolution is 4096 units at all zooms since renderers assume it
## 1.2.0
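
A minimal invocation sketch pulling together a few of the options listed above (-zg from 1.17.0, --output-to-directory and --no-tile-compression from 1.16.15); the layer, directory, and file names here are placeholders for illustration, not part of the diff:

    # Guess an appropriate maxzoom from the data and write uncompressed tiles to a directory
    tippecanoe -zg --output-to-directory=tiles --no-tile-compression -l parcels parcels.geojson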

24
Dockerfile Normal file

@@ -0,0 +1,24 @@
# Start from ubuntu
FROM ubuntu:17.04
# Update repos and install dependencies
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get -y install build-essential libsqlite3-dev zlib1g-dev
# Create a directory and copy in all files
RUN mkdir -p /tmp/tippecanoe-src
WORKDIR /tmp/tippecanoe-src
COPY . /tmp/tippecanoe-src
# Build tippecanoe
RUN make \
&& make install
# Remove the temp directory and unneeded packages
WORKDIR /
RUN rm -rf /tmp/tippecanoe-src \
&& apt-get -y remove --purge build-essential && apt-get -y autoremove
# Run the default command to show usage
CMD tippecanoe --help
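
A short usage sketch for the image this Dockerfile builds; the image tag, mount paths, and file names are illustrative assumptions, not part of the diff:

    # Build the image from the repository root
    docker build -t tippecanoe .
    # Run tippecanoe on a GeoJSON file mounted from the host
    docker run --rm -v $(pwd):/data tippecanoe \
        tippecanoe -o /data/out.mbtiles -zg /data/in.geojson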


@@ -1,4 +1,4 @@
## [Visualizing a Month of Lightning](http://rousseau.io/2015/03/23/visualizing-a-month-of-lightning/) by Jordan Rousseau
## [Visualizing a Month of Lightning](http://rousseau.io/2015/03/23/visualizing-a-month-of-lightning) by Jordan Rousseau
![](http://rousseau.io/assets/img/ltg-studio-style.png)

244
Makefile

@@ -1,53 +1,243 @@
PREFIX ?= /usr/local
MANDIR ?= /usr/share/man/man1/
MANDIR ?= $(PREFIX)/share/man/man1/
BUILDTYPE ?= Release
SHELL = /bin/bash
all: tippecanoe enumerate decode tile-join
# inherit from env if set
CC := $(CC)
CXX := $(CXX)
CFLAGS := $(CFLAGS)
CXXFLAGS := $(CXXFLAGS) -std=c++11
LDFLAGS := $(LDFLAGS)
WARNING_FLAGS := -Wall -Wshadow -Wsign-compare
RELEASE_FLAGS := -O3 -DNDEBUG
DEBUG_FLAGS := -O0 -DDEBUG -fno-inline-functions -fno-omit-frame-pointer
ifeq ($(BUILDTYPE),Release)
FINAL_FLAGS := -g $(WARNING_FLAGS) $(RELEASE_FLAGS)
else
FINAL_FLAGS := -g $(WARNING_FLAGS) $(DEBUG_FLAGS)
endif
all: tippecanoe tippecanoe-enumerate tippecanoe-decode tile-join unit geojson2nd
docs: man/tippecanoe.1
install: tippecanoe
install: tippecanoe tippecanoe-enumerate tippecanoe-decode tile-join
mkdir -p $(PREFIX)/bin
mkdir -p $(MANDIR)
cp tippecanoe $(PREFIX)/bin/tippecanoe
cp man/tippecanoe.1 $(MANDIR)
cp tippecanoe-enumerate $(PREFIX)/bin/tippecanoe-enumerate
cp tippecanoe-decode $(PREFIX)/bin/tippecanoe-decode
cp tile-join $(PREFIX)/bin/tile-join
cp man/tippecanoe.1 $(MANDIR)/tippecanoe.1
uninstall:
rm $(PREFIX)/bin/tippecanoe $(PREFIX)/bin/tippecanoe-enumerate $(PREFIX)/bin/tippecanoe-decode $(PREFIX)/bin/tile-join $(MANDIR)/tippecanoe.1
man/tippecanoe.1: README.md
md2man-roff README.md > man/tippecanoe.1
vector_tile.pb.cc vector_tile.pb.h: vector_tile.proto
protoc --cpp_out=. vector_tile.proto
PG=
H = $(shell find . '(' -name '*.h' -o -name '*.hh' ')')
C = $(shell find . '(' -name '*.c' -o -name '*.cc' ')')
H = $(wildcard *.h) $(wildcard *.hpp)
C = $(wildcard *.c) $(wildcard *.cpp)
INCLUDES = -I/usr/local/include
INCLUDES = -I/usr/local/include -I.
LIBS = -L/usr/local/lib
tippecanoe: geojson.o jsonpull.o vector_tile.pb.o tile.o clip.o pool.o mbtiles.o geometry.o projection.o memfile.o clipper/clipper.o
g++ $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lm -lz -lprotobuf-lite -lsqlite3 -lpthread
tippecanoe: geojson.o jsonpull/jsonpull.o tile.o pool.o mbtiles.o geometry.o projection.o memfile.o mvt.o serial.o main.o text.o dirtiles.o plugin.o read_json.o write_json.o geobuf.o evaluator.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
enumerate: enumerate.o
gcc $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lsqlite3
tippecanoe-enumerate: enumerate.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CFLAGS) -o $@ $^ $(LDFLAGS) -lsqlite3
decode: decode.o vector_tile.pb.o projection.o
g++ $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lm -lz -lprotobuf-lite -lsqlite3
tippecanoe-decode: decode.o projection.o mvt.o write_json.o text.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3
tile-join: tile-join.o vector_tile.pb.o projection.o pool.o mbtiles.o
g++ $(PG) $(LIBS) -O3 -g -Wall -o $@ $^ -lm -lz -lprotobuf-lite -lsqlite3
tile-join: tile-join.o projection.o pool.o mbtiles.o mvt.o memfile.o dirtiles.o jsonpull/jsonpull.o text.o evaluator.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
libjsonpull.a: jsonpull.o
ar rc $@ $^
ranlib $@
geojson2nd: geojson2nd.o jsonpull/jsonpull.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
%.o: %.c $(H)
cc $(PG) $(INCLUDES) -O3 -g -Wall -c $<
unit: unit.o text.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
%.o: %.cc $(H)
g++ $(PG) $(INCLUDES) -O3 -g -Wall -c $<
-include $(wildcard *.d)
%.o: %.c
$(CC) -MMD $(PG) $(INCLUDES) $(FINAL_FLAGS) $(CFLAGS) -c -o $@ $<
%.o: %.cpp
$(CXX) -MMD $(PG) $(INCLUDES) $(FINAL_FLAGS) $(CXXFLAGS) -c -o $@ $<
clean:
rm tippecanoe *.o
rm -f ./tippecanoe ./tippecanoe-* ./tile-join ./unit *.o *.d */*.o */*.d
indent:
clang-format -i -style="{BasedOnStyle: Google, IndentWidth: 8, UseTab: Always, AllowShortIfStatementsOnASingleLine: false, ColumnLimit: 0, ContinuationIndentWidth: 8, SpaceAfterCStyleCast: true, IndentCaseLabels: false, AllowShortBlocksOnASingleLine: false, AllowShortFunctionsOnASingleLine: false}" $(C) $(H)
clang-format -i -style="{BasedOnStyle: Google, IndentWidth: 8, UseTab: Always, AllowShortIfStatementsOnASingleLine: false, ColumnLimit: 0, ContinuationIndentWidth: 8, SpaceAfterCStyleCast: true, IndentCaseLabels: false, AllowShortBlocksOnASingleLine: false, AllowShortFunctionsOnASingleLine: false, SortIncludes: false}" $(C) $(H)
TESTS = $(wildcard tests/*/out/*.json)
SPACE = $(NULL) $(NULL)
test: tippecanoe tippecanoe-decode $(addsuffix .check,$(TESTS)) raw-tiles-test parallel-test pbf-test join-test enumerate-test decode-test join-filter-test unit
./unit
# Work around Makefile and filename punctuation limits: _ for space, @ for :, % for /
%.json.check:
./tippecanoe -aD -f -o $@.mbtiles $(subst @,:,$(subst %,/,$(subst _, ,$(patsubst %.json.check,%,$(word 4,$(subst /, ,$@)))))) $(wildcard $(subst $(SPACE),/,$(wordlist 1,2,$(subst /, ,$@)))/*.json) < /dev/null
./tippecanoe-decode $@.mbtiles > $@.out
cmp $@.out $(patsubst %.check,%,$@)
rm $@.out $@.mbtiles
geobuf-test: geojson2nd $(addsuffix .checkbuf,$(TESTS))
# For quicker address sanitizer build, hope that regular JSON parsing is tested enough by parallel and join tests
fewer-tests: tippecanoe tippecanoe-decode geobuf-test raw-tiles-test parallel-test pbf-test join-test enumerate-test decode-test join-filter-test unit
# XXX Use proper makefile rules instead of a for loop
%.json.checkbuf:
for i in $(wildcard $(subst $(SPACE),/,$(wordlist 1,2,$(subst /, ,$@)))/*.json); do ./geojson2nd -w $$i | ./node_modules/geobuf/bin/json2geobuf > $$i.geobuf; done
./tippecanoe -aD -f -o $@.mbtiles $(subst @,:,$(subst %,/,$(subst _, ,$(patsubst %.json.checkbuf,%,$(word 4,$(subst /, ,$@)))))) $(addsuffix .geobuf,$(wildcard $(subst $(SPACE),/,$(wordlist 1,2,$(subst /, ,$@)))/*.json)) < /dev/null
./tippecanoe-decode $@.mbtiles | sed 's/checkbuf/check/g' > $@.out
cmp $@.out $(patsubst %.checkbuf,%,$@)
rm $@.out $@.mbtiles
parallel-test:
mkdir -p tests/parallel
perl -e 'for ($$i = 0; $$i < 20; $$i++) { $$lon = rand(360) - 180; $$lat = rand(180) - 90; $$k = rand(1); $$v = rand(1); print "{ \"type\": \"Feature\", \"properties\": { \"yes\": \"no\", \"who\": 1, \"$$k\": \"$$v\" }, \"geometry\": { \"type\": \"Point\", \"coordinates\": [ $$lon, $$lat ] } }\n"; }' > tests/parallel/in1.json
perl -e 'for ($$i = 0; $$i < 300000; $$i++) { $$lon = rand(360) - 180; $$lat = rand(180) - 90; print "{ \"type\": \"Feature\", \"properties\": { }, \"geometry\": { \"type\": \"Point\", \"coordinates\": [ $$lon, $$lat ] } }\n"; }' > tests/parallel/in2.json
perl -e 'for ($$i = 0; $$i < 20; $$i++) { $$lon = rand(360) - 180; $$lat = rand(180) - 90; print "{ \"type\": \"Feature\", \"properties\": { }, \"geometry\": { \"type\": \"Point\", \"coordinates\": [ $$lon, $$lat ] } }\n"; }' > tests/parallel/in3.json
perl -e 'for ($$i = 0; $$i < 20; $$i++) { $$lon = rand(360) - 180; $$lat = rand(180) - 90; $$v = rand(1); print "{ \"type\": \"Feature\", \"properties\": { }, \"tippecanoe\": { \"layer\": \"$$v\" }, \"geometry\": { \"type\": \"Point\", \"coordinates\": [ $$lon, $$lat ] } }\n"; }' > tests/parallel/in4.json
echo -n "" > tests/parallel/empty1.json
echo "" > tests/parallel/empty2.json
./tippecanoe -z5 -f -pi -l test -n test -o tests/parallel/linear-file.mbtiles tests/parallel/in[1234].json tests/parallel/empty[12].json
./tippecanoe -z5 -f -pi -l test -n test -P -o tests/parallel/parallel-file.mbtiles tests/parallel/in[1234].json tests/parallel/empty[12].json
cat tests/parallel/in[1234].json | ./tippecanoe -z5 -f -pi -l test -n test -o tests/parallel/linear-pipe.mbtiles
cat tests/parallel/in[1234].json | ./tippecanoe -z5 -f -pi -l test -n test -P -o tests/parallel/parallel-pipe.mbtiles
cat tests/parallel/in[1234].json | sed 's/^/@/' | tr '@' '\036' | ./tippecanoe -z5 -f -pi -l test -n test -o tests/parallel/implicit-pipe.mbtiles
./tippecanoe -z5 -f -pi -l test -n test -P -o tests/parallel/parallel-pipes.mbtiles <(cat tests/parallel/in1.json) <(cat tests/parallel/empty1.json) <(cat tests/parallel/empty2.json) <(cat tests/parallel/in2.json) /dev/null <(cat tests/parallel/in3.json) <(cat tests/parallel/in4.json)
./tippecanoe-decode tests/parallel/linear-file.mbtiles > tests/parallel/linear-file.json
./tippecanoe-decode tests/parallel/parallel-file.mbtiles > tests/parallel/parallel-file.json
./tippecanoe-decode tests/parallel/linear-pipe.mbtiles > tests/parallel/linear-pipe.json
./tippecanoe-decode tests/parallel/parallel-pipe.mbtiles > tests/parallel/parallel-pipe.json
./tippecanoe-decode tests/parallel/implicit-pipe.mbtiles > tests/parallel/implicit-pipe.json
./tippecanoe-decode tests/parallel/parallel-pipes.mbtiles > tests/parallel/parallel-pipes.json
cmp tests/parallel/linear-file.json tests/parallel/parallel-file.json
cmp tests/parallel/linear-file.json tests/parallel/linear-pipe.json
cmp tests/parallel/linear-file.json tests/parallel/parallel-pipe.json
cmp tests/parallel/linear-file.json tests/parallel/implicit-pipe.json
cmp tests/parallel/linear-file.json tests/parallel/parallel-pipes.json
rm tests/parallel/*.mbtiles tests/parallel/*.json
raw-tiles-test:
./tippecanoe -f -e tests/raw-tiles/raw-tiles tests/raw-tiles/hackspots.geojson -pC
diff -x '*.DS_Store' -rq tests/raw-tiles/raw-tiles tests/raw-tiles/compare
rm -rf tests/raw-tiles/raw-tiles
decode-test:
mkdir -p tests/muni/decode
./tippecanoe -z11 -Z11 -f -o tests/muni/decode/multi.mbtiles tests/muni/*.json
./tippecanoe-decode -l subway tests/muni/decode/multi.mbtiles > tests/muni/decode/multi.mbtiles.json.check
./tippecanoe-decode -c tests/muni/decode/multi.mbtiles > tests/muni/decode/multi.mbtiles.pipeline.json.check
./tippecanoe-decode --stats tests/muni/decode/multi.mbtiles > tests/muni/decode/multi.mbtiles.stats.json.check
cmp tests/muni/decode/multi.mbtiles.json.check tests/muni/decode/multi.mbtiles.json
cmp tests/muni/decode/multi.mbtiles.pipeline.json.check tests/muni/decode/multi.mbtiles.pipeline.json
cmp tests/muni/decode/multi.mbtiles.stats.json.check tests/muni/decode/multi.mbtiles.stats.json
rm -f tests/muni/decode/multi.mbtiles.json.check tests/muni/decode/multi.mbtiles tests/muni/decode/multi.mbtiles.pipeline.json.check tests/muni/decode/multi.mbtiles.stats.json.check
pbf-test:
./tippecanoe-decode tests/pbf/11-328-791.vector.pbf 11 328 791 > tests/pbf/11-328-791.vector.pbf.out
cmp tests/pbf/11-328-791.json tests/pbf/11-328-791.vector.pbf.out
rm tests/pbf/11-328-791.vector.pbf.out
./tippecanoe-decode -s EPSG:3857 tests/pbf/11-328-791.vector.pbf 11 328 791 > tests/pbf/11-328-791.3857.vector.pbf.out
cmp tests/pbf/11-328-791.3857.json tests/pbf/11-328-791.3857.vector.pbf.out
rm tests/pbf/11-328-791.3857.vector.pbf.out
enumerate-test:
./tippecanoe -z5 -f -o tests/ne_110m_admin_0_countries/out/enum.mbtiles tests/ne_110m_admin_0_countries/in.json
./tippecanoe-enumerate tests/ne_110m_admin_0_countries/out/enum.mbtiles > tests/ne_110m_admin_0_countries/out/enum.check
cmp tests/ne_110m_admin_0_countries/out/enum tests/ne_110m_admin_0_countries/out/enum.check
rm tests/ne_110m_admin_0_countries/out/enum.mbtiles tests/ne_110m_admin_0_countries/out/enum.check
join-test:
./tippecanoe -f -z12 -o tests/join-population/tabblock_06001420.mbtiles tests/join-population/tabblock_06001420.json
./tippecanoe -f -Z5 -z10 -o tests/join-population/macarthur.mbtiles -l macarthur tests/join-population/macarthur.json
./tile-join -f -Z6 -z9 -o tests/join-population/macarthur-6-9.mbtiles tests/join-population/macarthur.mbtiles
./tippecanoe-decode tests/join-population/macarthur-6-9.mbtiles > tests/join-population/macarthur-6-9.mbtiles.json.check
cmp tests/join-population/macarthur-6-9.mbtiles.json.check tests/join-population/macarthur-6-9.mbtiles.json
rm -f tests/join-population/macarthur-6-9.mbtiles.json.check tests/join-population/macarthur-6-9.mbtiles
./tippecanoe -f -d10 -D10 -Z9 -z11 -o tests/join-population/macarthur2.mbtiles -l macarthur tests/join-population/macarthur2.json
./tile-join --quiet --force -o tests/join-population/joined.mbtiles -x GEOID10 -c tests/join-population/population.csv tests/join-population/tabblock_06001420.mbtiles
./tile-join --quiet --force --no-tile-stats -o tests/join-population/joined-no-tile-stats.mbtiles -x GEOID10 -c tests/join-population/population.csv tests/join-population/tabblock_06001420.mbtiles
./tile-join -f -i -o tests/join-population/joined-i.mbtiles -x GEOID10 -c tests/join-population/population.csv tests/join-population/tabblock_06001420.mbtiles
./tile-join -f -o tests/join-population/merged.mbtiles tests/join-population/tabblock_06001420.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
./tile-join -f -c tests/join-population/windows.csv -o tests/join-population/windows.mbtiles tests/join-population/macarthur.mbtiles
./tippecanoe-decode --maximum-zoom=11 --minimum-zoom=4 tests/join-population/joined.mbtiles > tests/join-population/joined.mbtiles.json.check
./tippecanoe-decode --maximum-zoom=11 --minimum-zoom=4 tests/join-population/joined-no-tile-stats.mbtiles > tests/join-population/joined-no-tile-stats.mbtiles.json.check
./tippecanoe-decode tests/join-population/joined-i.mbtiles > tests/join-population/joined-i.mbtiles.json.check
./tippecanoe-decode tests/join-population/merged.mbtiles > tests/join-population/merged.mbtiles.json.check
./tippecanoe-decode tests/join-population/windows.mbtiles > tests/join-population/windows.mbtiles.json.check
cmp tests/join-population/joined.mbtiles.json.check tests/join-population/joined.mbtiles.json
cmp tests/join-population/joined-no-tile-stats.mbtiles.json.check tests/join-population/joined-no-tile-stats.mbtiles.json
cmp tests/join-population/joined-i.mbtiles.json.check tests/join-population/joined-i.mbtiles.json
cmp tests/join-population/merged.mbtiles.json.check tests/join-population/merged.mbtiles.json
cmp tests/join-population/windows.mbtiles.json.check tests/join-population/windows.mbtiles.json
./tile-join -f -l macarthur -n "macarthur name" -N "macarthur description" -A "macarthur attribution" -o tests/join-population/just-macarthur.mbtiles tests/join-population/merged.mbtiles
./tile-join -f -L macarthur -o tests/join-population/no-macarthur.mbtiles tests/join-population/merged.mbtiles
./tippecanoe-decode tests/join-population/just-macarthur.mbtiles > tests/join-population/just-macarthur.mbtiles.json.check
./tippecanoe-decode tests/join-population/no-macarthur.mbtiles > tests/join-population/no-macarthur.mbtiles.json.check
cmp tests/join-population/just-macarthur.mbtiles.json.check tests/join-population/just-macarthur.mbtiles.json
cmp tests/join-population/no-macarthur.mbtiles.json.check tests/join-population/no-macarthur.mbtiles.json
./tile-join --no-tile-compression -f -e tests/join-population/raw-merged-folder tests/join-population/tabblock_06001420.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
diff -x '*.DS_Store' -rq tests/join-population/raw-merged-folder tests/join-population/raw-merged-folder-compare
./tippecanoe -z12 -f -e tests/join-population/tabblock_06001420-folder tests/join-population/tabblock_06001420.json
./tippecanoe -Z5 -z10 -f -e tests/join-population/macarthur-folder -l macarthur tests/join-population/macarthur.json
./tippecanoe -d10 -D10 -Z9 -z11 -f -e tests/join-population/macarthur2-folder -l macarthur tests/join-population/macarthur2.json
./tile-join -f -o tests/join-population/merged-folder.mbtiles tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder
./tippecanoe-decode tests/join-population/merged-folder.mbtiles > tests/join-population/merged-folder.mbtiles.json.check
cmp tests/join-population/merged-folder.mbtiles.json.check tests/join-population/merged-folder.mbtiles.json
./tile-join -n "merged name" -N "merged description" -f -e tests/join-population/merged-mbtiles-to-folder tests/join-population/tabblock_06001420.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
./tile-join -n "merged name" -N "merged description" -f -e tests/join-population/merged-folders-to-folder tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder
diff -x '*.DS_Store' -rq tests/join-population/merged-mbtiles-to-folder tests/join-population/merged-folders-to-folder
./tile-join -f -c tests/join-population/windows.csv -o tests/join-population/windows-merged.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
./tile-join -c tests/join-population/windows.csv -f -e tests/join-population/windows-merged-folder tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
./tile-join -f -o tests/join-population/windows-merged2.mbtiles tests/join-population/windows-merged-folder
./tippecanoe-decode tests/join-population/windows-merged.mbtiles > tests/join-population/windows-merged.mbtiles.json.check
./tippecanoe-decode tests/join-population/windows-merged2.mbtiles > tests/join-population/windows-merged2.mbtiles.json.check
cmp tests/join-population/windows-merged.mbtiles.json.check tests/join-population/windows-merged2.mbtiles.json.check
./tile-join -f -o tests/join-population/macarthur-and-macarthur2-merged.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
./tile-join -f -e tests/join-population/macarthur-and-macarthur2-folder tests/join-population/macarthur.mbtiles tests/join-population/macarthur2-folder
./tile-join -f -o tests/join-population/macarthur-and-macarthur2-merged2.mbtiles tests/join-population/macarthur-and-macarthur2-folder
./tippecanoe-decode tests/join-population/macarthur-and-macarthur2-merged.mbtiles > tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check
./tippecanoe-decode tests/join-population/macarthur-and-macarthur2-merged2.mbtiles > tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
cmp tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
rm tests/join-population/tabblock_06001420.mbtiles tests/join-population/joined.mbtiles tests/join-population/joined-i.mbtiles tests/join-population/joined.mbtiles.json.check tests/join-population/joined-i.mbtiles.json.check tests/join-population/macarthur.mbtiles tests/join-population/merged.mbtiles tests/join-population/merged.mbtiles.json.check tests/join-population/merged-folder.mbtiles tests/join-population/macarthur2.mbtiles tests/join-population/windows.mbtiles tests/join-population/windows-merged.mbtiles tests/join-population/windows-merged2.mbtiles tests/join-population/windows.mbtiles.json.check tests/join-population/just-macarthur.mbtiles tests/join-population/no-macarthur.mbtiles tests/join-population/just-macarthur.mbtiles.json.check tests/join-population/no-macarthur.mbtiles.json.check tests/join-population/merged-folder.mbtiles.json.check tests/join-population/windows-merged.mbtiles.json.check tests/join-population/windows-merged2.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged.mbtiles tests/join-population/macarthur-and-macarthur2-merged2.mbtiles tests/join-population/macarthur-and-macarthur2-merged.mbtiles.json.check tests/join-population/macarthur-and-macarthur2-merged2.mbtiles.json.check
rm -rf tests/join-population/raw-merged-folder tests/join-population/tabblock_06001420-folder tests/join-population/macarthur-folder tests/join-population/macarthur2-folder tests/join-population/merged-mbtiles-to-folder tests/join-population/merged-folders-to-folder tests/join-population/windows-merged-folder tests/join-population/macarthur-and-macarthur2-folder
# Test renaming of layers
./tippecanoe -f -Z5 -z10 -o tests/join-population/macarthur.mbtiles -l macarthur1 tests/join-population/macarthur.json
./tippecanoe -f -Z5 -z10 -o tests/join-population/macarthur2.mbtiles -l macarthur2 tests/join-population/macarthur2.json
./tile-join -R macarthur1:one --rename-layer=macarthur2:two -f -o tests/join-population/renamed.mbtiles tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
./tippecanoe-decode tests/join-population/renamed.mbtiles > tests/join-population/renamed.mbtiles.json.check
cmp tests/join-population/renamed.mbtiles.json.check tests/join-population/renamed.mbtiles.json
rm -f tests/join-population/renamed.mbtiles.json.check tests/join-population/renamed.mbtiles.json.check tests/join-population/macarthur.mbtiles tests/join-population/macarthur2.mbtiles
join-filter-test:
# Comes out different from the direct tippecanoe run because null attributes are lost
./tippecanoe -z0 -f -o tests/feature-filter/out/all.mbtiles tests/feature-filter/in.json
./tile-join -J tests/feature-filter/filter -f -o tests/feature-filter/out/filtered.mbtiles tests/feature-filter/out/all.mbtiles
./tippecanoe-decode tests/feature-filter/out/filtered.mbtiles > tests/feature-filter/out/filtered.json.check
cmp tests/feature-filter/out/filtered.json.check tests/feature-filter/out/filtered.json.standard
rm -f tests/feature-filter/out/filtered.json.check tests/feature-filter/out/filtered.mbtiles tests/feature-filter/out/all.mbtiles
# Use this target to regenerate the standards that the tests are compared against
# after making a change that legitimately changes their output
prep-test: $(TESTS)
tests/%.json: Makefile tippecanoe tippecanoe-decode
./tippecanoe -f -o $@.check.mbtiles $(subst @,:,$(subst %,/,$(subst _, ,$(patsubst %.json,%,$(word 4,$(subst /, ,$@)))))) $(wildcard $(subst $(SPACE),/,$(wordlist 1,2,$(subst /, ,$@)))/*.json)
./tippecanoe-decode $@.check.mbtiles > $@
cmp $(patsubst %.check,%,$@) $@
rm $@.check.mbtiles

README.md

@ -1,8 +1,11 @@
tippecanoe
==========
Builds [vector tilesets](https://www.mapbox.com/developers/vector-tiles/) from large collections of [GeoJSON](http://geojson.org/)
features. This is a tool for [making maps from huge datasets](MADE_WITH.md).
Builds [vector tilesets](https://www.mapbox.com/developers/vector-tiles/) from large (or small) collections of [GeoJSON](http://geojson.org/) or [Geobuf](https://github.com/mapbox/geobuf) features,
[like these](MADE_WITH.md).
[![Build Status](https://travis-ci.org/mapbox/tippecanoe.svg)](https://travis-ci.org/mapbox/tippecanoe)
[![Coverage Status](https://coveralls.io/repos/mapbox/tippecanoe/badge.svg?branch=master&service=github)](https://coveralls.io/github/mapbox/tippecanoe?branch=master)
Intent
------
@ -38,7 +41,7 @@ Usage
-----
```sh
$ tippecanoe -o file.mbtiles [file.json ...]
$ tippecanoe -o file.mbtiles [options] [file.json file.geobuf ...]
```
If no files are specified, it reads GeoJSON from the standard input.
@ -49,72 +52,275 @@ You can concatenate multiple GeoJSON features or files together,
and it will parse out the features and ignore whatever other objects
it encounters.
Options
-------
Try this first
--------------
### Naming
If you aren't sure what options to use, try this:
* -l _name_: Layer name (default "file" if source is file.json or output is file.mbtiles). Only works if there is only one layer.
* -n _name_: Human-readable name (default file.json)
```sh
$ tippecanoe -o out.mbtiles -zg --drop-densest-as-needed in.geojson
```
### File control
The `-zg` option will make Tippecanoe choose a maximum zoom level that should be
high enough to reflect the precision of the original data. (If it turns out still
not to be as detailed as you want, use `-z` manually with a higher number.)
* -o _file_.mbtiles: Name the output file.
* -f: Delete the mbtiles file if it already exists instead of giving an error
* -t _directory_: Put the temporary files in _directory_.
If the tiles come out too big, the `--drop-densest-as-needed` option will make
Tippecanoe try dropping what should be the least visible features at each zoom level.
(If it drops too many features, use `-x` to leave out some feature attributes that
you didn't really need.)
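For example, building on the command above (the `speed_limit` attribute is a hypothetical name for something you could leave out):
```sh
$ tippecanoe -o out.mbtiles -zg --drop-densest-as-needed -x speed_limit in.geojson
```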
### Zoom levels and resolution
Examples
--------
* -z _zoom_: Base (maxzoom) zoom level (default 14)
* -Z _zoom_: Lowest (minzoom) zoom level (default 0)
* -d _detail_: Detail at base zoom level (default 12 at -z14 or higher, or 13 at -z13 or lower. Detail beyond 13 has rendering problems with Mapbox GL.)
* -D _detail_: Detail at lower zoom levels (default 10, for tile resolution of 1024)
* -m _detail_: Minimum detail that it will try if tiles are too big at regular detail (default 7)
* -b _pixels_: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"--1/256th of the tile width or height. (default 5)
### Properties
* -x _name_: Exclude the named properties from all features
* -y _name_: Include the named properties in all features, excluding all those not explicitly named
* -X: Exclude all properties and encode only geometries
### Point simplification
* -r _rate_: Rate at which dots are dropped at lower zoom levels (default 2.5)
* -g _gamma_: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
### Doing more
* -ac: Coalesce adjacent line and polygon features that have the same properties
* -ar: Try reversing the directions of lines to make them coalesce and compress better
* -ao: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce
* -al: Let "dot" dropping at lower zooms apply to lines too
### Doing less
* -ps: Don't simplify lines
* -pf: Don't limit tiles to 200,000 features
* -pk: Don't limit tiles to 500K bytes
* -pd: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries.
* -pi: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot-dropping is still geographic, which means it also undoes -ao).
* -q: Work quietly instead of reporting progress
Example
-------
Create a tileset of TIGER roads for Alameda County, to zoom level 13, with a custom layer name and description:
```sh
$ tippecanoe -o alameda.mbtiles -l alameda -n "Alameda County from TIGER" -z13 tl_2014_06001_roads.json
```
Create a tileset of all TIGER roads, at only zoom level 12, but with higher detail than normal,
with a custom layer name and description, and leaving out the `LINEARID` and `RTTYP` attributes:
```
$ cat tiger/tl_2014_*_roads.json | tippecanoe -o tiger.mbtiles -l roads -n "All TIGER roads, one zoom" -z12 -Z12 -d14 -x LINEARID -x RTTYP
```
Options
-------
There are a lot of options. A lot of the time you won't want to use any of them
other than `-o` _output_`.mbtiles` to name the output file, and probably `-f` to
delete the file that already exists with that name.
If you aren't sure what the right maxzoom is for your data, `-zg` will guess one for you
based on the density of features.
If you are mapping point features, you will often want to use `-Bg` to automatically choose
a base zoom level for dot dropping. If that doesn't work out for you, try
`-r1 --drop-fraction-as-needed` to turn off the normal dot dropping and instead
only drop features if the tiles get too big.
If you are mapping points or polygons, you will often want to use `--drop-densest-as-needed`
to drop some of them if necessary to make the low zoom levels work.
If your features have a lot of attributes, use `-y` to keep only the ones you really need.
If your input is formatted as newline-delimited GeoJSON, use `-P` to make input parsing a lot faster.
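A command that follows those suggestions might look like this (the input file name and the `name` attribute are hypothetical):
```sh
$ tippecanoe -o out.mbtiles -f -zg --drop-densest-as-needed -y name -P places.json
```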
### Output tileset
* `-o` _file_`.mbtiles` or `--output=`_file_`.mbtiles`: Name the output file.
* `-e` _directory_ or `--output-to-directory`=_directory_: Write tiles to the specified *directory* instead of to an mbtiles file.
* `-f` or `--force`: Delete the mbtiles file if it already exists instead of giving an error
* `-F` or `--allow-existing`: Proceed (without deleting existing data) if the metadata or tiles table already exists
or if metadata fields can't be set. You probably don't want to use this.
### Tileset description and attribution
* `-n` _name_ or `--name=`_name_: Human-readable name for the tileset (default file.json)
* `-A` _text_ or `--attribution=`_text_: Attribution (HTML) to be shown with maps that use data from this tileset.
* `-N` _description_ or `--description=`_description_: Description for the tileset (default file.mbtiles)
### Input files and layer names
* _name_`.json` or _name_`.geojson`: Read the named GeoJSON input file into a layer called _name_.
* _name_`.geobuf`: Read the named Geobuf input file into a layer called _name_.
* `-l` _name_ or `--layer=`_name_: Use the specified layer name instead of deriving a name from the input filename or output tileset. If there are multiple input files
specified, the files are all merged into the single named layer, even if they try to specify individual names with `-L`.
* `-L` _name_`:`_file.json_ or `--named-layer=`_name_`:`_file.json_: Specify layer names for individual files. If your shell supports it, you can use a subshell redirect like `-L` _name_`:<(cat dir/*.json)` to specify a layer name for the output of streamed input.
### Parallel processing of input
* `-P` or `--read-parallel`: Use multiple threads to read different parts of each GeoJSON input file at once.
This will only work if the input is line-delimited JSON with each Feature on its
own line, because it knows nothing of the top-level structure around the Features. Spurious "EOF" error
messages may result otherwise.
Performance will be better if the input is a named file that can be mapped into memory
rather than a stream that can only be read sequentially.
If the input file begins with the [RFC 8142](https://tools.ietf.org/html/rfc8142) record separator,
parallel processing of input will be invoked automatically, splitting at record separators rather
than at all newlines.
Parallel processing will also be automatic if the input file is in Geobuf format.
### Projection of input
* `-s` _projection_ or `--projection=`_projection_: Specify the projection of the input data. Currently supported are `EPSG:4326` (WGS84, the default) and `EPSG:3857` (Web Mercator). In general you should use WGS84 for your input files if at all possible.
### Zoom levels
* `-z` _zoom_ or `--maximum-zoom=`_zoom_: Maxzoom: the highest zoom level for which tiles are generated (default 14)
* `-zg` or `--maximum-zoom=g`: Guess what is probably a reasonable maxzoom based on the spacing of features.
* `-Z` _zoom_ or `--minimum-zoom=`_zoom_: Minzoom: the lowest zoom level for which tiles are generated (default 0)
* `-ae` or `--extend-zooms-if-still-dropping`: Increase the maxzoom if features are still being dropped at that zoom level.
The detail and simplification options that ordinarily apply only to the maximum zoom level will apply both to the originally
specified maximum zoom and to any levels added beyond that.
### Tile resolution
* `-d` _detail_ or `--full-detail=`_detail_: Detail at max zoom level (default 12, for tile resolution of 2^12=4096)
* `-D` _detail_ or `--low-detail=`_detail_: Detail at lower zoom levels (default 12, for tile resolution of 2^12=4096)
* `-m` _detail_ or `--minimum-detail=`_detail_: Minimum detail that it will try if tiles are too big at regular detail (default 7)
All internal math is done in terms of a 32-bit tile coordinate system, so 1/(2^32) of the size of Earth,
or about 1cm, is the smallest distinguishable distance. If _maxzoom_ + _detail_ > 32, no additional
resolution is gained over what a smaller _maxzoom_ or _detail_ would give.
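As a rough check of the "about 1cm" figure: dividing an equatorial circumference of about 40,075 km (40,075,000,000 mm) by 2^32 gives a little over 9 mm:
```sh
$ echo '40075000000 / 2^32' | bc -l
9.3306...
```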
### Filtering feature attributes
* `-x` _name_ or `--exclude=`_name_: Exclude the named properties from all features
* `-y` _name_ or `--include=`_name_: Include the named properties in all features, excluding all those not explicitly named
* `-X` or `--exclude-all`: Exclude all properties and encode only geometries
* `-T`_attribute_`:`_type_ or `--attribute-type=`_attribute_`:`_type_: Coerce the named feature _attribute_ to be of the specified _type_.
The _type_ may be `string`, `float`, `int`, or `bool`.
If the type is `bool`, then original attributes of `0` (or, if numeric, `0.0`, etc.), `false`, `null`, or the empty string become `false`, and otherwise become `true`.
If the type is `float` or `int` and the original attribute was non-numeric, it becomes `0`.
If the type is `int` and the original attribute was floating-point, it is rounded to the nearest integer. (See the second example below.)
* `-j` *filter* or `--feature-filter`=*filter*: Check features against a per-layer filter (as defined in the [Mapbox GL Style Specification](https://www.mapbox.com/mapbox-gl-js/style-spec/#types-filter)) and only include those that match. Any features in layers that have no filter specified will be passed through. Filters for the layer `"*"` apply to all layers.
* `-J` *filter-file* or `--feature-filter-file`=*filter-file*: Like `-j`, but read the filter from a file.
Example: to find the Natural Earth countries with low `scalerank` but high `LABELRANK`:
```
tippecanoe -z5 -o filtered.mbtiles -j '{ "ne_10m_admin_0_countries": [ "all", [ "<", "scalerank", 3 ], [ ">", "LABELRANK", 5 ] ] }' ne_10m_admin_0_countries.geojson
```
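Another example, this time for `-T`: coerce a hypothetical `POP2010` attribute that arrives as strings into integers:
```
tippecanoe -z5 -o typed.mbtiles -T POP2010:int blocks.geojson
```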
### Dropping a fixed fraction of features by zoom level
* `-r` _rate_ or `--drop-rate=`_rate_: Rate at which dots are dropped at zoom levels below basezoom (default 2.5).
If you use `-rg`, it will guess a drop rate that will keep at most 50,000 features in the densest tile.
You can also specify a marker-width with `-rg`*width* to allow fewer features in the densest tile to
compensate for the larger marker, or `-rf`*number* to allow at most *number* features in the densest tile.
* `-B` _zoom_ or `--base-zoom=`_zoom_: Base zoom, the level at and above which all points are included in the tiles (default maxzoom).
If you use `-Bg`, it will guess a zoom level that will keep at most 50,000 features in the densest tile.
You can also specify a marker-width with `-Bg`*width* to allow fewer features in the densest tile to
compensate for the larger marker, or `-Bf`*number* to allow at most *number* features in the densest tile.
* `-al` or `--drop-lines`: Let "dot" dropping at lower zooms apply to lines too
* `-ap` or `--drop-polygons`: Let "dot" dropping at lower zooms apply to polygons too
### Dropping a fraction of features to keep under tile size limits
* `-as` or `--drop-densest-as-needed`: If a tile is too large, try to reduce it to under 500K by increasing the minimum spacing between features. The discovered spacing applies to the entire zoom level.
* `-ad` or `--drop-fraction-as-needed`: Dynamically drop some fraction of features from each zoom level to keep large tiles under the 500K size limit. (This is like `-pd` but applies to the entire zoom level, not to each tile.)
* `-an` or `--drop-smallest-as-needed`: Dynamically drop the smallest features (physically smallest: the shortest lines or the smallest polygons) from each zoom level to keep large tiles under the 500K size limit. This option will not work for point features.
* `-aN` or `--coalesce-smallest-as-needed`: Dynamically combine the smallest features (physically smallest: the shortest lines or the smallest polygons) from each zoom level into other nearby features to keep large tiles under the 500K size limit. This option will not work for point features, and will probably not help very much with LineStrings. It is mostly intended for polygons, to maintain the full original area covered by polygons while still reducing the feature count somehow. The attributes of the small polygons are *not* preserved into the combined features, only their geometry.
* `-pd` or `--force-feature-limit`: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries. (This is like `-ad` but applies to each tile individually, not to the entire zoom level.) You probably don't want to use this.
### Dropping tightly overlapping features
* `-g` _gamma_ or `--gamma=`_gamma_: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
* `-aG` or `--increase-gamma-as-needed`: If a tile is too large, try to reduce it to under 500K by increasing the `-g` gamma. The discovered gamma applies to the entire zoom level. You probably want to use `--drop-densest-as-needed` instead.
### Line and polygon simplification
* `-S` _scale_ or `--simplification=`_scale_: Multiply the tolerance for line and polygon simplification by _scale_. The standard tolerance tries to keep
the line or polygon within one tile unit of its proper location. You can probably go up to about 10 without too much visible difference.
* `-ps` or `--no-line-simplification`: Don't simplify lines and polygons
* `-pS` or `--simplify-only-low-zooms`: Don't simplify lines and polygons at maxzoom (but do simplify at lower zooms)
* `-pt` or `--no-tiny-polygon-reduction`: Don't combine the area of very small polygons into small squares that represent their combined area.
### Attempts to improve shared polygon boundaries
* `-ab` or `--detect-shared-borders`: In the manner of [TopoJSON](https://github.com/mbostock/topojson/wiki/Introduction), detect borders that are shared between multiple polygons and simplify them identically in each polygon. This takes more time and memory than considering each polygon individually.
* `-aL` or `--grid-low-zooms`: At all zoom levels below _maxzoom_, snap all lines and polygons to a stairstep grid instead of allowing diagonals. You will also want to specify a tile resolution, probably `-D8`. This option provides a way to display continuous parcel, gridded, or binned data at low zooms without overwhelming the tiles with tiny polygons, since features will either get stretched out to the grid unit or lost entirely, depending on how they happened to be aligned in the original data. You probably don't want to use this.
### Controlling clipping to tile boundaries
* `-b` _pixels_ or `--buffer=`_pixels_: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"—1/256th of the tile width or height. (default 5)
* `-pc` or `--no-clipping`: Don't clip features to the size of the tile. If a feature overlaps the tile's bounds or buffer at all, it is included completely. Be careful: this can produce very large tilesets, especially with large polygons.
* `-pD` or `--no-duplication`: As with `--no-clipping`, each feature is included intact instead of cut to tile boundaries. In addition, it is included only in a single tile per zoom level rather than potentially in multiple copies. Clients of the tileset must check adjacent tiles (possibly some distance away) to ensure they have all features.
### Reordering features within each tile
* `-pi` or `--preserve-input-order`: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot-dropping is still geographic, which means it also undoes `-ao`).
* `-ao` or `--reorder`: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce. You probably don't want to use this.
* `-ac` or `--coalesce`: Coalesce adjacent line and polygon features that have the same properties. You probably don't want to use this.
* `-ar` or `--reverse`: Try reversing the directions of lines to make them coalesce and compress better. You probably don't want to use this.
### Adding calculated attributes
* `-ag` or `--calculate-feature-density`: Add a new attribute, `tippecanoe_feature_density`, to each feature, to record how densely features are spaced in that area of the tile. You can use this attribute in the style to produce a glowing effect where points are densely packed. It can range from 0 in the sparsest areas to 255 in the densest.
### Trying to correct bad source geometry
* `-aw` or `--detect-longitude-wraparound`: Detect when adjacent points within a feature jump to the other side of the world, and try to fix the geometry.
### Setting or disabling tile size limits
* `-M` _bytes_ or `--maximum-tile-bytes=`_bytes_: Use the specified number of _bytes_ as the maximum compressed tile size instead of 500K.
* `-pf` or `--no-feature-limit`: Don't limit tiles to 200,000 features
* `-pk` or `--no-tile-size-limit`: Don't limit tiles to 500K bytes
* `-pC` or `--no-tile-compression`: Don't compress the PBF vector tile data.
* `-pg` or `--no-tile-stats`: Don't generate the `tilestats` row in the tileset metadata. Uploads without [tilestats](https://github.com/mapbox/mapbox-geostats) will take longer to process.
### Temporary storage
* `-t` _directory_ or `--temporary-directory=`_directory_: Put the temporary files in _directory_.
If you don't specify, it will use `/tmp`.
### Progress indicator
* `-q` or `--quiet`: Work quietly instead of reporting progress
* `-v` or `--version`: Report Tippecanoe's version number
### Filters
* `-C` _command_ or `--prefilter=`_command_: Specify a shell filter command to be run at the start of assembling each tile
* `-c` _command_ or `--postfilter=`_command_: Specify a shell filter command to be run at the end of assembling each tile
The pre- and post-filter commands allow you to do optional filtering or transformation on the features of each tile
as it is created. They are shell commands, run with the zoom level, X, and Y as the `$1`, `$2`, and `$3` arguments.
Future versions of Tippecanoe may add additional arguments for more context.
The features are provided to the filter
as a series of newline-delimited GeoJSON objects on the standard input, and `tippecanoe` expects to read another
set of GeoJSON features from the filter's standard output.
The prefilter receives the features at the highest available resolution, before line simplification,
polygon topology repair, gamma calculation, dynamic feature dropping, or other internal processing.
The postfilter receives the features at tile resolution, after simplification, cleaning, and dropping.
The layer name is provided as part of the `tippecanoe` element of the feature and must be passed through
to keep the feature in its correct layer. In the case of the prefilter, the `tippecanoe` element may also
contain `index`, `sequence`, and `extent` elements, which must be passed through for internal operations like
`--drop-densest-as-needed`, `--drop-smallest-as-needed`, and `--preserve-input-order` to work.
#### Examples:
* Make a tileset of the Natural Earth countries to zoom level 5, and also copy the GeoJSON features
to files in a `tiles/z/x/y.geojson` directory hierarchy.
```
tippecanoe -o countries.mbtiles -z5 -C 'mkdir -p tiles/$1/$2; tee tiles/$1/$2/$3.geojson' ne_10m_admin_0_countries.json
```
* Make a tileset of the Natural Earth countries to zoom level 5, but including only those tiles that
intersect the [bounding box of Germany](https://www.flickr.com/places/info/23424829).
(The `limit-tiles-to-bbox` script is [in the Tippecanoe source directory](filters/limit-tiles-to-bbox).)
```
tippecanoe -o countries.mbtiles -z5 -C './filters/limit-tiles-to-bbox 5.8662 47.2702 15.0421 55.0581 $*' ne_10m_admin_0_countries.json
```
* Make a tileset of TIGER roads in Tippecanoe County, leaving out all but primary and secondary roads (as [classified by TIGER](https://www.census.gov/geo/reference/mtfcc.html)) below zoom level 11.
```
tippecanoe -o roads.mbtiles -c 'if [ $1 -lt 11 ]; then grep "\"MTFCC\": \"S1[12]00\""; else cat; fi' tl_2016_18157_roads.json
```
Environment
-----------
Tippecanoe ordinarily uses as many parallel threads as the operating system claims that CPUs are available.
You can override this number by setting the `TIPPECANOE_MAX_THREADS` environmental variable.
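For example, to limit a run to four threads:
```sh
$ TIPPECANOE_MAX_THREADS=4 tippecanoe -o out.mbtiles in.geojson
```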
GeoJSON extension
-----------------
Tippecanoe defines a GeoJSON extension that you can use to specify the minimum and/or maximum zoom level
at which an individual feature will be included in the vector tile dataset being produced.
at which an individual feature will be included in the vector tileset being produced.
If you have a feature like this:
```
@ -131,7 +337,24 @@ If you have a feature like this:
with a `tippecanoe` object specifying a `maxzoom` of 9 and a `minzoom` of 4, the feature
will only appear in the vector tiles for zoom levels 4 through 9. Note that the `tippecanoe`
object belongs to the Feature, not to its `properties`.
object belongs to the Feature, not to its `properties`. If you specify a `minzoom` for a feature,
it will be preserved down to that zoom level even if dot-dropping with `-r` would otherwise have
dropped it.
You can also specify a layer name in the `tippecanoe` object, which will take precedence over
the filename or name specified using `--layer`, like this:
```
{
"type" : "Feature",
"tippecanoe" : { "layer" : "streets" },
"properties" : { "FULLNAME" : "N Vasco Rd" },
"geometry" : {
"type" : "LineString",
"coordinates" : [ [ -121.733350, 37.767671 ], [ -121.733600, 37.767483 ], [ -121.733131, 37.766952 ] ]
}
}
```
Point styling
-------------
@ -141,7 +364,7 @@ coordinated with the base zoom level and dot-dropping rate. You can use this she
calculate the appropriate marker-width at high zoom levels to match the fraction of dots
that were dropped at low zoom levels.
If you used `-z` to change the base zoom level or `-r` to change the
If you used `-B` or `-z` to change the base zoom level or `-r` to change the
dot-dropping rate, replace them in the `basezoom` and `rate` below.
awk 'BEGIN {
@ -166,7 +389,9 @@ Geometric simplifications
At every zoom level, line and polygon features are subjected to Douglas-Peucker
simplification to the resolution of the tile.
For point features, it drops 1/2.5 of the dots for each zoom level above the base.
For point features, it drops 1/2.5 of the dots for each zoom level above the
point base zoom (which is normally the same as the `-z` max zoom, but can be
a different zoom specified with `-B` if you have precise but sparse data).
I don't know why 2.5 is the appropriate number, but the densities of many different
data sets fall off at about this same rate. You can use -r to specify a different rate.
@ -178,13 +403,14 @@ For line features, it drops any features that are too small to draw at all.
This still leaves the lower zooms too dark (and too dense for the 500K tile limit,
in some places), so I need to figure out an equitable way to throw features away.
Any polygons that are smaller than a minimum area (currently 4 square subpixels) will
Unless you specify `--no-tiny-polygon-reduction`,
any polygons that are smaller than a minimum area (currently 4 square subpixels) will
have their probability diffused, so that some of them will be drawn as a square of
this minimum size and others will not be drawn at all, preserving the total area that
all of them should have had together.
Features in the same tile that share the same type and attributes are coalesced
together into a single geometry. You are strongly encouraged to use -x to exclude
together into a single geometry if you use `--coalesce`. You are strongly encouraged to use -x to exclude
any unnecessary properties to reduce wasted file size.
If a tile is larger than 500K, it will try encoding that tile at progressively
@ -193,18 +419,12 @@ lower resolutions before failing if it still doesn't fit.
Development
-----------
Requires protoc and sqlite3. Rebuilding the manpage
Requires sqlite3 and zlib (should already be installed on MacOS). Rebuilding the manpage
uses md2man (`gem install md2man`).
MacOS:
brew install protobuf
Linux:
sudo apt-get install libprotobuf-dev
sudo apt-get install protobuf-compiler
sudo apt-get install libsqlite3-dev
sudo apt-get install build-essential libsqlite3-dev zlib1g-dev
Then build:
@ -214,6 +434,35 @@ and perhaps
make install
Tippecanoe now requires features from the 2011 C++ standard. If your compiler is older than
that, you will need to install a newer one. On MacOS, updating to the latest Xcode should
get you a new enough version of `clang++`. On Linux, you should be able to upgrade `g++` with
```
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get update -y
sudo apt-get install -y g++-5
export CXX=g++-5
```
Docker Image
------------
A tippecanoe Docker image can be built from source and run as a container,
automatically installing its dependencies, so that tippecanoe can run on any system
supported by Docker.
```docker
$ docker build -t tippecanoe:latest .
$ docker run -it --rm \
-v /tiledata:/data \
tippecanoe:latest \
tippecanoe --output=/data/output.mbtiles /data/example.geojson
```
The commands above will build a Docker image from the source and compile the
latest version. The image supports all tippecanoe flags and options.
Examples
------
@ -227,21 +476,64 @@ The name is [a joking reference](http://en.wikipedia.org/wiki/Tippecanoe_and_Tyl
tile-join
=========
Tile-join is a tool for joining new attributes from a CSV file to features that
have already been tiled with tippecanoe. It reads the tiles from an existing .mbtiles
file, matches them against the records of the CSV, and writes out a new tileset.
Tile-join is a tool for copying and merging vector mbtiles files and for
joining new attributes from a CSV file to existing features in them.
It reads the tiles from an
existing .mbtiles file or a directory of tiles, matches them against the
records of the CSV (if one is specified), and writes out a new tileset.
If you specify multiple source mbtiles files or source directories of tiles,
all the sources are read and their combined contents are written to the new
mbtiles output. If they define the same layers or the same tiles, the layers
or tiles are merged.
The options are:
* -o *out.mbtiles*: Write the new tiles to the specified .mbtiles file
* -f: Remove *out.mbtiles* if it already exists
* -c *match.csv*: Use *match.csv* as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
* -x *key*: Remove attributes of type *key* from the output. You can use this to remove the field you are matching against if you no longer need it after joining, or to remove any other attributes you don't want.
* -i: Only include features that matched the CSV.
### Output tileset
Because tile-join just copies the geometries to the new .mbtiles without processing them,
* `-o` *out.mbtiles* or `--output=`*out.mbtiles*: Write the new tiles to the specified .mbtiles file.
* `-e` *directory* or `--output-to-directory=`*directory*: Write the new tiles to the specified directory instead of to an mbtiles file.
* `-f` or `--force`: Remove *out.mbtiles* if it already exists.
### Tileset description and attribution
* `-A` *attribution* or `--attribution=`*attribution*: Set the attribution string.
* `-n` *name* or `--name=`*name*: Set the tileset name.
* `-N` *description* or `--description=`*description*: Set the tileset description.
### Layer filtering and naming
* `-l` *layer* or `--layer=`*layer*: Include the named layer in the output. You can specify multiple `-l` options to keep multiple layers. If you don't specify, they will all be retained.
* `-L` *layer* or `--exclude-layer=`*layer*: Remove the named layer from the output. You can specify multiple `-L` options to remove multiple layers.
* `-R`*old*`:`*new* or `--rename-layer=`*old*`:`*new*: Rename the layer named *old* to be named *new* instead. You can specify multiple `-R` options to rename multiple layers. Renaming happens before filtering.
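For example, to merge two tilesets while renaming their layers (the file and layer names here mirror the rename test in the Makefile above):
```sh
$ tile-join -f -R macarthur1:one -R macarthur2:two -o renamed.mbtiles macarthur.mbtiles macarthur2.mbtiles
```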
### Zoom levels
* `-z` _zoom_ or `--maximum-zoom=`_zoom_: Don't copy tiles from higher zoom levels than the specified zoom
* `-Z` _zoom_ or `--minimum-zoom=`_zoom_: Don't copy tiles from lower zoom levels than the specified zoom
### Merging attributes from a CSV file
* `-c` *match*`.csv` or `--csv=`*match*`.csv`: Use *match*`.csv` as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
### Filtering features and feature attributes
* `-x` *key* or `--exclude=`*key*: Remove attributes of type *key* from the output. You can use this to remove the field you are matching against if you no longer need it after joining, or to remove any other attributes you don't want.
* `-i` or `--if-matched`: Only include features that matched the CSV.
* `-j` *filter* or `--feature-filter`=*filter*: Check features against a per-layer filter (as defined in the [Mapbox GL Style Specification](https://www.mapbox.com/mapbox-gl-js/style-spec/#types-filter)) and only include those that match. Any features in layers that have no filter specified will be passed through. Filters for the layer `"*"` apply to all layers.
* `-J` *filter-file* or `--feature-filter-file`=*filter-file*: Like `-j`, but read the filter from a file.
### Setting or disabling tile size limits
* `-pk` or `--no-tile-size-limit`: Don't skip tiles larger than 500K.
* `-pC` or `--no-tile-compression`: Don't compress the PBF vector tile data.
* `-pg` or `--no-tile-stats`: Don't generate the `tilestats` row in the tileset metadata. Uploads without [tilestats](https://github.com/mapbox/mapbox-geostats) will take longer to process.
Because tile-join just copies the geometries to the new .mbtiles without processing them
(except to rescale the extents if necessary),
it doesn't have any of tippecanoe's recourses if the new tiles are bigger than the 500K tile limit.
If a tile is too big, it is just left out of the new tileset.
If a tile is too big and you haven't specified `-pk`, it is just left out of the new tileset.
Example
-------
@ -288,3 +580,42 @@ Then you can join those populations to the geometries and discard the no-longer-
```sh
./tile-join -o population.mbtiles -x GEOID10 -c population.csv tl_2010_06001_tabblock10.mbtiles
```
tippecanoe-enumerate
====================
The `tippecanoe-enumerate` utility lists the tiles that an `mbtiles` file defines.
Each line of the output lists the name of the `mbtiles` file and the zoom, x, and y
coordinates of one of the tiles. It does basically the same thing as
select zoom_level, tile_column, (1 << zoom_level) - 1 - tile_row from tiles;
on the file in sqlite3.
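Sample output (the file name and tile coordinates are illustrative):
```
$ tippecanoe-enumerate file.mbtiles
file.mbtiles 0 0 0
file.mbtiles 1 0 0
file.mbtiles 1 1 0
```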
tippecanoe-decode
=================
The `tippecanoe-decode` utility turns vector mbtiles back to GeoJSON. You can use it either
on an entire file:
tippecanoe-decode file.mbtiles
or on an individual tile:
tippecanoe-decode file.mbtiles zoom x y
tippecanoe-decode file.vector.pbf zoom x y
If you decode an entire file, you get a nested `FeatureCollection` identifying each
tile and layer separately. Note that the same features generally appear at all zooms,
so the output for the file will have many copies of the same features at different
resolutions.
### Options
* `-s` _projection_ or `--projection=`*projection*: Specify the projection of the output data. Currently supported are EPSG:4326 (WGS84, the default) and EPSG:3857 (Web Mercator).
* `-z` _maxzoom_ or `--maximum-zoom=`*maxzoom*: Specify the highest zoom level to decode from the tileset
* `-Z` _minzoom_ or `--minimum-zoom=`*minzoom*: Specify the lowest zoom level to decode from the tileset
* `-l` _layer_ or `--layer=`*layer*: Decode only layers with the specified names. (Multiple `-l` options can be specified.)
* `-c` or `--tag-layer-and-zoom`: Include each feature's layer and zoom level as part of its `tippecanoe` object rather than as a FeatureCollection wrapper
* `-S` or `--stats`: Just report statistics about each tile's size and the number of features in it, as a JSON structure.
* `-f` or `--force`: Decode tiles even if polygon ring order or closure problems are detected
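For example, to decode only a layer named `subway` (as in the decode test in the Makefile above), or to report just per-tile statistics:
```
tippecanoe-decode -l subway file.mbtiles > subway.json
tippecanoe-decode --stats file.mbtiles
```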


@ -1,5 +1,4 @@
Boost Software License - Version 1.0 - August 17th, 2003
http://www.boost.org/LICENSE_1_0.txt
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
@ -21,4 +20,4 @@ FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
DEALINGS IN THE SOFTWARE.

catch/catch.hpp Normal file

File diff suppressed because it is too large

clip.c

@ -1,84 +0,0 @@
#include "clip.h"
#define INSIDE 0
#define LEFT 1
#define RIGHT 2
#define BOTTOM 4
#define TOP 8
static int computeOutCode(double x, double y, double xmin, double ymin, double xmax, double ymax) {
int code = INSIDE;
if (x < xmin) {
code |= LEFT;
} else if (x > xmax) {
code |= RIGHT;
}
if (y < ymin) {
code |= BOTTOM;
} else if (y > ymax) {
code |= TOP;
}
return code;
}
int clip(double *x0, double *y0, double *x1, double *y1, double xmin, double ymin, double xmax, double ymax) {
int outcode0 = computeOutCode(*x0, *y0, xmin, ymin, xmax, ymax);
int outcode1 = computeOutCode(*x1, *y1, xmin, ymin, xmax, ymax);
int accept = 0;
int changed = 0;
while (1) {
if (!(outcode0 | outcode1)) { // Bitwise OR is 0. Trivially accept and get out of loop
accept = 1;
break;
} else if (outcode0 & outcode1) { // Bitwise AND is not 0. Trivially reject and get out of loop
break;
} else {
// failed both tests, so calculate the line segment to clip
// from an outside point to an intersection with clip edge
double x = *x0, y = *y0;
// At least one endpoint is outside the clip rectangle; pick it.
int outcodeOut = outcode0 ? outcode0 : outcode1;
// Now find the intersection point;
// use formulas y = y0 + slope * (x - x0), x = x0 + (1 / slope) * (y - y0)
if (outcodeOut & TOP) { // point is above the clip rectangle
x = *x0 + (*x1 - *x0) * (ymax - *y0) / (*y1 - *y0);
y = ymax;
} else if (outcodeOut & BOTTOM) { // point is below the clip rectangle
x = *x0 + (*x1 - *x0) * (ymin - *y0) / (*y1 - *y0);
y = ymin;
} else if (outcodeOut & RIGHT) { // point is to the right of clip rectangle
y = *y0 + (*y1 - *y0) * (xmax - *x0) / (*x1 - *x0);
x = xmax;
} else if (outcodeOut & LEFT) { // point is to the left of clip rectangle
y = *y0 + (*y1 - *y0) * (xmin - *x0) / (*x1 - *x0);
x = xmin;
}
// Now we move outside point to intersection point to clip
// and get ready for next pass.
if (outcodeOut == outcode0) {
*x0 = x;
*y0 = y;
outcode0 = computeOutCode(*x0, *y0, xmin, ymin, xmax, ymax);
changed = 1;
} else {
*x1 = x;
*y1 = y;
outcode1 = computeOutCode(*x1, *y1, xmin, ymin, xmax, ymax);
changed = 1;
}
}
}
if (accept == 0) {
return 0;
} else {
return changed + 1;
}
}

clip.h

@ -1 +0,0 @@
int clip(double *x0, double *y0, double *x1, double *y1, double xmin, double ymin, double xmax, double ymax);


@ -1,407 +0,0 @@
=====================================================================
Clipper Change Log
=====================================================================
v6.2.1 (31 October 2014) Rev 482
* Bugfix in ClipperOffset.Execute where the Polytree.IsHole property
was returning incorrect values with negative offsets
* Very minor improvement to join rounding in ClipperOffset
* Fixed CPP OpenGL demo.
v6.2.0 (17 October 2014) Rev 477
* Numerous minor bugfixes, too many to list.
(See revisions 454-475 in Sourceforge Repository)
* The ZFillFunction (custom callback function) has had its parameters
changed.
* Curves demo removed (temporarily).
* Deprecated functions have been removed.
v6.1.5 (26 February 2014) Rev 460
* Improved the joining of output polygons sharing a common edge
when those common edges are horizontal.
* Fixed a bug in ClipperOffset.AddPath() which would produce
incorrect solutions when open paths were added before closed paths.
* Minor code tidy and performance improvement
v6.1.4 (6 February 2014)
* Fixed bugs in MinkowskiSum
* Fixed minor bug when using Clipper.ForceSimplify.
* Modified use_xyz callback so that all 4 vertices around an
intersection point are now passed to the callback function.
v6.1.3a (22 January 2014) Rev 453
* Fixed buggy PointInPolygon function (C++ and C# only).
Note this bug only affected the newly exported function, the
internal PointInPolygon function used by Clipper was OK.
v6.1.3 (19 January 2014) Rev 452
* Fixed potential endless loop condition when adding open
paths to Clipper.
* Fixed missing implementation of SimplifyPolygon function
in C++ code.
* Fixed incorrect upper range constant for polygon coordinates
in Delphi code.
* Added PointInPolygon function.
* Overloaded MinkowskiSum function to accommodate multi-contour
paths.
v6.1.2 (15 December 2013) Rev 444
* Fixed broken C++ header file.
* Minor improvement to joining polygons.
v6.1.1 (13 December 2013) Rev 441
* Fixed a couple of bugs affecting open paths that could
raise unhandled exceptions.
v6.1.0 (12 December 2013)
* Deleted: Previously deprecated code has been removed.
* Modified: The OffsetPaths function is now deprecated as it has
been replaced by the ClipperOffset class which is much more
flexible.
* Bugfixes: Several minor bugs have been fixed including
occasionally an incorrect nesting within the PolyTree structure.
v6.0.0 (30 October 2013)
* Added: Open path (polyline) clipping. A new 'Curves' demo
application showcases this (see the 'Curves' directory).
* Update: Major improvement in the merging of
shared/collinear edges in clip solutions (see Execute).
* Added: The IntPoint structure now has an optional 'Z' member.
(See the precompiler directive use_xyz.)
* Added: Users can now force Clipper to use 32bit integers
(via the precompiler directive use_int32) instead of using
64bit integers.
* Modified: To accommodate open paths, the Polygon and Polygons
structures have been renamed Path and Paths respectively. The
AddPolygon and AddPolygons methods of the ClipperBase class
have been renamed AddPath and AddPaths respectively. Several
other functions have been similarly renamed.
* Modified: The PolyNode Class has a new IsOpen property.
* Modified: The Clipper class has a new ZFillFunction property.
* Added: MinkowskiSum and MinkowskiDiff functions added.
* Added: Several other new functions have been added including
PolyTreeToPaths, OpenPathsFromPolyTree and ClosedPathsFromPolyTree.
* Added: The Clipper constructor now accepts an optional InitOptions
parameter to simplify setting properties.
* Bugfixes: Numerous minor bugs have been fixed.
* Deprecated: Version 6 is a major upgrade from previous versions
and quite a number of changes have been made to exposed structures
and functions. To minimize inconvenience to existing library users,
some code has been retained and some added to maintain backward
compatibility. However, because this code will be removed in a
future update, it has been marked as deprecated and a precompiler
directive use_deprecated has been defined.
v5.1.6 (23 May 2013)
* BugFix: CleanPolygon function was buggy.
* Changed: The behaviour of the 'miter' JoinType has been
changed so that when squaring occurs, it's no longer
extended up to the miter limit but is squared off at
exactly 'delta' units. (This improves the look of mitering
with larger limits at acute angles.)
* Added: New OffsetPolyLines function
* Update: Minor code refactoring and optimisations
v5.1.5 (5 May 2013)
* Added: ForceSimple property to Clipper class
* Update: Improved documentation
v5.1.4 (24 March 2013)
* Update: CleanPolygon function enhanced.
* Update: Documentation improved.
v5.1.3 (14 March 2013)
* Bugfix: Minor bugfixes.
* Update: Documentation significantly improved.
v5.1.2 (26 February 2013)
* Bugfix: PolyNode class was missing a constructor.
* Update: The MiterLimit parameter in the OffsetPolygons
function has been renamed Limit and can now also be used to
limit the number of vertices used to construct arcs when
JoinType is set to jtRound.
v5.1.0 (17 February 2013)
* Update: ExPolygons has been replaced with the PolyTree &
PolyNode classes to more fully represent the parent-child
relationships of the polygons returned by Clipper.
* Added: New CleanPolygon and CleanPolygons functions.
* Bugfix: Another orientation bug fixed.
v5.0.2 - 30 December 2012
* Bugfix: Significant fixes in and tidy of the internal
Int128 class (which is used only when polygon coordinate
values are greater than ±0x3FFFFFFF (~1.07e9)).
* Update: The Area algorithm has been updated and is faster.
* Update: Documentation updates. The newish but undocumented
'CheckInputs' parameter of the OffsetPolygons function has been
renamed 'AutoFix' and documented too. The comments on rounding
have also been improved (ie clearer and expanded).
v4.10.0 - 25 December 2012
* Bugfix: Orientation bugs should now be resolved (finally!).
* Bugfix: Bug in Int128 class
v4.9.8 - 2 December 2012
* Bugfix: Further fixes to rare Orientation bug.
v4.9.7 - 29 November 2012
* Bugfix: Bug that very rarely returned the wrong polygon
orientation.
* Bugfix: Obscure bug affecting OffsetPolygons when using
jtRound for the JoinType parameter and when polygons also
contain very large coordinate values (> +/-100000000000).
v4.9.6 - 9 November 2012
* Bugfix: Another obscure bug related to joining polygons.
v4.9.4 - 2 November 2012
* Bugfix: Bugs in Int128 class occasionally causing
wrong orientations.
* Bugfix: Further fixes related to joining polygons.
v4.9.0 - 9 October 2012
* Bugfix: Obscure bug related to joining polygons.
v4.8.9 - 25 September 2012
* Bugfix: Obscure bug related to precision of intersections.
v4.8.8 - 30 August 2012
* Bugfix: Fixed bug in OffsetPolygons function introduced in
version 4.8.5.
v4.8.7 - 24 August 2012
* Bugfix: ReversePolygon function in C++ translation was broken.
* Bugfix: Two obscure bugs affecting orientation fixed too.
v4.8.6 - 11 August 2012
* Bugfix: Potential for memory overflow errors when using
ExPolygons structure.
* Bugfix: The polygon coordinate range has been reduced to
+/- 0x3FFFFFFFFFFFFFFF (4.6e18).
* Update: ReversePolygons function was misnamed ReversePoints in C++.
* Update: SimplifyPolygon function now takes a PolyFillType parameter.
v4.8.5 - 15 July 2012
* Bugfix: Potential for memory overflow errors in OffsetPolygons().
v4.8.4 - 1 June 2012
* Bugfix: Another obscure bug affecting ExPolygons structure.
v4.8.3 - 27 May 2012
* Bugfix: Obscure bug causing incorrect removal of a vertex.
v4.8.2 - 21 May 2012
* Bugfix: Obscure bug could cause an exception when using
ExPolygon structure.
v4.8.1 - 12 May 2012
* Update: Code tidy and minor bug fixes.
v4.8.0 - 30 April 2012
* Bugfix: Occasional errors in orientation fixed.
* Update: Added notes on rounding to the documentation.
v4.7.6 - 11 April 2012
* Fixed a bug in Orientation function (affecting C# translations only).
* Minor documentation update.
v4.7.5 - 28 March 2012
* Bugfix: Fixed a recently introduced bug that occasionally caused an
unhandled exception in C++ and C# translations.
v4.7.4 - 15 March 2012
* Bugfix: Another minor bugfix.
v4.7.2 - 4 March 2012
* Bugfix: Fixed bug introduced in ver 4.7 which sometimes caused
an exception if ExPolygon structure was passed to Clipper's
Execute method.
v4.7.1 - 3 March 2012
* Bugfix: Rare crash when JoinCommonEdges joined polygons that
'cancelled' each other.
* Bugfix: Clipper's internal Orientation method occasionally
returned wrong result.
* Update: Improved C# code (thanks to numerous excellent suggestions
from David Piepgrass)
v4.7 - 10 February 2012
* Improved the joining of output polygons sharing a common edge.
v4.6.6 - 3 February 2012
* Bugfix: Another obscure bug occasionally causing incorrect
polygon orientation.
v4.6.5 - 17 January 2012
* Bugfix: Obscure bug occasionally causing incorrect hole
assignment in ExPolygon structure.
v4.6.4 - 8 November 2011
* Added: SimplifyPolygon and SimplifyPolygons functions.
v4.6.3 - 11 November 2011
* Bugfix: Fixed another minor mitering bug in OffsetPolygons.
v4.6.2 - 10 November 2011
* Bugfix: Fixed a rare bug in the orientation of polygons
returned by Clipper's Execute() method.
* Bugfix: Previous update introduced a mitering bug in the
OffsetPolygons function.
v4.6 - 29 October 2011
* Added: Support for Positive and Negative polygon fill
types (in addition to the EvenOdd and NonZero fill types).
* Bugfix: The OffsetPolygons function was generating the
occasional artefact when 'shrinking' polygons.
v4.5.5 - 8 October 2011
* Bugfix: Fixed an obscure bug in Clipper's JoinCommonEdges
method.
* Update: Replaced IsClockwise function with Orientation
function. The orientation issues affecting OffsetPolygons
should now be finally resolved.
* Change: The Area function once again returns a signed value.
v4.5.1 - 28 September 2011
* Deleted: The UseFullCoordinateRange property has been
deleted since integer range is now managed implicitly.
* BugFix: Minor bug in OffsetPolygon mitering.
* Change: C# JoinType enum moved from Clipper class to
ClipperLib namespace.
* Change: The Area function now returns the absolute area
(irrespective of orientation).
* Change: The IsClockwise function now requires a second
parameter - YAxisPositiveUpward - to accommodate displays
with Y-axis oriented in either direction
v4.4.4 - 10 September 2011
* Change: Deleted jtButt from JoinType (used by the
OffsetPolygons function).
* BugFix: Fixed another minor bug in OffsetPolygons function.
* Update: Further improvements to the help file
v4.4.3 - 29 August 2011
* BugFix: fixed a minor rounding issue in OffsetPolygons
function (affected C++ & C# translations).
* BugFix: fixed a minor bug in OffsetPolygons' function
declaration (affected C++ translation only).
* Change: 'clipper' namespace changed to 'ClipperLib'
namespace in both C++ and C# code to remove the ambiguity
between the Clipper class and the namespace. (This also
required numerous updates to the accompanying demos.)
v4.4.2 - 26 August 2011
* BugFix: minor bugfixes in Clipper.
* Update: the OffsetPolygons function has been significantly
improved by offering 4 different join styles.
v4.4.0 - 6 August 2011
* BugFix: A number of minor bugs have been fixed that mostly
affected the new ExPolygons structure.
v4.3.0 - 17 June 2011
* New: ExPolygons structure that explicitly associates 'hole'
polygons with their 'outer' container polygons.
* New: Execute method overloaded so the solution parameter
can now be either Polygons or ExPolygons.
* BugFix: Fixed a rare bug in solution polygons orientation.
v4.2.8 - 21 May 2011
* Update: JoinCommonEdges() improved once more.
* BugFix: Several minor bugs fixed.
v4.2.6 - 1 May 2011
* Bugfix: minor bug in SlopesEqual function.
* Update: Merging of output polygons sharing common edges
has been significantly improved
v4.2.4 - 26 April 2011
Input polygon coordinates can now contain the full range of
signed 64bit integers (ie +/-9,223,372,036,854,775,807). This
means that floating point values can be converted to and from
Clipper's 64bit integer coordinates structure (IntPoint) and
still retain a precision of up to 18 decimal places. However,
since the large-integer math that supports this expanded range
imposes a small cost on performance (~15%), a new property
UseFullCoordinateRange has been added to the Clipper class to
allow users the choice of whether or not to use this expanded
coordinate range. If this property is disabled, coordinate values
are restricted to +/-1,500,000,000.
v4.2 - 12 April 2011
JoinCommonEdges() code significantly improved plus other minor
improvements.
v4.1.2 - 9 April 2011
* Update: Minor code tidy.
* Bugfix: Possible endless loop in JoinCommonEdges() in clipper.pas.
v4.1.1 - 8 April 2011
* Update: All polygon coordinates are now stored as 64bit integers
(though they're still restricted to range -1.5e9 to +1.5e9 pending
the inclusion of code supporting 64bit math).
* Change: AddPolygon and AddPolygons methods now return boolean
values.
* Bugfix: Bug in JoinCommonEdges() caused potential endless loop.
* Bugfix: Bug in IsClockwise(). (C++ code only)
v4.0 - 5 April 2011
* Clipper 4 is a major rewrite of earlier versions. The biggest
change is that floating point values are no longer used,
except for the storing of edge slope values. The main benefit
of this is the issue of numerical robustness has been
addressed. Due to other major code improvements Clipper v4
is approximately 40% faster than Clipper v3.
* The AddPolyPolygon method has been renamed to AddPolygons.
* The IgnoreOrientation property has been removed.
* The clipper_misc library has been merged back into the
main clipper library.
v3.1.0 - 17 February 2011
* Bugfix: Obscure bug in TClipperBase.SetDx method that caused
problems with very small edges (edges < 1/1000th pixel in size).
v3.0.3 - 9 February 2011
* Bugfix: Significant bug, but only in C# code.
* Update: Minor refactoring.
v3.0 - 31 January 2011
* Update: Major rewrite of the portion of code that calculates
the output polygons' orientation.
* Update: Help file significantly improved.
* Change: Renamed ForceOrientation property to IgnoreOrientation.
If the orientation of output polygons is not important, or can
be managed separately, clipping routines can be sped up by about
60% by setting IgnoreOrientation to true. Defaults to false.
* Change: The OffsetPolygon and Area functions have been moved to
the new unit - clipper_misc.
2.99 - 15 January 2011
* Bugfix: Obscure bug in AddPolygon method could cause an endless loop.
2.8 - 20 November 2010
* Updated: Output polygons which previously shared a common
edge are now merged.
* Changed: The orientation of outer polygons is now clockwise
when the display's Y axis is positive downwards (as is
typical for most Windows applications). Inner polygons
(holes) have the opposite orientation.
* Added: Support module for Cairo Graphics Library (with demo).
* Updated: C# and C++ demos.
2.522 - 15 October 2010
* Added C# translation (thanks to Olivier Lejeune) and
a link to Ruby bindings (thanks to Mike Owens).
2.0 - 30 July 2010
* Clipper now clips using both the Even-Odd (alternate) and
Non-Zero (winding) polygon filling rules. (Previously Clipper
assumed the Even-Odd rule for polygon filling.)
1.4c - 16 June 2010
* Added C++ support for AGG graphics library
1.2s - 2 June 2010
* Added C++ translation of clipper.pas
1.0 - 9 May 2010

File diff suppressed because it is too large


@ -1,395 +0,0 @@
/*******************************************************************************
* *
* Author : Angus Johnson *
* Version : 6.2.1 *
* Date : 31 October 2014 *
* Website : http://www.angusj.com *
* Copyright : Angus Johnson 2010-2014 *
* *
* License: *
* Use, modification & distribution is subject to Boost Software License Ver 1. *
* http://www.boost.org/LICENSE_1_0.txt *
* *
* Attributions: *
* The code in this library is an extension of Bala Vatti's clipping algorithm: *
* "A generic solution to polygon clipping" *
* Communications of the ACM, Vol 35, Issue 7 (July 1992) pp 56-63. *
* http://portal.acm.org/citation.cfm?id=129906 *
* *
* Computer graphics and geometric modeling: implementation and algorithms *
* By Max K. Agoston *
* Springer; 1 edition (January 4, 2005) *
* http://books.google.com/books?q=vatti+clipping+agoston *
* *
* See also: *
* "Polygon Offsetting by Computing Winding Numbers" *
* Paper no. DETC2005-85513 pp. 565-575 *
* ASME 2005 International Design Engineering Technical Conferences *
* and Computers and Information in Engineering Conference (IDETC/CIE2005) *
* September 24-28, 2005 , Long Beach, California, USA *
* http://www.me.berkeley.edu/~mcmains/pubs/DAC05OffsetPolygon.pdf *
* *
*******************************************************************************/
#ifndef clipper_hpp
#define clipper_hpp
#define CLIPPER_VERSION "6.2.0"
//use_int32: When enabled 32bit ints are used instead of 64bit ints. This
//improves performance but coordinate values are limited to the range +/- 46340
//#define use_int32
//use_xyz: adds a Z member to IntPoint. Adds a minor cost to performance.
//#define use_xyz
//use_lines: Enables line clipping. Adds a very minor cost to performance.
//#define use_lines
//use_deprecated: Enables temporary support for the obsolete functions
//#define use_deprecated
#include <vector>
#include <set>
#include <stdexcept>
#include <cstring>
#include <cstdlib>
#include <ostream>
#include <functional>
#include <queue>
namespace ClipperLib {
enum ClipType { ctIntersection, ctUnion, ctDifference, ctXor };
enum PolyType { ptSubject, ptClip };
//By far the most widely used winding rules for polygon filling are
//EvenOdd & NonZero (GDI, GDI+, XLib, OpenGL, Cairo, AGG, Quartz, SVG, Gr32)
//Other rules include Positive, Negative and ABS_GTR_EQ_TWO (only in OpenGL)
//see http://glprogramming.com/red/chapter11.html
enum PolyFillType { pftEvenOdd, pftNonZero, pftPositive, pftNegative };
#ifdef use_int32
typedef int cInt;
static cInt const loRange = 0x7FFF;
static cInt const hiRange = 0x7FFF;
#else
typedef signed long long cInt;
static cInt const loRange = 0x3FFFFFFF;
static cInt const hiRange = 0x3FFFFFFFFFFFFFFFLL;
typedef signed long long long64; //used by Int128 class
typedef unsigned long long ulong64;
#endif
struct IntPoint {
cInt X;
cInt Y;
#ifdef use_xyz
cInt Z;
IntPoint(cInt x = 0, cInt y = 0, cInt z = 0): X(x), Y(y), Z(z) {};
#else
IntPoint(cInt x = 0, cInt y = 0): X(x), Y(y) {};
#endif
friend inline bool operator== (const IntPoint& a, const IntPoint& b)
{
return a.X == b.X && a.Y == b.Y;
}
friend inline bool operator!= (const IntPoint& a, const IntPoint& b)
{
return a.X != b.X || a.Y != b.Y;
}
};
//------------------------------------------------------------------------------
typedef std::vector< IntPoint > Path;
typedef std::vector< Path > Paths;
inline Path& operator <<(Path& poly, const IntPoint& p) {poly.push_back(p); return poly;}
inline Paths& operator <<(Paths& polys, const Path& p) {polys.push_back(p); return polys;}
std::ostream& operator <<(std::ostream &s, const IntPoint &p);
std::ostream& operator <<(std::ostream &s, const Path &p);
std::ostream& operator <<(std::ostream &s, const Paths &p);
struct DoublePoint
{
double X;
double Y;
DoublePoint(double x = 0, double y = 0) : X(x), Y(y) {}
DoublePoint(IntPoint ip) : X((double)ip.X), Y((double)ip.Y) {}
};
//------------------------------------------------------------------------------
#ifdef use_xyz
typedef void (*ZFillCallback)(IntPoint& e1bot, IntPoint& e1top, IntPoint& e2bot, IntPoint& e2top, IntPoint& pt);
#endif
enum InitOptions {ioReverseSolution = 1, ioStrictlySimple = 2, ioPreserveCollinear = 4};
enum JoinType {jtSquare, jtRound, jtMiter};
enum EndType {etClosedPolygon, etClosedLine, etOpenButt, etOpenSquare, etOpenRound};
class PolyNode;
typedef std::vector< PolyNode* > PolyNodes;
class PolyNode
{
public:
PolyNode();
virtual ~PolyNode(){};
Path Contour;
PolyNodes Childs;
PolyNode* Parent;
PolyNode* GetNext() const;
bool IsHole() const;
bool IsOpen() const;
int ChildCount() const;
private:
unsigned Index; //node index in Parent.Childs
bool m_IsOpen;
JoinType m_jointype;
EndType m_endtype;
PolyNode* GetNextSiblingUp() const;
void AddChild(PolyNode& child);
friend class Clipper; //to access Index
friend class ClipperOffset;
};
class PolyTree: public PolyNode
{
public:
~PolyTree(){Clear();};
PolyNode* GetFirst() const;
void Clear();
int Total() const;
private:
PolyNodes AllNodes;
friend class Clipper; //to access AllNodes
};
bool Orientation(const Path &poly);
double Area(const Path &poly);
int PointInPolygon(const IntPoint &pt, const Path &path);
void SimplifyPolygon(const Path &in_poly, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
void SimplifyPolygons(const Paths &in_polys, Paths &out_polys, PolyFillType fillType = pftEvenOdd);
void SimplifyPolygons(Paths &polys, PolyFillType fillType = pftEvenOdd);
void CleanPolygon(const Path& in_poly, Path& out_poly, double distance = 1.415);
void CleanPolygon(Path& poly, double distance = 1.415);
void CleanPolygons(const Paths& in_polys, Paths& out_polys, double distance = 1.415);
void CleanPolygons(Paths& polys, double distance = 1.415);
void MinkowskiSum(const Path& pattern, const Path& path, Paths& solution, bool pathIsClosed);
void MinkowskiSum(const Path& pattern, const Paths& paths, Paths& solution, bool pathIsClosed);
void MinkowskiDiff(const Path& poly1, const Path& poly2, Paths& solution);
void PolyTreeToPaths(const PolyTree& polytree, Paths& paths);
void ClosedPathsFromPolyTree(const PolyTree& polytree, Paths& paths);
void OpenPathsFromPolyTree(PolyTree& polytree, Paths& paths);
void ReversePath(Path& p);
void ReversePaths(Paths& p);
struct IntRect { cInt left; cInt top; cInt right; cInt bottom; };
//enums that are used internally ...
enum EdgeSide { esLeft = 1, esRight = 2};
//forward declarations (for stuff used internally) ...
struct TEdge;
struct IntersectNode;
struct LocalMinimum;
struct Scanbeam;
struct OutPt;
struct OutRec;
struct Join;
typedef std::vector < OutRec* > PolyOutList;
typedef std::vector < TEdge* > EdgeList;
typedef std::vector < Join* > JoinList;
typedef std::vector < IntersectNode* > IntersectList;
//------------------------------------------------------------------------------
//ClipperBase is the ancestor to the Clipper class. It should not be
//instantiated directly. This class simply abstracts the conversion of sets of
//polygon coordinates into edge objects that are stored in a LocalMinima list.
class ClipperBase
{
public:
ClipperBase();
virtual ~ClipperBase();
bool AddPath(const Path &pg, PolyType PolyTyp, bool Closed);
bool AddPaths(const Paths &ppg, PolyType PolyTyp, bool Closed);
virtual void Clear();
IntRect GetBounds();
bool PreserveCollinear() {return m_PreserveCollinear;};
void PreserveCollinear(bool value) {m_PreserveCollinear = value;};
protected:
void DisposeLocalMinimaList();
TEdge* AddBoundsToLML(TEdge *e, bool IsClosed);
void PopLocalMinima();
virtual void Reset();
TEdge* ProcessBound(TEdge* E, bool IsClockwise);
void DoMinimaLML(TEdge* E1, TEdge* E2, bool IsClosed);
TEdge* DescendToMin(TEdge *&E);
void AscendToMax(TEdge *&E, bool Appending, bool IsClosed);
typedef std::vector<LocalMinimum> MinimaList;
MinimaList::iterator m_CurrentLM;
MinimaList m_MinimaList;
bool m_UseFullRange;
EdgeList m_edges;
bool m_PreserveCollinear;
bool m_HasOpenPaths;
};
//------------------------------------------------------------------------------
class Clipper : public virtual ClipperBase
{
public:
Clipper(int initOptions = 0);
~Clipper();
bool Execute(ClipType clipType,
Paths &solution,
PolyFillType subjFillType = pftEvenOdd,
PolyFillType clipFillType = pftEvenOdd);
bool Execute(ClipType clipType,
PolyTree &polytree,
PolyFillType subjFillType = pftEvenOdd,
PolyFillType clipFillType = pftEvenOdd);
bool ReverseSolution() {return m_ReverseOutput;};
void ReverseSolution(bool value) {m_ReverseOutput = value;};
bool StrictlySimple() {return m_StrictSimple;};
void StrictlySimple(bool value) {m_StrictSimple = value;};
//set the callback function for z value filling on intersections (otherwise Z is 0)
#ifdef use_xyz
void ZFillFunction(ZFillCallback zFillFunc);
#endif
protected:
void Reset();
virtual bool ExecuteInternal();
private:
PolyOutList m_PolyOuts;
JoinList m_Joins;
JoinList m_GhostJoins;
IntersectList m_IntersectList;
ClipType m_ClipType;
typedef std::priority_queue<cInt> ScanbeamList;
ScanbeamList m_Scanbeam;
TEdge *m_ActiveEdges;
TEdge *m_SortedEdges;
bool m_ExecuteLocked;
PolyFillType m_ClipFillType;
PolyFillType m_SubjFillType;
bool m_ReverseOutput;
bool m_UsingPolyTree;
bool m_StrictSimple;
#ifdef use_xyz
ZFillCallback m_ZFill; //custom callback
#endif
void SetWindingCount(TEdge& edge);
bool IsEvenOddFillType(const TEdge& edge) const;
bool IsEvenOddAltFillType(const TEdge& edge) const;
void InsertScanbeam(const cInt Y);
cInt PopScanbeam();
void InsertLocalMinimaIntoAEL(const cInt botY);
void InsertEdgeIntoAEL(TEdge *edge, TEdge* startEdge);
void AddEdgeToSEL(TEdge *edge);
void CopyAELToSEL();
void DeleteFromSEL(TEdge *e);
void DeleteFromAEL(TEdge *e);
void UpdateEdgeIntoAEL(TEdge *&e);
void SwapPositionsInSEL(TEdge *edge1, TEdge *edge2);
bool IsContributing(const TEdge& edge) const;
bool IsTopHorz(const cInt XPos);
void SwapPositionsInAEL(TEdge *edge1, TEdge *edge2);
void DoMaxima(TEdge *e);
void ProcessHorizontals(bool IsTopOfScanbeam);
void ProcessHorizontal(TEdge *horzEdge, bool isTopOfScanbeam);
void AddLocalMaxPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
OutPt* AddLocalMinPoly(TEdge *e1, TEdge *e2, const IntPoint &pt);
OutRec* GetOutRec(int idx);
void AppendPolygon(TEdge *e1, TEdge *e2);
void IntersectEdges(TEdge *e1, TEdge *e2, IntPoint &pt);
OutRec* CreateOutRec();
OutPt* AddOutPt(TEdge *e, const IntPoint &pt);
void DisposeAllOutRecs();
void DisposeOutRec(PolyOutList::size_type index);
bool ProcessIntersections(const cInt topY);
void BuildIntersectList(const cInt topY);
void ProcessIntersectList();
void ProcessEdgesAtTopOfScanbeam(const cInt topY);
void BuildResult(Paths& polys);
void BuildResult2(PolyTree& polytree);
void SetHoleState(TEdge *e, OutRec *outrec);
void DisposeIntersectNodes();
bool FixupIntersectionOrder();
void FixupOutPolygon(OutRec &outrec);
bool IsHole(TEdge *e);
bool FindOwnerFromSplitRecs(OutRec &outRec, OutRec *&currOrfl);
void FixHoleLinkage(OutRec &outrec);
void AddJoin(OutPt *op1, OutPt *op2, const IntPoint offPt);
void ClearJoins();
void ClearGhostJoins();
void AddGhostJoin(OutPt *op, const IntPoint offPt);
bool JoinPoints(Join *j, OutRec* outRec1, OutRec* outRec2);
void JoinCommonEdges();
void DoSimplePolygons();
void FixupFirstLefts1(OutRec* OldOutRec, OutRec* NewOutRec);
void FixupFirstLefts2(OutRec* OldOutRec, OutRec* NewOutRec);
#ifdef use_xyz
void SetZ(IntPoint& pt, TEdge& e1, TEdge& e2);
#endif
};
//------------------------------------------------------------------------------
class ClipperOffset
{
public:
ClipperOffset(double miterLimit = 2.0, double roundPrecision = 0.25);
~ClipperOffset();
void AddPath(const Path& path, JoinType joinType, EndType endType);
void AddPaths(const Paths& paths, JoinType joinType, EndType endType);
void Execute(Paths& solution, double delta);
void Execute(PolyTree& solution, double delta);
void Clear();
double MiterLimit;
double ArcTolerance;
private:
Paths m_destPolys;
Path m_srcPoly;
Path m_destPoly;
std::vector<DoublePoint> m_normals;
double m_delta, m_sinA, m_sin, m_cos;
double m_miterLim, m_StepsPerRad;
IntPoint m_lowest;
PolyNode m_polyNodes;
void FixOrientations();
void DoOffset(double delta);
void OffsetPoint(int j, int& k, JoinType jointype);
void DoSquare(int j, int k);
void DoMiter(int j, int k, double r);
void DoRound(int j, int k);
};
//------------------------------------------------------------------------------
class clipperException : public std::exception
{
public:
clipperException(const char* description): m_descr(description) {}
virtual ~clipperException() throw() {}
virtual const char* what() const throw() {return m_descr.c_str();}
private:
std::string m_descr;
};
//------------------------------------------------------------------------------
} //ClipperLib namespace
#endif //clipper_hpp
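The header above is the public interface of the vendored Clipper library. A minimal sketch of a boolean intersection through that interface, using the Path insertion operator and the Execute() overload declared above (the coordinates are made up for illustration):

#include "clipper.hpp"

int main() {
	using namespace ClipperLib;

	Path subject, clip;
	subject << IntPoint(0, 0) << IntPoint(100, 0) << IntPoint(100, 100) << IntPoint(0, 100);
	clip << IntPoint(50, 50) << IntPoint(150, 50) << IntPoint(150, 150) << IntPoint(50, 150);

	Clipper c;
	c.AddPath(subject, ptSubject, true);  // closed subject polygon
	c.AddPath(clip, ptClip, true);        // closed clip polygon

	Paths solution;
	c.Execute(ctIntersection, solution, pftNonZero, pftNonZero);
	// solution now holds one ring covering the overlap, (50,50)-(100,100)
	return 0;
}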

421
decode.cc

@ -1,421 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sqlite3.h>
#include <string>
#include <zlib.h>
#include <math.h>
#include "vector_tile.pb.h"
#include "tile.h"
extern "C" {
#include "projection.h"
}
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
inline bool is_compressed(std::string const &data) {
return data.size() > 2 && (((uint8_t) data[0] == 0x78 && (uint8_t) data[1] == 0x9C) || ((uint8_t) data[0] == 0x1F && (uint8_t) data[1] == 0x8B));
}
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
inline int decompress(std::string const &input, std::string &output) {
z_stream inflate_s;
inflate_s.zalloc = Z_NULL;
inflate_s.zfree = Z_NULL;
inflate_s.opaque = Z_NULL;
inflate_s.avail_in = 0;
inflate_s.next_in = Z_NULL;
if (inflateInit2(&inflate_s, 32 + 15) != Z_OK) {
fprintf(stderr, "error: %s\n", inflate_s.msg);
}
inflate_s.next_in = (Bytef *) input.data();
inflate_s.avail_in = input.size();
size_t length = 0;
do {
output.resize(length + 2 * input.size());
inflate_s.avail_out = 2 * input.size();
inflate_s.next_out = (Bytef *) (output.data() + length);
int ret = inflate(&inflate_s, Z_FINISH);
if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
fprintf(stderr, "error: %s\n", inflate_s.msg);
return 0;
}
length += (2 * input.size() - inflate_s.avail_out);
} while (inflate_s.avail_out == 0);
inflateEnd(&inflate_s);
output.resize(length);
return 1;
}
int dezig(unsigned n) {
return (n >> 1) ^ (-(n & 1));
}
void printq(const char *s) {
putchar('"');
for (; *s; s++) {
if (*s == '\\' || *s == '"') {
printf("\\%c", *s);
} else if (*s >= 0 && *s < ' ') {
printf("\\u%04x", *s);
} else {
putchar(*s);
}
}
putchar('"');
}
struct draw {
int op;
double lon;
double lat;
draw(int op, double lon, double lat) {
this->op = op;
this->lon = lon;
this->lat = lat;
}
};
void handle(std::string message, int z, unsigned x, unsigned y, int describe) {
GOOGLE_PROTOBUF_VERIFY_VERSION;
int within = 0;
// https://github.com/mapbox/mapnik-vector-tile/blob/master/examples/c%2B%2B/tileinfo.cpp
mapnik::vector::tile tile;
if (is_compressed(message)) {
std::string uncompressed;
decompress(message, uncompressed);
if (!tile.ParseFromString(uncompressed)) {
fprintf(stderr, "Couldn't decompress tile %d/%u/%u\n", z, x, y);
exit(EXIT_FAILURE);
}
} else if (!tile.ParseFromString(message)) {
fprintf(stderr, "Couldn't parse tile %d/%u/%u\n", z, x, y);
exit(EXIT_FAILURE);
}
printf("{ \"type\": \"FeatureCollection\"");
if (describe) {
printf(", \"properties\": { \"zoom\": %d, \"x\": %d, \"y\": %d }", z, x, y);
}
printf(", \"features\": [\n");
for (int l = 0; l < tile.layers_size(); l++) {
mapnik::vector::tile_layer layer = tile.layers(l);
int extent = layer.extent();
for (int f = 0; f < layer.features_size(); f++) {
mapnik::vector::tile_feature feat = layer.features(f);
int px = 0, py = 0;
if (within) {
printf(",\n");
}
within = 1;
printf("{ \"type\": \"Feature\"");
printf(", \"properties\": { ");
for (int t = 0; t + 1 < feat.tags_size(); t += 2) {
if (t != 0) {
printf(", ");
}
const char *key = layer.keys(feat.tags(t)).c_str();
mapnik::vector::tile_value const &val = layer.values(feat.tags(t + 1));
if (val.has_string_value()) {
printq(key);
printf(": ");
printq(val.string_value().c_str());
} else if (val.has_int_value()) {
printq(key);
printf(": %lld", (long long) val.int_value());
} else if (val.has_double_value()) {
printq(key);
printf(": %g", val.double_value());
} else if (val.has_float_value()) {
printq(key);
printf(": %g", val.float_value());
} else if (val.has_sint_value()) {
printq(key);
printf(": %lld", (long long) val.sint_value());
} else if (val.has_uint_value()) {
printq(key);
printf(": %lld", (long long) val.uint_value());
} else if (val.has_bool_value()) {
printq(key);
printf(": %s", val.bool_value() ? "true" : "false");
}
}
printf(" }, \"geometry\": { ");
std::vector<draw> ops;
for (int g = 0; g < feat.geometry_size(); g++) {
uint32_t geom = feat.geometry(g);
uint32_t op = geom & 7;
uint32_t count = geom >> 3;
if (op == VT_MOVETO || op == VT_LINETO) {
for (unsigned k = 0; k < count; k++) {
px += dezig(feat.geometry(g + 1));
py += dezig(feat.geometry(g + 2));
g += 2;
long long scale = 1LL << (32 - z);
long long wx = scale * x + (scale / extent) * (px + .5);
long long wy = scale * y + (scale / extent) * (py + .5);
double lat, lon;
tile2latlon(wx, wy, 32, &lat, &lon);
ops.push_back(draw(op, lon, lat));
}
} else {
ops.push_back(draw(op, 0, 0));
}
}
if (feat.type() == VT_POINT) {
if (ops.size() == 1) {
printf("\"type\": \"Point\", \"coordinates\": [ %f, %f ]", ops[0].lon, ops[0].lat);
} else {
printf("\"type\": \"MultiPoint\", \"coordinates\": [ ");
for (unsigned i = 0; i < ops.size(); i++) {
if (i != 0) {
printf(", ");
}
printf("[ %f, %f ]", ops[i].lon, ops[i].lat);
}
printf(" ]");
}
} else if (feat.type() == VT_LINE) {
int movetos = 0;
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].op == VT_MOVETO) {
movetos++;
}
}
if (movetos < 2) {
printf("\"type\": \"LineString\", \"coordinates\": [ ");
for (unsigned i = 0; i < ops.size(); i++) {
if (i != 0) {
printf(", ");
}
printf("[ %f, %f ]", ops[i].lon, ops[i].lat);
}
printf(" ]");
} else {
printf("\"type\": \"MultiLineString\", \"coordinates\": [ [ ");
int state = 0;
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].op == VT_MOVETO) {
if (state == 0) {
printf("[ %f, %f ]", ops[i].lon, ops[i].lat);
state = 1;
} else {
printf(" ], [ ");
printf("[ %f, %f ]", ops[i].lon, ops[i].lat);
state = 1;
}
} else {
printf(", [ %f, %f ]", ops[i].lon, ops[i].lat);
}
}
printf(" ] ]");
}
} else if (feat.type() == VT_POLYGON) {
std::vector<std::vector<draw> > rings;
std::vector<double> areas;
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].op == VT_MOVETO) {
rings.push_back(std::vector<draw>());
areas.push_back(0);
}
int n = rings.size() - 1;
if (n >= 0) {
rings[n].push_back(ops[i]);
}
}
int outer = 0;
for (unsigned i = 0; i < rings.size(); i++) {
double area = 0;
for (unsigned k = 0; k < rings[i].size(); k++) {
if (rings[i][k].op != VT_CLOSEPATH) {
area += rings[i][k].lon * rings[i][(k + 1) % rings[i].size()].lat;
area -= rings[i][k].lat * rings[i][(k + 1) % rings[i].size()].lon;
}
}
areas[i] = area;
if (areas[i] <= 0) {
outer++;
}
// printf("area %f\n", area / .00000274 / .00000274);
}
if (outer > 1) {
printf("\"type\": \"MultiPolygon\", \"coordinates\": [ [ [ ");
} else {
printf("\"type\": \"Polygon\", \"coordinates\": [ [ ");
}
int state = 0;
for (unsigned i = 0; i < rings.size(); i++) {
if (areas[i] <= 0) {
if (state != 0) {
// new multipolygon
printf(" ] ], [ [ ");
}
state = 1;
}
if (state == 2) {
// new ring in the same polygon
printf(" ], [ ");
}
for (unsigned j = 0; j < rings[i].size(); j++) {
if (rings[i][j].op != VT_CLOSEPATH) {
if (j != 0) {
printf(", ");
}
printf("[ %f, %f ]", rings[i][j].lon, rings[i][j].lat);
}
}
state = 2;
}
if (outer > 1) {
printf(" ] ] ]");
} else {
printf(" ] ]");
}
}
printf(" } }\n");
}
}
printf("] }\n");
}
void decode(char *fname, int z, unsigned x, unsigned y) {
sqlite3 *db;
int oz = z;
unsigned ox = x, oy = y;
if (sqlite3_open(fname, &db) != SQLITE_OK) {
fprintf(stderr, "%s: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
if (z < 0) {
const char *sql = "SELECT tile_data, zoom_level, tile_column, tile_row from tiles order by zoom_level, tile_column, tile_row;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
printf("{ \"type\": \"FeatureCollection\", \"features\": [\n");
int within = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
if (within) {
printf(",\n");
}
within = 1;
int len = sqlite3_column_bytes(stmt, 0);
int z = sqlite3_column_int(stmt, 1);
int x = sqlite3_column_int(stmt, 2);
int y = sqlite3_column_int(stmt, 3);
y = (1LL << z) - 1 - y;
const char *s = (const char *) sqlite3_column_blob(stmt, 0);
handle(std::string(s, len), z, x, y, 1);
}
printf("] }\n");
sqlite3_finalize(stmt);
} else {
int handled = 0;
while (z >= 0 && !handled) {
const char *sql = "SELECT tile_data from tiles where zoom_level = ? and tile_column = ? and tile_row = ?;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
sqlite3_bind_int(stmt, 1, z);
sqlite3_bind_int(stmt, 2, x);
sqlite3_bind_int(stmt, 3, (1LL << z) - 1 - y);
while (sqlite3_step(stmt) == SQLITE_ROW) {
int len = sqlite3_column_bytes(stmt, 0);
const char *s = (const char *) sqlite3_column_blob(stmt, 0);
if (z != oz) {
fprintf(stderr, "%s: Warning: using tile %d/%u/%u instead of %d/%u/%u\n", fname, z, x, y, oz, ox, oy);
}
handle(std::string(s, len), z, x, y, 0);
handled = 1;
}
sqlite3_finalize(stmt);
z--;
x /= 2;
y /= 2;
}
}
if (sqlite3_close(db) != SQLITE_OK) {
fprintf(stderr, "%s: could not close database: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
}
void usage(char **argv) {
fprintf(stderr, "Usage: %s file.mbtiles zoom x y\n", argv[0]);
exit(EXIT_FAILURE);
}
int main(int argc, char **argv) {
extern int optind;
// extern char *optarg;
int i;
while ((i = getopt(argc, argv, "")) != -1) {
usage(argv);
}
if (argc == optind + 4) {
decode(argv[optind], atoi(argv[optind + 1]), atoi(argv[optind + 2]), atoi(argv[optind + 3]));
} else if (argc == optind + 1) {
decode(argv[optind], -1, -1, -1);
} else {
usage(argv);
}
return 0;
}
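The dezig() helper in the removed decode.cc above undoes the zigzag encoding used for vector-tile geometry deltas, mapping 0, 1, 2, 3, ... back to 0, -1, 1, -2, .... A small round-trip sketch (the zig() encoder here is illustrative; only the decoder appears in the source):

#include <cstdio>

// zigzag encode (illustrative): 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
unsigned zig(int n) {
	return ((unsigned) n << 1) ^ (unsigned) -(n < 0);
}

// zigzag decode, as in decode.cc
int dezig(unsigned n) {
	return (n >> 1) ^ (-(n & 1));
}

int main() {
	for (int v = -3; v <= 3; v++) {
		printf("%d -> %u -> %d\n", v, zig(v), dezig(zig(v)));
	}
	return 0;
}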

398
decode.cpp Normal file

@ -0,0 +1,398 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sqlite3.h>
#include <getopt.h>
#include <string>
#include <vector>
#include <map>
#include <set>
#include <zlib.h>
#include <math.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <protozero/pbf_reader.hpp>
#include "mvt.hpp"
#include "projection.hpp"
#include "geometry.hpp"
#include "write_json.hpp"
int minzoom = 0;
int maxzoom = 32;
bool force = false;
void do_stats(mvt_tile &tile, size_t size, bool compressed, int z, unsigned x, unsigned y) {
printf("{ \"zoom\": %d, \"x\": %u, \"y\": %u, \"bytes\": %zu, \"compressed\": %s", z, x, y, size, compressed ? "true" : "false");
printf(", \"layers\": { ");
for (size_t i = 0; i < tile.layers.size(); i++) {
if (i != 0) {
printf(", ");
}
fprintq(stdout, tile.layers[i].name.c_str());
int points = 0, lines = 0, polygons = 0;
for (size_t j = 0; j < tile.layers[i].features.size(); j++) {
if (tile.layers[i].features[j].type == mvt_point) {
points++;
} else if (tile.layers[i].features[j].type == mvt_linestring) {
lines++;
} else if (tile.layers[i].features[j].type == mvt_polygon) {
polygons++;
}
}
printf(": { \"points\": %d, \"lines\": %d, \"polygons\": %d, \"extent\": %lld }", points, lines, polygons, tile.layers[i].extent);
}
printf(" } }\n");
}
void handle(std::string message, int z, unsigned x, unsigned y, int describe, std::set<std::string> const &to_decode, bool pipeline, bool stats) {
mvt_tile tile;
bool was_compressed;
try {
if (!tile.decode(message, was_compressed)) {
fprintf(stderr, "Couldn't parse tile %d/%u/%u\n", z, x, y);
exit(EXIT_FAILURE);
}
} catch (protozero::unknown_pbf_wire_type_exception e) {
fprintf(stderr, "PBF decoding error in tile %d/%u/%u\n", z, x, y);
exit(EXIT_FAILURE);
}
if (stats) {
do_stats(tile, message.size(), was_compressed, z, x, y);
return;
}
if (!pipeline) {
printf("{ \"type\": \"FeatureCollection\"");
if (describe) {
printf(", \"properties\": { \"zoom\": %d, \"x\": %d, \"y\": %d", z, x, y);
if (!was_compressed) {
printf(", \"compressed\": false");
}
printf(" }");
if (projection != projections) {
printf(", \"crs\": { \"type\": \"name\", \"properties\": { \"name\": ");
fprintq(stdout, projection->alias);
printf(" } }");
}
}
printf(", \"features\": [\n");
}
bool first_layer = true;
for (size_t l = 0; l < tile.layers.size(); l++) {
mvt_layer &layer = tile.layers[l];
if (layer.extent <= 0) {
fprintf(stderr, "Impossible layer extent %lld in mbtiles\n", layer.extent);
exit(EXIT_FAILURE);
}
if (to_decode.size() != 0 && !to_decode.count(layer.name)) {
continue;
}
if (!pipeline) {
if (describe) {
if (!first_layer) {
printf(",\n");
}
printf("{ \"type\": \"FeatureCollection\"");
printf(", \"properties\": { \"layer\": ");
fprintq(stdout, layer.name.c_str());
printf(", \"version\": %d, \"extent\": %lld", layer.version, layer.extent);
printf(" }");
printf(", \"features\": [\n");
first_layer = false;
}
}
// X and Y are unsigned, so no need to check <0
if (x > (1 << z) || y > (1 << z)) {
fprintf(stderr, "Impossible tile %d/%u/%u\n", z, x, y);
exit(EXIT_FAILURE);
}
layer_to_geojson(stdout, layer, z, x, y, !pipeline, pipeline, pipeline, 0, 0, 0, !force);
if (!pipeline) {
if (describe) {
printf("] }\n");
}
}
}
if (!pipeline) {
printf("] }\n");
}
}
void decode(char *fname, int z, unsigned x, unsigned y, std::set<std::string> const &to_decode, bool pipeline, bool stats) {
sqlite3 *db;
int oz = z;
unsigned ox = x, oy = y;
int fd = open(fname, O_RDONLY | O_CLOEXEC);
if (fd >= 0) {
struct stat st;
if (fstat(fd, &st) == 0) {
if (st.st_size < 50 * 1024 * 1024) {
char *map = (char *) mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (map != NULL && map != MAP_FAILED) {
if (strcmp(map, "SQLite format 3") != 0) {
if (z >= 0) {
std::string s = std::string(map, st.st_size);
handle(s, z, x, y, 1, to_decode, pipeline, stats);
munmap(map, st.st_size);
return;
} else {
fprintf(stderr, "Must specify zoom/x/y to decode a single pbf file\n");
exit(EXIT_FAILURE);
}
}
}
munmap(map, st.st_size);
}
} else {
perror("fstat");
}
if (close(fd) != 0) {
perror("close");
exit(EXIT_FAILURE);
}
} else {
perror(fname);
}
if (sqlite3_open(fname, &db) != SQLITE_OK) {
fprintf(stderr, "%s: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
if (z < 0) {
int within = 0;
if (!pipeline && !stats) {
printf("{ \"type\": \"FeatureCollection\", \"properties\": {\n");
const char *sql2 = "SELECT name, value from metadata order by name;";
sqlite3_stmt *stmt2;
if (sqlite3_prepare_v2(db, sql2, -1, &stmt2, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
while (sqlite3_step(stmt2) == SQLITE_ROW) {
if (within) {
printf(",\n");
}
within = 1;
const unsigned char *name = sqlite3_column_text(stmt2, 0);
const unsigned char *value = sqlite3_column_text(stmt2, 1);
if (name == NULL || value == NULL) {
fprintf(stderr, "Corrupt mbtiles file: null metadata\n");
exit(EXIT_FAILURE);
}
fprintq(stdout, (char *) name);
printf(": ");
fprintq(stdout, (char *) value);
}
sqlite3_finalize(stmt2);
}
if (stats) {
printf("[\n");
}
const char *sql = "SELECT tile_data, zoom_level, tile_column, tile_row from tiles where zoom_level between ? and ? order by zoom_level, tile_column, tile_row;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
sqlite3_bind_int(stmt, 1, minzoom);
sqlite3_bind_int(stmt, 2, maxzoom);
if (!pipeline && !stats) {
printf("\n}, \"features\": [\n");
}
within = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
if (!pipeline && !stats) {
if (within) {
printf(",\n");
}
within = 1;
}
if (stats) {
if (within) {
printf(",\n");
}
within = 1;
}
int len = sqlite3_column_bytes(stmt, 0);
int tz = sqlite3_column_int(stmt, 1);
int tx = sqlite3_column_int(stmt, 2);
int ty = sqlite3_column_int(stmt, 3);
if (tz < 0 || tz >= 32) {
fprintf(stderr, "Impossible zoom level %d in mbtiles\n", tz);
exit(EXIT_FAILURE);
}
ty = (1LL << tz) - 1 - ty;
const char *s = (const char *) sqlite3_column_blob(stmt, 0);
handle(std::string(s, len), tz, tx, ty, 1, to_decode, pipeline, stats);
}
if (!pipeline && !stats) {
printf("] }\n");
}
if (stats) {
printf("]\n");
}
sqlite3_finalize(stmt);
} else {
int handled = 0;
while (z >= 0 && !handled) {
const char *sql = "SELECT tile_data from tiles where zoom_level = ? and tile_column = ? and tile_row = ?;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "%s: select failed: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
sqlite3_bind_int(stmt, 1, z);
sqlite3_bind_int(stmt, 2, x);
sqlite3_bind_int(stmt, 3, (1LL << z) - 1 - y);
while (sqlite3_step(stmt) == SQLITE_ROW) {
int len = sqlite3_column_bytes(stmt, 0);
const char *s = (const char *) sqlite3_column_blob(stmt, 0);
if (z != oz) {
fprintf(stderr, "%s: Warning: using tile %d/%u/%u instead of %d/%u/%u\n", fname, z, x, y, oz, ox, oy);
}
handle(std::string(s, len), z, x, y, 0, to_decode, pipeline, stats);
handled = 1;
}
sqlite3_finalize(stmt);
z--;
x /= 2;
y /= 2;
}
}
if (sqlite3_close(db) != SQLITE_OK) {
fprintf(stderr, "%s: could not close database: %s\n", fname, sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
}
void usage(char **argv) {
fprintf(stderr, "Usage: %s [-s projection] [-Z minzoom] [-z maxzoom] [-l layer ...] file.mbtiles [zoom x y]\n", argv[0]);
exit(EXIT_FAILURE);
}
int main(int argc, char **argv) {
extern int optind;
extern char *optarg;
int i;
std::set<std::string> to_decode;
bool pipeline = false;
bool stats = false;
struct option long_options[] = {
{"projection", required_argument, 0, 's'},
{"maximum-zoom", required_argument, 0, 'z'},
{"minimum-zoom", required_argument, 0, 'Z'},
{"layer", required_argument, 0, 'l'},
{"tag-layer-and-zoom", no_argument, 0, 'c'},
{"stats", no_argument, 0, 'S'},
{"force", no_argument, 0, 'f'},
{0, 0, 0, 0},
};
std::string getopt_str;
for (size_t lo = 0; long_options[lo].name != NULL; lo++) {
if (long_options[lo].val > ' ') {
getopt_str.push_back(long_options[lo].val);
if (long_options[lo].has_arg == required_argument) {
getopt_str.push_back(':');
}
}
}
while ((i = getopt_long(argc, argv, getopt_str.c_str(), long_options, NULL)) != -1) {
switch (i) {
case 0:
break;
case 's':
set_projection_or_exit(optarg);
break;
case 'z':
maxzoom = atoi(optarg);
break;
case 'Z':
minzoom = atoi(optarg);
break;
case 'l':
to_decode.insert(optarg);
break;
case 'c':
pipeline = true;
break;
case 'S':
stats = true;
break;
case 'f':
force = true;
break;
default:
usage(argv);
}
}
if (argc == optind + 4) {
decode(argv[optind], atoi(argv[optind + 1]), atoi(argv[optind + 2]), atoi(argv[optind + 3]), to_decode, pipeline, stats);
} else if (argc == optind + 1) {
decode(argv[optind], -1, -1, -1, to_decode, pipeline, stats);
} else {
usage(argv);
}
return 0;
}
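decode.cpp above mmaps its input and, when the mapped bytes do not begin with the SQLite magic string, treats the file as a single raw .pbf tile rather than an mbtiles database. The same check in isolation (a minimal sketch with the error handling trimmed; the helper name is made up):

#include <cstdio>
#include <cstring>

// True if the file starts with the 15-character SQLite 3 magic,
// i.e. it should be opened as an mbtiles database, not as a raw tile.
bool is_sqlite(const char *fname) {
	char header[16] = {0};
	FILE *fp = fopen(fname, "rb");
	if (fp == NULL) {
		return false;
	}
	size_t n = fread(header, 1, 15, fp);
	fclose(fp);
	return n == 15 && memcmp(header, "SQLite format 3", 15) == 0;
}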

126
dirtiles.cpp Normal file

@ -0,0 +1,126 @@
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>
#include "dirtiles.hpp"
std::string dir_read_tile(std::string pbfPath) {
std::ifstream pbfFile(pbfPath, std::ios::in | std::ios::binary);
std::ostringstream contents;
contents << pbfFile.rdbuf();
pbfFile.close();
return (contents.str());
}
void dir_write_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf) {
mkdir(outdir, S_IRWXU | S_IRWXG | S_IRWXO);
std::string curdir(outdir);
std::string slash("/");
std::string newdir = curdir + slash + std::to_string(z);
mkdir(newdir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
newdir = newdir + "/" + std::to_string(tx);
mkdir(newdir.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
newdir = newdir + "/" + std::to_string(ty) + ".pbf";
struct stat st;
if (stat(newdir.c_str(), &st) == 0) {
fprintf(stderr, "Can't write tile to already existing %s\n", newdir.c_str());
exit(EXIT_FAILURE);
}
std::ofstream pbfFile(newdir, std::ios::out | std::ios::binary);
pbfFile.write(pbf.data(), pbf.size());
pbfFile.close();
}
bool numeric(const char *s) {
if (*s == '\0') {
return false;
}
for (; *s != 0; s++) {
if (*s < '0' || *s > '9') {
return false;
}
}
return true;
}
bool pbfname(const char *s) {
while (*s >= '0' && *s <= '9') {
s++;
}
return strcmp(s, ".pbf") == 0;
}
void check_dir(const char *dir, bool rm) {
struct stat st;
std::string meta = std::string(dir) + "/" + "metadata.json";
if (rm) {
unlink(meta.c_str()); // error OK since it may not exist;
} else {
if (stat(meta.c_str(), &st) == 0) {
fprintf(stderr, "%s: file exists\n", meta.c_str());
exit(EXIT_FAILURE);
}
}
DIR *d1 = opendir(dir);
if (d1 != NULL) {
struct dirent *dp;
while ((dp = readdir(d1)) != NULL) {
if (numeric(dp->d_name)) {
std::string z = std::string(dir) + "/" + dp->d_name;
DIR *d2 = opendir(z.c_str());
if (d2 == NULL) {
perror(z.c_str());
exit(EXIT_FAILURE);
}
struct dirent *dp2;
while ((dp2 = readdir(d2)) != NULL) {
if (numeric(dp2->d_name)) {
std::string x = z + "/" + dp2->d_name;
DIR *d3 = opendir(x.c_str());
if (d3 == NULL) {
perror(x.c_str());
exit(EXIT_FAILURE);
}
struct dirent *dp3;
while ((dp3 = readdir(d3)) != NULL) {
if (pbfname(dp3->d_name)) {
std::string y = x + "/" + dp3->d_name;
if (rm) {
if (unlink(y.c_str()) != 0) {
perror(y.c_str());
exit(EXIT_FAILURE);
}
} else {
fprintf(stderr, "%s: file exists\n", y.c_str());
exit(EXIT_FAILURE);
}
}
}
closedir(d3);
}
}
closedir(d2);
}
}
closedir(d1);
}
}

12
dirtiles.hpp Normal file

@ -0,0 +1,12 @@
#include <string>
#ifndef DIRTILES_HPP
#define DIRTILES_HPP
std::string dir_read_tile(std::string pbfPath);
void dir_write_tile(const char *outdir, int z, int tx, int ty, std::string const &pbf);
void check_dir(const char *d, bool rm);
#endif
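dir_write_tile() above lays tiles out on disk as outdir/zoom/x/y.pbf, creating the intermediate directories as needed and refusing to overwrite an existing tile. A small sketch of writing one tile and reading it back through the declarations in dirtiles.hpp (the directory name and payload are illustrative):

#include <string>
#include <cstdio>
#include "dirtiles.hpp"

int main() {
	std::string pbf = "...";  // a serialized vector tile would go here
	// Creates ./tiles, ./tiles/14, ./tiles/14/2621 and writes ./tiles/14/2621/6333.pbf
	dir_write_tile("tiles", 14, 2621, 6333, pbf);
	std::string back = dir_read_tile("tiles/14/2621/6333.pbf");
	printf("%zu bytes\n", back.size());
	return 0;
}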


@ -11,7 +11,7 @@ void enumerate(char *fname) {
exit(EXIT_FAILURE);
}
char *sql = "SELECT zoom_level, tile_column, tile_row from tiles;";
const char *sql = "SELECT zoom_level, tile_column, tile_row from tiles order by zoom_level, tile_column, tile_row;";
sqlite3_stmt *stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
@ -24,6 +24,11 @@ void enumerate(char *fname) {
long long x = sqlite3_column_int(stmt, 1);
long long y = sqlite3_column_int(stmt, 2);
if (zoom < 0 || zoom > 31) {
fprintf(stderr, "Corrupt mbtiles file: impossible zoom level %lld\n", zoom);
exit(EXIT_FAILURE);
}
y = (1LL << zoom) - 1 - y;
printf("%s %lld %lld %lld\n", fname, zoom, x, y);
}
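The added lines in enumerate() above reject impossible zoom levels and then flip the row number, because mbtiles stores tile rows in TMS order (row 0 at the bottom) while the enumeration is reported in XYZ order (row 0 at the top). The flip is its own inverse; a small sketch:

#include <cstdio>

// Convert a tile row between TMS and XYZ numbering at a given zoom.
// Applying it twice returns the original row.
long long flip_row(long long zoom, long long y) {
	return (1LL << zoom) - 1 - y;
}

int main() {
	long long zoom = 14, tms_row = 10050;
	long long xyz_row = flip_row(zoom, tms_row);
	printf("z%lld: TMS %lld <-> XYZ %lld\n", zoom, tms_row, xyz_row);
	return 0;
}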

328
evaluator.cpp Normal file

@ -0,0 +1,328 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <map>
#include "mvt.hpp"
#include "evaluator.hpp"
int compare(mvt_value one, json_object *two, bool &fail) {
if (one.type == mvt_string) {
if (two->type != JSON_STRING) {
fail = true;
return false; // string vs non-string
}
return strcmp(one.string_value.c_str(), two->string);
}
if (one.type == mvt_double || one.type == mvt_float || one.type == mvt_int || one.type == mvt_uint || one.type == mvt_sint) {
if (two->type != JSON_NUMBER) {
fail = true;
return false; // number vs non-number
}
double v;
if (one.type == mvt_double) {
v = one.numeric_value.double_value;
} else if (one.type == mvt_float) {
v = one.numeric_value.float_value;
} else if (one.type == mvt_int) {
v = one.numeric_value.int_value;
} else if (one.type == mvt_uint) {
v = one.numeric_value.uint_value;
} else if (one.type == mvt_sint) {
v = one.numeric_value.sint_value;
} else {
fprintf(stderr, "Internal error: bad mvt type %d\n", one.type);
exit(EXIT_FAILURE);
}
if (v < two->number) {
return -1;
} else if (v > two->number) {
return 1;
} else {
return 0;
}
}
if (one.type == mvt_bool) {
if (two->type != JSON_TRUE && two->type != JSON_FALSE) {
fail = true;
return false; // bool vs non-bool
}
bool b = two->type != JSON_FALSE;
return one.numeric_value.bool_value > b;
}
if (one.type == mvt_null) {
if (two->type != JSON_NULL) {
fail = true;
return false; // null vs non-null
}
return 0; // null equals null
}
fprintf(stderr, "Internal error: bad mvt type %d\n", one.type);
exit(EXIT_FAILURE);
}
bool eval(std::map<std::string, mvt_value> const &feature, json_object *f) {
if (f == NULL || f->type != JSON_ARRAY) {
fprintf(stderr, "Filter is not an array: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (f->length < 1) {
fprintf(stderr, "Array too small in filter: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (f->array[0]->type != JSON_STRING) {
fprintf(stderr, "Filter operation is not a string: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (strcmp(f->array[0]->string, "has") == 0 ||
strcmp(f->array[0]->string, "!has") == 0) {
if (f->length != 2) {
fprintf(stderr, "Wrong number of array elements in filter: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (strcmp(f->array[0]->string, "has") == 0) {
if (f->array[1]->type != JSON_STRING) {
fprintf(stderr, "\"has\" key is not a string: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
return feature.count(std::string(f->array[1]->string)) != 0;
}
if (strcmp(f->array[0]->string, "!has") == 0) {
if (f->array[1]->type != JSON_STRING) {
fprintf(stderr, "\"!has\" key is not a string: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
return feature.count(std::string(f->array[1]->string)) == 0;
}
}
if (strcmp(f->array[0]->string, "==") == 0 ||
strcmp(f->array[0]->string, "!=") == 0 ||
strcmp(f->array[0]->string, ">") == 0 ||
strcmp(f->array[0]->string, ">=") == 0 ||
strcmp(f->array[0]->string, "<") == 0 ||
strcmp(f->array[0]->string, "<=") == 0) {
if (f->length != 3) {
fprintf(stderr, "Wrong number of array elements in filter: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (f->array[1]->type != JSON_STRING) {
fprintf(stderr, "\"!has\" key is not a string: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
auto ff = feature.find(std::string(f->array[1]->string));
if (ff == feature.end()) {
static bool warned = false;
if (!warned) {
const char *s = json_stringify(f);
fprintf(stderr, "Warning: attribute not found for comparison: %s\n", s);
free((void *) s);
warned = true;
}
if (strcmp(f->array[0]->string, "!=") == 0) {
return true; // attributes that aren't found are not equal
}
return false; // not found: comparison is false
}
bool fail = false;
int cmp = compare(ff->second, f->array[2], fail);
if (fail) {
static bool warned = false;
if (!warned) {
const char *s = json_stringify(f);
fprintf(stderr, "Warning: mismatched type in comparison: %s\n", s);
free((void *) s);
warned = true;
}
if (strcmp(f->array[0]->string, "!=") == 0) {
return true; // mismatched types are not equal
}
return false;
}
if (strcmp(f->array[0]->string, "==") == 0) {
return cmp == 0;
}
if (strcmp(f->array[0]->string, "!=") == 0) {
return cmp != 0;
}
if (strcmp(f->array[0]->string, ">") == 0) {
return cmp > 0;
}
if (strcmp(f->array[0]->string, ">=") == 0) {
return cmp >= 0;
}
if (strcmp(f->array[0]->string, "<") == 0) {
return cmp < 0;
}
if (strcmp(f->array[0]->string, "<=") == 0) {
return cmp <= 0;
}
fprintf(stderr, "Internal error: can't happen: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (strcmp(f->array[0]->string, "all") == 0 ||
strcmp(f->array[0]->string, "any") == 0 ||
strcmp(f->array[0]->string, "none") == 0) {
bool v;
if (strcmp(f->array[0]->string, "all") == 0) {
v = true;
} else {
v = false;
}
for (size_t i = 1; i < f->length; i++) {
bool out = eval(feature, f->array[i]);
if (strcmp(f->array[0]->string, "all") == 0) {
v = v && out;
if (!v) {
break;
}
} else {
v = v || out;
if (v) {
break;
}
}
}
if (strcmp(f->array[0]->string, "none") == 0) {
return !v;
} else {
return v;
}
}
if (strcmp(f->array[0]->string, "in") == 0 ||
strcmp(f->array[0]->string, "!in") == 0) {
if (f->length < 2) {
fprintf(stderr, "Array too small in filter: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
if (f->array[1]->type != JSON_STRING) {
fprintf(stderr, "\"!has\" key is not a string: %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
auto ff = feature.find(std::string(f->array[1]->string));
if (ff == feature.end()) {
static bool warned = false;
if (!warned) {
const char *s = json_stringify(f);
fprintf(stderr, "Warning: attribute not found for comparison: %s\n", s);
free((void *) s);
warned = true;
}
if (strcmp(f->array[0]->string, "!in") == 0) {
return true; // attributes that aren't found are not in
}
return false; // not found: comparison is false
}
bool found = false;
for (size_t i = 2; i < f->length; i++) {
bool fail = false;
int cmp = compare(ff->second, f->array[i], fail);
if (fail) {
static bool warned = false;
if (!warned) {
const char *s = json_stringify(f);
fprintf(stderr, "Warning: mismatched type in comparison: %s\n", s);
free((void *) s);
warned = true;
}
cmp = 1;
}
if (cmp == 0) {
found = true;
break;
}
}
if (strcmp(f->array[0]->string, "in") == 0) {
return found;
} else {
return !found;
}
}
fprintf(stderr, "Unknown filter %s\n", json_stringify(f));
exit(EXIT_FAILURE);
}
bool evaluate(std::map<std::string, mvt_value> const &feature, std::string const &layer, json_object *filter) {
if (filter == NULL || filter->type != JSON_HASH) {
fprintf(stderr, "Error: filter is not a hash: %s\n", json_stringify(filter));
exit(EXIT_FAILURE);
}
bool ok = true;
json_object *f;
f = json_hash_get(filter, layer.c_str());
if (ok && f != NULL) {
ok = eval(feature, f);
}
f = json_hash_get(filter, "*");
if (ok && f != NULL) {
ok = eval(feature, f);
}
return ok;
}
json_object *read_filter(const char *fname) {
FILE *fp = fopen(fname, "r");
if (fp == NULL) {
perror(fname);
exit(EXIT_FAILURE);
}
json_pull *jp = json_begin_file(fp);
json_object *filter = json_read_tree(jp);
if (filter == NULL) {
fprintf(stderr, "%s: %s\n", fname, jp->error);
exit(EXIT_FAILURE);
}
json_disconnect(filter);
json_end(jp);
fclose(fp);
return filter;
}
json_object *parse_filter(const char *s) {
json_pull *jp = json_begin_string(s);
json_object *filter = json_read_tree(jp);
if (filter == NULL) {
fprintf(stderr, "Could not parse filter %s\n", s);
fprintf(stderr, "%s\n", jp->error);
exit(EXIT_FAILURE);
}
json_disconnect(filter);
json_end(jp);
return filter;
}

13
evaluator.hpp Normal file

@ -0,0 +1,13 @@
#ifndef EVALUATOR_HPP
#define EVALUATOR_HPP
#include <map>
#include <string>
#include "jsonpull/jsonpull.h"
#include "mvt.hpp"
bool evaluate(std::map<std::string, mvt_value> const &feature, std::string const &layer, json_object *filter);
json_object *parse_filter(const char *s);
json_object *read_filter(const char *fname);
#endif
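evaluate() above applies a GL-style filter to one feature's attributes: the filter JSON is a hash keyed by layer name (or "*"), and each entry is an expression array such as ["==", key, value], ["has", key], or ["all", ...]. A minimal sketch of calling it through these declarations (the layer name, attribute, and filter text are made up for illustration):

#include <map>
#include <string>
#include <cstdio>
#include "mvt.hpp"
#include "evaluator.hpp"

int main() {
	// One feature's attributes, keyed by name, as tile-join presents them
	std::map<std::string, mvt_value> feature;
	mvt_value v;
	v.type = mvt_string;
	v.string_value = "motorway";
	feature.insert(std::make_pair(std::string("class"), v));

	// Keep only motorways in the "roads" layer
	json_object *filter = parse_filter("{\"roads\": [\"==\", \"class\", \"motorway\"]}");
	bool keep = evaluate(feature, "roads", filter);
	printf("%s\n", keep ? "keep" : "drop");
	return 0;
}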

27
filters/limit-tiles-to-bbox Executable file

@ -0,0 +1,27 @@
#!/usr/bin/perl
use Math::Trig;
use strict;
# http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
sub getTileNumber {
my ($lat, $lon, $zoom) = @_;
my $xtile = int(($lon + 180) / 360 * 2 ** $zoom);
my $ytile = int((1 - log(tan(deg2rad($lat)) + sec(deg2rad($lat))) / pi) / 2 * 2 ** $zoom);
return ($xtile, $ytile);
}
my ($minlon, $minlat, $maxlon, $maxlat, $z, $x, $y) = @ARGV;
my ($x1, $y1) = getTileNumber($maxlat, $minlon, $z);
my ($x2, $y2) = getTileNumber($minlat, $maxlon, $z);
if ($x >= $x1 && $x <= $x2 && $y >= $y1 && $y <= $y2) {
while (<STDIN>) {
print;
}
} else {
while (<STDIN>) {
}
}
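The Perl filter above passes a tile's features through only when the tile falls inside a bounding box, converting the box corners to tile numbers with the standard slippy-map formula. The same conversion sketched in C++ for comparison (this is the well-known formula, not code from the repository):

#include <cmath>
#include <cstdio>

// lat/lon in degrees -> tile column/row at the given zoom (slippy-map scheme)
void tile_for(double lat, double lon, int zoom, long long *x, long long *y) {
	double lat_rad = lat * M_PI / 180.0;
	double n = (double) (1LL << zoom);
	*x = (long long) ((lon + 180.0) / 360.0 * n);
	*y = (long long) ((1.0 - log(tan(lat_rad) + 1.0 / cos(lat_rad)) / M_PI) / 2.0 * n);
}

int main() {
	long long x, y;
	tile_for(37.7749, -122.4194, 12, &x, &y);  // San Francisco at zoom 12
	printf("12/%lld/%lld\n", x, y);
	return 0;
}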

572
geobuf.cpp Normal file

@ -0,0 +1,572 @@
#include <stdio.h>
#include <string>
#include <limits.h>
#include <pthread.h>
#include "mvt.hpp"
#include "serial.hpp"
#include "geobuf.hpp"
#include "geojson.hpp"
#include "projection.hpp"
#include "main.hpp"
#include "protozero/varint.hpp"
#include "protozero/pbf_reader.hpp"
#include "protozero/pbf_writer.hpp"
#include "milo/dtoa_milo.h"
#include "jsonpull/jsonpull.h"
#define POINT 0
#define MULTIPOINT 1
#define LINESTRING 2
#define MULTILINESTRING 3
#define POLYGON 4
#define MULTIPOLYGON 5
struct queued_feature {
protozero::pbf_reader pbf;
size_t dim;
double e;
std::vector<std::string> *keys;
struct serialization_state *sst;
int layer;
std::string layername;
};
static std::vector<queued_feature> feature_queue;
void ensureDim(size_t dim) {
if (dim < 2) {
fprintf(stderr, "Geometry has fewer than 2 dimensions: %zu\n", dim);
exit(EXIT_FAILURE);
}
}
serial_val readValue(protozero::pbf_reader &pbf, std::vector<std::string> &keys) {
serial_val sv;
sv.type = mvt_null;
sv.s = "null";
while (pbf.next()) {
switch (pbf.tag()) {
case 1:
sv.type = mvt_string;
sv.s = pbf.get_string();
break;
case 2:
sv.type = mvt_double;
sv.s = milo::dtoa_milo(pbf.get_double());
break;
case 3:
sv.type = mvt_double;
sv.s = std::to_string(pbf.get_uint64());
break;
case 4:
sv.type = mvt_double;
sv.s = std::to_string(-(long long) pbf.get_uint64());
break;
case 5:
sv.type = mvt_bool;
if (pbf.get_bool()) {
sv.s = "true";
} else {
sv.s = "false";
}
break;
case 6:
sv.type = mvt_string; // stringified JSON
sv.s = pbf.get_string();
if (sv.s == "null") {
sv.type = mvt_null;
}
break;
default:
pbf.skip();
}
}
return sv;
}
drawvec readPoint(std::vector<long long> &coords, std::vector<int> &lengths, size_t dim, double e) {
ensureDim(dim);
long long x, y;
projection->project(coords[0] / e, coords[1] / e, 32, &x, &y);
drawvec dv;
dv.push_back(draw(VT_MOVETO, x, y));
return dv;
}
drawvec readLinePart(std::vector<long long> &coords, std::vector<int> &lengths, size_t dim, double e, size_t start, size_t end, bool closed) {
ensureDim(dim);
drawvec dv;
std::vector<long long> prev;
std::vector<double> p;
prev.resize(dim);
p.resize(dim);
for (size_t i = start; i + dim - 1 < end; i += dim) {
if (i + dim - 1 >= coords.size()) {
fprintf(stderr, "Internal error: line segment %zu vs %zu\n", i + dim - 1, coords.size());
exit(EXIT_FAILURE);
}
for (size_t d = 0; d < dim; d++) {
prev[d] += coords[i + d];
p[d] = prev[d] / e;
}
long long x, y;
projection->project(p[0], p[1], 32, &x, &y);
if (i == start) {
dv.push_back(draw(VT_MOVETO, x, y));
} else {
dv.push_back(draw(VT_LINETO, x, y));
}
}
if (closed && dv.size() > 0) {
dv.push_back(draw(VT_LINETO, dv[0].x, dv[0].y));
}
return dv;
}
drawvec readLine(std::vector<long long> &coords, std::vector<int> &lengths, size_t dim, double e, bool closed) {
return readLinePart(coords, lengths, dim, e, 0, coords.size(), closed);
}
drawvec readMultiLine(std::vector<long long> &coords, std::vector<int> &lengths, size_t dim, double e, bool closed) {
if (lengths.size() == 0) {
return readLinePart(coords, lengths, dim, e, 0, coords.size(), closed);
}
drawvec dv;
size_t here = 0;
for (size_t i = 0; i < lengths.size(); i++) {
drawvec dv2 = readLinePart(coords, lengths, dim, e, here, here + lengths[i] * dim, closed);
here += lengths[i] * dim;
for (size_t j = 0; j < dv2.size(); j++) {
dv.push_back(dv2[j]);
}
}
return dv;
}
drawvec readMultiPolygon(std::vector<long long> &coords, std::vector<int> &lengths, size_t dim, double e) {
ensureDim(dim);
if (lengths.size() == 0) {
return readLinePart(coords, lengths, dim, e, 0, coords.size(), true);
}
size_t polys = lengths[0];
size_t n = 1;
size_t here = 0;
drawvec dv;
for (size_t i = 0; i < polys; i++) {
size_t rings = lengths[n++];
for (size_t j = 0; j < rings; j++) {
drawvec dv2 = readLinePart(coords, lengths, dim, e, here, here + lengths[n] * dim, true);
here += lengths[n] * dim;
n++;
for (size_t k = 0; k < dv2.size(); k++) {
dv.push_back(dv2[k]);
}
}
dv.push_back(draw(VT_CLOSEPATH, 0, 0)); // mark that the next ring is outer
}
return dv;
}
struct drawvec_type {
drawvec dv;
int type;
};
std::vector<drawvec_type> readGeometry(protozero::pbf_reader &pbf, size_t dim, double e, std::vector<std::string> &keys) {
std::vector<drawvec_type> ret;
std::vector<long long> coords;
std::vector<int> lengths;
int type = -1;
while (pbf.next()) {
switch (pbf.tag()) {
case 1:
type = pbf.get_enum();
break;
case 2: {
auto pi = pbf.get_packed_uint32();
for (auto it = pi.first; it != pi.second; ++it) {
lengths.push_back(*it);
}
break;
}
case 3: {
auto pi = pbf.get_packed_sint64();
for (auto it = pi.first; it != pi.second; ++it) {
coords.push_back(*it);
}
break;
}
case 4: {
protozero::pbf_reader geometry_reader(pbf.get_message());
std::vector<drawvec_type> dv2 = readGeometry(geometry_reader, dim, e, keys);
for (size_t i = 0; i < dv2.size(); i++) {
ret.push_back(dv2[i]);
}
break;
}
default:
pbf.skip();
}
}
drawvec_type dv;
if (type == POINT) {
dv.dv = readPoint(coords, lengths, dim, e);
} else if (type == MULTIPOINT) {
dv.dv = readLine(coords, lengths, dim, e, false);
} else if (type == LINESTRING) {
dv.dv = readLine(coords, lengths, dim, e, false);
} else if (type == MULTILINESTRING) {
dv.dv = readMultiLine(coords, lengths, dim, e, false);
} else if (type == POLYGON) {
dv.dv = readMultiLine(coords, lengths, dim, e, true);
} else if (type == MULTIPOLYGON) {
dv.dv = readMultiPolygon(coords, lengths, dim, e);
} else {
// GeometryCollection
return ret;
}
dv.type = type / 2 + 1;
ret.push_back(dv);
return ret;
}
void readFeature(protozero::pbf_reader &pbf, size_t dim, double e, std::vector<std::string> &keys, struct serialization_state *sst, int layer, std::string layername) {
std::vector<drawvec_type> dv;
long long id = 0;
bool has_id = false;
std::vector<serial_val> values;
std::map<std::string, serial_val> other;
std::vector<std::string> full_keys;
std::vector<serial_val> full_values;
while (pbf.next()) {
switch (pbf.tag()) {
case 1: {
protozero::pbf_reader geometry_reader(pbf.get_message());
std::vector<drawvec_type> dv2 = readGeometry(geometry_reader, dim, e, keys);
for (size_t i = 0; i < dv2.size(); i++) {
dv.push_back(dv2[i]);
}
break;
}
case 11: {
static bool warned = false;
if (!warned) {
fprintf(stderr, "Non-numeric feature IDs not supported\n");
warned = true;
}
pbf.skip();
break;
}
case 12:
has_id = true;
id = pbf.get_sint64();
if (id < 0) {
static bool warned = false;
if (!warned) {
fprintf(stderr, "Out of range feature id %lld\n", id);
warned = true;
}
has_id = false;
}
break;
case 13: {
protozero::pbf_reader value_reader(pbf.get_message());
values.push_back(readValue(value_reader, keys));
break;
}
case 14: {
std::vector<size_t> properties;
auto pi = pbf.get_packed_uint32();
for (auto it = pi.first; it != pi.second; ++it) {
properties.push_back(*it);
}
for (size_t i = 0; i + 1 < properties.size(); i += 2) {
if (properties[i] >= keys.size()) {
fprintf(stderr, "Out of bounds key: %zu in %zu\n", properties[i], keys.size());
exit(EXIT_FAILURE);
}
if (properties[i + 1] >= values.size()) {
fprintf(stderr, "Out of bounds value: %zu in %zu\n", properties[i + 1], values.size());
exit(EXIT_FAILURE);
}
full_keys.push_back(keys[properties[i]]);
full_values.push_back(values[properties[i + 1]]);
}
values.clear();
break;
}
case 15: {
std::vector<size_t> misc;
auto pi = pbf.get_packed_uint32();
for (auto it = pi.first; it != pi.second; ++it) {
misc.push_back(*it);
}
for (size_t i = 0; i + 1 < misc.size(); i += 2) {
if (misc[i] >= keys.size()) {
fprintf(stderr, "Out of bounds key: %zu in %zu\n", misc[i], keys.size());
exit(EXIT_FAILURE);
}
if (misc[i + 1] >= values.size()) {
fprintf(stderr, "Out of bounds value: %zu in %zu\n", misc[i + 1], values.size());
exit(EXIT_FAILURE);
}
other.insert(std::pair<std::string, serial_val>(keys[misc[i]], values[misc[i + 1]]));
}
values.clear();
break;
}
default:
pbf.skip();
}
}
for (size_t i = 0; i < dv.size(); i++) {
serial_feature sf;
sf.layer = layer;
sf.layername = layername;
sf.segment = sst->segment;
sf.has_id = has_id;
sf.id = id;
sf.has_tippecanoe_minzoom = false;
sf.has_tippecanoe_maxzoom = false;
sf.feature_minzoom = false;
sf.seq = *(sst->layer_seq);
sf.geometry = dv[i].dv;
sf.t = dv[i].type;
sf.full_keys = full_keys;
sf.full_values = full_values;
sf.m = sf.full_values.size();
auto tip = other.find("tippecanoe");
if (tip != other.end()) {
json_pull *jp = json_begin_string(tip->second.s.c_str());
json_object *o = json_read_tree(jp);
if (o != NULL) {
json_object *min = json_hash_get(o, "minzoom");
if (min != NULL && (min->type == JSON_STRING || min->type == JSON_NUMBER)) {
sf.has_tippecanoe_minzoom = true;
sf.tippecanoe_minzoom = atoi(min->string);
}
json_object *max = json_hash_get(o, "maxzoom");
if (max != NULL && (max->type == JSON_STRING || max->type == JSON_NUMBER)) {
sf.has_tippecanoe_maxzoom = true;
sf.tippecanoe_maxzoom = atoi(max->string);
}
json_object *tlayer = json_hash_get(o, "layer");
if (tlayer != NULL && (tlayer->type == JSON_STRING || tlayer->type == JSON_NUMBER)) {
sf.layername = tlayer->string;
}
}
json_free(o);
json_end(jp);
}
serialize_feature(sst, sf);
}
}
struct queue_run_arg {
size_t start;
size_t end;
size_t segment;
};
void *run_parse_feature(void *v) {
struct queue_run_arg *qra = (struct queue_run_arg *) v;
for (size_t i = qra->start; i < qra->end; i++) {
struct queued_feature &qf = feature_queue[i];
readFeature(qf.pbf, qf.dim, qf.e, *qf.keys, &qf.sst[qra->segment], qf.layer, qf.layername);
}
return NULL;
}
void runQueue() {
if (feature_queue.size() == 0) {
return;
}
struct queue_run_arg qra[CPUS];
pthread_t pthreads[CPUS];
for (size_t i = 0; i < CPUS; i++) {
*(feature_queue[0].sst[i].layer_seq) = *(feature_queue[0].sst[0].layer_seq) + feature_queue.size() * i / CPUS;
qra[i].start = feature_queue.size() * i / CPUS;
qra[i].end = feature_queue.size() * (i + 1) / CPUS;
qra[i].segment = i;
if (pthread_create(&pthreads[i], NULL, run_parse_feature, &qra[i]) != 0) {
perror("pthread_create");
exit(EXIT_FAILURE);
}
}
for (size_t i = 0; i < CPUS; i++) {
void *retval;
if (pthread_join(pthreads[i], &retval) != 0) {
perror("pthread_join");
}
}
*(feature_queue[0].sst[0].layer_seq) = *(feature_queue[0].sst[CPUS - 1].layer_seq);
feature_queue.clear();
}
void queueFeature(protozero::pbf_reader &pbf, size_t dim, double e, std::vector<std::string> &keys, struct serialization_state *sst, int layer, std::string layername) {
struct queued_feature qf;
qf.pbf = pbf;
qf.dim = dim;
qf.e = e;
qf.keys = &keys;
qf.sst = sst;
qf.layer = layer;
qf.layername = layername;
feature_queue.push_back(qf);
if (feature_queue.size() > CPUS * 500) {
runQueue();
}
}
void outBareGeometry(drawvec const &dv, int type, size_t dim, double e, std::vector<std::string> &keys, struct serialization_state *sst, int layer, std::string layername) {
serial_feature sf;
sf.layer = layer;
sf.layername = layername;
sf.segment = sst->segment;
sf.has_id = false;
sf.has_tippecanoe_minzoom = false;
sf.has_tippecanoe_maxzoom = false;
sf.feature_minzoom = false;
sf.seq = (*sst->layer_seq);
sf.geometry = dv;
sf.t = type;
sf.m = 0;
serialize_feature(sst, sf);
}
void readFeatureCollection(protozero::pbf_reader &pbf, size_t dim, double e, std::vector<std::string> &keys, struct serialization_state *sst, int layer, std::string layername) {
while (pbf.next()) {
switch (pbf.tag()) {
case 1: {
protozero::pbf_reader feature_reader(pbf.get_message());
queueFeature(feature_reader, dim, e, keys, sst, layer, layername);
break;
}
default:
pbf.skip();
}
}
}
void parse_geobuf(struct serialization_state *sst, const char *src, size_t len, int layer, std::string layername) {
protozero::pbf_reader pbf(src, len);
size_t dim = 2;
double e = 1e6;
std::vector<std::string> keys;
while (pbf.next()) {
switch (pbf.tag()) {
case 1:
keys.push_back(pbf.get_string());
break;
case 2:
dim = pbf.get_int64();
break;
case 3:
e = pow(10, pbf.get_int64());
break;
case 4: {
protozero::pbf_reader feature_collection_reader(pbf.get_message());
readFeatureCollection(feature_collection_reader, dim, e, keys, sst, layer, layername);
break;
}
case 5: {
protozero::pbf_reader feature_reader(pbf.get_message());
queueFeature(feature_reader, dim, e, keys, sst, layer, layername);
break;
}
case 6: {
protozero::pbf_reader geometry_reader(pbf.get_message());
std::vector<drawvec_type> dv = readGeometry(geometry_reader, dim, e, keys);
for (size_t i = 0; i < dv.size(); i++) {
outBareGeometry(dv[i].dv, dv[i].type, dim, e, keys, sst, layer, layername);
}
break;
}
default:
pbf.skip();
}
}
runQueue();
}
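
As readLinePart shows, geobuf coordinate arrays are cumulative deltas scaled by e = 10^precision; a standalone sketch of that decoding step with made-up input values (not from the diff):

#include <cstdio>
#include <vector>

int main() {
	double e = 1e6;  // precision 6, the geobuf default
	// Two 2-D points, delta-encoded: the second point is stored relative to the first.
	std::vector<long long> coords = {-122414000, 37776000, 1000, -2000};
	long long px = 0, py = 0;
	for (size_t i = 0; i + 1 < coords.size(); i += 2) {
		px += coords[i];
		py += coords[i + 1];
		printf("lon %.6f lat %.6f\n", px / e, py / e);
	}
	return 0;
}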

13
geobuf.hpp Normal file
View File

@ -0,0 +1,13 @@
#ifndef GEOBUF_HPP
#define GEOBUF_HPP
#include <stdio.h>
#include <set>
#include <map>
#include <string>
#include "mbtiles.hpp"
#include "serial.hpp"
void parse_geobuf(struct serialization_state *sst, const char *s, size_t len, int layer, std::string layername);
#endif

1657
geojson.c

File diff suppressed because it is too large

397
geojson.cpp Normal file

@ -0,0 +1,397 @@
#ifdef MTRACE
#include <mcheck.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <sqlite3.h>
#include <stdarg.h>
#include <sys/resource.h>
#include <pthread.h>
#include <vector>
#include <algorithm>
#include <set>
#include <map>
#include <string>
#include "jsonpull/jsonpull.h"
#include "pool.hpp"
#include "projection.hpp"
#include "memfile.hpp"
#include "main.hpp"
#include "mbtiles.hpp"
#include "geojson.hpp"
#include "geometry.hpp"
#include "options.hpp"
#include "serial.hpp"
#include "text.hpp"
#include "read_json.hpp"
#include "mvt.hpp"
int serialize_geojson_feature(struct serialization_state *sst, json_object *geometry, json_object *properties, json_object *id, int layer, json_object *tippecanoe, json_object *feature, std::string layername) {
json_object *geometry_type = json_hash_get(geometry, "type");
if (geometry_type == NULL) {
static int warned = 0;
if (!warned) {
fprintf(stderr, "%s:%d: null geometry (additional not reported)\n", sst->fname, sst->line);
json_context(feature);
warned = 1;
}
return 0;
}
if (geometry_type->type != JSON_STRING) {
fprintf(stderr, "%s:%d: geometry type is not a string\n", sst->fname, sst->line);
json_context(feature);
return 0;
}
json_object *coordinates = json_hash_get(geometry, "coordinates");
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
fprintf(stderr, "%s:%d: feature without coordinates array\n", sst->fname, sst->line);
json_context(feature);
return 0;
}
int t;
for (t = 0; t < GEOM_TYPES; t++) {
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
break;
}
}
if (t >= GEOM_TYPES) {
fprintf(stderr, "%s:%d: Can't handle geometry type %s\n", sst->fname, sst->line, geometry_type->string);
json_context(feature);
return 0;
}
int tippecanoe_minzoom = -1;
int tippecanoe_maxzoom = -1;
std::string tippecanoe_layername;
if (tippecanoe != NULL) {
json_object *min = json_hash_get(tippecanoe, "minzoom");
if (min != NULL && min->type == JSON_NUMBER) {
tippecanoe_minzoom = min->number;
}
if (min != NULL && min->type == JSON_STRING) {
tippecanoe_minzoom = atoi(min->string);
}
json_object *max = json_hash_get(tippecanoe, "maxzoom");
if (max != NULL && max->type == JSON_NUMBER) {
tippecanoe_maxzoom = max->number;
}
if (max != NULL && max->type == JSON_STRING) {
tippecanoe_maxzoom = atoi(max->string);
}
json_object *ln = json_hash_get(tippecanoe, "layer");
if (ln != NULL && (ln->type == JSON_STRING || ln->type == JSON_NUMBER)) {
tippecanoe_layername = std::string(ln->string);
}
}
bool has_id = false;
unsigned long long id_value = 0;
if (id != NULL) {
if (id->type == JSON_NUMBER) {
if (id->number >= 0) {
char *err = NULL;
id_value = strtoull(id->string, &err, 10);
if (err != NULL && *err != '\0') {
static bool warned_frac = false;
if (!warned_frac) {
fprintf(stderr, "Warning: Can't represent non-integer feature ID %s\n", id->string);
warned_frac = true;
}
} else {
has_id = true;
}
} else {
static bool warned_neg = false;
if (!warned_neg) {
fprintf(stderr, "Warning: Can't represent negative feature ID %s\n", id->string);
warned_neg = true;
}
}
} else {
static bool warned_nan = false;
if (!warned_nan) {
char *s = json_stringify(id);
fprintf(stderr, "Warning: Can't represent non-numeric feature ID %s\n", s);
free(s); // stringify
warned_nan = true;
}
}
}
size_t nprop = 0;
if (properties != NULL && properties->type == JSON_HASH) {
nprop = properties->length;
}
char *metakey[nprop];
std::vector<std::string> metaval;
metaval.resize(nprop);
int metatype[nprop];
size_t m = 0;
for (size_t i = 0; i < nprop; i++) {
if (properties->keys[i]->type == JSON_STRING) {
std::string s(properties->keys[i]->string);
int type = -1;
std::string val;
stringify_value(properties->values[i], type, val, sst->fname, sst->line, feature, properties->keys[i]->string);
if (type >= 0) {
metakey[m] = properties->keys[i]->string;
metatype[m] = type;
metaval[m] = val;
m++;
} else {
metakey[m] = properties->keys[i]->string;
metatype[m] = mvt_null;
metaval[m] = "null";
m++;
}
}
}
drawvec dv;
parse_geometry(t, coordinates, dv, VT_MOVETO, sst->fname, sst->line, feature);
serial_feature sf;
sf.layer = layer;
sf.segment = sst->segment;
sf.t = mb_geometry[t];
sf.has_id = has_id;
sf.id = id_value;
sf.has_tippecanoe_minzoom = (tippecanoe_minzoom != -1);
sf.tippecanoe_minzoom = tippecanoe_minzoom;
sf.has_tippecanoe_maxzoom = (tippecanoe_maxzoom != -1);
sf.tippecanoe_maxzoom = tippecanoe_maxzoom;
sf.geometry = dv;
sf.m = m;
sf.feature_minzoom = 0; // Will be filled in during index merging
sf.seq = *(sst->layer_seq);
if (tippecanoe_layername.size() != 0) {
sf.layername = tippecanoe_layername;
} else {
sf.layername = layername;
}
for (size_t i = 0; i < m; i++) {
sf.full_keys.push_back(metakey[i]);
serial_val sv;
sv.type = metatype[i];
sv.s = metaval[i];
sf.full_values.push_back(sv);
}
return serialize_feature(sst, sf);
}
void check_crs(json_object *j, const char *reading) {
json_object *crs = json_hash_get(j, "crs");
if (crs != NULL) {
json_object *properties = json_hash_get(crs, "properties");
if (properties != NULL) {
json_object *name = json_hash_get(properties, "name");
if (name != NULL && name->type == JSON_STRING) {
if (strcmp(name->string, projection->alias) != 0) {
fprintf(stderr, "%s: Warning: GeoJSON specified projection \"%s\", not the expected \"%s\".\n", reading, name->string, projection->alias);
fprintf(stderr, "%s: If \"%s\" is not the expected projection, use -s to specify the right one.\n", reading, projection->alias);
}
}
}
}
}
void parse_json(struct serialization_state *sst, json_pull *jp, int layer, std::string layername) {
long long found_hashes = 0;
long long found_features = 0;
long long found_geometries = 0;
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "%s:%d: %s\n", sst->fname, jp->line, jp->error);
if (jp->root != NULL) {
json_context(jp->root);
}
}
json_free(jp->root);
break;
}
if (j->type == JSON_HASH) {
found_hashes++;
if (found_hashes == 50 && found_features == 0 && found_geometries == 0) {
fprintf(stderr, "%s:%d: Warning: not finding any GeoJSON features or geometries in input yet after 50 objects.\n", sst->fname, jp->line);
}
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING) {
continue;
}
if (found_features == 0) {
int i;
int is_geometry = 0;
for (i = 0; i < GEOM_TYPES; i++) {
if (strcmp(type->string, geometry_names[i]) == 0) {
is_geometry = 1;
break;
}
}
if (is_geometry) {
if (j->parent != NULL) {
if (j->parent->type == JSON_ARRAY) {
if (j->parent->parent->type == JSON_HASH) {
json_object *geometries = json_hash_get(j->parent->parent, "geometries");
if (geometries != NULL) {
// Parent of Parent must be a GeometryCollection
is_geometry = 0;
}
}
} else if (j->parent->type == JSON_HASH) {
json_object *geometry = json_hash_get(j->parent, "geometry");
if (geometry != NULL) {
// Parent must be a Feature
is_geometry = 0;
}
}
}
}
if (is_geometry) {
if (found_features != 0 && found_geometries == 0) {
fprintf(stderr, "%s:%d: Warning: found a mixture of features and bare geometries\n", sst->fname, jp->line);
}
found_geometries++;
serialize_geojson_feature(sst, j, NULL, NULL, layer, NULL, j, layername);
json_free(j);
continue;
}
}
if (strcmp(type->string, "Feature") != 0) {
if (strcmp(type->string, "FeatureCollection") == 0) {
check_crs(j, sst->fname);
json_free(j);
}
continue;
}
if (found_features == 0 && found_geometries != 0) {
fprintf(stderr, "%s:%d: Warning: found a mixture of features and bare geometries\n", sst->fname, jp->line);
}
found_features++;
json_object *geometry = json_hash_get(j, "geometry");
if (geometry == NULL) {
fprintf(stderr, "%s:%d: feature with no geometry\n", sst->fname, jp->line);
json_context(j);
json_free(j);
continue;
}
json_object *properties = json_hash_get(j, "properties");
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
fprintf(stderr, "%s:%d: feature without properties hash\n", sst->fname, jp->line);
json_context(j);
json_free(j);
continue;
}
json_object *tippecanoe = json_hash_get(j, "tippecanoe");
json_object *id = json_hash_get(j, "id");
json_object *geometries = json_hash_get(geometry, "geometries");
if (geometries != NULL) {
size_t g;
for (g = 0; g < geometries->length; g++) {
serialize_geojson_feature(sst, geometries->array[g], properties, id, layer, tippecanoe, j, layername);
}
} else {
serialize_geojson_feature(sst, geometry, properties, id, layer, tippecanoe, j, layername);
}
json_free(j);
/* XXX check for any non-features in the outer object */
}
}
void *run_parse_json(void *v) {
struct parse_json_args *pja = (struct parse_json_args *) v;
parse_json(pja->sst, pja->jp, pja->layer, *pja->layername);
return NULL;
}
struct jsonmap {
char *map;
unsigned long long off;
unsigned long long end;
};
ssize_t json_map_read(struct json_pull *jp, char *buffer, size_t n) {
struct jsonmap *jm = (struct jsonmap *) jp->source;
if (jm->off + n >= jm->end) {
n = jm->end - jm->off;
}
memcpy(buffer, jm->map + jm->off, n);
jm->off += n;
return n;
}
struct json_pull *json_begin_map(char *map, long long len) {
struct jsonmap *jm = new jsonmap;
if (jm == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
jm->map = map;
jm->off = 0;
jm->end = len;
return json_begin(json_map_read, jm);
}
void json_end_map(struct json_pull *jp) {
delete (struct jsonmap *) jp->source;
json_end(jp);
}
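
A sketch of driving the map-backed reader above directly (assumed usage, not from the diff):

#include <cstdio>
#include <cstdlib>
#include "jsonpull/jsonpull.h"
#include "geojson.hpp"

void dump_buffer(char *buf, long long len) {
	json_pull *jp = json_begin_map(buf, len);
	json_object *o = json_read_tree(jp);
	if (o == NULL) {
		fprintf(stderr, "parse error: %s\n", jp->error);
	} else {
		char *s = json_stringify(o);
		printf("%s\n", s);
		free(s);
	}
	json_end_map(jp);  // json_end() frees jp->root, so o is not freed separately here
}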

28
geojson.hpp Normal file

@ -0,0 +1,28 @@
#ifndef GEOJSON_HPP
#define GEOJSON_HPP
#include <stdio.h>
#include <set>
#include <map>
#include <string>
#include "mbtiles.hpp"
#include "jsonpull/jsonpull.h"
#include "serial.hpp"
struct parse_json_args {
json_pull *jp;
int layer;
std::string *layername;
std::map<std::string, int> const *attribute_types;
bool want_dist;
struct serialization_state *sst;
};
struct json_pull *json_begin_map(char *map, long long len);
void json_end_map(struct json_pull *jp);
void parse_json(struct serialization_state *sst, json_pull *jp, int layer, std::string layername);
void *run_parse_json(void *v);
#endif

153
geojson2nd.cpp Normal file

@ -0,0 +1,153 @@
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <string>
#include "jsonpull/jsonpull.h"
int fail = EXIT_SUCCESS;
bool wrap = false;
std::string buffered;
int buffered_type = -1;
// 0: nothing yet
// 1: buffered a line
// 2: wrote the line and the wrapper
int buffer_state = 0;
void out(std::string s, int type) {
if (!wrap) {
printf("%s\n", s.c_str());
return;
}
if (buffer_state == 0) {
buffered = s;
buffered_type = type;
buffer_state = 1;
return;
}
if (buffer_state == 1) {
if (buffered_type == 1) {
printf("{\"type\":\"FeatureCollection\",\"features\":[\n");
} else {
printf("{\"type\":\"GeometryCollection\",\"geometries\":[\n");
}
printf("%s\n", buffered.c_str());
buffer_state = 2;
}
printf(",\n%s\n", s.c_str());
if (type != buffered_type) {
fprintf(stderr, "Error: mix of bare geometries and features\n");
exit(EXIT_FAILURE);
}
}
void process(FILE *fp, const char *fname) {
json_pull *jp = json_begin_file(fp);
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "%s:%d: %s\n", fname, jp->line, jp->error);
}
json_free(jp->root);
break;
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING) {
continue;
}
if (strcmp(type->string, "Feature") == 0) {
char *s = json_stringify(j);
out(s, 1);
free(s);
json_free(j);
} else if (strcmp(type->string, "Point") == 0 ||
strcmp(type->string, "MultiPoint") == 0 ||
strcmp(type->string, "LineString") == 0 ||
strcmp(type->string, "MultiLineString") == 0 ||
strcmp(type->string, "MultiPolygon") == 0) {
int is_geometry = 1;
if (j->parent != NULL) {
if (j->parent->type == JSON_ARRAY) {
if (j->parent->parent->type == JSON_HASH) {
json_object *geometries = json_hash_get(j->parent->parent, "geometries");
if (geometries != NULL) {
// Parent of Parent must be a GeometryCollection
is_geometry = 0;
}
}
} else if (j->parent->type == JSON_HASH) {
json_object *geometry = json_hash_get(j->parent, "geometry");
if (geometry != NULL) {
// Parent must be a Feature
is_geometry = 0;
}
}
}
if (is_geometry) {
char *s = json_stringify(j);
out(s, 2);
free(s);
json_free(j);
}
} else if (strcmp(type->string, "FeatureCollection") == 0) {
json_free(j);
}
}
json_end(jp);
}
int main(int argc, char **argv) {
extern int optind;
int i;
while ((i = getopt(argc, argv, "w")) != -1) {
switch (i) {
case 'w':
wrap = true;
break;
default:
fprintf(stderr, "Unexpected option -%c\n", i);
exit(EXIT_FAILURE);
}
}
if (optind >= argc) {
process(stdin, "standard input");
} else {
for (i = optind; i < argc; i++) {
FILE *f = fopen(argv[i], "r");
if (f == NULL) {
perror(argv[i]);
exit(EXIT_FAILURE);
}
process(f, argv[i]);
fclose(f);
}
}
if (buffer_state == 1) {
printf("%s\n", buffered.c_str());
} else if (buffer_state == 2) {
printf("]}\n");
}
return fail;
}


@ -1,696 +0,0 @@
#include <iostream>
#include <fstream>
#include <string>
#include <stack>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <sqlite3.h>
#include <limits.h>
#include "geometry.hh"
#include "clipper/clipper.hpp"
extern "C" {
#include "tile.h"
#include "clip.h"
#include "projection.h"
}
drawvec decode_geometry(char **meta, int z, unsigned tx, unsigned ty, int detail, long long *bbox) {
drawvec out;
bbox[0] = LONG_LONG_MAX;
bbox[1] = LONG_LONG_MAX;
bbox[2] = LONG_LONG_MIN;
bbox[3] = LONG_LONG_MIN;
long long wx = initial_x, wy = initial_y;
while (1) {
draw d;
deserialize_byte(meta, &d.op);
if (d.op == VT_END) {
break;
}
if (d.op == VT_MOVETO || d.op == VT_LINETO) {
long long dx, dy;
deserialize_long_long(meta, &dx);
deserialize_long_long(meta, &dy);
wx += dx << geometry_scale;
wy += dy << geometry_scale;
long long wwx = wx;
long long wwy = wy;
if (z != 0) {
wwx -= tx << (32 - z);
wwy -= ty << (32 - z);
}
if (wwx < bbox[0]) {
bbox[0] = wwx;
}
if (wwy < bbox[1]) {
bbox[1] = wwy;
}
if (wwx > bbox[2]) {
bbox[2] = wwx;
}
if (wwy > bbox[3]) {
bbox[3] = wwy;
}
d.x = wwx;
d.y = wwy;
}
out.push_back(d);
}
return out;
}
void to_tile_scale(drawvec &geom, int z, int detail) {
unsigned i;
for (i = 0; i < geom.size(); i++) {
geom[i].x >>= (32 - detail - z);
geom[i].y >>= (32 - detail - z);
}
}
drawvec remove_noop(drawvec geom, int type, int shift) {
// first pass: remove empty linetos
long long x = 0, y = 0;
drawvec out;
unsigned i;
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_LINETO && (geom[i].x >> shift) == x && (geom[i].y >> shift) == y) {
continue;
}
if (geom[i].op == VT_CLOSEPATH) {
fprintf(stderr, "Shouldn't happen\n");
out.push_back(geom[i]);
} else { /* moveto or lineto */
out.push_back(geom[i]);
x = geom[i].x >> shift;
y = geom[i].y >> shift;
}
}
// second pass: remove unused movetos
geom = out;
out.resize(0);
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
if (i + 1 >= geom.size()) {
continue;
}
if (geom[i + 1].op == VT_MOVETO) {
continue;
}
if (geom[i + 1].op == VT_CLOSEPATH) {
fprintf(stderr, "Shouldn't happen\n");
i++; // also remove unused closepath
continue;
}
}
out.push_back(geom[i]);
}
// third pass: remove empty movetos
if (type == VT_LINE) {
geom = out;
out.resize(0);
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
if (i > 0 && geom[i - 1].op == VT_LINETO && (geom[i - 1].x >> shift) == (geom[i].x >> shift) && (geom[i - 1].y >> shift) == (geom[i].y >> shift)) {
continue;
}
}
out.push_back(geom[i]);
}
}
return out;
}
/* XXX */
#if 0
drawvec shrink_lines(drawvec &geom, int z, int detail, int basezoom, long long *here, double droprate) {
long long res = 200LL << (32 - 8 - z);
long long portion = res / exp(log(sqrt(droprate)) * (basezoom - z));
unsigned i;
drawvec out;
for (i = 0; i < geom.size(); i++) {
if (i > 0 && (geom[i - 1].op == VT_MOVETO || geom[i - 1].op == VT_LINETO) && geom[i].op == VT_LINETO) {
double dx = (geom[i].x - geom[i - 1].x);
double dy = (geom[i].y - geom[i - 1].y);
long long d = sqrt(dx * dx + dy * dy);
long long n;
long long next = LONG_LONG_MAX;
for (n = *here; n < *here + d; n = next) {
int within;
if (n % res < portion) {
next = (n / res) * res + portion;
within = 1;
} else {
next = (n / res + 1) * res;
within = 0;
}
if (next > *here + d) {
next = *here + d;
}
//printf("drawing from %lld to %lld in %lld\n", n - *here, next - *here, d);
double f1 = (n - *here) / (double) d;
double f2 = (next - *here) / (double) d;
if (within) {
out.push_back(draw(VT_MOVETO, geom[i - 1].x + f1 * (geom[i].x - geom[i - 1].x), geom[i - 1].y + f1 * (geom[i].y - geom[i - 1].y)));
out.push_back(draw(VT_LINETO, geom[i - 1].x + f2 * (geom[i].x - geom[i - 1].x), geom[i - 1].y + f2 * (geom[i].y - geom[i - 1].y)));
} else {
out.push_back(draw(VT_MOVETO, geom[i - 1].x + f2 * (geom[i].x - geom[i - 1].x), geom[i - 1].y + f2 * (geom[i].y - geom[i - 1].y)));
}
}
*here += d;
} else {
out.push_back(geom[i]);
}
}
return out;
}
#endif
static void decode_clipped(ClipperLib::PolyNode *t, drawvec &out) {
// To make the GeoJSON come out right, we need to do each of the
// outer rings followed by its children if any, and then go back
// to do any outer-ring children of those children as a new top level.
ClipperLib::Path p = t->Contour;
for (unsigned i = 0; i < p.size(); i++) {
out.push_back(draw((i == 0) ? VT_MOVETO : VT_LINETO, p[i].X, p[i].Y));
}
if (p.size() > 0) {
out.push_back(draw(VT_LINETO, p[0].X, p[0].Y));
}
for (int n = 0; n < t->ChildCount(); n++) {
ClipperLib::Path p = t->Childs[n]->Contour;
for (unsigned i = 0; i < p.size(); i++) {
out.push_back(draw((i == 0) ? VT_MOVETO : VT_LINETO, p[i].X, p[i].Y));
}
if (p.size() > 0) {
out.push_back(draw(VT_LINETO, p[0].X, p[0].Y));
}
}
for (int n = 0; n < t->ChildCount(); n++) {
for (int m = 0; m < t->Childs[n]->ChildCount(); m++) {
decode_clipped(t->Childs[n]->Childs[m], out);
}
}
}
drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool clip) {
ClipperLib::Clipper clipper(ClipperLib::ioStrictlySimple);
for (unsigned i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
unsigned j;
for (j = i + 1; j < geom.size(); j++) {
if (geom[j].op != VT_LINETO) {
break;
}
}
ClipperLib::Path path;
drawvec tmp;
for (unsigned k = i; k < j; k++) {
path.push_back(ClipperLib::IntPoint(geom[k].x, geom[k].y));
}
if (!clipper.AddPath(path, ClipperLib::ptSubject, true)) {
#if 0
fprintf(stderr, "Couldn't add polygon for clipping:");
for (unsigned k = i; k < j; k++) {
fprintf(stderr, " %lld,%lld", geom[k].x, geom[k].y);
}
fprintf(stderr, "\n");
#endif
}
i = j - 1;
} else {
fprintf(stderr, "Unexpected operation in polygon %d\n", (int) geom[i].op);
exit(EXIT_FAILURE);
}
}
if (clip) {
long long area = 0xFFFFFFFF;
if (z != 0) {
area = 1LL << (32 - z);
}
long long clip_buffer = buffer * area / 256;
ClipperLib::Path edge;
edge.push_back(ClipperLib::IntPoint(-clip_buffer, -clip_buffer));
edge.push_back(ClipperLib::IntPoint(area + clip_buffer, -clip_buffer));
edge.push_back(ClipperLib::IntPoint(area + clip_buffer, area + clip_buffer));
edge.push_back(ClipperLib::IntPoint(-clip_buffer, area + clip_buffer));
edge.push_back(ClipperLib::IntPoint(-clip_buffer, -clip_buffer));
clipper.AddPath(edge, ClipperLib::ptClip, true);
}
ClipperLib::PolyTree clipped;
if (clip) {
if (!clipper.Execute(ClipperLib::ctIntersection, clipped)) {
fprintf(stderr, "Polygon clip failed\n");
}
} else {
if (!clipper.Execute(ClipperLib::ctUnion, clipped)) {
fprintf(stderr, "Polygon clean failed\n");
}
}
drawvec out;
for (int i = 0; i < clipped.ChildCount(); i++) {
decode_clipped(clipped.Childs[i], out);
}
return out;
}
drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double *accum_area) {
drawvec out;
long long pixel = (1 << (32 - detail - z)) * 2;
*reduced = true;
for (unsigned i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
unsigned j;
for (j = i + 1; j < geom.size(); j++) {
if (geom[j].op != VT_LINETO) {
break;
}
}
double area = 0;
for (unsigned k = i; k < j; k++) {
area += geom[k].x * geom[i + ((k - i + 1) % (j - i))].y;
area -= geom[k].y * geom[i + ((k - i + 1) % (j - i))].x;
}
area = area / 2;
if (fabs(area) <= pixel * pixel) {
// printf("area is only %f vs %lld so using square\n", area, pixel * pixel);
*accum_area += area;
if (*accum_area > pixel * pixel) {
// XXX use centroid;
out.push_back(draw(VT_MOVETO, geom[i].x - pixel / 2, geom[i].y - pixel / 2));
out.push_back(draw(VT_LINETO, geom[i].x + pixel / 2, geom[i].y - pixel / 2));
out.push_back(draw(VT_LINETO, geom[i].x + pixel / 2, geom[i].y + pixel / 2));
out.push_back(draw(VT_LINETO, geom[i].x - pixel / 2, geom[i].y + pixel / 2));
out.push_back(draw(VT_LINETO, geom[i].x - pixel / 2, geom[i].y - pixel / 2));
*accum_area -= pixel * pixel;
}
} else {
// printf("area is %f so keeping instead of %lld\n", area, pixel * pixel);
for (unsigned k = i; k <= j && k < geom.size(); k++) {
out.push_back(geom[k]);
}
*reduced = false;
}
i = j - 1;
} else {
fprintf(stderr, "how did we get here with %d in %d?\n", geom[i].op, (int) geom.size());
for (unsigned n = 0; n < geom.size(); n++) {
fprintf(stderr, "%d/%lld/%lld ", geom[n].op, geom[n].x, geom[n].y);
}
fprintf(stderr, "\n");
out.push_back(geom[i]);
}
}
return out;
}
drawvec clip_point(drawvec &geom, int z, int detail, long long buffer) {
drawvec out;
unsigned i;
long long min = 0;
long long area = 0xFFFFFFFF;
if (z != 0) {
area = 1LL << (32 - z);
min -= buffer * area / 256;
area += buffer * area / 256;
}
for (i = 0; i < geom.size(); i++) {
if (geom[i].x >= min && geom[i].y >= min && geom[i].x <= area && geom[i].y <= area) {
out.push_back(geom[i]);
}
}
return out;
}
int quick_check(long long *bbox, int z, int detail, long long buffer) {
long long min = 0;
long long area = 0xFFFFFFFF;
if (z != 0) {
area = 1LL << (32 - z);
min -= buffer * area / 256;
area += buffer * area / 256;
}
// bbox entirely outside the tile
if (bbox[0] > area || bbox[1] > area) {
return 0;
}
if (bbox[2] < min || bbox[3] < min) {
return 0;
}
// bbox entirely within the tile
if (bbox[0] > min && bbox[1] > min && bbox[2] < area && bbox[3] < area) {
return 1;
}
// some overlap of edge
return 2;
}
drawvec clip_lines(drawvec &geom, int z, int detail, long long buffer) {
drawvec out;
unsigned i;
long long min = 0;
long long area = 0xFFFFFFFF;
if (z != 0) {
area = 1LL << (32 - z);
min -= buffer * area / 256;
area += buffer * area / 256;
}
for (i = 0; i < geom.size(); i++) {
if (i > 0 && (geom[i - 1].op == VT_MOVETO || geom[i - 1].op == VT_LINETO) && geom[i].op == VT_LINETO) {
double x1 = geom[i - 1].x;
double y1 = geom[i - 1].y;
double x2 = geom[i - 0].x;
double y2 = geom[i - 0].y;
int c = clip(&x1, &y1, &x2, &y2, min, min, area, area);
if (c > 1) { // clipped
out.push_back(draw(VT_MOVETO, x1, y1));
out.push_back(draw(VT_LINETO, x2, y2));
out.push_back(draw(VT_MOVETO, geom[i].x, geom[i].y));
} else if (c == 1) { // unchanged
out.push_back(geom[i]);
} else { // clipped away entirely
out.push_back(draw(VT_MOVETO, geom[i].x, geom[i].y));
}
} else {
out.push_back(geom[i]);
}
}
return out;
}
static double square_distance_from_line(long long point_x, long long point_y, long long segA_x, long long segA_y, long long segB_x, long long segB_y) {
double p2x = segB_x - segA_x;
double p2y = segB_y - segA_y;
double something = p2x * p2x + p2y * p2y;
double u = 0 == something ? 0 : ((point_x - segA_x) * p2x + (point_y - segA_y) * p2y) / something;
if (u > 1) {
u = 1;
} else if (u < 0) {
u = 0;
}
double x = segA_x + u * p2x;
double y = segA_y + u * p2y;
double dx = x - point_x;
double dy = y - point_y;
return dx * dx + dy * dy;
}
// https://github.com/Project-OSRM/osrm-backend/blob/733d1384a40f/Algorithms/DouglasePeucker.cpp
static void douglas_peucker(drawvec &geom, int start, int n, double e) {
e = e * e;
std::stack<int> recursion_stack;
{
int left_border = 0;
int right_border = 1;
// Sweep linearly over the array and identify those ranges that need to be checked
do {
if (geom[start + right_border].necessary) {
recursion_stack.push(left_border);
recursion_stack.push(right_border);
left_border = right_border;
}
++right_border;
} while (right_border < n);
}
while (!recursion_stack.empty()) {
// pop next element
int second = recursion_stack.top();
recursion_stack.pop();
int first = recursion_stack.top();
recursion_stack.pop();
double max_distance = -1;
int farthest_element_index = second;
// find index idx of element with max_distance
int i;
for (i = first + 1; i < second; i++) {
double temp_dist = square_distance_from_line(geom[start + i].x, geom[start + i].y, geom[start + first].x, geom[start + first].y, geom[start + second].x, geom[start + second].y);
double distance = fabs(temp_dist);
if (distance > e && distance > max_distance) {
farthest_element_index = i;
max_distance = distance;
}
}
if (max_distance > e) {
// mark idx as necessary
geom[start + farthest_element_index].necessary = 1;
if (1 < farthest_element_index - first) {
recursion_stack.push(first);
recursion_stack.push(farthest_element_index);
}
if (1 < second - farthest_element_index) {
recursion_stack.push(farthest_element_index);
recursion_stack.push(second);
}
}
}
}
drawvec simplify_lines(drawvec &geom, int z, int detail) {
int res = 1 << (32 - detail - z);
unsigned i;
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
geom[i].necessary = 1;
} else if (geom[i].op == VT_LINETO) {
geom[i].necessary = 0;
} else {
geom[i].necessary = 1;
}
}
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
unsigned j;
for (j = i + 1; j < geom.size(); j++) {
if (geom[j].op != VT_LINETO) {
break;
}
}
geom[i].necessary = 1;
geom[j - 1].necessary = 1;
douglas_peucker(geom, i, j - i, res);
i = j - 1;
}
}
drawvec out;
for (i = 0; i < geom.size(); i++) {
if (geom[i].necessary) {
out.push_back(geom[i]);
}
}
return out;
}
drawvec reorder_lines(drawvec &geom) {
// Only reorder simple linestrings with a single moveto
if (geom.size() == 0) {
return geom;
}
unsigned i;
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_MOVETO) {
if (i != 0) {
return geom;
}
} else if (geom[i].op == VT_LINETO) {
if (i == 0) {
return geom;
}
} else {
return geom;
}
}
// Reorder anything that goes up and to the left
// instead of down and to the right
// so that it will coalesce better
unsigned long long l1 = encode(geom[0].x, geom[0].y);
unsigned long long l2 = encode(geom[geom.size() - 1].x, geom[geom.size() - 1].y);
if (l1 > l2) {
drawvec out;
for (i = 0; i < geom.size(); i++) {
out.push_back(geom[geom.size() - 1 - i]);
}
out[0].op = VT_MOVETO;
out[out.size() - 1].op = VT_LINETO;
return out;
}
return geom;
}
drawvec fix_polygon(drawvec &geom) {
int outer = 1;
drawvec out;
unsigned i;
for (i = 0; i < geom.size(); i++) {
if (geom[i].op == VT_CLOSEPATH) {
outer = 1;
} else if (geom[i].op == VT_MOVETO) {
// Find the end of the ring
unsigned j;
for (j = i + 1; j < geom.size(); j++) {
if (geom[j].op != VT_LINETO) {
break;
}
}
// Make a temporary copy of the ring.
// Close it if it isn't closed.
drawvec ring;
for (unsigned a = i; a < j; a++) {
ring.push_back(geom[a]);
}
if (j - i != 0 && (ring[0].x != ring[j - i - 1].x || ring[0].y != ring[j - i - 1].y)) {
ring.push_back(ring[0]);
}
// Reverse ring if winding order doesn't match
// inner/outer expectation
double area = 0;
for (unsigned k = 0; k < ring.size(); k++) {
area += (long double) ring[k].x * (long double) ring[(k + 1) % ring.size()].y;
area -= (long double) ring[k].y * (long double) ring[(k + 1) % ring.size()].x;
}
if ((area > 0) != outer) {
drawvec tmp;
for (int a = ring.size() - 1; a >= 0; a--) {
tmp.push_back(ring[a]);
}
ring = tmp;
}
// Copy ring into output, fixing the moveto/lineto ops if necessary because of
// reversal or closing
for (unsigned a = 0; a < ring.size(); a++) {
if (a == 0) {
out.push_back(draw(VT_MOVETO, ring[a].x, ring[a].y));
} else {
out.push_back(draw(VT_LINETO, ring[a].x, ring[a].y));
}
}
// Next ring or polygon begins on the non-lineto that ended this one
// and is not an outer ring unless there is a terminator first
i = j - 1;
outer = 0;
} else {
fprintf(stderr, "Internal error: polygon ring begins with %d, not moveto\n", geom[i].op);
exit(EXIT_FAILURE);
}
}
return out;
}

1192
geometry.cpp Normal file

File diff suppressed because it is too large


@ -1,29 +0,0 @@
struct draw {
signed char op;
long long x;
long long y;
int necessary;
draw(int op, long long x, long long y) {
this->op = op;
this->x = x;
this->y = y;
}
draw() {
}
};
typedef std::vector<draw> drawvec;
drawvec decode_geometry(char **meta, int z, unsigned tx, unsigned ty, int detail, long long *bbox);
void to_tile_scale(drawvec &geom, int z, int detail);
drawvec remove_noop(drawvec geom, int type, int shift);
drawvec clip_point(drawvec &geom, int z, int detail, long long buffer);
drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool clip);
drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double *accum_area);
drawvec clip_lines(drawvec &geom, int z, int detail, long long buffer);
int quick_check(long long *bbox, int z, int detail, long long buffer);
drawvec simplify_lines(drawvec &geom, int z, int detail);
drawvec reorder_lines(drawvec &geom);
drawvec fix_polygon(drawvec &geom);

78
geometry.hpp Normal file

@ -0,0 +1,78 @@
#ifndef GEOMETRY_HPP
#define GEOMETRY_HPP
#include <vector>
#include <sqlite3.h>
#define VT_POINT 1
#define VT_LINE 2
#define VT_POLYGON 3
#define VT_END 0
#define VT_MOVETO 1
#define VT_LINETO 2
#define VT_CLOSEPATH 7
// The bitfield is to make sizeof(draw) be 16 instead of 24
// at the cost, apparently, of a 0.7% increase in running time
// for packing and unpacking.
struct draw {
long long x : 40;
signed char op;
long long y : 40;
signed char necessary;
draw(int nop, long long nx, long long ny) {
this->op = nop;
this->x = nx;
this->y = ny;
this->necessary = 0;
}
draw() {
this->op = 0;
this->x = 0;
this->y = 0;
this->necessary = 0;
}
bool operator<(draw const &s) const {
if (y < s.y || (y == s.y && x < s.x)) {
return true;
} else {
return false;
}
}
bool operator==(draw const &s) const {
return y == s.y && x == s.x;
}
bool operator!=(draw const &s) const {
return y != s.y || x != s.x;
}
};
typedef std::vector<draw> drawvec;
drawvec decode_geometry(FILE *meta, long long *geompos, int z, unsigned tx, unsigned ty, long long *bbox, unsigned initial_x, unsigned initial_y);
void to_tile_scale(drawvec &geom, int z, int detail);
drawvec remove_noop(drawvec geom, int type, int shift);
drawvec clip_point(drawvec &geom, int z, long long buffer);
drawvec clean_or_clip_poly(drawvec &geom, int z, int detail, int buffer, bool clip);
drawvec simple_clip_poly(drawvec &geom, int z, int buffer);
drawvec close_poly(drawvec &geom);
drawvec reduce_tiny_poly(drawvec &geom, int z, int detail, bool *reduced, double *accum_area);
drawvec clip_lines(drawvec &geom, int z, long long buffer);
drawvec stairstep(drawvec &geom, int z, int detail);
bool point_within_tile(long long x, long long y, int z, long long buffer);
int quick_check(long long *bbox, int z, long long buffer);
drawvec simplify_lines(drawvec &geom, int z, int detail, bool mark_tile_bounds, double simplification, size_t retain);
drawvec reorder_lines(drawvec &geom);
drawvec fix_polygon(drawvec &geom);
std::vector<drawvec> chop_polygon(std::vector<drawvec> &geoms);
void check_polygon(drawvec &geom, drawvec &before);
double get_area(drawvec &geom, size_t i, size_t j);
double get_mp_area(drawvec &geom);
#endif
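
The size claim in the bitfield comment could be checked at compile time; this is a sketch (not in the diff) and an assumption about how the compiler packs the 40-bit fields, not a guarantee:

#include "geometry.hpp"

static_assert(sizeof(draw) == 16, "draw is expected to pack into 16 bytes, not 24");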


@ -1,3 +1,4 @@
#define _GNU_SOURCE // for asprintf()
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
@ -7,8 +8,12 @@
#define BUFFER 10000
json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void *source) {
json_pull *json_begin(ssize_t (*read)(struct json_pull *, char *buffer, size_t n), void *source) {
json_pull *j = malloc(sizeof(json_pull));
if (j == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
j->error = NULL;
j->line = 1;
@ -17,40 +22,45 @@ json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void
j->read = read;
j->source = source;
j->buffer = malloc(BUFFER);
j->buffer_head = 0;
j->buffer_tail = 0;
j->buffer = malloc(BUFFER);
if (j->buffer == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
return j;
}
static inline int peek(json_pull *j) {
if (j->buffer_head < j->buffer_tail) {
return j->buffer[j->buffer_head];
return (unsigned char) j->buffer[j->buffer_head];
} else {
j->buffer_head = 0;
j->buffer_tail = j->read(j, j->buffer, BUFFER);
if (j->buffer_head >= j->buffer_tail) {
return EOF;
}
return j->buffer[j->buffer_head];
return (unsigned char) j->buffer[j->buffer_head];
}
}
static inline int next(json_pull *j) {
if (j->buffer_head < j->buffer_tail) {
return j->buffer[j->buffer_head++];
return (unsigned char) j->buffer[j->buffer_head++];
} else {
j->buffer_head = 0;
j->buffer_tail = j->read(j, j->buffer, BUFFER);
if (j->buffer_head >= j->buffer_tail) {
return EOF;
}
return j->buffer[j->buffer_head++];
return (unsigned char) j->buffer[j->buffer_head++];
}
}
static int read_file(json_pull *j, char *buffer, int n) {
static ssize_t read_file(json_pull *j, char *buffer, size_t n) {
return fread(buffer, 1, n, j->source);
}
@ -58,24 +68,25 @@ json_pull *json_begin_file(FILE *f) {
return json_begin(read_file, f);
}
static int read_string(json_pull *j, char *buffer, int n) {
char *cp = j->source;
int out = 0;
static ssize_t read_string(json_pull *j, char *buffer, size_t n) {
const char *cp = j->source;
size_t out = 0;
while (out < n && cp[out] != '\0') {
buffer[out] = cp[out];
out++;
}
j->source = cp + out;
j->source = (void *) (cp + out);
return out;
}
json_pull *json_begin_string(char *s) {
return json_begin(read_string, s);
json_pull *json_begin_string(const char *s) {
return json_begin(read_string, (void *) s);
}
void json_end(json_pull *p) {
json_free(p->root);
free(p->buffer);
free(p);
}
@ -90,28 +101,41 @@ static inline int read_wrap(json_pull *j) {
return c;
}
#define SIZE_FOR(i) (((i) + 31) & ~31)
#define SIZE_FOR(i, size) ((size_t)((((i) + 31) & ~31) * size))
static json_object *fabricate_object(json_object *parent, json_type type) {
static json_object *fabricate_object(json_pull *jp, json_object *parent, json_type type) {
json_object *o = malloc(sizeof(struct json_object));
if (o == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
o->type = type;
o->parent = parent;
o->array = NULL;
o->keys = NULL;
o->values = NULL;
o->length = 0;
o->parser = jp;
return o;
}
static json_object *add_object(json_pull *j, json_type type) {
json_object *c = j->container;
json_object *o = fabricate_object(c, type);
json_object *o = fabricate_object(j, c, type);
if (c != NULL) {
if (c->type == JSON_ARRAY) {
if (c->expect == JSON_ITEM) {
if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) {
c->array = realloc(c->array, SIZE_FOR(c->length + 1) * sizeof(json_object *));
if (SIZE_FOR(c->length + 1, sizeof(json_object *)) != SIZE_FOR(c->length, sizeof(json_object *))) {
if (SIZE_FOR(c->length + 1, sizeof(json_object *)) < SIZE_FOR(c->length, sizeof(json_object *))) {
fprintf(stderr, "Array size overflow\n");
exit(EXIT_FAILURE);
}
c->array = realloc(c->array, SIZE_FOR(c->length + 1, sizeof(json_object *)));
if (c->array == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
}
c->array[c->length++] = o;
@ -132,9 +156,17 @@ static json_object *add_object(json_pull *j, json_type type) {
return NULL;
}
if (SIZE_FOR(c->length + 1) != SIZE_FOR(c->length)) {
c->keys = realloc(c->keys, SIZE_FOR(c->length + 1) * sizeof(json_object *));
c->values = realloc(c->values, SIZE_FOR(c->length + 1) * sizeof(json_object *));
if (SIZE_FOR(c->length + 1, sizeof(json_object *)) != SIZE_FOR(c->length, sizeof(json_object *))) {
if (SIZE_FOR(c->length + 1, sizeof(json_object *)) < SIZE_FOR(c->length, sizeof(json_object *))) {
fprintf(stderr, "Hash size overflow\n");
exit(EXIT_FAILURE);
}
c->keys = realloc(c->keys, SIZE_FOR(c->length + 1, sizeof(json_object *)));
c->values = realloc(c->values, SIZE_FOR(c->length + 1, sizeof(json_object *)));
if (c->keys == NULL || c->values == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
}
c->keys[c->length] = o;
@ -148,6 +180,10 @@ static json_object *add_object(json_pull *j, json_type type) {
}
}
} else {
if (j->root != NULL) {
json_free(j->root);
}
j->root = o;
}
@ -159,7 +195,7 @@ json_object *json_hash_get(json_object *o, const char *s) {
return NULL;
}
int i;
size_t i;
for (i = 0; i < o->length; i++) {
if (o->keys[i] != NULL && o->keys[i]->type == JSON_STRING) {
if (strcmp(o->keys[i]->string, s) == 0) {
@ -173,27 +209,64 @@ json_object *json_hash_get(json_object *o, const char *s) {
struct string {
char *buf;
int n;
int nalloc;
size_t n;
size_t nalloc;
};
static void string_init(struct string *s) {
s->nalloc = 500;
s->buf = malloc(s->nalloc);
if (s->buf == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
s->n = 0;
s->buf[0] = '\0';
}
static void string_append(struct string *s, char c) {
if (s->n + 2 >= s->nalloc) {
size_t prev = s->nalloc;
s->nalloc += 500;
if (s->nalloc <= prev) {
fprintf(stderr, "String size overflowed\n");
exit(EXIT_FAILURE);
}
s->buf = realloc(s->buf, s->nalloc);
if (s->buf == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
}
s->buf[s->n++] = c;
s->buf[s->n] = '\0';
}
static void string_append_string(struct string *s, char *add) {
size_t len = strlen(add);
if (s->n + len + 1 >= s->nalloc) {
size_t prev = s->nalloc;
s->nalloc += 500 + len;
if (s->nalloc <= prev) {
fprintf(stderr, "String size overflowed\n");
exit(EXIT_FAILURE);
}
s->buf = realloc(s->buf, s->nalloc);
if (s->buf == NULL) {
perror("Out of memory");
exit(EXIT_FAILURE);
}
}
for (; *add != '\0'; add++) {
s->buf[s->n++] = *add;
}
s->buf[s->n] = '\0';
}
static void string_free(struct string *s) {
free(s->buf);
}
@ -203,6 +276,10 @@ json_object *json_read_separators(json_pull *j, json_separator_callback cb, void
// In case there is an error at the top level
if (j->container == NULL) {
if (j->root != NULL) {
json_free(j->root);
}
j->root = NULL;
}
@ -218,7 +295,23 @@ again:
return NULL;
}
} while (c == ' ' || c == '\t' || c == '\r' || c == '\n');
// Byte-order mark
if (c == 0xEF) {
int c2 = peek(j);
if (c2 == 0xBB) {
c2 = read_wrap(j);
c2 = peek(j);
if (c2 == 0xBF) {
c2 = read_wrap(j);
c = ' ';
continue;
}
}
j->error = "Corrupt byte-order mark found";
return NULL;
}
} while (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == 0x1E);
/////////////////////////// Arrays
@ -307,6 +400,32 @@ again:
return add_object(j, JSON_NULL);
}
/////////////////////////// NaN
if (c == 'N') {
if (read_wrap(j) != 'a' || read_wrap(j) != 'N') {
j->error = "Found misspelling of NaN";
return NULL;
}
j->error = "JSON does not allow NaN";
return NULL;
}
/////////////////////////// Infinity
if (c == 'I') {
if (read_wrap(j) != 'n' || read_wrap(j) != 'f' || read_wrap(j) != 'i' ||
read_wrap(j) != 'n' || read_wrap(j) != 'i' || read_wrap(j) != 't' ||
read_wrap(j) != 'y') {
j->error = "Found misspelling of Infinity";
return NULL;
}
j->error = "JSON does not allow Infinity";
return NULL;
}
/////////////////////////// True
if (c == 't') {
@ -401,6 +520,11 @@ again:
string_append(&val, read_wrap(j));
c = peek(j);
if (c < '0' || c > '9') {
j->error = "Decimal point without digits";
string_free(&val);
return NULL;
}
while (c >= '0' && c <= '9') {
string_append(&val, read_wrap(j));
c = peek(j);
@ -472,6 +596,11 @@ again:
int i;
for (i = 0; i < 4; i++) {
hex[i] = read_wrap(j);
if (hex[i] < '0' || (hex[i] > '9' && hex[i] < 'A') || (hex[i] > 'F' && hex[i] < 'a') || hex[i] > 'f') {
j->error = "Invalid \\u hex character";
string_free(&val);
return NULL;
}
}
unsigned long ch = strtoul(hex, NULL, 16);
if (ch <= 0x7F) {
@ -489,10 +618,19 @@ again:
string_free(&val);
return NULL;
}
} else if (c < ' ') {
j->error = "Found control character in string";
string_free(&val);
return NULL;
} else {
string_append(&val, c);
}
}
if (c == EOF) {
j->error = "String without closing quote mark";
string_free(&val);
return NULL;
}
json_object *s = add_object(j, JSON_STRING);
if (s != NULL) {
@ -525,7 +663,7 @@ json_object *json_read_tree(json_pull *p) {
}
void json_free(json_object *o) {
int i;
size_t i;
if (o == NULL) {
return;
@ -535,7 +673,7 @@ void json_free(json_object *o) {
if (o->type == JSON_ARRAY) {
json_object **a = o->array;
int n = o->length;
size_t n = o->length;
o->array = NULL;
o->length = 0;
@ -548,7 +686,7 @@ void json_free(json_object *o) {
} else if (o->type == JSON_HASH) {
json_object **k = o->keys;
json_object **v = o->values;
int n = o->length;
size_t n = o->length;
o->keys = NULL;
o->values = NULL;
@ -570,13 +708,30 @@ void json_free(json_object *o) {
free(o);
}
static void json_disconnect_parser(json_object *o) {
if (o->type == JSON_HASH) {
size_t i;
for (i = 0; i < o->length; i++) {
json_disconnect_parser(o->keys[i]);
json_disconnect_parser(o->values[i]);
}
} else if (o->type == JSON_ARRAY) {
size_t i;
for (i = 0; i < o->length; i++) {
json_disconnect_parser(o->array[i]);
}
}
o->parser = NULL;
}
void json_disconnect(json_object *o) {
// Expunge references to this as an array element
// or a hash key or value.
if (o->parent != NULL) {
if (o->parent->type == JSON_ARRAY) {
int i;
size_t i;
for (i = 0; i < o->parent->length; i++) {
if (o->parent->array[i] == o) {
@ -591,15 +746,15 @@ void json_disconnect(json_object *o) {
}
if (o->parent->type == JSON_HASH) {
int i;
size_t i;
for (i = 0; i < o->parent->length; i++) {
if (o->parent->keys[i] == o) {
o->parent->keys[i] = fabricate_object(o->parent, JSON_NULL);
o->parent->keys[i] = fabricate_object(o->parser, o->parent, JSON_NULL);
break;
}
if (o->parent->values[i] == o) {
o->parent->values[i] = fabricate_object(o->parent, JSON_NULL);
o->parent->values[i] = fabricate_object(o->parser, o->parent, JSON_NULL);
break;
}
}
@ -619,5 +774,88 @@ void json_disconnect(json_object *o) {
}
}
if (o->parser != NULL && o->parser->root == o) {
o->parser->root = NULL;
}
json_disconnect_parser(o);
o->parent = NULL;
}
static void json_print_one(struct string *val, json_object *o) {
if (o == NULL) {
string_append_string(val, "...");
} else if (o->type == JSON_STRING) {
string_append(val, '\"');
char *cp;
for (cp = o->string; *cp != '\0'; cp++) {
if (*cp == '\\' || *cp == '"') {
string_append(val, '\\');
string_append(val, *cp);
} else if (*cp >= 0 && *cp < ' ') {
char *s;
if (asprintf(&s, "\\u%04x", *cp) >= 0) {
string_append_string(val, s);
free(s);
}
} else {
string_append(val, *cp);
}
}
string_append(val, '\"');
} else if (o->type == JSON_NUMBER) {
string_append_string(val, o->string);
} else if (o->type == JSON_NULL) {
string_append_string(val, "null");
} else if (o->type == JSON_TRUE) {
string_append_string(val, "true");
} else if (o->type == JSON_FALSE) {
string_append_string(val, "false");
} else if (o->type == JSON_HASH) {
string_append(val, '}');
} else if (o->type == JSON_ARRAY) {
string_append(val, ']');
}
}
static void json_print(struct string *val, json_object *o) {
if (o == NULL) {
// Hash value in incompletely read hash
string_append_string(val, "...");
} else if (o->type == JSON_HASH) {
string_append(val, '{');
size_t i;
for (i = 0; i < o->length; i++) {
json_print(val, o->keys[i]);
string_append(val, ':');
json_print(val, o->values[i]);
if (i + 1 < o->length) {
string_append(val, ',');
}
}
string_append(val, '}');
} else if (o->type == JSON_ARRAY) {
string_append(val, '[');
size_t i;
for (i = 0; i < o->length; i++) {
json_print(val, o->array[i]);
if (i + 1 < o->length) {
string_append(val, ',');
}
}
string_append(val, ']');
} else {
json_print_one(val, o);
}
}
char *json_stringify(json_object *o) {
struct string val;
string_init(&val);
json_print(&val, o);
return val.buf;
}


@ -1,3 +1,10 @@
#ifndef JSONPULL_H
#define JSONPULL_H
#ifdef __cplusplus
extern "C" {
#endif
typedef enum json_type {
// These types can be returned by json_read()
JSON_HASH,
@ -21,6 +28,7 @@ typedef enum json_type {
typedef struct json_object {
json_type type;
struct json_object *parent;
struct json_pull *parser;
char *string;
double number;
@ -28,7 +36,7 @@ typedef struct json_object {
struct json_object **array;
struct json_object **keys;
struct json_object **values;
int length;
size_t length;
int expect;
} json_object;
@ -37,20 +45,20 @@ typedef struct json_pull {
char *error;
int line;
int (*read)(struct json_pull *, char *buf, int n);
ssize_t (*read)(struct json_pull *, char *buf, size_t n);
void *source;
char *buffer;
int buffer_tail;
int buffer_head;
ssize_t buffer_tail;
ssize_t buffer_head;
json_object *container;
json_object *root;
} json_pull;
json_pull *json_begin_file(FILE *f);
json_pull *json_begin_string(char *s);
json_pull *json_begin_string(const char *s);
json_pull *json_begin(int (*read)(struct json_pull *, char *buffer, int n), void *source);
json_pull *json_begin(ssize_t (*read)(struct json_pull *, char *buffer, size_t n), void *source);
void json_end(json_pull *p);
typedef void (*json_separator_callback)(json_type type, json_pull *j, void *state);
@ -62,3 +70,11 @@ void json_free(json_object *j);
void json_disconnect(json_object *j);
json_object *json_hash_get(json_object *o, const char *s);
char *json_stringify(json_object *o);
#ifdef __cplusplus
}
#endif
#endif

2725
main.cpp Normal file

File diff suppressed because it is too large

30
main.hpp Normal file
View File

@ -0,0 +1,30 @@
#ifndef MAIN_HPP
#define MAIN_HPP
#include <stddef.h>
struct index {
long long start;
long long end;
unsigned long long index;
short segment;
unsigned short t : 2;
unsigned long long seq : (64 - 18); // pack with segment and t to stay in 32 bytes
};
void checkdisk(struct reader *r, int nreader);
extern int geometry_scale;
extern int quiet;
extern size_t CPUS;
extern size_t TEMP_FILES;
extern size_t max_tile_size;
int mkstemp_cloexec(char *name);
FILE *fopen_oflag(const char *name, const char *mode, int oflag);
#define MAX_ZOOM 24
#endif

View File

@ -1,10 +1,10 @@
.TH tippecanoe
.PP
Builds vector tilesets
\[la]https://www.mapbox.com/developers/vector-tiles/\[ra] from large collections of GeoJSON
\[la]http://geojson.org/\[ra]
features. This is a tool for making maps from huge datasets
\[la]MADE_WITH.md\[ra]\&.
Builds vector tilesets \[la]https://www.mapbox.com/developers/vector-tiles/\[ra] from large (or small) collections of GeoJSON \[la]http://geojson.org/\[ra] or Geobuf \[la]https://github.com/mapbox/geobuf\[ra] features,
like these \[la]MADE_WITH.md\[ra]\&.
.PP
[Build Status](https://travis\-ci.org/mapbox/tippecanoe.svg) \[la]https://travis-ci.org/mapbox/tippecanoe\[ra]
[Coverage Status](https://coveralls.io/repos/mapbox/tippecanoe/badge.svg?branch=master&service=github) \[la]https://coveralls.io/github/mapbox/tippecanoe?branch=master\[ra]
.SH Intent
.PP
The goal of Tippecanoe is to enable making a scale\-independent view of your data,
@ -13,8 +13,7 @@ the density and texture of the data rather than a simplification from dropping
supposedly unimportant features or clustering or aggregating them.
.PP
If you give it all of OpenStreetMap and zoom out, it should give you back
something that looks like "All Streets
\[la]http://benfry.com/allstreets/map5.html\[ra]"
something that looks like "All Streets \[la]http://benfry.com/allstreets/map5.html\[ra]"
rather than something that looks like an Interstate road atlas.
.PP
If you give it all the building footprints in Los Angeles and zoom out
@ -27,8 +26,7 @@ see the shape and relative popularity of every point of interest and every
significant travel corridor.
.SH Installation
.PP
The easiest way to install tippecanoe on OSX is with Homebrew
\[la]http://brew.sh/\[ra]:
The easiest way to install tippecanoe on OSX is with Homebrew \[la]http://brew.sh/\[ra]:
.PP
.RS
.nf
@ -39,7 +37,7 @@ $ brew install tippecanoe
.PP
.RS
.nf
$ tippecanoe \-o file.mbtiles [file.json ...]
$ tippecanoe \-o file.mbtiles [options] [file.json file.geobuf ...]
.fi
.RE
.PP
@ -50,81 +48,27 @@ The GeoJSON features need not be wrapped in a FeatureCollection.
You can concatenate multiple GeoJSON features or files together,
and it will parse out the features and ignore whatever other objects
it encounters.
.SH Options
.SS Naming
.SH Try this first
.PP
If you aren't sure what options to use, try this:
.PP
.RS
.IP \(bu 2
\-l \fIname\fP: Layer name (default "file" if source is file.json or output is file.mbtiles). Only works if there is only one layer.
.IP \(bu 2
\-n \fIname\fP: Human\-readable name (default file.json)
.nf
$ tippecanoe \-o out.mbtiles \-zg \-\-drop\-densest\-as\-needed in.geojson
.fi
.RE
.SS File control
.RS
.IP \(bu 2
\-o \fIfile\fP\&.mbtiles: Name the output file.
.IP \(bu 2
\-f: Delete the mbtiles file if it already exists instead of giving an error
.IP \(bu 2
\-t \fIdirectory\fP: Put the temporary files in \fIdirectory\fP\&.
.RE
.SS Zoom levels and resolution
.RS
.IP \(bu 2
\-z \fIzoom\fP: Base (maxzoom) zoom level (default 14)
.IP \(bu 2
\-Z \fIzoom\fP: Lowest (minzoom) zoom level (default 0)
.IP \(bu 2
\-d \fIdetail\fP: Detail at base zoom level (default 12 at \-z14 or higher, or 13 at \-z13 or lower. Detail beyond 13 has rendering problems with Mapbox GL.)
.IP \(bu 2
\-D \fIdetail\fP: Detail at lower zoom levels (default 10, for tile resolution of 1024)
.IP \(bu 2
\-m \fIdetail\fP: Minimum detail that it will try if tiles are too big at regular detail (default 7)
.IP \(bu 2
\-b \fIpixels\fP: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"\-\-1/256th of the tile width or height. (default 5)
.RE
.SS Properties
.RS
.IP \(bu 2
\-x \fIname\fP: Exclude the named properties from all features
.IP \(bu 2
\-y \fIname\fP: Include the named properties in all features, excluding all those not explicitly named
.IP \(bu 2
\-X: Exclude all properties and encode only geometries
.RE
.SS Point simplification
.RS
.IP \(bu 2
\-r \fIrate\fP: Rate at which dots are dropped at lower zoom levels (default 2.5)
.IP \(bu 2
\-g \fIgamma\fP: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
.RE
.SS Doing more
.RS
.IP \(bu 2
\-ac: Coalesce adjacent line and polygon features that have the same properties
.IP \(bu 2
\-ar: Try reversing the directions of lines to make them coalesce and compress better
.IP \(bu 2
\-ao: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce
.IP \(bu 2
\-al: Let "dot" dropping at lower zooms apply to lines too
.RE
.SS Doing less
.RS
.IP \(bu 2
\-ps: Don't simplify lines
.IP \(bu 2
\-pf: Don't limit tiles to 200,000 features
.IP \(bu 2
\-pk: Don't limit tiles to 500K bytes
.IP \(bu 2
\-pd: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries.
.IP \(bu 2
\-pi: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot\-dropping is still geographic, which means it also undoes \-ao).
.IP \(bu 2
\-q: Work quietly instead of reporting progress
.RE
.SH Example
.PP
The \fB\fC\-zg\fR option will make Tippecanoe choose a maximum zoom level that should be
high enough to reflect the precision of the original data. (If it turns out still
not to be as detailed as you want, use \fB\fC\-z\fR manually with a higher number.)
.PP
If the tiles come out too big, the \fB\fC\-\-drop\-densest\-as\-needed\fR option will make
Tippecanoe try dropping what should be the least visible features at each zoom level.
(If it drops too many features, use \fB\fC\-x\fR to leave out some feature attributes that
you didn't really need.)
.SH Examples
.PP
Create a tileset of TIGER roads for Alameda County, to zoom level 13, with a custom layer name and description:
.PP
.RS
.nf
@ -132,15 +76,319 @@ $ tippecanoe \-o alameda.mbtiles \-l alameda \-n "Alameda County from TIGER" \-z
.fi
.RE
.PP
Create a tileset of all TIGER roads, at only zoom level 12, but with higher detail than normal,
with a custom layer name and description, and leaving out the \fB\fCLINEARID\fR and \fB\fCRTTYP\fR attributes:
.PP
.RS
.nf
$ cat tiger/tl_2014_*_roads.json | tippecanoe \-o tiger.mbtiles \-l roads \-n "All TIGER roads, one zoom" \-z12 \-Z12 \-d14 \-x LINEARID \-x RTTYP
.fi
.RE
.SH Options
.PP
There are a lot of options. A lot of the time you won't want to use any of them
other than \fB\fC\-o\fR \fIoutput\fP\fB\fC\&.mbtiles\fR to name the output file, and probably \fB\fC\-f\fR to
delete the file that already exists with that name.
.PP
If you aren't sure what the right maxzoom is for your data, \fB\fC\-zg\fR will guess one for you
based on the density of features.
.PP
If you are mapping point features, you will often want to use \fB\fC\-Bg\fR to automatically choose
a base zoom level for dot dropping. If that doesn't work out for you, try
\fB\fC\-r1 \-\-drop\-fraction\-as\-needed\fR to turn off the normal dot dropping and instead
only drop features if the tiles get too big.
.PP
If you are mapping points or polygons, you will often want to use \fB\fC\-\-drop\-densest\-as\-needed\fR
to drop some of them if necessary to make the low zoom levels work.
.PP
If your features have a lot of attributes, use \fB\fC\-y\fR to keep only the ones you really need.
.PP
If your input is formatted as newline\-delimited GeoJSON, use \fB\fC\-P\fR to make input parsing a lot faster.
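.PP
For example, a single invocation combining these suggestions might look like the following
(the input file name and the \fB\fCname\fR and \fB\fCpopulation\fR attributes are only illustrative;
drop \fB\fC\-P\fR if the input is not newline\-delimited):
.PP
.RS
.nf
$ tippecanoe \-o out.mbtiles \-f \-zg \-\-drop\-densest\-as\-needed \-y name \-y population \-P in.json
.fi
.RE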
.SS Output tileset
.RS
.IP \(bu 2
\fB\fC\-o\fR \fIfile\fP\fB\fC\&.mbtiles\fR or \fB\fC\-\-output=\fR\fIfile\fP\fB\fC\&.mbtiles\fR: Name the output file.
.IP \(bu 2
\fB\fC\-e\fR \fIdirectory\fP or \fB\fC\-\-output\-to\-directory\fR=\fIdirectory\fP: Write tiles to the specified \fIdirectory\fP instead of to an mbtiles file.
.IP \(bu 2
\fB\fC\-f\fR or \fB\fC\-\-force\fR: Delete the mbtiles file if it already exists instead of giving an error
.IP \(bu 2
\fB\fC\-F\fR or \fB\fC\-\-allow\-existing\fR: Proceed (without deleting existing data) if the metadata or tiles table already exists
or if metadata fields can't be set. You probably don't want to use this.
.RE
.SS Tileset description and attribution
.RS
.IP \(bu 2
\fB\fC\-n\fR \fIname\fP or \fB\fC\-\-name=\fR\fIname\fP: Human\-readable name for the tileset (default file.json)
.IP \(bu 2
\fB\fC\-A\fR \fItext\fP or \fB\fC\-\-attribution=\fR\fItext\fP: Attribution (HTML) to be shown with maps that use data from this tileset.
.IP \(bu 2
\fB\fC\-N\fR \fIdescription\fP or \fB\fC\-\-description=\fR\fIdescription\fP: Description for the tileset (default file.mbtiles)
.RE
.SS Input files and layer names
.RS
.IP \(bu 2
\fIname\fP\fB\fC\&.json\fR or \fIname\fP\fB\fC\&.geojson\fR: Read the named GeoJSON input file into a layer called \fIname\fP\&.
.IP \(bu 2
\fIname\fP\fB\fC\&.geobuf\fR: Read the named Geobuf input file into a layer called \fIname\fP\&.
.IP \(bu 2
\fB\fC\-l\fR \fIname\fP or \fB\fC\-\-layer=\fR\fIname\fP: Use the specified layer name instead of deriving a name from the input filename or output tileset. If there are multiple input files
specified, the files are all merged into the single named layer, even if they try to specify individual names with \fB\fC\-L\fR\&.
.IP \(bu 2
\fB\fC\-L\fR \fIname\fP\fB\fC:\fR\fIfile.json\fP or \fB\fC\-\-named\-layer=\fR\fIname\fP\fB\fC:\fR\fIfile.json\fP: Specify layer names for individual files. If your shell supports it, you can use a subshell redirect like \fB\fC\-L\fR \fIname\fP\fB\fC:<(cat dir/*.json)\fR to specify a layer name for the output of streamed input.
.RE
.SS Parallel processing of input
.RS
.IP \(bu 2
\fB\fC\-P\fR or \fB\fC\-\-read\-parallel\fR: Use multiple threads to read different parts of each GeoJSON input file at once.
This will only work if the input is line\-delimited JSON with each Feature on its
own line, because it knows nothing of the top\-level structure around the Features. Spurious "EOF" error
messages may result otherwise.
Performance will be better if the input is a named file that can be mapped into memory
rather than a stream that can only be read sequentially.
.RE
.PP
If the input file begins with the RFC 8142 \[la]https://tools.ietf.org/html/rfc8142\[ra] record separator,
parallel processing of input will be invoked automatically, splitting at record separators rather
than at all newlines.
.PP
Parallel processing will also be automatic if the input file is in Geobuf format.
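.PP
As an illustration (the file name is hypothetical), parallel parsing of a file that already has
one Feature per line can be requested explicitly:
.PP
.RS
.nf
$ tippecanoe \-o out.mbtiles \-zg \-P features.json
.fi
.RE
.PP
Naming the file on the command line, rather than piping it in, lets it be mapped into memory
for better performance.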
.SS Projection of input
.RS
.IP \(bu 2
\fB\fC\-s\fR \fIprojection\fP or \fB\fC\-\-projection=\fR\fIprojection\fP: Specify the projection of the input data. Currently supported are \fB\fCEPSG:4326\fR (WGS84, the default) and \fB\fCEPSG:3857\fR (Web Mercator). In general you should use WGS84 for your input files if at all possible.
.RE
.SS Zoom levels
.RS
.IP \(bu 2
\fB\fC\-z\fR \fIzoom\fP or \fB\fC\-\-maximum\-zoom=\fR\fIzoom\fP: Maxzoom: the highest zoom level for which tiles are generated (default 14)
.IP \(bu 2
\fB\fC\-zg\fR or \fB\fC\-\-maximum\-zoom=g\fR: Guess what is probably a reasonable maxzoom based on the spacing of features.
.IP \(bu 2
\fB\fC\-Z\fR \fIzoom\fP or \fB\fC\-\-minimum\-zoom=\fR\fIzoom\fP: Minzoom: the lowest zoom level for which tiles are generated (default 0)
.IP \(bu 2
\fB\fC\-ae\fR or \fB\fC\-\-extend\-zooms\-if\-still\-dropping\fR: Increase the maxzoom if features are still being dropped at that zoom level.
The detail and simplification options that ordinarily apply only to the maximum zoom level will apply both to the originally
specified maximum zoom and to any levels added beyond that.
.RE
.SS Tile resolution
.RS
.IP \(bu 2
\fB\fC\-d\fR \fIdetail\fP or \fB\fC\-\-full\-detail=\fR\fIdetail\fP: Detail at max zoom level (default 12, for tile resolution of 2^12 = 4096)
.IP \(bu 2
\fB\fC\-D\fR \fIdetail\fP or \fB\fC\-\-low\-detail=\fR\fIdetail\fP: Detail at lower zoom levels (default 12, for tile resolution of 2^12 = 4096)
.IP \(bu 2
\fB\fC\-m\fR \fIdetail\fP or \fB\fC\-\-minimum\-detail=\fR\fIdetail\fP: Minimum detail that it will try if tiles are too big at regular detail (default 7)
.RE
.PP
All internal math is done in terms of a 32\-bit tile coordinate system, so 1/(2^32) of the size of Earth,
or about 1cm, is the smallest distinguishable distance. If \fImaxzoom\fP + \fIdetail\fP > 32, no additional
resolution is obtained beyond what a smaller \fImaxzoom\fP or \fIdetail\fP would give.
.SS Filtering feature attributes
.RS
.IP \(bu 2
\fB\fC\-x\fR \fIname\fP or \fB\fC\-\-exclude=\fR\fIname\fP: Exclude the named properties from all features
.IP \(bu 2
\fB\fC\-y\fR \fIname\fP or \fB\fC\-\-include=\fR\fIname\fP: Include the named properties in all features, excluding all those not explicitly named
.IP \(bu 2
\fB\fC\-X\fR or \fB\fC\-\-exclude\-all\fR: Exclude all properties and encode only geometries
.IP \(bu 2
\fB\fC\-T\fR\fIattribute\fP\fB\fC:\fR\fItype\fP or \fB\fC\-\-attribute\-type=\fR\fIattribute\fP\fB\fC:\fR\fItype\fP: Coerce the named feature \fIattribute\fP to be of the specified \fItype\fP\&.
The \fItype\fP may be \fB\fCstring\fR, \fB\fCfloat\fR, \fB\fCint\fR, or \fB\fCbool\fR\&.
If the type is \fB\fCbool\fR, then original attributes of \fB\fC0\fR (or, if numeric, \fB\fC0.0\fR, etc.), \fB\fCfalse\fR, \fB\fCnull\fR, or the empty string become \fB\fCfalse\fR, and otherwise become \fB\fCtrue\fR\&.
If the type is \fB\fCfloat\fR or \fB\fCint\fR and the original attribute was non\-numeric, it becomes \fB\fC0\fR\&.
If the type is \fB\fCint\fR and the original attribute was floating\-point, it is rounded to the nearest integer.
.IP \(bu 2
\fB\fC\-j\fR \fIfilter\fP or \fB\fC\-\-feature\-filter\fR=\fIfilter\fP: Check features against a per\-layer filter (as defined in the Mapbox GL Style Specification \[la]https://www.mapbox.com/mapbox-gl-js/style-spec/#types-filter\[ra]) and only include those that match. Any features in layers that have no filter specified will be passed through. Filters for the layer \fB\fC"*"\fR apply to all layers.
.IP \(bu 2
\fB\fC\-J\fR \fIfilter\-file\fP or \fB\fC\-\-feature\-filter\-file\fR=\fIfilter\-file\fP: Like \fB\fC\-j\fR, but read the filter from a file.
.RE
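.PP
For example, a sketch of attribute coercion (the \fB\fCpopulation\fR and \fB\fCverified\fR attribute names
are hypothetical):
.PP
.RS
.nf
tippecanoe \-o typed.mbtiles \-zg \-T population:int \-T verified:bool in.geojson
.fi
.RE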
.PP
Example: to find the Natural Earth countries with low \fB\fCscalerank\fR but high \fB\fCLABELRANK\fR:
.PP
.RS
.nf
tippecanoe \-z5 \-o filtered.mbtiles \-j '{ "ne_10m_admin_0_countries": [ "all", [ "<", "scalerank", 3 ], [ ">", "LABELRANK", 5 ] ] }' ne_10m_admin_0_countries.geojson
.fi
.RE
.SS Dropping a fixed fraction of features by zoom level
.RS
.IP \(bu 2
\fB\fC\-r\fR \fIrate\fP or \fB\fC\-\-drop\-rate=\fR\fIrate\fP: Rate at which dots are dropped at zoom levels below basezoom (default 2.5).
If you use \fB\fC\-rg\fR, it will guess a drop rate that will keep at most 50,000 features in the densest tile.
You can also specify a marker\-width with \fB\fC\-rg\fR\fIwidth\fP to allow fewer features in the densest tile to
compensate for the larger marker, or \fB\fC\-rf\fR\fInumber\fP to allow at most \fInumber\fP features in the densest tile.
.IP \(bu 2
\fB\fC\-B\fR \fIzoom\fP or \fB\fC\-\-base\-zoom=\fR\fIzoom\fP: Base zoom, the level at and above which all points are included in the tiles (default maxzoom).
If you use \fB\fC\-Bg\fR, it will guess a zoom level that will keep at most 50,000 features in the densest tile.
You can also specify a marker\-width with \fB\fC\-Bg\fR\fIwidth\fP to allow fewer features in the densest tile to
compensate for the larger marker, or \fB\fC\-Bf\fR\fInumber\fP to allow at most \fInumber\fP features in the densest tile.
.IP \(bu 2
\fB\fC\-al\fR or \fB\fC\-\-drop\-lines\fR: Let "dot" dropping at lower zooms apply to lines too
.IP \(bu 2
\fB\fC\-ap\fR or \fB\fC\-\-drop\-polygons\fR: Let "dot" dropping at lower zooms apply to polygons too
.RE
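.PP
For example, to guess both the maxzoom and the base zoom for a point layer (the file name is
illustrative):
.PP
.RS
.nf
$ tippecanoe \-o places.mbtiles \-zg \-Bg places.geojson
.fi
.RE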
.SS Dropping a fraction of features to keep under tile size limits
.RS
.IP \(bu 2
\fB\fC\-as\fR or \fB\fC\-\-drop\-densest\-as\-needed\fR: If a tile is too large, try to reduce it to under 500K by increasing the minimum spacing between features. The discovered spacing applies to the entire zoom level.
.IP \(bu 2
\fB\fC\-ad\fR or \fB\fC\-\-drop\-fraction\-as\-needed\fR: Dynamically drop some fraction of features from each zoom level to keep large tiles under the 500K size limit. (This is like \fB\fC\-pd\fR but applies to the entire zoom level, not to each tile.)
.IP \(bu 2
\fB\fC\-an\fR or \fB\fC\-\-drop\-smallest\-as\-needed\fR: Dynamically drop the smallest features (physically smallest: the shortest lines or the smallest polygons) from each zoom level to keep large tiles under the 500K size limit. This option will not work for point features.
.IP \(bu 2
\fB\fC\-aN\fR or \fB\fC\-\-coalesce\-smallest\-as\-needed\fR: Dynamically combine the smallest features (physically smallest: the shortest lines or the smallest polygons) from each zoom level into other nearby features to keep large tiles under the 500K size limit. This option will not work for point features, and will probably not help very much with LineStrings. It is mostly intended for polygons, to maintain the full original area covered by polygons while still reducing the feature count somehow. The attributes of the small polygons are \fInot\fP preserved into the combined features, only their geometry.
.IP \(bu 2
\fB\fC\-pd\fR or \fB\fC\-\-force\-feature\-limit\fR: Dynamically drop some fraction of features from large tiles to keep them under the 500K size limit. It will probably look ugly at the tile boundaries. (This is like \fB\fC\-ad\fR but applies to each tile individually, not to the entire zoom level.) You probably don't want to use this.
.RE
.SS Dropping tightly overlapping features
.RS
.IP \(bu 2
\fB\fC\-g\fR \fIgamma\fP or \fB\fC\-\-gamma=\fR\fIgamma\fP: Rate at which especially dense dots are dropped (default 0, for no effect). A gamma of 2 reduces the number of dots less than a pixel apart to the square root of their original number.
.IP \(bu 2
\fB\fC\-aG\fR or \fB\fC\-\-increase\-gamma\-as\-needed\fR: If a tile is too large, try to reduce it to under 500K by increasing the \fB\fC\-g\fR gamma. The discovered gamma applies to the entire zoom level. You probably want to use \fB\fC\-\-drop\-densest\-as\-needed\fR instead.
.RE
.SS Line and polygon simplification
.RS
.IP \(bu 2
\fB\fC\-S\fR \fIscale\fP or \fB\fC\-\-simplification=\fR\fIscale\fP: Multiply the tolerance for line and polygon simplification by \fIscale\fP\&. The standard tolerance tries to keep
the line or polygon within one tile unit of its proper location. You can probably go up to about 10 without too much visible difference.
.IP \(bu 2
\fB\fC\-ps\fR or \fB\fC\-\-no\-line\-simplification\fR: Don't simplify lines and polygons
.IP \(bu 2
\fB\fC\-pS\fR or \fB\fC\-\-simplify\-only\-low\-zooms\fR: Don't simplify lines and polygons at maxzoom (but do simplify at lower zooms)
.IP \(bu 2
\fB\fC\-pt\fR or \fB\fC\-\-no\-tiny\-polygon\-reduction\fR: Don't combine the area of very small polygons into small squares that represent their combined area.
.RE
.SS Attempts to improve shared polygon boundaries
.RS
.IP \(bu 2
\fB\fC\-ab\fR or \fB\fC\-\-detect\-shared\-borders\fR: In the manner of TopoJSON \[la]https://github.com/mbostock/topojson/wiki/Introduction\[ra], detect borders that are shared between multiple polygons and simplify them identically in each polygon. This takes more time and memory than considering each polygon individually.
.IP \(bu 2
\fB\fC\-aL\fR or \fB\fC\-\-grid\-low\-zooms\fR: At all zoom levels below \fImaxzoom\fP, snap all lines and polygons to a stairstep grid instead of allowing diagonals. You will also want to specify a tile resolution, probably \fB\fC\-D8\fR\&. This option provides a way to display continuous parcel, gridded, or binned data at low zooms without overwhelming the tiles with tiny polygons, since features will either get stretched out to the grid unit or lost entirely, depending on how they happened to be aligned in the original data. You probably don't want to use this.
.RE
.SS Controlling clipping to tile boundaries
.RS
.IP \(bu 2
\fB\fC\-b\fR \fIpixels\fP or \fB\fC\-\-buffer=\fR\fIpixels\fP: Buffer size where features are duplicated from adjacent tiles. Units are "screen pixels"—1/256th of the tile width or height. (default 5)
.IP \(bu 2
\fB\fC\-pc\fR or \fB\fC\-\-no\-clipping\fR: Don't clip features to the size of the tile. If a feature overlaps the tile's bounds or buffer at all, it is included completely. Be careful: this can produce very large tilesets, especially with large polygons.
.IP \(bu 2
\fB\fC\-pD\fR or \fB\fC\-\-no\-duplication\fR: As with \fB\fC\-\-no\-clipping\fR, each feature is included intact instead of cut to tile boundaries. In addition, it is included only in a single tile per zoom level rather than potentially in multiple copies. Clients of the tileset must check adjacent tiles (possibly some distance away) to ensure they have all features.
.RE
.SS Reordering features within each tile
.RS
.IP \(bu 2
\fB\fC\-pi\fR or \fB\fC\-\-preserve\-input\-order\fR: Preserve the original input order of features as the drawing order instead of ordering geographically. (This is implemented as a restoration of the original order at the end, so that dot\-dropping is still geographic, which means it also undoes \fB\fC\-ao\fR).
.IP \(bu 2
\fB\fC\-ao\fR or \fB\fC\-\-reorder\fR: Reorder features to put ones with the same properties in sequence, to try to get them to coalesce. You probably don't want to use this.
.IP \(bu 2
\fB\fC\-ac\fR or \fB\fC\-\-coalesce\fR: Coalesce adjacent line and polygon features that have the same properties. You probably don't want to use this.
.IP \(bu 2
\fB\fC\-ar\fR or \fB\fC\-\-reverse\fR: Try reversing the directions of lines to make them coalesce and compress better. You probably don't want to use this.
.RE
.SS Adding calculated attributes
.RS
.IP \(bu 2
\fB\fC\-ag\fR or \fB\fC\-\-calculate\-feature\-density\fR: Add a new attribute, \fB\fCtippecanoe_feature_density\fR, to each feature, to record how densely features are spaced in that area of the tile. You can use this attribute in the style to produce a glowing effect where points are densely packed. It can range from 0 in the sparsest areas to 255 in the densest.
.RE
.SS Trying to correct bad source geometry
.RS
.IP \(bu 2
\fB\fC\-aw\fR or \fB\fC\-\-detect\-longitude\-wraparound\fR: Detect when adjacent points within a feature jump to the other side of the world, and try to fix the geometry.
.RE
.SS Setting or disabling tile size limits
.RS
.IP \(bu 2
\fB\fC\-M\fR \fIbytes\fP or \fB\fC\-\-maximum\-tile\-bytes=\fR\fIbytes\fP: Use the specified number of \fIbytes\fP as the maximum compressed tile size instead of 500K.
.IP \(bu 2
\fB\fC\-pf\fR or \fB\fC\-\-no\-feature\-limit\fR: Don't limit tiles to 200,000 features
.IP \(bu 2
\fB\fC\-pk\fR or \fB\fC\-\-no\-tile\-size\-limit\fR: Don't limit tiles to 500K bytes
.IP \(bu 2
\fB\fC\-pC\fR or \fB\fC\-\-no\-tile\-compression\fR: Don't compress the PBF vector tile data.
.IP \(bu 2
\fB\fC\-pg\fR or \fB\fC\-\-no\-tile\-stats\fR: Don't generate the \fB\fCtilestats\fR row in the tileset metadata. Uploads without tilestats \[la]https://github.com/mapbox/mapbox-geostats\[ra] will take longer to process.
.RE
.SS Temporary storage
.RS
.IP \(bu 2
\fB\fC\-t\fR \fIdirectory\fP or \fB\fC\-\-temporary\-directory=\fR\fIdirectory\fP: Put the temporary files in \fIdirectory\fP\&.
If you don't specify, it will use \fB\fC/tmp\fR\&.
.RE
.SS Progress indicator
.RS
.IP \(bu 2
\fB\fC\-q\fR or \fB\fC\-\-quiet\fR: Work quietly instead of reporting progress
.IP \(bu 2
\fB\fC\-v\fR or \fB\fC\-\-version\fR: Report Tippecanoe's version number
.RE
.SS Filters
.RS
.IP \(bu 2
\fB\fC\-C\fR \fIcommand\fP or \fB\fC\-\-prefilter=\fR\fIcommand\fP: Specify a shell filter command to be run at the start of assembling each tile
.IP \(bu 2
\fB\fC\-c\fR \fIcommand\fP or \fB\fC\-\-postfilter=\fR\fIcommand\fP: Specify a shell filter command to be run at the end of assembling each tile
.RE
.PP
The pre\- and post\-filter commands allow you to do optional filtering or transformation on the features of each tile
as it is created. They are shell commands, run with the zoom level, X, and Y as the \fB\fC$1\fR, \fB\fC$2\fR, and \fB\fC$3\fR arguments.
Future versions of Tippecanoe may add additional arguments for more context.
.PP
The features are provided to the filter
as a series of newline\-delimited GeoJSON objects on the standard input, and \fB\fCtippecanoe\fR expects to read another
set of GeoJSON features from the filter's standard output.
.PP
The prefilter receives the features at the highest available resolution, before line simplification,
polygon topology repair, gamma calculation, dynamic feature dropping, or other internal processing.
The postfilter receives the features at tile resolution, after simplification, cleaning, and dropping.
.PP
The layer name is provided as part of the \fB\fCtippecanoe\fR element of the feature and must be passed through
to keep the feature in its correct layer. In the case of the prefilter, the \fB\fCtippecanoe\fR element may also
contain \fB\fCindex\fR, \fB\fCsequence\fR, and \fB\fCextent\fR elements, which must be passed through for internal operations like
\fB\fC\-\-drop\-densest\-as\-needed\fR, \fB\fC\-\-drop\-smallest\-as\-needed\fR, and \fB\fC\-\-preserve\-input\-order\fR to work.
.SS Examples:
.RS
.IP \(bu 2
Make a tileset of the Natural Earth countries to zoom level 5, and also copy the GeoJSON features
to files in a \fB\fCtiles/z/x/y.geojson\fR directory hierarchy.
.RE
.PP
.RS
.nf
tippecanoe \-o countries.mbtiles \-z5 \-C 'mkdir \-p tiles/$1/$2; tee tiles/$1/$2/$3.geojson' ne_10m_admin_0_countries.json
.fi
.RE
.RS
.IP \(bu 2
Make a tileset of the Natural Earth countries to zoom level 5, but including only those tiles that
intersect the bounding box of Germany \[la]https://www.flickr.com/places/info/23424829\[ra]\&.
(The \fB\fClimit\-tiles\-to\-bbox\fR script is in the Tippecanoe source directory \[la]filters/limit-tiles-to-bbox\[ra]\&.)
.RE
.PP
.RS
.nf
tippecanoe \-o countries.mbtiles \-z5 \-C './filters/limit\-tiles\-to\-bbox 5.8662 47.2702 15.0421 55.0581 $*' ne_10m_admin_0_countries.json
.fi
.RE
.RS
.IP \(bu 2
Make a tileset of TIGER roads in Tippecanoe County, leaving out all but primary and secondary roads (as classified by TIGER \[la]https://www.census.gov/geo/reference/mtfcc.html\[ra]) below zoom level 11.
.RE
.PP
.RS
.nf
tippecanoe \-o roads.mbtiles \-c 'if [ $1 \-lt 11 ]; then grep "\\"MTFCC\\": \\"S1[12]00\\""; else cat; fi' tl_2016_18157_roads.json
.fi
.RE
.SH Environment
.PP
Tippecanoe ordinarily uses as many parallel threads as the operating system reports there are CPUs available.
You can override this number by setting the \fB\fCTIPPECANOE_MAX_THREADS\fR environment variable.
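.PP
For example, to limit a run to four threads (the file name is illustrative):
.PP
.RS
.nf
$ TIPPECANOE_MAX_THREADS=4 tippecanoe \-o out.mbtiles \-zg in.geojson
.fi
.RE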
.SH GeoJSON extension
.PP
Tippecanoe defines a GeoJSON extension that you can use to specify the minimum and/or maximum zoom level
at which an individual feature will be included in the vector tile dataset being produced.
at which an individual feature will be included in the vector tileset being produced.
If you have a feature like this:
.PP
.RS
@ -159,7 +407,26 @@ If you have a feature like this:
.PP
with a \fB\fCtippecanoe\fR object specifying a \fB\fCmaxzoom\fR of 9 and a \fB\fCminzoom\fR of 4, the feature
will only appear in the vector tiles for zoom levels 4 through 9. Note that the \fB\fCtippecanoe\fR
object belongs to the Feature, not to its \fB\fCproperties\fR\&.
object belongs to the Feature, not to its \fB\fCproperties\fR\&. If you specify a \fB\fCminzoom\fR for a feature,
it will be preserved down to that zoom level even if dot\-dropping with \fB\fC\-r\fR would otherwise have
dropped it.
.PP
You can also specify a layer name in the \fB\fCtippecanoe\fR object, which will take precedence over
the filename or name specified using \fB\fC\-\-layer\fR, like this:
.PP
.RS
.nf
{
"type" : "Feature",
"tippecanoe" : { "layer" : "streets" },
"properties" : { "FULLNAME" : "N Vasco Rd" },
"geometry" : {
"type" : "LineString",
"coordinates" : [ [ \-121.733350, 37.767671 ], [ \-121.733600, 37.767483 ], [ \-121.733131, 37.766952 ] ]
}
}
.fi
.RE
.SH Point styling
.PP
To provide a consistent density gradient as you zoom, the Mapbox Studio style needs to be
@ -167,7 +434,7 @@ coordinated with the base zoom level and dot\-dropping rate. You can use this sh
calculate the appropriate marker\-width at high zoom levels to match the fraction of dots
that were dropped at low zoom levels.
.PP
If you used \fB\fC\-z\fR to change the base zoom level or \fB\fC\-r\fR to change the
If you used \fB\fC\-B\fR or \fB\fC\-z\fR to change the base zoom level or \fB\fC\-r\fR to change the
dot\-dropping rate, replace them in the \fB\fCbasezoom\fR and \fB\fCrate\fR below.
.PP
.RS
@ -176,6 +443,7 @@ awk 'BEGIN {
dotsize = 2; # up to you to decide
basezoom = 14; # tippecanoe \-z 14
rate = 2.5; # tippecanoe \-r 2.5
print " marker\-line\-width: 0;";
print " marker\-ignore\-placement: true;";
print " marker\-allow\-overlap: true;";
@ -183,6 +451,7 @@ awk 'BEGIN {
for (i = basezoom + 1; i <= 22; i++) {
print " [zoom >= " i "] { marker\-width: " (dotsize * exp(log(sqrt(rate)) * (i \- basezoom))) "; }";
}
exit(0);
}'
.fi
@ -192,7 +461,9 @@ awk 'BEGIN {
At every zoom level, line and polygon features are subjected to Douglas\-Peucker
simplification to the resolution of the tile.
.PP
For point features, it drops 1/2.5 of the dots for each zoom level above the base.
For point features, it drops 1/2.5 of the dots for each zoom level above the
point base zoom (which is normally the same as the \fB\fC\-z\fR max zoom, but can be
a different zoom specified with \fB\fC\-B\fR if you have precise but sparse data).
I don't know why 2.5 is the appropriate number, but the densities of many different
data sets fall off at about this same rate. You can use \-r to specify a different rate.
.PP
@ -204,37 +475,28 @@ For line features, it drops any features that are too small to draw at all.
This still leaves the lower zooms too dark (and too dense for the 500K tile limit,
in some places), so I need to figure out an equitable way to throw features away.
.PP
Any polygons that are smaller than a minimum area (currently 4 square subpixels) will
Unless you specify \fB\fC\-\-no\-tiny\-polygon\-reduction\fR,
any polygons that are smaller than a minimum area (currently 4 square subpixels) will
have their probability diffused, so that some of them will be drawn as a square of
this minimum size and others will not be drawn at all, preserving the total area that
all of them should have had together.
.PP
Features in the same tile that share the same type and attributes are coalesced
together into a single geometry. You are strongly encouraged to use \-x to exclude
together into a single geometry if you use \fB\fC\-\-coalesce\fR\&. You are strongly encouraged to use \-x to exclude
any unnecessary properties to reduce wasted file size.
.PP
If a tile is larger than 500K, it will try encoding that tile at progressively
lower resolutions before failing if it still doesn't fit.
.SH Development
.PP
Requires protoc and sqlite3. Rebuilding the manpage
Requires sqlite3 and zlib (should already be installed on MacOS). Rebuilding the manpage
uses md2man (\fB\fCgem install md2man\fR).
.PP
MacOS:
.PP
.RS
.nf
brew install protobuf
.fi
.RE
.PP
Linux:
.PP
.RS
.nf
sudo apt\-get install libprotobuf\-dev
sudo apt\-get install protobuf\-compiler
sudo apt\-get install libsqlite3\-dev
sudo apt\-get install build\-essential libsqlite3\-dev zlib1g\-dev
.fi
.RE
.PP
@ -253,37 +515,122 @@ and perhaps
make install
.fi
.RE
.SH Examples
.PP
Check out some examples of maps made with tippecanoe
\[la]MADE_WITH.md\[ra]
.SH Name
Tippecanoe now requires features from the 2011 C++ standard. If your compiler is older than
that, you will need to install a newer one. On MacOS, updating to the latest Xcode should
get you a new enough version of \fB\fCclang++\fR\&. On Linux, you should be able to upgrade \fB\fCg++\fR with
.PP
The name is a joking reference
\[la]http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too\[ra] to a "tiler" for making map tiles.
.SH tile\-join
.PP
Tile\-join is a tool for joining new attributes from a CSV file to features that
have already been tiled with tippecanoe. It reads the tiles from an existing .mbtiles
file, matches them against the records of the CSV, and writes out a new tileset.
.PP
The options are:
.RS
.IP \(bu 2
\-o \fIout.mbtiles\fP: Write the new tiles to the specified .mbtiles file
.IP \(bu 2
\-f: Remove \fIout.mbtiles\fP if it already exists
.IP \(bu 2
\-c \fImatch.csv\fP: Use \fImatch.csv\fP as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
.IP \(bu 2
\-x \fIkey\fP: Remove attributes of type \fIkey\fP from the output. You can use this to remove the field you are matching against if you no longer need it after joining, or to remove any other attributes you don't want.
.IP \(bu 2
\-i: Only include features that matched the CSV.
.nf
sudo add\-apt\-repository \-y ppa:ubuntu\-toolchain\-r/test
sudo apt\-get update \-y
sudo apt\-get install \-y g++\-5
export CXX=g++\-5
.fi
.RE
.SH Docker Image
.PP
A tippecanoe Docker image can be built from source and executed as a task to
automatically install dependencies and allow tippecanoe to run on any system
supported by Docker.
.PP
.RS
.nf
$ docker build \-t tippecanoe:latest .
$ docker run \-it \-\-rm \\
\-v /tiledata:/data \\
tippecanoe:latest \\
tippecanoe \-\-output=/data/output.mbtiles /data/example.geojson
.fi
.RE
.PP
The commands above will build a Docker image from the source and compile the
latest version. The image supports all tippecanoe flags and options.
.SH Examples
.PP
Check out some examples of maps made with tippecanoe \[la]MADE_WITH.md\[ra]
.SH Name
.PP
The name is a joking reference \[la]http://en.wikipedia.org/wiki/Tippecanoe_and_Tyler_Too\[ra] to a "tiler" for making map tiles.
.SH tile\-join
.PP
Tile\-join is a tool for copying and merging vector mbtiles files and for
joining new attributes from a CSV file to existing features in them.
.PP
It reads the tiles from an
existing .mbtiles file or a directory of tiles, matches them against the
records of the CSV (if one is specified), and writes out a new tileset.
.PP
If you specify multiple source mbtiles files or source directories of tiles,
all the sources are read and their combined contents are written to the new
mbtiles output. If they define the same layers or the same tiles, the layers
or tiles are merged.
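.PP
For example, a minimal merge of two tilesets into one (file names are illustrative):
.PP
.RS
.nf
tile\-join \-o merged.mbtiles north.mbtiles south.mbtiles
.fi
.RE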
.PP
The options are:
.SS Output tileset
.RS
.IP \(bu 2
\fB\fC\-o\fR \fIout.mbtiles\fP or \fB\fC\-\-output=\fR\fIout.mbtiles\fP: Write the new tiles to the specified .mbtiles file.
.IP \(bu 2
\fB\fC\-e\fR \fIdirectory\fP or \fB\fC\-\-output\-to\-directory=\fR\fIdirectory\fP: Write the new tiles to the specified directory instead of to an mbtiles file.
.IP \(bu 2
\fB\fC\-f\fR or \fB\fC\-\-force\fR: Remove \fIout.mbtiles\fP if it already exists.
.RE
.SS Tileset description and attribution
.RS
.IP \(bu 2
\fB\fC\-A\fR \fIattribution\fP or \fB\fC\-\-attribution=\fR\fIattribution\fP: Set the attribution string.
.IP \(bu 2
\fB\fC\-n\fR \fIname\fP or \fB\fC\-\-name=\fR\fIname\fP: Set the tileset name.
.IP \(bu 2
\fB\fC\-N\fR \fIdescription\fP or \fB\fC\-\-description=\fR\fIdescription\fP: Set the tileset description.
.RE
.SS Layer filtering and naming
.RS
.IP \(bu 2
\fB\fC\-l\fR \fIlayer\fP or \fB\fC\-\-layer=\fR\fIlayer\fP: Include the named layer in the output. You can specify multiple \fB\fC\-l\fR options to keep multiple layers. If you don't specify, they will all be retained.
.IP \(bu 2
\fB\fC\-L\fR \fIlayer\fP or \fB\fC\-\-exclude\-layer=\fR\fIlayer\fP: Remove the named layer from the output. You can specify multiple \fB\fC\-L\fR options to remove multiple layers.
.IP \(bu 2
\fB\fC\-R\fR\fIold\fP\fB\fC:\fR\fInew\fP or \fB\fC\-\-rename\-layer=\fR\fIold\fP\fB\fC:\fR\fInew\fP: Rename the layer named \fIold\fP to be named \fInew\fP instead. You can specify multiple \fB\fC\-R\fR options to rename multiple layers. Renaming happens before filtering.
.RE
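.PP
For example, to rename a layer and then keep only that layer (layer and file names are
illustrative; renaming happens before filtering):
.PP
.RS
.nf
tile\-join \-f \-o roads.mbtiles \-R road:roads \-l roads input.mbtiles
.fi
.RE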
.SS Zoom levels
.RS
.IP \(bu 2
\fB\fC\-z\fR \fIzoom\fP or \fB\fC\-\-maximum\-zoom=\fR\fIzoom\fP: Don't copy tiles from higher zoom levels than the specified zoom
.IP \(bu 2
\fB\fC\-Z\fR \fIzoom\fP or \fB\fC\-\-minimum\-zoom=\fR\fIzoom\fP: Don't copy tiles from lower zoom levels than the specified zoom
.RE
.SS Merging attributes from a CSV file
.RS
.IP \(bu 2
\fB\fC\-c\fR \fImatch\fP\fB\fC\&.csv\fR or \fB\fC\-\-csv=\fR\fImatch\fP\fB\fC\&.csv\fR: Use \fImatch\fP\fB\fC\&.csv\fR as the source for new attributes to join to the features. The first line of the file should be the key names; the other lines are values. The first column is the one to match against the existing features; the other columns are the new data to add.
.RE
.SS Filtering features and feature attributes
.RS
.IP \(bu 2
\fB\fC\-x\fR \fIkey\fP or \fB\fC\-\-exclude=\fR\fIkey\fP: Remove attributes of type \fIkey\fP from the output. You can use this to remove the field you are matching against if you no longer need it after joining, or to remove any other attributes you don't want.
.IP \(bu 2
\fB\fC\-i\fR or \fB\fC\-\-if\-matched\fR: Only include features that matched the CSV.
.IP \(bu 2
\fB\fC\-j\fR \fIfilter\fP or \fB\fC\-\-feature\-filter\fR=\fIfilter\fP: Check features against a per\-layer filter (as defined in the Mapbox GL Style Specification \[la]https://www.mapbox.com/mapbox-gl-js/style-spec/#types-filter\[ra]) and only include those that match. Any features in layers that have no filter specified will be passed through. Filters for the layer \fB\fC"*"\fR apply to all layers.
.IP \(bu 2
\fB\fC\-J\fR \fIfilter\-file\fP or \fB\fC\-\-feature\-filter\-file\fR=\fIfilter\-file\fP: Like \fB\fC\-j\fR, but read the filter from a file.
.RE
.SS Setting or disabling tile size limits
.RS
.IP \(bu 2
\fB\fC\-pk\fR or \fB\fC\-\-no\-tile\-size\-limit\fR: Don't skip tiles larger than 500K.
.IP \(bu 2
\fB\fC\-pC\fR or \fB\fC\-\-no\-tile\-compression\fR: Don't compress the PBF vector tile data.
.IP \(bu 2
\fB\fC\-pg\fR or \fB\fC\-\-no\-tile\-stats\fR: Don't generate the \fB\fCtilestats\fR row in the tileset metadata. Uploads without tilestats \[la]https://github.com/mapbox/mapbox-geostats\[ra] will take longer to process.
.RE
.PP
Because tile\-join just copies the geometries to the new .mbtiles without processing them
(except to rescale the extents if necessary),
it doesn't have any of tippecanoe's recourses if the new tiles are bigger than the 500K tile limit.
If a tile is too big, it is just left out of the new tileset.
If a tile is too big and you haven't specified \fB\fC\-pk\fR, it is just left out of the new tileset.
.SH Example
.PP
Imagine you have a tileset of census blocks:
@ -336,3 +683,57 @@ Then you can join those populations to the geometries and discard the no\-longer
\&./tile\-join \-o population.mbtiles \-x GEOID10 \-c population.csv tl_2010_06001_tabblock10.mbtiles
.fi
.RE
.SH tippecanoe\-enumerate
.PP
The \fB\fCtippecanoe\-enumerate\fR utility lists the tiles that an \fB\fCmbtiles\fR file defines.
Each line of the output lists the name of the \fB\fCmbtiles\fR file and the zoom, x, and y
coordinates of one of the tiles. It does basically the same thing as
.PP
.RS
.nf
select zoom_level, tile_column, (1 << zoom_level) \- 1 \- tile_row from tiles;
.fi
.RE
.PP
on the file in sqlite3.
.SH tippecanoe\-decode
.PP
The \fB\fCtippecanoe\-decode\fR utility turns vector mbtiles back to GeoJSON. You can use it either
on an entire file:
.PP
.RS
.nf
tippecanoe\-decode file.mbtiles
.fi
.RE
.PP
or on an individual tile:
.PP
.RS
.nf
tippecanoe\-decode file.mbtiles zoom x y
tippecanoe\-decode file.vector.pbf zoom x y
.fi
.RE
.PP
If you decode an entire file, you get a nested \fB\fCFeatureCollection\fR identifying each
tile and layer separately. Note that the same features generally appear at all zooms,
so the output for the file will have many copies of the same features at different
resolutions.
.SS Options
.RS
.IP \(bu 2
\fB\fC\-s\fR \fIprojection\fP or \fB\fC\-\-projection=\fR\fIprojection\fP: Specify the projection of the output data. Currently supported are EPSG:4326 (WGS84, the default) and EPSG:3857 (Web Mercator).
.IP \(bu 2
\fB\fC\-z\fR \fImaxzoom\fP or \fB\fC\-\-maximum\-zoom=\fR\fImaxzoom\fP: Specify the highest zoom level to decode from the tileset
.IP \(bu 2
\fB\fC\-Z\fR \fIminzoom\fP or \fB\fC\-\-minimum\-zoom=\fR\fIminzoom\fP: Specify the lowest zoom level to decode from the tileset
.IP \(bu 2
\fB\fC\-l\fR \fIlayer\fP or \fB\fC\-\-layer=\fR\fIlayer\fP: Decode only layers with the specified names. (Multiple \fB\fC\-l\fR options can be specified.)
.IP \(bu 2
\fB\fC\-c\fR or \fB\fC\-\-tag\-layer\-and\-zoom\fR: Include each feature's layer and zoom level as part of its \fB\fCtippecanoe\fR object rather than as a FeatureCollection wrapper
.IP \(bu 2
\fB\fC\-S\fR or \fB\fC\-\-stats\fR: Just report statistics about each tile's size and the number of features in it, as a JSON structure.
.IP \(bu 2
\fB\fC\-f\fR or \fB\fC\-\-force\fR: Decode tiles even if polygon ring order or closure problems are detected
.RE
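.PP
For example, to report per\-tile statistics (tile size and feature count) for a whole tileset
(the file name is illustrative):
.PP
.RS
.nf
tippecanoe\-decode \-S file.mbtiles
.fi
.RE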

View File

@ -0,0 +1,13 @@
Copyright (c) 2016, Mapbox
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

25
mapbox/LICENSE-variant Normal file
View File

@ -0,0 +1,25 @@
Copyright (c) MapBox
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
- Neither the name "MapBox" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

37
mapbox/LICENSE-wagyu Normal file
View File

@ -0,0 +1,37 @@
Parts of the code in the Wagyu Library are derived from the version of the
Clipper Library by Angus Johnson listed below.
Author : Angus Johnson
Version : 6.4.0
Date : 2 July 2015
Website : http://www.angusj.com
Copyright for portions of the derived code in the Wagyu library are held
by Angus Johnson, 2010-2015. All other copyright for the Wagyu Library are held by
Mapbox, 2016. This code is published in accordance with, and retains the same license
as the Clipper Library by Angus Johnson.
Copyright (c) 2010-2015, Angus Johnson
Copyright (c) 2016, Mapbox
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

13
mapbox/geometry.hpp Normal file
View File

@ -0,0 +1,13 @@
#pragma once
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/line_string.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/multi_point.hpp>
#include <mapbox/geometry/multi_line_string.hpp>
#include <mapbox/geometry/multi_polygon.hpp>
#include <mapbox/geometry/geometry.hpp>
#include <mapbox/geometry/feature.hpp>
#include <mapbox/geometry/point_arithmetic.hpp>
#include <mapbox/geometry/for_each_point.hpp>
#include <mapbox/geometry/envelope.hpp>

34
mapbox/geometry/box.hpp Normal file
View File

@ -0,0 +1,34 @@
#pragma once
#include <mapbox/geometry/point.hpp>
namespace mapbox {
namespace geometry {
template <typename T>
struct box
{
using point_type = point<T>;
constexpr box(point_type const& min_, point_type const& max_)
: min(min_), max(max_)
{}
point_type min;
point_type max;
};
template <typename T>
constexpr bool operator==(box<T> const& lhs, box<T> const& rhs)
{
return lhs.min == rhs.min && lhs.max == rhs.max;
}
template <typename T>
constexpr bool operator!=(box<T> const& lhs, box<T> const& rhs)
{
return lhs.min != rhs.min || lhs.max != rhs.max;
}
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,33 @@
#pragma once
#include <mapbox/geometry/box.hpp>
#include <mapbox/geometry/for_each_point.hpp>
#include <limits>
namespace mapbox {
namespace geometry {
template <typename G, typename T = typename G::coordinate_type>
box<T> envelope(G const& geometry)
{
using limits = std::numeric_limits<T>;
T min_t = limits::has_infinity ? -limits::infinity() : limits::min();
T max_t = limits::has_infinity ? limits::infinity() : limits::max();
point<T> min(max_t, max_t);
point<T> max(min_t, min_t);
for_each_point(geometry, [&] (point<T> const& point) {
if (min.x > point.x) min.x = point.x;
if (min.y > point.y) min.y = point.y;
if (max.x < point.x) max.x = point.x;
if (max.y < point.y) max.y = point.y;
});
return box<T>(min, max);
}
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,81 @@
#pragma once
#include <mapbox/geometry/geometry.hpp>
#include <mapbox/variant.hpp>
#include <cstdint>
#include <string>
#include <vector>
#include <unordered_map>
#include <experimental/optional>
namespace mapbox {
namespace geometry {
struct value;
struct null_value_t
{
constexpr null_value_t() {}
constexpr null_value_t(std::nullptr_t) {}
};
constexpr bool operator==(const null_value_t&, const null_value_t&) { return true; }
constexpr bool operator!=(const null_value_t&, const null_value_t&) { return false; }
constexpr null_value_t null_value = null_value_t();
// Multiple numeric types (uint64_t, int64_t, double) are present in order to support
// the widest possible range of JSON numbers, which do not have a maximum range.
// Implementations that produce `value`s should use that order for type preference,
// using uint64_t for positive integers, int64_t for negative integers, and double
// for non-integers and integers outside the range of 64 bits.
using value_base = mapbox::util::variant<null_value_t, bool, uint64_t, int64_t, double, std::string,
mapbox::util::recursive_wrapper<std::vector<value>>,
mapbox::util::recursive_wrapper<std::unordered_map<std::string, value>>>;
struct value : value_base
{
using value_base::value_base;
};
using property_map = std::unordered_map<std::string, value>;
// The same considerations and requirement for numeric types apply as for `value_base`.
using identifier = mapbox::util::variant<uint64_t, int64_t, double, std::string>;
template <class T>
struct feature
{
using coordinate_type = T;
using geometry_type = mapbox::geometry::geometry<T>; // Fully qualified to avoid GCC -fpermissive error.
geometry_type geometry;
property_map properties {};
std::experimental::optional<identifier> id {};
};
template <class T>
constexpr bool operator==(feature<T> const& lhs, feature<T> const& rhs)
{
return lhs.id == rhs.id && lhs.geometry == rhs.geometry && lhs.properties == rhs.properties;
}
template <class T>
constexpr bool operator!=(feature<T> const& lhs, feature<T> const& rhs)
{
return !(lhs == rhs);
}
template <class T, template <typename...> class Cont = std::vector>
struct feature_collection : Cont<feature<T>>
{
using coordinate_type = T;
using feature_type = feature<T>;
using container_type = Cont<feature_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,45 @@
#pragma once
#include <mapbox/geometry/geometry.hpp>
namespace mapbox {
namespace geometry {
template <typename Point, typename F>
auto for_each_point(Point&& point, F&& f)
-> decltype(point.x, point.y, void())
{
f(std::forward<Point>(point));
}
template <typename Container, typename F>
auto for_each_point(Container&& container, F&& f)
-> decltype(container.begin(), container.end(), void());
template <typename...Types, typename F>
void for_each_point(mapbox::util::variant<Types...> const& geom, F&& f)
{
mapbox::util::variant<Types...>::visit(geom, [&] (auto const& g) {
for_each_point(g, f);
});
}
template <typename...Types, typename F>
void for_each_point(mapbox::util::variant<Types...> & geom, F&& f)
{
mapbox::util::variant<Types...>::visit(geom, [&] (auto & g) {
for_each_point(g, f);
});
}
template <typename Container, typename F>
auto for_each_point(Container&& container, F&& f)
-> decltype(container.begin(), container.end(), void())
{
for (auto& e: container) {
for_each_point(e, f);
}
}
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,53 @@
#pragma once
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/line_string.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/multi_point.hpp>
#include <mapbox/geometry/multi_line_string.hpp>
#include <mapbox/geometry/multi_polygon.hpp>
#include <mapbox/variant.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct geometry_collection;
template <typename T>
using geometry_base = mapbox::util::variant<point<T>,
line_string<T>,
polygon<T>,
multi_point<T>,
multi_line_string<T>,
multi_polygon<T>,
geometry_collection<T>>;
template <typename T>
struct geometry : geometry_base<T>
{
using coordinate_type = T;
using geometry_base<T>::geometry_base;
/*
* The default constructor would create a point geometry with default-constructed coordinates;
* i.e. (0, 0). Since this is not particularly useful, and could hide bugs, it is disabled.
*/
geometry() = delete;
};
template <typename T, template <typename...> class Cont>
struct geometry_collection : Cont<geometry<T>>
{
using coordinate_type = T;
using geometry_type = geometry<T>;
using container_type = Cont<geometry_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,21 @@
#pragma once
// mapbox
#include <mapbox/geometry/point.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct line_string : Cont<point<T> >
{
using coordinate_type = T;
using point_type = point<T>;
using container_type = Cont<point_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,21 @@
#pragma once
// mapbox
#include <mapbox/geometry/line_string.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct multi_line_string : Cont<line_string<T>>
{
using coordinate_type = T;
using line_string_type = line_string<T>;
using container_type = Cont<line_string_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,21 @@
#pragma once
// mapbox
#include <mapbox/geometry/point.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct multi_point : Cont<point<T>>
{
using coordinate_type = T;
using point_type = point<T>;
using container_type = Cont<point_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,21 @@
#pragma once
// mapbox
#include <mapbox/geometry/polygon.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct multi_polygon : Cont<polygon<T>>
{
using coordinate_type = T;
using polygon_type = polygon<T>;
using container_type = Cont<polygon_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox

35
mapbox/geometry/point.hpp Normal file
View File

@ -0,0 +1,35 @@
#pragma once
namespace mapbox {
namespace geometry {
template <typename T>
struct point
{
using coordinate_type = T;
constexpr point()
: x(), y()
{}
constexpr point(T x_, T y_)
: x(x_), y(y_)
{}
T x;
T y;
};
template <typename T>
constexpr bool operator==(point<T> const& lhs, point<T> const& rhs)
{
return lhs.x == rhs.x && lhs.y == rhs.y;
}
template <typename T>
constexpr bool operator!=(point<T> const& lhs, point<T> const& rhs)
{
return !(lhs == rhs);
}
} // namespace geometry
} // namespace mapbox

View File

@ -0,0 +1,119 @@
#pragma once
namespace mapbox {
namespace geometry {
template <typename T>
constexpr point<T> operator+(point<T> const& lhs, point<T> const& rhs)
{
return point<T>(lhs.x + rhs.x, lhs.y + rhs.y);
}
template <typename T>
constexpr point<T> operator+(point<T> const& lhs, T const& rhs)
{
return point<T>(lhs.x + rhs, lhs.y + rhs);
}
template <typename T>
constexpr point<T> operator-(point<T> const& lhs, point<T> const& rhs)
{
return point<T>(lhs.x - rhs.x, lhs.y - rhs.y);
}
template <typename T>
constexpr point<T> operator-(point<T> const& lhs, T const& rhs)
{
return point<T>(lhs.x - rhs, lhs.y - rhs);
}
template <typename T>
constexpr point<T> operator*(point<T> const& lhs, point<T> const& rhs)
{
return point<T>(lhs.x * rhs.x, lhs.y * rhs.y);
}
template <typename T>
constexpr point<T> operator*(point<T> const& lhs, T const& rhs)
{
return point<T>(lhs.x * rhs, lhs.y * rhs);
}
template <typename T>
constexpr point<T> operator/(point<T> const& lhs, point<T> const& rhs)
{
return point<T>(lhs.x / rhs.x, lhs.y / rhs.y);
}
template <typename T>
constexpr point<T> operator/(point<T> const& lhs, T const& rhs)
{
return point<T>(lhs.x / rhs, lhs.y / rhs);
}
template <typename T>
constexpr point<T>& operator+=(point<T>& lhs, point<T> const& rhs)
{
lhs.x += rhs.x;
lhs.y += rhs.y;
return lhs;
}
template <typename T>
constexpr point<T>& operator+=(point<T>& lhs, T const& rhs)
{
lhs.x += rhs;
lhs.y += rhs;
return lhs;
}
template <typename T>
constexpr point<T>& operator-=(point<T>& lhs, point<T> const& rhs)
{
lhs.x -= rhs.x;
lhs.y -= rhs.y;
return lhs;
}
template <typename T>
constexpr point<T>& operator-=(point<T>& lhs, T const& rhs)
{
lhs.x -= rhs;
lhs.y -= rhs;
return lhs;
}
template <typename T>
constexpr point<T>& operator*=(point<T>& lhs, point<T> const& rhs)
{
lhs.x *= rhs.x;
lhs.y *= rhs.y;
return lhs;
}
template <typename T>
constexpr point<T>& operator*=(point<T>& lhs, T const& rhs)
{
lhs.x *= rhs;
lhs.y *= rhs;
return lhs;
}
template <typename T>
constexpr point<T>& operator/=(point<T>& lhs, point<T> const& rhs)
{
lhs.x /= rhs.x;
lhs.y /= rhs.y;
return lhs;
}
template <typename T>
constexpr point<T>& operator/=(point<T>& lhs, T const& rhs)
{
lhs.x /= rhs;
lhs.y /= rhs;
return lhs;
}
} // namespace geometry
} // namespace mapbox
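A short sketch of the operators above (illustrative only). The file name for this header is not shown in the diff, so the include path mapbox/geometry/point_arithmetic.hpp is an assumption; point.hpp must be included first because only the operators are defined here.
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/point_arithmetic.hpp> // assumed path; the diff does not name this file
constexpr mapbox::geometry::point<int> a(1, 2);
constexpr mapbox::geometry::point<int> b(3, 4);
static_assert(a + b == mapbox::geometry::point<int>(4, 6), "component-wise add");
static_assert(a * 2 == mapbox::geometry::point<int>(2, 4), "scalar multiply");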

View File

@ -0,0 +1,31 @@
#pragma once
// mapbox
#include <mapbox/geometry/point.hpp>
// stl
#include <vector>
namespace mapbox {
namespace geometry {
template <typename T, template <typename...> class Cont = std::vector>
struct linear_ring : Cont<point<T>>
{
using coordinate_type = T;
using point_type = point<T>;
using container_type = Cont<point_type>;
using container_type::container_type;
};
template <typename T, template <typename...> class Cont = std::vector>
struct polygon : Cont<linear_ring<T>>
{
using coordinate_type = T;
using linear_ring_type = linear_ring<T>;
using container_type = Cont<linear_ring_type>;
using container_type::container_type;
};
} // namespace geometry
} // namespace mapbox
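A minimal sketch (illustrative only), assuming nothing beyond this header: a linear_ring is a vector of points that is conventionally closed by repeating the first point, and a polygon is a vector of rings with the exterior ring first.
#include <mapbox/geometry/polygon.hpp>
mapbox::geometry::polygon<int> make_square() {
    mapbox::geometry::linear_ring<int> exterior;
    exterior.push_back({0, 0});
    exterior.push_back({10, 0});
    exterior.push_back({10, 10});
    exterior.push_back({0, 10});
    exterior.push_back({0, 0}); // close the ring
    mapbox::geometry::polygon<int> poly;
    poly.push_back(exterior);   // interior rings (holes) would follow
    return poly;
}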

View File

@ -0,0 +1,466 @@
#include <mapbox/geometry/geometry.hpp>
#include <math.h>
#include <stdio.h>     // fprintf/stderr in warn()
#include <sys/types.h> // ssize_t
#include <map>
#include <set>
#include <vector>
#include <algorithm>
#include <cmath>
namespace mapbox {
namespace geometry {
template <typename T>
void add_vertical(size_t intermediate, size_t which_end, size_t into, std::vector<std::vector<point<T>>> &segments, bool &again, std::vector<size_t> &nexts) {
again = true;
std::vector<point<T>> dv;
dv.push_back(segments[intermediate][which_end]);
dv.push_back(segments[into][1]);
segments.push_back(dv);
segments[into][1] = segments[intermediate][which_end];
nexts.push_back(nexts[into]);
nexts[into] = nexts.size() - 1;
}
template <typename T>
void add_horizontal(size_t intermediate, size_t which_end, size_t into, std::vector<std::vector<point<T>>> &segments, bool &again, std::vector<size_t> &nexts) {
again = true;
T x = segments[intermediate][which_end].x;
T y = segments[intermediate][0].y +
(segments[intermediate][which_end].x - segments[intermediate][0].x) *
(segments[intermediate][1].y - segments[intermediate][0].y) /
(segments[intermediate][1].x - segments[intermediate][0].x);
point<T> d(x, y);
std::vector<point<T>> dv;
dv.push_back(d);
dv.push_back(segments[into][1]);
segments.push_back(dv);
segments[into][1] = d;
nexts.push_back(nexts[into]);
nexts[into] = nexts.size() - 1;
}
template <typename T>
void warn(std::vector<std::vector<point<T>>> &segments, size_t a, size_t b, bool do_warn) {
if (do_warn) {
fprintf(stderr, "%lld,%lld to %lld,%lld intersects %lld,%lld to %lld,%lld\n",
(long long) segments[a][0].x, (long long) segments[a][0].y,
(long long) segments[a][1].x, (long long) segments[a][1].y,
(long long) segments[b][0].x, (long long) segments[b][0].y,
(long long) segments[b][1].x, (long long) segments[b][1].y);
}
}
template <typename T>
void check_intersection(std::vector<std::vector<point<T>>> &segments, size_t a, size_t b, bool &again, std::vector<size_t> &nexts, bool do_warn, bool endpoint_ok) {
T s10_x = segments[a][1].x - segments[a][0].x;
T s10_y = segments[a][1].y - segments[a][0].y;
T s32_x = segments[b][1].x - segments[b][0].x;
T s32_y = segments[b][1].y - segments[b][0].y;
// http://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect
T denom = s10_x * s32_y - s32_x * s10_y;
if (denom == 0) {
// They are parallel or collinear. Find out if they are collinear.
// http://www.cpsc.ucalgary.ca/~marina/papers/Segment_intersection.ps
T ccw =
segments[a][0].x * segments[a][1].y +
segments[a][1].x * segments[b][0].y +
segments[b][0].x * segments[a][0].y -
segments[a][0].x * segments[b][0].y -
segments[a][1].x * segments[a][0].y -
segments[b][0].x * segments[a][1].y;
if (ccw == 0) {
if (segments[a][0].x == segments[a][1].x) {
// Vertical
T amin, amax, bmin, bmax;
if (segments[a][0].y < segments[a][1].y) {
amin = segments[a][0].y;
amax = segments[a][1].y;
} else {
amin = segments[a][1].y;
amax = segments[a][0].y;
}
if (segments[b][0].y < segments[b][1].y) {
bmin = segments[b][0].y;
bmax = segments[b][1].y;
} else {
bmin = segments[b][1].y;
bmax = segments[b][0].y;
}
// All of these transformations preserve verticality so we can check multiple cases
if (segments[b][0].y > amin && segments[b][0].y < amax) {
// B0 is in A
warn(segments, a, b, do_warn);
add_vertical(b, 0, a, segments, again, nexts);
}
if (segments[b][1].y > amin && segments[b][1].y < amax) {
// B1 is in A
warn(segments, a, b, do_warn);
add_vertical(b, 1, a, segments, again, nexts);
}
if (segments[a][0].y > bmin && segments[a][0].y < bmax) {
// A0 is in B
warn(segments, a, b, do_warn);
add_vertical(a, 0, b, segments, again, nexts);
}
if (segments[a][1].y > bmin && segments[a][1].y < bmax) {
// A1 is in B
warn(segments, a, b, do_warn);
add_vertical(a, 1, b, segments, again, nexts);
}
} else {
// Horizontal or diagonal
T amin, amax, bmin, bmax;
if (segments[a][0].x < segments[a][1].x) {
amin = segments[a][0].x;
amax = segments[a][1].x;
} else {
amin = segments[a][1].x;
amax = segments[a][0].x;
}
if (segments[b][0].x < segments[b][1].x) {
bmin = segments[b][0].x;
bmax = segments[b][1].x;
} else {
bmin = segments[b][1].x;
bmax = segments[b][0].x;
}
// Don't check multiples, because rounding may corrupt collinearity
if (segments[b][0].x > amin && segments[b][0].x < amax) {
// B0 is in A
add_horizontal(b, 0, a, segments, again, nexts);
warn(segments, a, b, do_warn);
} else if (segments[b][1].x > amin && segments[b][1].x < amax) {
// B1 is in A
add_horizontal(b, 1, a, segments, again, nexts);
warn(segments, a, b, do_warn);
} else if (segments[a][0].x > bmin && segments[a][0].x < bmax) {
// A0 is in B
warn(segments, a, b, do_warn);
add_horizontal(a, 0, b, segments, again, nexts);
} else if (segments[a][1].x > bmin && segments[a][1].x < bmax) {
// A1 is in B
warn(segments, a, b, do_warn);
add_horizontal(a, 1, b, segments, again, nexts);
}
}
}
} else {
// Neither parallel nor collinear, so may intersect at a single point
T s02_x = segments[a][0].x - segments[b][0].x;
T s02_y = segments[a][0].y - segments[b][0].y;
double s = (s10_x * s02_y - s10_y * s02_x) / (long double) denom;
double t = (s32_x * s02_y - s32_y * s02_x) / (long double) denom;
if (t >= 0 && t <= 1 && s >= 0 && s <= 1) {
T x = (T) round(segments[a][0].x + t * s10_x);
T y = (T) round(segments[a][0].y + t * s10_y);
if ((t > 0 && t < 1 && s > 0 && s < 1) || !endpoint_ok) {
if (t >= 0 && t <= 1) {
if ((x != segments[a][0].x || y != segments[a][0].y) && (x != segments[a][1].x || y != segments[a][1].y)) {
warn(segments, a, b, do_warn);
// splitting a
std::vector<point<T>> dv;
dv.push_back(point<T>(x, y));
dv.push_back(segments[a][1]);
segments.push_back(dv);
segments[a][1] = point<T>(x, y);
nexts.push_back(nexts[a]);
nexts[a] = nexts.size() - 1;
again = true;
}
}
if (s >= 0 && s <= 1) {
if ((x != segments[b][0].x || y != segments[b][0].y) && (x != segments[b][1].x || y != segments[b][1].y)) {
// splitting b
warn(segments, a, b, do_warn);
std::vector<point<T>> dv;
dv.push_back(point<T>(x, y));
dv.push_back(segments[b][1]);
segments.push_back(dv);
segments[b][1] = point<T>(x, y);
nexts.push_back(nexts[b]);
nexts[b] = nexts.size() - 1;
again = true;
}
}
}
}
}
}
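// Recursively partition the segments around the median X (or Y) edge midpoint,
// alternating axes, so that only segment pairs that land in the same half
// (or straddle the median) are emitted as candidate intersection pairs.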
template <typename T>
void partition(std::vector<std::vector<point<T>>> &segs, std::vector<size_t> &subset, int direction, std::set<std::pair<size_t, size_t>> &possible) {
std::vector<T> points;
// List of X or Y midpoints of edges, so we can find the median
if (direction == 0) {
for (size_t i = 0; i < subset.size(); i++) {
points.push_back((segs[subset[i]][0].x + segs[subset[i]][1].x) / 2);
}
} else {
for (size_t i = 0; i < subset.size(); i++) {
points.push_back((segs[subset[i]][0].y + segs[subset[i]][1].y) / 2);
}
}
if (points.size() == 0) {
return;
}
size_t mid = points.size() / 2;
std::nth_element(points.begin(), points.begin() + mid, points.end());
T median = points[mid];
// Partition into sets that are above or below, or to the left or to the right of, the median.
// Segments that cross the median appear in both.
std::vector<size_t> one;
std::vector<size_t> two;
if (direction == 0) {
for (size_t i = 0; i < subset.size(); i++) {
if (segs[subset[i]][0].x <= median || segs[subset[i]][1].x <= median) {
one.push_back(subset[i]);
}
if (segs[subset[i]][0].x >= median || segs[subset[i]][1].x >= median) {
two.push_back(subset[i]);
}
}
} else {
for (size_t i = 0; i < subset.size(); i++) {
if (segs[subset[i]][0].y <= median || segs[subset[i]][1].y <= median) {
one.push_back(subset[i]);
}
if (segs[subset[i]][0].y >= median || segs[subset[i]][1].y >= median) {
two.push_back(subset[i]);
}
}
}
if (one.size() >= subset.size() || two.size() >= subset.size()) {
for (size_t i = 0; i < subset.size(); i++) {
for (size_t j = i + 1; j < subset.size(); j++) {
possible.insert(std::pair<size_t, size_t>(subset[i], subset[j]));
}
}
} else {
// By experiment, stopping at 10 is a little faster than either 5 or 20
if (one.size() < 10) {
for (size_t i = 0; i < one.size(); i++) {
for (size_t j = i + 1; j < one.size(); j++) {
possible.insert(std::pair<size_t, size_t>(one[i], one[j]));
}
}
} else {
partition(segs, one, !direction, possible);
}
if (two.size() < 10) {
for (size_t i = 0; i < two.size(); i++) {
for (size_t j = i + 1; j < two.size(); j++) {
possible.insert(std::pair<size_t, size_t>(two[i], two[j]));
}
}
} else {
partition(segs, two, !direction, possible);
}
}
}
template <typename T>
std::vector<std::vector<point<T>>> intersect_segments(std::vector<std::vector<point<T>>> segments, std::vector<size_t> &nexts, bool do_warn, bool endpoint_ok) {
bool again = true;
while (again) {
again = false;
std::set<std::pair<size_t, size_t>> possible;
std::vector<size_t> subset;
for (size_t i = 0; i < segments.size(); i++) {
subset.push_back(i);
}
partition(segments, subset, 0, possible);
for (auto it = possible.begin(); it != possible.end(); ++it) {
check_intersection(segments, it->first, it->second, again, nexts, do_warn, endpoint_ok);
}
}
return segments;
}
template <typename T>
linear_ring<T> remove_collinear(linear_ring<T> ring) {
linear_ring<T> out;
size_t len = ring.size() - 1; // Exclude duplicated last point
for (size_t j = 0; j < len; j++) {
long long ccw =
ring[(j + len - 1) % len].x * ring[(j + len - 0) % len].y +
ring[(j + len - 0) % len].x * ring[(j + len + 1) % len].y +
ring[(j + len + 1) % len].x * ring[(j + len - 1) % len].y -
ring[(j + len - 1) % len].x * ring[(j + len + 1) % len].y -
ring[(j + len - 0) % len].x * ring[(j + len - 1) % len].y -
ring[(j + len + 1) % len].x * ring[(j + len - 0) % len].y;
if (ccw != 0) {
out.push_back(ring[j]);
}
if (ring.size() > 0 && ring[0] != ring[ring.size() - 1]) {
ring.push_back(ring[0]);
}
}
return out;
}
template <typename T>
multi_polygon<T> snap_round(multi_polygon<T> geom, bool do_warn, bool endpoint_ok) {
std::vector<std::vector<point<T>>> segments;
std::vector<size_t> nexts;
std::vector<std::vector<size_t>> ring_starts;
// Crunch out any 0-length segments
for (size_t i = 0; i < geom.size(); i++) {
for (size_t j = 0; j < geom[i].size(); j++) {
for (ssize_t k = geom[i][j].size() - 1; k > 0; k--) {
if (geom[i][j][k] == geom[i][j][k - 1]) {
geom[i][j].erase(geom[i][j].begin() + k);
}
}
}
}
for (size_t i = 0; i < geom.size(); i++) {
ring_starts.push_back(std::vector<size_t>());
for (size_t j = 0; j < geom[i].size(); j++) {
size_t s = geom[i][j].size();
if (s > 1) {
ring_starts[i].push_back(segments.size());
size_t first = nexts.size();
for (size_t k = 0; k + 1 < s; k++) {
std::vector<point<T>> dv;
dv.push_back(geom[i][j][k]);
dv.push_back(geom[i][j][k + 1]);
segments.push_back(dv);
nexts.push_back(nexts.size() + 1);
}
// Fabricate a point if ring was not closed
if (geom[i][j][0] != geom[i][j][s - 1]) {
std::vector<point<T>> dv;
dv.push_back(geom[i][j][s - 1]);
dv.push_back(geom[i][j][0]);
segments.push_back(dv);
nexts.push_back(nexts.size() + 1);
}
// Last point of ring points back to first
nexts[nexts.size() - 1] = first;
}
}
}
segments = intersect_segments(segments, nexts, do_warn, endpoint_ok);
multi_polygon<T> mp;
for (size_t i = 0; i < ring_starts.size(); i++) {
mp.push_back(polygon<T>());
for (size_t j = 0; j < ring_starts[i].size(); j++) {
mp[i].push_back(linear_ring<T>());
size_t k = ring_starts[i][j];
do {
mp[i][j].push_back(segments[k][0]);
k = nexts[k];
} while (k != ring_starts[i][j]);
mp[i][j].push_back(segments[ring_starts[i][j]][0]);
}
}
return mp;
}
template <typename T>
multi_line_string<T> snap_round(multi_line_string<T> geom, bool do_warn, bool endpoint_ok) {
std::vector<std::vector<point<T>>> segments;
std::vector<size_t> nexts;
std::vector<size_t> ring_starts;
// Crunch out any 0-length segments
for (size_t j = 0; j < geom.size(); j++) {
for (ssize_t k = geom[j].size() - 1; k > 0; k--) {
if (geom[j][k] == geom[j][k - 1]) {
geom[j].erase(geom[j].begin() + k);
}
}
}
for (size_t j = 0; j < geom.size(); j++) {
size_t s = geom[j].size();
if (s > 1) {
ring_starts.push_back(segments.size());
size_t first = nexts.size();
for (size_t k = 0; k + 1 < s; k++) {
std::vector<point<T>> dv;
dv.push_back(geom[j][k]);
dv.push_back(geom[j][k + 1]);
segments.push_back(dv);
nexts.push_back(nexts.size() + 1);
}
// Last point of ring points back to first
nexts[nexts.size() - 1] = first;
}
}
segments = intersect_segments(segments, nexts, do_warn, endpoint_ok);
multi_line_string<T> mp;
for (size_t j = 0; j < ring_starts.size(); j++) {
mp.push_back(line_string<T>());
size_t k = ring_starts[j];
size_t last = k;
do {
mp[j].push_back(segments[k][0]);
last = k;
k = nexts[k];
} while (k != ring_starts[j]);
mp[j].push_back(segments[last][1]);
}
return mp;
}
}
}
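A hedged usage sketch of snap_round on a square ring (illustrative only; the path of this header is not shown in the diff, so its include is left as a comment, and the coordinate type is just an example): snap_round splits segments wherever they cross and then re-links each ring from the split segments.
#include <mapbox/geometry/geometry.hpp>
// ... plus this snap-round header itself (its path is not named in the diff)
mapbox::geometry::multi_polygon<long long> snap_example() {
    using namespace mapbox::geometry;
    linear_ring<long long> ring;
    ring.push_back({0, 0});
    ring.push_back({10, 0});
    ring.push_back({10, 10});
    ring.push_back({0, 10});
    ring.push_back({0, 0});
    polygon<long long> poly;
    poly.push_back(ring);
    multi_polygon<long long> mp;
    mp.push_back(poly);
    // do_warn = false: do not print intersections to stderr;
    // endpoint_ok = true: crossings only at shared endpoints are left alone.
    return snap_round(mp, false, true);
}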

View File

@ -0,0 +1,425 @@
#pragma once
#ifdef DEBUG
#include <iostream>
#include <sstream>
#endif
#include <mapbox/geometry/wagyu/bound.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/local_minimum_util.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/scanbeam.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
using active_bound_list = std::list<bound_ptr<T>>;
template <typename T>
using active_bound_list_itr = typename active_bound_list<T>::iterator;
template <typename T>
using active_bound_list_rev_itr = typename active_bound_list<T>::reverse_iterator;
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const active_bound_list<T>& bnds) {
std::size_t c = 0;
for (auto const& bnd : bnds) {
out << "Index: " << c++ << std::endl;
out << *bnd;
}
return out;
}
template <typename T>
std::string output_edges(active_bound_list<T> const& bnds) {
std::ostringstream out;
out << "[";
bool first = true;
for (auto const& bnd : bnds) {
if (first) {
first = false;
} else {
out << ",";
}
out << "[[" << bnd->current_edge->bot.x << "," << bnd->current_edge->bot.y << "],[";
out << bnd->current_edge->top.x << "," << bnd->current_edge->top.y << "]]";
}
out << "]";
return out.str();
}
#endif
template <typename T>
bool is_even_odd_fill_type(bound<T> const& bound,
fill_type subject_fill_type,
fill_type clip_fill_type) {
if (bound.poly_type == polygon_type_subject) {
return subject_fill_type == fill_type_even_odd;
} else {
return clip_fill_type == fill_type_even_odd;
}
}
template <typename T>
bool is_even_odd_alt_fill_type(bound<T> const& bound,
fill_type subject_fill_type,
fill_type clip_fill_type) {
if (bound.poly_type == polygon_type_subject) {
return clip_fill_type == fill_type_even_odd;
} else {
return subject_fill_type == fill_type_even_odd;
}
}
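// Ordering predicate for the active bound list: bounds stay sorted by current_x
// at the sweep line; ties are broken by projecting one edge to the other edge's
// top y and comparing the x positions there.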
template <typename T>
inline bool bound2_inserts_before_bound1(bound<T> const& bound1, bound<T> const& bound2) {
if (values_are_equal(bound2.current_x, bound1.current_x)) {
if (bound2.current_edge->top.y > bound1.current_edge->top.y) {
return bound2.current_edge->top.x <
get_current_x(*(bound1.current_edge), bound2.current_edge->top.y);
} else {
return bound1.current_edge->top.x >
get_current_x(*(bound2.current_edge), bound1.current_edge->top.y);
}
} else {
return bound2.current_x < bound1.current_x;
}
}
template <typename T>
active_bound_list_itr<T> insert_bound_into_ABL(bound<T>& bnd, active_bound_list<T>& active_bounds) {
auto itr = active_bounds.begin();
while (itr != active_bounds.end() && !bound2_inserts_before_bound1(*(*itr), bnd)) {
++itr;
}
return active_bounds.insert(itr, &bnd);
}
template <typename T>
active_bound_list_itr<T> insert_bound_into_ABL(bound<T>& bnd,
active_bound_list_itr<T> itr,
active_bound_list<T>& active_bounds) {
while (itr != active_bounds.end() && !bound2_inserts_before_bound1(*(*itr), bnd)) {
++itr;
}
return active_bounds.insert(itr, &bnd);
}
template <typename T>
inline bool is_maxima(bound<T>& bnd, T y) {
return bnd.next_edge == bnd.edges.end() && bnd.current_edge->top.y == y;
}
template <typename T>
inline bool is_maxima(active_bound_list_itr<T>& bnd, T y) {
return is_maxima(*(*bnd), y);
}
template <typename T>
inline bool is_intermediate(bound<T>& bnd, T y) {
return bnd.next_edge != bnd.edges.end() && bnd.current_edge->top.y == y;
}
template <typename T>
inline bool is_intermediate(active_bound_list_itr<T>& bnd, T y) {
return is_intermediate(*(*bnd), y);
}
template <typename T>
inline bool current_edge_is_horizontal(active_bound_list_itr<T>& bnd) {
return is_horizontal(*((*bnd)->current_edge));
}
template <typename T>
inline bool next_edge_is_horizontal(active_bound_list_itr<T>& bnd) {
return is_horizontal(*((*bnd)->next_edge));
}
template <typename T>
inline void swap_positions_in_ABL(active_bound_list_itr<T>& bnd1,
active_bound_list_itr<T>& bnd2,
active_bound_list<T>& active_bounds) {
if (std::next(bnd2) == bnd1) {
active_bounds.splice(bnd2, active_bounds, bnd1);
} else {
active_bounds.splice(bnd1, active_bounds, bnd2);
}
}
template <typename T>
void next_edge_in_bound(active_bound_list_itr<T>& bnd, scanbeam_list<T>& scanbeam) {
++((*bnd)->current_edge);
if ((*bnd)->current_edge != (*bnd)->edges.end()) {
++((*bnd)->next_edge);
(*bnd)->current_x = static_cast<double>((*bnd)->current_edge->bot.x);
if (!current_edge_is_horizontal<T>(bnd)) {
scanbeam.push((*bnd)->current_edge->top.y);
}
}
}
template <typename T>
active_bound_list_itr<T> get_maxima_pair(active_bound_list_itr<T> bnd,
active_bound_list<T>& active_bounds) {
auto bnd_itr = active_bounds.begin();
while (bnd_itr != active_bounds.end()) {
if (*bnd_itr == (*bnd)->maximum_bound) {
break;
}
++bnd_itr;
}
return bnd_itr;
}
template <typename T>
void set_winding_count(active_bound_list_itr<T>& bnd_itr,
active_bound_list<T>& active_bounds,
fill_type subject_fill_type,
fill_type clip_fill_type) {
auto rev_bnd_itr = active_bound_list_rev_itr<T>(bnd_itr);
if (rev_bnd_itr == active_bounds.rend()) {
(*bnd_itr)->winding_count = (*bnd_itr)->winding_delta;
(*bnd_itr)->winding_count2 = 0;
return;
}
// find the edge of the same polytype that immediately precedes 'edge' in
// AEL
while (rev_bnd_itr != active_bounds.rend() &&
(*rev_bnd_itr)->poly_type != (*bnd_itr)->poly_type) {
++rev_bnd_itr;
}
if (rev_bnd_itr == active_bounds.rend()) {
(*bnd_itr)->winding_count = (*bnd_itr)->winding_delta;
(*bnd_itr)->winding_count2 = 0;
} else if (is_even_odd_fill_type(*(*bnd_itr), subject_fill_type, clip_fill_type)) {
// EvenOdd filling ...
(*bnd_itr)->winding_count = (*bnd_itr)->winding_delta;
(*bnd_itr)->winding_count2 = (*rev_bnd_itr)->winding_count2;
} else {
// nonZero, Positive or Negative filling ...
if ((*rev_bnd_itr)->winding_count * (*rev_bnd_itr)->winding_delta < 0) {
// prev edge is 'decreasing' WindCount (WC) toward zero
// so we're outside the previous polygon ...
if (std::abs(static_cast<int>((*rev_bnd_itr)->winding_count)) > 1) {
// outside prev poly but still inside another.
// when reversing direction of prev poly use the same WC
if ((*rev_bnd_itr)->winding_delta * (*bnd_itr)->winding_delta < 0) {
(*bnd_itr)->winding_count = (*rev_bnd_itr)->winding_count;
} else {
// otherwise continue to 'decrease' WC ...
(*bnd_itr)->winding_count =
(*rev_bnd_itr)->winding_count + (*bnd_itr)->winding_delta;
}
} else {
// now outside all polys of same polytype so set own WC ...
(*bnd_itr)->winding_count = (*bnd_itr)->winding_delta;
}
} else {
// prev edge is 'increasing' WindCount (WC) away from zero
// so we're inside the previous polygon ...
if ((*rev_bnd_itr)->winding_delta * (*bnd_itr)->winding_delta < 0) {
// if wind direction is reversing prev then use same WC
(*bnd_itr)->winding_count = (*rev_bnd_itr)->winding_count;
} else {
// otherwise add to WC ...
(*bnd_itr)->winding_count =
(*rev_bnd_itr)->winding_count + (*bnd_itr)->winding_delta;
}
}
(*bnd_itr)->winding_count2 = (*rev_bnd_itr)->winding_count2;
}
// update winding_count2 ...
auto bnd_itr_forward = rev_bnd_itr.base();
if (is_even_odd_alt_fill_type(*(*bnd_itr), subject_fill_type, clip_fill_type)) {
// EvenOdd filling ...
while (bnd_itr_forward != bnd_itr) {
if ((*bnd_itr_forward)->winding_delta != 0) {
(*bnd_itr)->winding_count2 = ((*bnd_itr)->winding_count2 == 0 ? 1 : 0);
}
++bnd_itr_forward;
}
} else {
// nonZero, Positive or Negative filling ...
while (bnd_itr_forward != bnd_itr) {
(*bnd_itr)->winding_count2 += (*bnd_itr_forward)->winding_delta;
++bnd_itr_forward;
}
}
}
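// Decide whether a bound whose winding counts were just set actually contributes
// an output ring, given the clip operation and both fill rules.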
template <typename T>
bool is_contributing(bound<T> const& bnd,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
fill_type pft = subject_fill_type;
fill_type pft2 = clip_fill_type;
if (bnd.poly_type != polygon_type_subject) {
pft = clip_fill_type;
pft2 = subject_fill_type;
}
switch (pft) {
case fill_type_even_odd:
break;
case fill_type_non_zero:
if (std::abs(static_cast<int>(bnd.winding_count)) != 1) {
return false;
}
break;
case fill_type_positive:
if (bnd.winding_count != 1) {
return false;
}
break;
case fill_type_negative:
default:
if (bnd.winding_count != -1) {
return false;
}
}
switch (cliptype) {
case clip_type_intersection:
switch (pft2) {
case fill_type_even_odd:
case fill_type_non_zero:
return (bnd.winding_count2 != 0);
case fill_type_positive:
return (bnd.winding_count2 > 0);
case fill_type_negative:
default:
return (bnd.winding_count2 < 0);
}
break;
case clip_type_union:
switch (pft2) {
case fill_type_even_odd:
case fill_type_non_zero:
return (bnd.winding_count2 == 0);
case fill_type_positive:
return (bnd.winding_count2 <= 0);
case fill_type_negative:
default:
return (bnd.winding_count2 >= 0);
}
break;
case clip_type_difference:
if (bnd.poly_type == polygon_type_subject) {
switch (pft2) {
case fill_type_even_odd:
case fill_type_non_zero:
return (bnd.winding_count2 == 0);
case fill_type_positive:
return (bnd.winding_count2 <= 0);
case fill_type_negative:
default:
return (bnd.winding_count2 >= 0);
}
} else {
switch (pft2) {
case fill_type_even_odd:
case fill_type_non_zero:
return (bnd.winding_count2 != 0);
case fill_type_positive:
return (bnd.winding_count2 > 0);
case fill_type_negative:
default:
return (bnd.winding_count2 < 0);
}
}
break;
case clip_type_x_or:
return true;
break;
default:
return true;
}
}
template <typename T>
void insert_lm_left_and_right_bound(bound<T>& left_bound,
bound<T>& right_bound,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
// Both left and right bound
auto lb_abl_itr = insert_bound_into_ABL(left_bound, active_bounds);
auto rb_abl_itr = active_bounds.insert(std::next(lb_abl_itr), &right_bound);
set_winding_count(lb_abl_itr, active_bounds, subject_fill_type, clip_fill_type);
(*rb_abl_itr)->winding_count = (*lb_abl_itr)->winding_count;
(*rb_abl_itr)->winding_count2 = (*lb_abl_itr)->winding_count2;
if (is_contributing(left_bound, cliptype, subject_fill_type, clip_fill_type)) {
add_local_minimum_point(lb_abl_itr, rb_abl_itr, active_bounds,
(*lb_abl_itr)->current_edge->bot, rings);
}
// Add top of edges to scanbeam
scanbeam.push((*lb_abl_itr)->current_edge->top.y);
if (!current_edge_is_horizontal<T>(rb_abl_itr)) {
scanbeam.push((*rb_abl_itr)->current_edge->top.y);
}
}
template <typename T>
void insert_local_minima_into_ABL(T const bot_y,
local_minimum_ptr_list<T> const& minima_sorted,
local_minimum_ptr_list_itr<T>& current_lm,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
while (current_lm != minima_sorted.end() && bot_y == (*current_lm)->y) {
initialize_lm<T>(current_lm);
auto& left_bound = (*current_lm)->left_bound;
auto& right_bound = (*current_lm)->right_bound;
insert_lm_left_and_right_bound(left_bound, right_bound, active_bounds, rings, scanbeam,
cliptype, subject_fill_type, clip_fill_type);
++current_lm;
}
}
template <typename T>
void insert_horizontal_local_minima_into_ABL(T const top_y,
local_minimum_ptr_list<T> const& minima_sorted,
local_minimum_ptr_list_itr<T>& current_lm,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
while (current_lm != minima_sorted.end() && top_y == (*current_lm)->y &&
(*current_lm)->minimum_has_horizontal) {
initialize_lm<T>(current_lm);
auto& left_bound = (*current_lm)->left_bound;
auto& right_bound = (*current_lm)->right_bound;
insert_lm_left_and_right_bound(left_bound, right_bound, active_bounds, rings, scanbeam,
cliptype, subject_fill_type, clip_fill_type);
++current_lm;
}
}
}
}
}

View File

@ -0,0 +1,95 @@
#pragma once
#include <list>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#ifdef DEBUG
#include <iostream>
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct bound {
edge_list<T> edges;
edge_list_itr<T> current_edge;
edge_list_itr<T> next_edge;
mapbox::geometry::point<T> last_point;
ring_ptr<T> ring;
bound_ptr<T> maximum_bound; // the bound whose maximum connects with this bound
double current_x;
std::size_t pos;
std::int32_t winding_count;
std::int32_t winding_count2; // winding count of the opposite polytype
std::int8_t winding_delta; // 1 or -1 depending on winding direction - 0 for linestrings
polygon_type poly_type;
edge_side side; // side only refers to current side of solution poly
bound() noexcept
: edges(),
current_edge(edges.end()),
last_point({ 0, 0 }),
ring(nullptr),
maximum_bound(nullptr),
current_x(0.0),
pos(0),
winding_count(0),
winding_count2(0),
winding_delta(0),
poly_type(polygon_type_subject),
side(edge_left) {
}
bound(bound<T>&& b) noexcept
: edges(std::move(b.edges)),
current_edge(std::move(b.current_edge)),
last_point(std::move(b.last_point)),
ring(std::move(b.ring)),
maximum_bound(std::move(b.maximum_bound)),
current_x(std::move(b.current_x)),
pos(std::move(b.pos)),
winding_count(std::move(b.winding_count)),
winding_count2(std::move(b.winding_count2)),
winding_delta(std::move(b.winding_delta)),
poly_type(std::move(b.poly_type)),
side(std::move(b.side)) {
}
};
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const bound<T>& bnd) {
out << " Bound: " << &bnd << std::endl;
out << " current_x: " << bnd.current_x << std::endl;
out << " last_point: " << bnd.last_point.x << ", " << bnd.last_point.y << std::endl;
out << *(bnd.current_edge);
out << " winding count: " << bnd.winding_count << std::endl;
out << " winding_count2: " << bnd.winding_count2 << std::endl;
out << " winding_delta: " << static_cast<int>(bnd.winding_delta) << std::endl;
out << " maximum_bound: " << bnd.maximum_bound << std::endl;
if (bnd.side == edge_left) {
out << " side: left" << std::endl;
} else {
out << " side: right" << std::endl;
}
out << " ring: " << bnd.ring << std::endl;
if (bnd.ring) {
out << " ring index: " << bnd.ring->ring_index << std::endl;
}
return out;
}
#endif
}
}
}

View File

@ -0,0 +1,181 @@
#pragma once
#include <mapbox/geometry/line_string.hpp>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
bool point_2_is_between_point_1_and_point_3(mapbox::geometry::point<T> const& pt1,
mapbox::geometry::point<T> const& pt2,
mapbox::geometry::point<T> const& pt3) {
if ((pt1 == pt3) || (pt1 == pt2) || (pt3 == pt2)) {
return false;
} else if (pt1.x != pt3.x) {
return (pt2.x > pt1.x) == (pt2.x < pt3.x);
} else {
return (pt2.y > pt1.y) == (pt2.y < pt3.y);
}
}
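// Walk the ring once, dropping repeated points, spikes, and collinear vertices,
// and emit the surviving segments as edges; returns false if fewer than three
// usable edges remain.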
template <typename T>
bool build_edge_list(mapbox::geometry::linear_ring<T> const& path_geometry, edge_list<T>& edges) {
if (path_geometry.size() < 3) {
return false;
}
// As this is a loop, we need to first go backwards from end to try and find
// the proper starting point for the iterators before the beginning
auto itr_rev = path_geometry.rbegin();
auto itr = path_geometry.begin();
mapbox::geometry::point<T> pt1 = *itr_rev;
mapbox::geometry::point<T> pt2 = *itr;
// Find next non repeated point going backwards from
// end for pt1
while (pt1 == pt2) {
++itr_rev;
if (itr_rev == path_geometry.rend()) {
return false;
}
pt1 = *itr_rev;
}
++itr;
mapbox::geometry::point<T> pt3 = *itr;
auto itr_last = itr_rev.base();
mapbox::geometry::point<T> front_pt;
mapbox::geometry::point<T> back_pt;
while (true) {
if (pt3 == pt2) {
// Duplicate point: advance itr, but do not
// advance other points
if (itr == itr_last) {
break;
}
++itr;
if (itr == itr_last) {
if (edges.empty()) {
break;
}
pt3 = front_pt;
} else {
pt3 = *itr;
}
continue;
}
// Now check if slopes are equal between two segments - either
// a spike or a collinear point - if so drop point number 2.
if (slopes_equal(pt1, pt2, pt3)) {
// We need to reconsider previously added points
// because the point it was using was found to be collinear
// or a spike
pt2 = pt1;
if (!edges.empty()) {
edges.pop_back(); // remove previous edge (pt1)
}
if (!edges.empty()) {
if (back_pt == edges.back().top) {
pt1 = edges.back().bot;
} else {
pt1 = edges.back().top;
}
back_pt = pt1;
} else {
// If this occurs we must look to the back of the
// ring for new points.
while (*itr_rev == pt2) {
++itr_rev;
if ((itr + 1) == itr_rev.base()) {
return false;
}
}
pt1 = *itr_rev;
itr_last = itr_rev.base();
}
continue;
}
if (edges.empty()) {
front_pt = pt2;
}
edges.emplace_back(pt2, pt3);
back_pt = pt2;
if (itr == itr_last) {
break;
}
pt1 = pt2;
pt2 = pt3;
++itr;
if (itr == itr_last) {
if (edges.empty()) {
break;
}
pt3 = front_pt;
} else {
pt3 = *itr;
}
}
bool modified = false;
do {
modified = false;
if (edges.size() < 3) {
return false;
}
auto& f = edges.front();
auto& b = edges.back();
if (slopes_equal(f, b)) {
if (f.bot == b.top) {
if (f.top == b.bot) {
edges.pop_back();
edges.erase(edges.begin());
} else {
f.bot = b.bot;
edges.pop_back();
}
modified = true;
} else if (f.top == b.bot) {
f.top = b.top;
edges.pop_back();
modified = true;
} else if (f.top == b.top && f.bot == b.bot) {
edges.pop_back();
edges.erase(edges.begin());
modified = true;
} else if (f.top == b.top) {
if (point_2_is_between_point_1_and_point_3(f.top, f.bot, b.bot)) {
b.top = f.bot;
edges.erase(edges.begin());
} else {
f.top = b.bot;
edges.pop_back();
}
modified = true;
} else if (f.bot == b.bot) {
if (point_2_is_between_point_1_and_point_3(f.bot, f.top, b.top)) {
b.bot = f.top;
edges.erase(edges.begin());
} else {
f.bot = b.top;
edges.pop_back();
}
modified = true;
}
}
} while (modified);
return true;
}
}
}
}

View File

@ -0,0 +1,39 @@
#pragma once
#include <mapbox/geometry/wagyu/build_edges.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/local_minimum_util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
bool add_line_string(mapbox::geometry::line_string<T> const& path_geometry,
local_minimum_list<T>& minima_list) {
bool is_flat = true;
edge_list<T> new_edges;
new_edges.reserve(path_geometry.size());
if (!build_edge_list(path_geometry, new_edges, is_flat) || new_edges.empty()) {
return false;
}
add_line_to_local_minima_list(new_edges, minima_list, polygon_type_subject);
return true;
}
template <typename T>
bool add_linear_ring(mapbox::geometry::linear_ring<T> const& path_geometry,
local_minimum_list<T>& minima_list,
polygon_type p_type) {
edge_list<T> new_edges;
new_edges.reserve(path_geometry.size());
if (!build_edge_list(path_geometry, new_edges) || new_edges.empty()) {
return false;
}
add_ring_to_local_minima_list(new_edges, minima_list, p_type);
return true;
}
}
}
}

View File

@ -0,0 +1,68 @@
#pragma once
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/ring_util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
void push_ring_to_polygon(mapbox::geometry::polygon<T>& poly, ring_ptr<T>& r, bool reverse_output) {
mapbox::geometry::linear_ring<T> lr;
lr.reserve(r->size + 1);
auto firstPt = r->points;
auto ptIt = r->points;
if (reverse_output) {
do {
lr.emplace_back(ptIt->x, ptIt->y);
ptIt = ptIt->next;
} while (ptIt != firstPt);
} else {
do {
lr.emplace_back(ptIt->x, ptIt->y);
ptIt = ptIt->prev;
} while (ptIt != firstPt);
}
lr.emplace_back(firstPt->x, firstPt->y); // close the ring
poly.push_back(lr);
}
template <typename T>
void build_result_polygons(std::vector<mapbox::geometry::polygon<T>>& solution,
ring_list<T>& rings,
bool reverse_output) {
for (auto& r : rings) {
assert(r->points);
std::size_t cnt = point_count(r->points);
if (cnt < 3) {
continue;
}
solution.emplace_back();
push_ring_to_polygon(solution.back(), r, reverse_output);
for (auto& c : r->children) {
assert(c->points);
cnt = point_count(c->points);
if (cnt < 3) {
continue;
}
push_ring_to_polygon(solution.back(), c, reverse_output);
}
for (auto& c : r->children) {
if (!c->children.empty()) {
build_result_polygons(solution, c->children, reverse_output);
}
}
}
}
template <typename T>
void build_result(std::vector<mapbox::geometry::polygon<T>>& solution,
ring_manager<T>& rings,
bool reverse_output) {
build_result_polygons(solution, rings.children, reverse_output);
}
}
}
}

View File

@ -0,0 +1,53 @@
#pragma once
#include <cassert>
#include <cstdint>
#include <list>
#include <stdexcept>
namespace mapbox {
namespace geometry {
namespace wagyu {
enum clip_type : std::uint8_t {
clip_type_intersection = 0,
clip_type_union,
clip_type_difference,
clip_type_x_or
};
enum polygon_type : std::uint8_t { polygon_type_subject = 0, polygon_type_clip };
enum fill_type : std::uint8_t {
fill_type_even_odd = 0,
fill_type_non_zero,
fill_type_positive,
fill_type_negative
};
static double const def_arc_tolerance = 0.25;
static int const EDGE_UNASSIGNED = -1; // edge not currently 'owning' a solution
static int const EDGE_SKIP = -2; // edge that would otherwise close a path
static std::int64_t const LOW_RANGE = 0x3FFFFFFF;
static std::int64_t const HIGH_RANGE = 0x3FFFFFFFFFFFFFFFLL;
enum horizontal_direction : std::uint8_t { right_to_left = 0, left_to_right = 1 };
enum edge_side : std::uint8_t { edge_left = 0, edge_right };
enum join_type : std::uint8_t { join_type_square = 0, join_type_round, join_type_miter };
enum end_type {
end_type_closed_polygon = 0,
end_type_closed_line,
end_type_open_butt,
end_type_open_square,
end_type_open_round
};
template <typename T>
using maxima_list = std::list<T>;
}
}
}

View File

@ -0,0 +1,120 @@
#pragma once
#include <cmath>
#include <limits>
#include <list>
#include <vector>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
#ifdef DEBUG
#include <iostream>
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct bound;
template <typename T>
using bound_ptr = bound<T>*;
template <typename T>
struct edge {
mapbox::geometry::point<T> bot;
mapbox::geometry::point<T> top;
double dx;
edge(edge<T>&& e) noexcept : bot(std::move(e.bot)), top(std::move(e.top)), dx(std::move(e.dx)) {
}
edge& operator=(edge<T>&& e) noexcept {
bot = std::move(e.bot);
top = std::move(e.top);
dx = std::move(e.dx);
return *this;
}
edge(mapbox::geometry::point<T> const& current,
mapbox::geometry::point<T> const& next_pt) noexcept
: bot(current), top(current), dx(0.0) {
if (current.y >= next_pt.y) {
top = next_pt;
} else {
bot = next_pt;
}
double dy = static_cast<double>(top.y - bot.y);
if (value_is_zero(dy)) {
dx = std::numeric_limits<double>::infinity();
} else {
dx = static_cast<double>(top.x - bot.x) / dy;
}
}
};
template <typename T>
using edge_ptr = edge<T>*;
template <typename T>
using edge_list = std::vector<edge<T>>;
template <typename T>
using edge_list_itr = typename edge_list<T>::iterator;
template <typename T>
bool slopes_equal(edge<T> const& e1, edge<T> const& e2) {
return (e1.top.y - e1.bot.y) * (e2.top.x - e2.bot.x) ==
(e1.top.x - e1.bot.x) * (e2.top.y - e2.bot.y);
}
template <typename T>
inline bool is_horizontal(edge<T> const& e) {
return std::isinf(e.dx);
}
template <typename T>
inline double get_current_x(edge<T> const& edge, const T current_y) {
if (current_y == edge.top.y) {
return static_cast<double>(edge.top.x);
} else {
return static_cast<double>(edge.bot.x) +
edge.dx * static_cast<double>(current_y - edge.bot.y);
}
}
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const edge<T>& e) {
out << " Edge: " << std::endl;
out << " bot x: " << e.bot.x << " y: " << e.bot.y << std::endl;
out << " top x: " << e.top.x << " y: " << e.top.y << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
edge_list<T> const& edges) {
out << "[";
bool first = true;
for (auto const& e : edges) {
if (first) {
first = false;
} else {
out << ",";
}
out << "[[" << e.bot.x << "," << e.bot.y << "],[";
out << e.top.x << "," << e.top.y << "]]";
}
out << "]";
return out;
}
#endif
}
}
}
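A tiny worked example of the slope bookkeeping above (illustrative only, assuming nothing beyond this header): dx is the horizontal change per unit of y, so an edge between (0, 0) and (10, 5) has dx = 2 and interpolates to x = 6 at y = 3.
#include <mapbox/geometry/wagyu/edge.hpp>
double edge_interpolation_example() {
    mapbox::geometry::wagyu::edge<int> e({0, 0}, {10, 5}); // dx == 2.0
    return mapbox::geometry::wagyu::get_current_x(e, 3);   // 6.0
}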

View File

@ -0,0 +1,23 @@
#pragma once
#include <stdexcept>
#include <string>
namespace mapbox {
namespace geometry {
namespace wagyu {
class clipper_exception : public std::exception {
private:
std::string m_descr;
public:
clipper_exception(const char* description) : m_descr(description) {
}
virtual ~clipper_exception() noexcept {
}
virtual const char* what() const noexcept {
return m_descr.c_str();
}
};
}
}
}

View File

@ -0,0 +1,72 @@
#pragma once
#include <set>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#ifdef DEBUG
#include <iostream>
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct intersect_node {
active_bound_list_itr<T> bound1;
active_bound_list_itr<T> bound2;
mapbox::geometry::point<double> pt;
intersect_node(intersect_node<T>&& n)
: bound1(std::move(n.bound1)), bound2(std::move(n.bound2)), pt(std::move(n.pt)) {
}
intersect_node& operator=(intersect_node<T>&& n) {
bound1 = std::move(n.bound1);
bound2 = std::move(n.bound2);
pt = std::move(n.pt);
return *this;
}
intersect_node(active_bound_list_itr<T> const& bound1_,
active_bound_list_itr<T> const& bound2_,
mapbox::geometry::point<double> const& pt_)
: bound1(bound1_), bound2(bound2_), pt(pt_) {
}
};
template <typename T>
using intersect_list = std::vector<intersect_node<T>>;
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const intersect_node<T>& e) {
out << " point x: " << e.pt.x << " y: " << e.pt.y << std::endl;
out << " bound 1: " << std::endl;
out << *(*e.bound1) << std::endl;
out << " bound 2: " << std::endl;
out << *(*e.bound2) << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const intersect_list<T>& ints) {
std::size_t c = 0;
for (auto const& i : ints) {
out << "Intersection: " << c++ << std::endl;
out << i;
}
return out;
}
#endif
}
}
}

View File

@ -0,0 +1,345 @@
#pragma once
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#include <mapbox/geometry/wagyu/bound.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/intersect.hpp>
#include <mapbox/geometry/wagyu/ring_util.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct intersect_list_sorter {
inline bool operator()(intersect_node<T> const& node1, intersect_node<T> const& node2) {
if (!values_are_equal(node2.pt.y, node1.pt.y)) {
return node2.pt.y < node1.pt.y;
} else {
return ((*node2.bound1)->winding_count2 + (*node2.bound2)->winding_count2) >
((*node1.bound1)->winding_count2 + (*node1.bound2)->winding_count2);
}
}
};
template <typename T>
inline mapbox::geometry::point<T> round_point(mapbox::geometry::point<double> const& pt) {
return mapbox::geometry::point<T>(round_towards_max<T>(pt.x), round_towards_max<T>(pt.y));
}
template <typename T>
inline void swap_rings(bound<T>& b1, bound<T>& b2) {
ring_ptr<T> ring = b1.ring;
b1.ring = b2.ring;
b2.ring = ring;
}
template <typename T>
inline void swap_sides(bound<T>& b1, bound<T>& b2) {
edge_side side = b1.side;
b1.side = b2.side;
b2.side = side;
}
template <typename T1, typename T2>
bool get_edge_intersection(edge<T1> const& e1,
edge<T1> const& e2,
mapbox::geometry::point<T2>& pt) {
T2 p0_x = static_cast<T2>(e1.bot.x);
T2 p0_y = static_cast<T2>(e1.bot.y);
T2 p1_x = static_cast<T2>(e1.top.x);
T2 p1_y = static_cast<T2>(e1.top.y);
T2 p2_x = static_cast<T2>(e2.bot.x);
T2 p2_y = static_cast<T2>(e2.bot.y);
T2 p3_x = static_cast<T2>(e2.top.x);
T2 p3_y = static_cast<T2>(e2.top.y);
T2 s1_x, s1_y, s2_x, s2_y;
s1_x = p1_x - p0_x;
s1_y = p1_y - p0_y;
s2_x = p3_x - p2_x;
s2_y = p3_y - p2_y;
T2 s, t;
s = (-s1_y * (p0_x - p2_x) + s1_x * (p0_y - p2_y)) / (-s2_x * s1_y + s1_x * s2_y);
t = (s2_x * (p0_y - p2_y) - s2_y * (p0_x - p2_x)) / (-s2_x * s1_y + s1_x * s2_y);
if (s >= 0.0 && s <= 1.0 && t >= 0.0 && t <= 1.0) {
pt.x = p0_x + (t * s1_x);
pt.y = p0_y + (t * s1_y);
return true;
}
// LCOV_EXCL_START
return false;
// LCOV_EXCL_END
}
template <typename T>
void build_intersect_list(active_bound_list<T>& active_bounds, intersect_list<T>& intersects) {
// bubblesort ...
bool isModified = false;
do {
isModified = false;
auto bnd = active_bounds.begin();
auto bnd_next = std::next(bnd);
while (bnd_next != active_bounds.end()) {
if ((*bnd)->current_x > (*bnd_next)->current_x &&
!slopes_equal(*((*bnd)->current_edge), *((*bnd_next)->current_edge))) {
mapbox::geometry::point<double> pt;
if (!get_edge_intersection<T, double>(*((*bnd)->current_edge),
*((*bnd_next)->current_edge), pt)) {
// LCOV_EXCL_START
throw std::runtime_error(
"Trying to find intersection of lines that do not intersect");
// LCOV_EXCL_END
}
intersects.emplace_back(bnd, bnd_next, pt);
swap_positions_in_ABL(bnd, bnd_next, active_bounds);
bnd_next = std::next(bnd);
isModified = true;
} else {
bnd = bnd_next;
++bnd_next;
}
}
} while (isModified);
}
template <typename T>
void intersect_bounds(active_bound_list_itr<T>& b1,
active_bound_list_itr<T>& b2,
mapbox::geometry::point<T> const& pt,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type,
ring_manager<T>& rings,
active_bound_list<T>& active_bounds) {
bool b1Contributing = ((*b1)->ring != nullptr);
bool b2Contributing = ((*b2)->ring != nullptr);
// update winding counts...
// assumes that b1 will be to the Right of b2 ABOVE the intersection
if ((*b1)->poly_type == (*b2)->poly_type) {
if (is_even_odd_fill_type(*(*b1), subject_fill_type, clip_fill_type)) {
std::int32_t oldE1winding_count = (*b1)->winding_count;
(*b1)->winding_count = (*b2)->winding_count;
(*b2)->winding_count = oldE1winding_count;
} else {
if ((*b1)->winding_count + (*b2)->winding_delta == 0) {
(*b1)->winding_count = -(*b1)->winding_count;
} else {
(*b1)->winding_count += (*b2)->winding_delta;
}
if ((*b2)->winding_count - (*b1)->winding_delta == 0) {
(*b2)->winding_count = -(*b2)->winding_count;
} else {
(*b2)->winding_count -= (*b1)->winding_delta;
}
}
} else {
if (!is_even_odd_fill_type(*(*b2), subject_fill_type, clip_fill_type)) {
(*b1)->winding_count2 += (*b2)->winding_delta;
} else {
(*b1)->winding_count2 = ((*b1)->winding_count2 == 0) ? 1 : 0;
}
if (!is_even_odd_fill_type(*(*b1), subject_fill_type, clip_fill_type)) {
(*b2)->winding_count2 -= (*b1)->winding_delta;
} else {
(*b2)->winding_count2 = ((*b2)->winding_count2 == 0) ? 1 : 0;
}
}
fill_type b1FillType, b2FillType, b1FillType2, b2FillType2;
if ((*b1)->poly_type == polygon_type_subject) {
b1FillType = subject_fill_type;
b1FillType2 = clip_fill_type;
} else {
b1FillType = clip_fill_type;
b1FillType2 = subject_fill_type;
}
if ((*b2)->poly_type == polygon_type_subject) {
b2FillType = subject_fill_type;
b2FillType2 = clip_fill_type;
} else {
b2FillType = clip_fill_type;
b2FillType2 = subject_fill_type;
}
std::int32_t b1Wc, b2Wc;
switch (b1FillType) {
case fill_type_positive:
b1Wc = (*b1)->winding_count;
break;
case fill_type_negative:
b1Wc = -(*b1)->winding_count;
break;
case fill_type_even_odd:
case fill_type_non_zero:
default:
b1Wc = std::abs(static_cast<int>((*b1)->winding_count));
}
switch (b2FillType) {
case fill_type_positive:
b2Wc = (*b2)->winding_count;
break;
case fill_type_negative:
b2Wc = -(*b2)->winding_count;
break;
case fill_type_even_odd:
case fill_type_non_zero:
default:
b2Wc = std::abs(static_cast<int>((*b2)->winding_count));
}
if (b1Contributing && b2Contributing) {
if ((b1Wc != 0 && b1Wc != 1) || (b2Wc != 0 && b2Wc != 1) ||
((*b1)->poly_type != (*b2)->poly_type && cliptype != clip_type_x_or)) {
add_local_maximum_point(b1, b2, pt, rings, active_bounds);
} else {
add_point(b1, active_bounds, pt, rings);
add_point(b2, active_bounds, pt, rings);
swap_sides(*(*b1), *(*b2));
swap_rings(*(*b1), *(*b2));
}
} else if (b1Contributing) {
if (b2Wc == 0 || b2Wc == 1) {
add_point(b1, active_bounds, pt, rings);
(*b2)->last_point = pt;
swap_sides(*(*b1), *(*b2));
swap_rings(*(*b1), *(*b2));
}
} else if (b2Contributing) {
if (b1Wc == 0 || b1Wc == 1) {
(*b1)->last_point = pt;
add_point(b2, active_bounds, pt, rings);
swap_sides(*(*b1), *(*b2));
swap_rings(*(*b1), *(*b2));
}
} else if ((b1Wc == 0 || b1Wc == 1) && (b2Wc == 0 || b2Wc == 1)) {
// neither bound is currently contributing ...
std::int32_t b1Wc2, b2Wc2;
switch (b1FillType2) {
case fill_type_positive:
b1Wc2 = (*b1)->winding_count2;
break;
case fill_type_negative:
b1Wc2 = -(*b1)->winding_count2;
break;
case fill_type_even_odd:
case fill_type_non_zero:
default:
b1Wc2 = std::abs(static_cast<int>((*b1)->winding_count2));
}
switch (b2FillType2) {
case fill_type_positive:
b2Wc2 = (*b2)->winding_count2;
break;
case fill_type_negative:
b2Wc2 = -(*b2)->winding_count2;
break;
case fill_type_even_odd:
case fill_type_non_zero:
default:
b2Wc2 = std::abs(static_cast<int>((*b2)->winding_count2));
}
if ((*b1)->poly_type != (*b2)->poly_type) {
add_local_minimum_point(b1, b2, active_bounds, pt, rings);
} else if (b1Wc == 1 && b2Wc == 1) {
switch (cliptype) {
case clip_type_intersection:
if (b1Wc2 > 0 && b2Wc2 > 0) {
add_local_minimum_point(b1, b2, active_bounds, pt, rings);
}
break;
default:
case clip_type_union:
if (b1Wc2 <= 0 && b2Wc2 <= 0) {
add_local_minimum_point(b1, b2, active_bounds, pt, rings);
}
break;
case clip_type_difference:
if ((((*b1)->poly_type == polygon_type_clip) && (b1Wc2 > 0) && (b2Wc2 > 0)) ||
(((*b1)->poly_type == polygon_type_subject) && (b1Wc2 <= 0) && (b2Wc2 <= 0))) {
add_local_minimum_point(b1, b2, active_bounds, pt, rings);
}
break;
case clip_type_x_or:
add_local_minimum_point(b1, b2, active_bounds, pt, rings);
}
} else {
swap_sides(*(*b1), *(*b2));
}
}
}
template <typename T>
bool bounds_adjacent(intersect_node<T> const& inode) {
return (std::next(inode.bound1) == inode.bound2) || (std::next(inode.bound2) == inode.bound1);
}
template <typename T>
void process_intersect_list(intersect_list<T>& intersects,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type,
ring_manager<T>& rings,
active_bound_list<T>& active_bounds) {
for (auto node_itr = intersects.begin(); node_itr != intersects.end(); ++node_itr) {
if (!bounds_adjacent(*node_itr)) {
auto next_itr = std::next(node_itr);
while (next_itr != intersects.end() && !bounds_adjacent(*next_itr)) {
++next_itr;
}
if (next_itr == intersects.end()) {
throw std::runtime_error("Could not properly correct intersection order.");
}
std::iter_swap(node_itr, next_itr);
}
mapbox::geometry::point<T> pt = round_point<T>(node_itr->pt);
intersect_bounds(node_itr->bound1, node_itr->bound2, pt, cliptype, subject_fill_type,
clip_fill_type, rings, active_bounds);
swap_positions_in_ABL(node_itr->bound1, node_itr->bound2, active_bounds);
}
}
template <typename T>
void update_current_x(active_bound_list<T>& active_bounds, T top_y) {
std::size_t pos = 0;
for (auto& bnd : active_bounds) {
bnd->pos = pos++;
bnd->current_x = get_current_x(*bnd->current_edge, top_y);
}
}
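// One round of intersection handling for the current scanbeam: refresh each
// bound's current_x at top_y, collect crossing pairs with a bubble pass,
// restore the saved ABL order, sort the crossings, then resolve each one and
// swap the two bounds' positions in the list.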
template <typename T>
void process_intersections(T top_y,
active_bound_list<T>& active_bounds,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type,
ring_manager<T>& rings) {
if (active_bounds.empty()) {
return;
}
update_current_x(active_bounds, top_y);
intersect_list<T> intersects;
build_intersect_list(active_bounds, intersects);
if (intersects.empty()) {
return;
}
// Restore order of active bounds list
active_bounds.sort(
[](bound_ptr<T> const& b1, bound_ptr<T> const& b2) { return b1->pos < b2->pos; });
// Sort the intersection list
std::stable_sort(intersects.begin(), intersects.end(), intersect_list_sorter<T>());
process_intersect_list(intersects, cliptype, subject_fill_type, clip_fill_type, rings,
active_bounds);
}
}
}
}

View File

@ -0,0 +1,118 @@
#pragma once
#ifdef DEBUG
#include <iostream>
#include <sstream>
#endif
#include <deque>
#include <queue>
#include <vector>
#include <mapbox/geometry/wagyu/bound.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct local_minimum {
bound<T> left_bound;
bound<T> right_bound;
T y;
bool minimum_has_horizontal;
local_minimum(bound<T>&& left_bound_, bound<T>&& right_bound_, T y_, bool has_horz_)
: left_bound(std::move(left_bound_)),
right_bound(std::move(right_bound_)),
y(y_),
minimum_has_horizontal(has_horz_) {
}
};
template <typename T>
using local_minimum_list = std::deque<local_minimum<T>>;
template <typename T>
using local_minimum_itr = typename local_minimum_list<T>::iterator;
template <typename T>
using local_minimum_ptr = local_minimum<T>*;
template <typename T>
using local_minimum_ptr_list = std::vector<local_minimum_ptr<T>>;
template <typename T>
using local_minimum_ptr_list_itr = typename local_minimum_ptr_list<T>::iterator;
template <typename T>
struct local_minimum_sorter {
inline bool operator()(local_minimum_ptr<T> const& locMin1,
local_minimum_ptr<T> const& locMin2) {
if (locMin2->y == locMin1->y) {
return locMin2->minimum_has_horizontal != locMin1->minimum_has_horizontal &&
locMin1->minimum_has_horizontal;
}
return locMin2->y < locMin1->y;
}
};
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const local_minimum<T>& lm) {
out << " Local Minimum:" << std::endl;
out << " y: " << lm.y << std::endl;
if (lm.minimum_has_horizontal) {
out << " minimum_has_horizontal: true" << std::endl;
} else {
out << " minimum_has_horizontal: false" << std::endl;
}
out << " left_bound: " << std::endl;
out << lm.left_bound << std::endl;
out << " right_bound: " << std::endl;
out << lm.right_bound << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const local_minimum_ptr_list<T>& lms) {
for (auto const& lm : lms) {
out << *lm;
}
return out;
}
template <typename T>
std::string output_all_edges(local_minimum_ptr_list<T> const& lms) {
std::ostringstream out;
out << "[";
bool first = true;
for (auto const& lm : lms) {
for (auto const& e : lm->left_bound.edges) {
if (first) {
first = false;
} else {
out << ",";
}
out << "[[" << e.bot.x << "," << e.bot.y << "],[";
out << e.top.x << "," << e.top.y << "]]";
}
for (auto const& e : lm->right_bound.edges) {
if (first) {
first = false;
} else {
out << ",";
}
out << "[[" << e.bot.x << "," << e.bot.y << "],[";
out << e.top.x << "," << e.top.y << "]]";
}
}
out << "]";
return out.str();
}
#endif
}
}
}

View File

@ -0,0 +1,321 @@
#pragma once
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#ifdef DEBUG
#include <stdexcept>
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
inline void reverse_horizontal(edge<T>& e) {
// swap horizontal edges' top and bottom x's so they follow the natural
// progression of the bounds - ie so their xbots will align with the
// adjoining lower edge. [Helpful in the process_horizontal() method.]
std::swap(e.top.x, e.bot.x);
}
// Make a list start on a local maximum by
// shifting all the points not on a local maximum to the end of the list.
template <typename T>
void start_list_on_local_maximum(edge_list<T>& edges) {
if (edges.size() <= 2) {
return;
}
// Find the first local maximum going forward in the list
auto prev_edge = edges.end();
--prev_edge;
bool prev_edge_is_horizontal = is_horizontal(*prev_edge);
auto edge = edges.begin();
bool edge_is_horizontal;
bool y_decreasing_before_last_horizontal = false; // assume false at start
while (edge != edges.end()) {
edge_is_horizontal = is_horizontal(*edge);
if ((!prev_edge_is_horizontal && !edge_is_horizontal && edge->top == prev_edge->top)) {
break;
}
if (!edge_is_horizontal && prev_edge_is_horizontal) {
if (y_decreasing_before_last_horizontal &&
(edge->top == prev_edge->bot || edge->top == prev_edge->top)) {
break;
}
} else if (!y_decreasing_before_last_horizontal && !prev_edge_is_horizontal &&
edge_is_horizontal &&
(prev_edge->top == edge->top || prev_edge->top == edge->bot)) {
y_decreasing_before_last_horizontal = true;
}
prev_edge_is_horizontal = edge_is_horizontal;
prev_edge = edge;
++edge;
}
std::rotate(edges.begin(), edge, edges.end());
}
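// Peel the run of edges leading to the next local minimum off the front of the
// list into its own bound, flipping horizontal edges as they are visited and
// reversing the bound's edge order before returning it.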
template <typename T>
bound<T> create_bound_towards_minimum(edge_list<T>& edges) {
if (edges.size() == 1) {
if (is_horizontal(edges.front())) {
reverse_horizontal(edges.front());
}
bound<T> bnd;
std::swap(bnd.edges, edges);
return bnd;
}
auto next_edge = edges.begin();
auto edge = next_edge;
++next_edge;
bool edge_is_horizontal = is_horizontal(*edge);
if (edge_is_horizontal) {
reverse_horizontal(*edge);
}
bool next_edge_is_horizontal;
bool y_increasing_before_last_horizontal = false; // assume false at start
while (next_edge != edges.end()) {
next_edge_is_horizontal = is_horizontal(*next_edge);
if ((!next_edge_is_horizontal && !edge_is_horizontal && edge->bot == next_edge->bot)) {
break;
}
if (!next_edge_is_horizontal && edge_is_horizontal) {
if (y_increasing_before_last_horizontal &&
(next_edge->bot == edge->bot || next_edge->bot == edge->top)) {
break;
}
} else if (!y_increasing_before_last_horizontal && !edge_is_horizontal &&
next_edge_is_horizontal &&
(edge->bot == next_edge->top || edge->bot == next_edge->bot)) {
y_increasing_before_last_horizontal = true;
}
edge_is_horizontal = next_edge_is_horizontal;
edge = next_edge;
if (edge_is_horizontal) {
reverse_horizontal(*edge);
}
++next_edge;
}
bound<T> bnd;
if (next_edge == edges.end()) {
std::swap(edges, bnd.edges);
} else {
bnd.edges.reserve(std::distance(edges.begin(), next_edge));
std::move(edges.begin(), next_edge, std::back_inserter(bnd.edges));
edges.erase(edges.begin(), next_edge);
}
std::reverse(bnd.edges.begin(), bnd.edges.end());
return bnd;
}
template <typename T>
bound<T> create_bound_towards_maximum(edge_list<T>& edges) {
if (edges.size() == 1) {
bound<T> bnd;
std::swap(bnd.edges, edges);
return bnd;
}
auto next_edge = edges.begin();
auto edge = next_edge;
++next_edge;
bool edge_is_horizontal = is_horizontal(*edge);
bool next_edge_is_horizontal;
bool y_decreasing_before_last_horizontal = false; // assume false at start
while (next_edge != edges.end()) {
next_edge_is_horizontal = is_horizontal(*next_edge);
if ((!next_edge_is_horizontal && !edge_is_horizontal && edge->top == next_edge->top)) {
break;
}
if (!next_edge_is_horizontal && edge_is_horizontal) {
if (y_decreasing_before_last_horizontal &&
(next_edge->top == edge->bot || next_edge->top == edge->top)) {
break;
}
} else if (!y_decreasing_before_last_horizontal && !edge_is_horizontal &&
next_edge_is_horizontal &&
(edge->top == next_edge->top || edge->top == next_edge->bot)) {
y_decreasing_before_last_horizontal = true;
}
edge_is_horizontal = next_edge_is_horizontal;
edge = next_edge;
++next_edge;
}
bound<T> bnd;
if (next_edge == edges.end()) {
std::swap(bnd.edges, edges);
} else {
bnd.edges.reserve(std::distance(edges.begin(), next_edge));
std::move(edges.begin(), next_edge, std::back_inserter(bnd.edges));
edges.erase(edges.begin(), next_edge);
}
return bnd;
}
template <typename T>
void fix_horizontals(bound<T>& bnd) {
auto edge_itr = bnd.edges.begin();
auto next_itr = std::next(edge_itr);
if (next_itr == bnd.edges.end()) {
return;
}
if (is_horizontal(*edge_itr) && next_itr->bot != edge_itr->top) {
reverse_horizontal(*edge_itr);
}
auto prev_itr = edge_itr++;
while (edge_itr != bnd.edges.end()) {
if (is_horizontal(*edge_itr) && prev_itr->top != edge_itr->bot) {
reverse_horizontal(*edge_itr);
}
prev_itr = edge_itr;
++edge_itr;
}
}
template <typename T>
void move_horizontals_on_left_to_right(bound<T>& left_bound, bound<T>& right_bound) {
// We want all the horizontal segments that are at the same Y as the minimum to be on the right
// bound
auto edge_itr = left_bound.edges.begin();
while (edge_itr != left_bound.edges.end()) {
if (!is_horizontal(*edge_itr)) {
break;
}
reverse_horizontal(*edge_itr);
++edge_itr;
}
if (edge_itr == left_bound.edges.begin()) {
return;
}
std::reverse(left_bound.edges.begin(), edge_itr);
auto dist = std::distance(left_bound.edges.begin(), edge_itr);
std::move(left_bound.edges.begin(), edge_itr, std::back_inserter(right_bound.edges));
left_bound.edges.erase(left_bound.edges.begin(), edge_itr);
std::rotate(right_bound.edges.begin(), std::prev(right_bound.edges.end(), dist),
right_bound.edges.end());
}
template <typename T>
void add_ring_to_local_minima_list(edge_list<T>& edges,
local_minimum_list<T>& minima_list,
polygon_type poly_type) {
if (edges.empty()) {
return;
}
// Adjust the order of the ring so that it starts on a local maximum,
// and therefore starts right away on a bound.
start_list_on_local_maximum(edges);
bound_ptr<T> first_minimum = nullptr;
bound_ptr<T> last_maximum = nullptr;
while (!edges.empty()) {
bool lm_minimum_has_horizontal = false;
auto to_minimum = create_bound_towards_minimum(edges);
if (edges.empty()) {
throw std::runtime_error("Edges is empty after only creating a single bound.");
}
auto to_maximum = create_bound_towards_maximum(edges);
fix_horizontals(to_minimum);
fix_horizontals(to_maximum);
auto to_max_first_non_horizontal = to_maximum.edges.begin();
auto to_min_first_non_horizontal = to_minimum.edges.begin();
bool minimum_is_left = true;
while (to_max_first_non_horizontal != to_maximum.edges.end() &&
is_horizontal(*to_max_first_non_horizontal)) {
lm_minimum_has_horizontal = true;
++to_max_first_non_horizontal;
}
while (to_min_first_non_horizontal != to_minimum.edges.end() &&
is_horizontal(*to_min_first_non_horizontal)) {
lm_minimum_has_horizontal = true;
++to_min_first_non_horizontal;
}
#ifdef DEBUG
if (to_max_first_non_horizontal == to_maximum.edges.end() ||
to_min_first_non_horizontal == to_minimum.edges.end()) {
throw std::runtime_error("should not have a horizontal only bound for a ring");
}
#endif
if (lm_minimum_has_horizontal) {
if (to_max_first_non_horizontal->bot.x > to_min_first_non_horizontal->bot.x) {
minimum_is_left = true;
move_horizontals_on_left_to_right(to_minimum, to_maximum);
} else {
minimum_is_left = false;
move_horizontals_on_left_to_right(to_maximum, to_minimum);
}
} else {
if (to_max_first_non_horizontal->dx > to_min_first_non_horizontal->dx) {
minimum_is_left = false;
} else {
minimum_is_left = true;
}
}
assert(!to_minimum.edges.empty());
assert(!to_maximum.edges.empty());
auto const& min_front = to_minimum.edges.front();
if (last_maximum) {
to_minimum.maximum_bound = last_maximum;
}
to_minimum.poly_type = poly_type;
to_maximum.poly_type = poly_type;
if (!minimum_is_left) {
to_minimum.side = edge_right;
to_maximum.side = edge_left;
to_minimum.winding_delta = -1;
to_maximum.winding_delta = 1;
minima_list.emplace_back(std::move(to_maximum), std::move(to_minimum), min_front.bot.y,
lm_minimum_has_horizontal);
if (!last_maximum) {
first_minimum = &(minima_list.back().right_bound);
} else {
last_maximum->maximum_bound = &(minima_list.back().right_bound);
}
last_maximum = &(minima_list.back().left_bound);
} else {
to_minimum.side = edge_left;
to_maximum.side = edge_right;
to_minimum.winding_delta = -1;
to_maximum.winding_delta = 1;
minima_list.emplace_back(std::move(to_minimum), std::move(to_maximum), min_front.bot.y,
lm_minimum_has_horizontal);
if (!last_maximum) {
first_minimum = &(minima_list.back().left_bound);
} else {
last_maximum->maximum_bound = &(minima_list.back().left_bound);
}
last_maximum = &(minima_list.back().right_bound);
}
}
last_maximum->maximum_bound = first_minimum;
first_minimum->maximum_bound = last_maximum;
}
template <typename T>
void initialize_lm(local_minimum_ptr_list_itr<T>& lm) {
if (!(*lm)->left_bound.edges.empty()) {
(*lm)->left_bound.current_edge = (*lm)->left_bound.edges.begin();
(*lm)->left_bound.next_edge = std::next((*lm)->left_bound.current_edge);
(*lm)->left_bound.current_x = static_cast<double>((*lm)->left_bound.current_edge->bot.x);
(*lm)->left_bound.winding_count = 0;
(*lm)->left_bound.winding_count2 = 0;
(*lm)->left_bound.side = edge_left;
(*lm)->left_bound.ring = nullptr;
}
if (!(*lm)->right_bound.edges.empty()) {
(*lm)->right_bound.current_edge = (*lm)->right_bound.edges.begin();
(*lm)->right_bound.next_edge = std::next((*lm)->right_bound.current_edge);
(*lm)->right_bound.current_x = static_cast<double>((*lm)->right_bound.current_edge->bot.x);
(*lm)->right_bound.winding_count = 0;
(*lm)->right_bound.winding_count2 = 0;
(*lm)->right_bound.side = edge_right;
(*lm)->right_bound.ring = nullptr;
}
}
}
}
}
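
The rotation at the end of start_list_on_local_maximum() above only reorders the ring's edge list so that traversal begins at the local maximum that was found; no edges are added or removed. A minimal standalone sketch of that std::rotate step, using plain integers as stand-ins for edges (the values and the position of the "maximum" are assumptions for illustration):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    // Pretend each value is an edge and the local maximum was found at index 3.
    std::vector<int> edges{4, 5, 6, 9, 8, 2};
    auto maximum = edges.begin() + 3;
    // Everything before the maximum is shifted to the end of the list, so
    // iteration now starts at the maximum, as in start_list_on_local_maximum().
    std::rotate(edges.begin(), maximum, edges.end());
    assert(edges.front() == 9);
    assert(edges.back() == 6);
    return 0;
}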

View File

@ -0,0 +1,111 @@
#pragma once
#include <mapbox/geometry/point.hpp>
#ifdef DEBUG
#include <iostream>
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
struct point;
template <typename T>
using point_ptr = point<T>*;
template <typename T>
using const_point_ptr = point<T>* const;
template <typename T>
struct ring;
template <typename T>
using ring_ptr = ring<T>*;
template <typename T>
using const_ring_ptr = ring<T>* const;
template <typename T>
struct point {
using coordinate_type = T;
ring_ptr<T> ring;
T x;
T y;
point_ptr<T> next;
point_ptr<T> prev;
point(point<T>&& p)
: ring(std::move(p.ring)),
x(std::move(p.x)),
y(std::move(p.y)),
next(std::move(p.next)),
prev(std::move(p.prev)) {
}
point() : ring(nullptr), x(0), y(0), prev(this), next(this) {
}
point(T x_, T y_) : ring(nullptr), x(x_), y(y_), next(this), prev(this) {
}
point(ring_ptr<T> ring_, mapbox::geometry::point<T> const& pt)
: ring(ring_), x(pt.x), y(pt.y), next(this), prev(this) {
}
point(ring_ptr<T> ring_, mapbox::geometry::point<T> const& pt, point_ptr<T> before_this_point)
: ring(ring_), x(pt.x), y(pt.y), next(before_this_point), prev(before_this_point->prev) {
before_this_point->prev = this;
prev->next = this;
}
};
template <typename T>
bool operator==(point<T> const& lhs, point<T> const& rhs) {
return lhs.x == rhs.x && lhs.y == rhs.y;
}
template <typename T>
bool operator==(mapbox::geometry::point<T> const& lhs, point<T> const& rhs) {
return lhs.x == rhs.x && lhs.y == rhs.y;
}
template <typename T>
bool operator==(point<T> const& lhs, mapbox::geometry::point<T> const& rhs) {
return lhs.x == rhs.x && lhs.y == rhs.y;
}
template <typename T>
bool operator!=(point<T> const& lhs, point<T> const& rhs) {
return lhs.x != rhs.x || lhs.y != rhs.y;
}
template <typename T>
bool operator!=(mapbox::geometry::point<T> const& lhs, point<T> const& rhs) {
return lhs.x != rhs.x || lhs.y != rhs.y;
}
template <typename T>
bool operator!=(point<T> const& lhs, mapbox::geometry::point<T> const& rhs) {
return lhs.x != rhs.x || lhs.y != rhs.y;
}
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const point<T>& p) {
out << " point at: " << p.x << ", " << p.y;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const mapbox::geometry::point<T>& p) {
out << " point at: " << p.x << ", " << p.y;
return out;
}
#endif
}
}
}
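
The point<T> struct above is an intrusive, circular doubly-linked list node: the constructor taking before_this_point splices the new point in just before it. A small standalone sketch of that splice with a hypothetical Node type (not the library's point<T>, which also carries a ring pointer and coordinates of type T):

#include <cassert>

// Hypothetical simplification of point<T>'s linked-list behaviour.
struct Node {
    int x;
    Node* next;
    Node* prev;
    explicit Node(int x_) : x(x_), next(this), prev(this) {} // single-node ring
    // Mirror of point(ring_, pt, before_this_point): link in before 'at'.
    Node(int x_, Node* at) : x(x_), next(at), prev(at->prev) {
        at->prev = this;
        prev->next = this;
    }
};

int main() {
    Node a(1);     // ring: a
    Node b(2, &a); // ring: a -> b -> a
    Node c(3, &a); // ring: a -> b -> c -> a
    assert(a.next == &b && b.next == &c && c.next == &a);
    assert(a.prev == &c && c.prev == &b && b.prev == &a);
    return 0;
}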

View File

@ -0,0 +1,273 @@
#pragma once
#include <mapbox/geometry/line_string.hpp>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
active_bound_list_itr<T> process_horizontal_left_to_right(T scanline_y,
active_bound_list_itr<T>& horz_bound,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
auto horizontal_itr_behind = horz_bound;
bool is_maxima_edge = is_maxima(horz_bound, scanline_y);
auto bound_max_pair = active_bounds.end();
if (is_maxima_edge) {
bound_max_pair = get_maxima_pair<T>(horz_bound, active_bounds);
}
auto hp_itr = rings.current_hp_itr;
while (hp_itr != rings.hot_pixels.end() &&
(hp_itr->y > scanline_y ||
(hp_itr->y == scanline_y && hp_itr->x < (*horz_bound)->current_edge->bot.x))) {
++hp_itr;
}
auto bnd = std::next(horz_bound);
while (bnd != active_bounds.end()) {
// this code block inserts extra coords into horizontal edges (in output
// polygons) wherever hot pixels touch these horizontal edges. This helps
// 'simplify' polygons (i.e. if the Simplify property is set).
while (hp_itr != rings.hot_pixels.end() && hp_itr->y == scanline_y &&
hp_itr->x < std::llround((*bnd)->current_x) &&
hp_itr->x < (*horz_bound)->current_edge->top.x) {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), *hp_itr, rings);
}
++hp_itr;
}
if ((*bnd)->current_x > static_cast<double>((*horz_bound)->current_edge->top.x)) {
break;
}
// Also break if we've got to the end of an intermediate horizontal edge ...
// nb: Smaller Dx's are to the right of larger Dx's ABOVE the horizontal.
if (std::llround((*bnd)->current_x) == (*horz_bound)->current_edge->top.x &&
(*horz_bound)->next_edge != (*horz_bound)->edges.end() &&
(*horz_bound)->current_edge->dx < (*horz_bound)->next_edge->dx) {
break;
}
// note: may be done multiple times
if ((*horz_bound)->ring) {
add_point_to_ring(
*(*horz_bound),
mapbox::geometry::point<T>(std::llround((*bnd)->current_x), scanline_y), rings);
}
// OK, so far we're still in range of the horizontal Edge but make sure
// we're at the last of consec. horizontals when matching with eMaxPair
if (is_maxima_edge && bnd == bound_max_pair) {
if ((*horz_bound)->ring) {
add_local_maximum_point(horz_bound, bound_max_pair,
(*horz_bound)->current_edge->top, rings, active_bounds);
}
active_bounds.erase(bound_max_pair);
auto after_horz = active_bounds.erase(horz_bound);
if (horizontal_itr_behind != horz_bound) {
return horizontal_itr_behind;
} else {
return after_horz;
}
}
intersect_bounds(horz_bound, bnd,
mapbox::geometry::point<T>(std::llround((*bnd)->current_x), scanline_y),
cliptype, subject_fill_type, clip_fill_type, rings, active_bounds);
auto next_bnd = std::next(bnd);
swap_positions_in_ABL(horz_bound, bnd, active_bounds);
if (current_edge_is_horizontal<T>(bnd) && horizontal_itr_behind == horz_bound) {
horizontal_itr_behind = bnd;
}
bnd = next_bnd;
} // end while (bnd != active_bounds.end())
if ((*horz_bound)->ring) {
while (hp_itr != rings.hot_pixels.end() && hp_itr->y == scanline_y &&
hp_itr->x < std::llround((*horz_bound)->current_edge->top.x)) {
add_point_to_ring(*(*horz_bound), *hp_itr, rings);
++hp_itr;
}
}
if ((*horz_bound)->next_edge != (*horz_bound)->edges.end()) {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), (*horz_bound)->current_edge->top, rings);
next_edge_in_bound(horz_bound, scanbeam);
} else {
next_edge_in_bound(horz_bound, scanbeam);
}
if (horizontal_itr_behind != horz_bound) {
return horizontal_itr_behind;
} else {
return std::next(horz_bound);
}
} else {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), (*horz_bound)->current_edge->top, rings);
}
auto after_horz = active_bounds.erase(horz_bound);
if (horizontal_itr_behind != horz_bound) {
return horizontal_itr_behind;
} else {
return after_horz;
}
}
}
template <typename T>
active_bound_list_itr<T> process_horizontal_right_to_left(T scanline_y,
active_bound_list_itr<T>& horz_bound,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
bool is_maxima_edge = is_maxima(horz_bound, scanline_y);
auto bound_max_pair = active_bounds.end();
if (is_maxima_edge) {
bound_max_pair = get_maxima_pair<T>(horz_bound, active_bounds);
}
auto hp_itr_fwd = rings.current_hp_itr;
while (hp_itr_fwd != rings.hot_pixels.end() &&
(hp_itr_fwd->y < scanline_y ||
(hp_itr_fwd->y == scanline_y && hp_itr_fwd->x < (*horz_bound)->current_edge->top.x))) {
++hp_itr_fwd;
}
auto hp_itr = hot_pixel_rev_itr<T>(hp_itr_fwd);
auto bnd = active_bound_list_rev_itr<T>(horz_bound);
while (bnd != active_bounds.rend()) {
// this code block inserts extra coords into horizontal edges (in output
// polygons) wherever hot pixels touch these horizontal edges.
while (hp_itr != rings.hot_pixels.rend() && hp_itr->y == scanline_y &&
hp_itr->x > std::llround((*bnd)->current_x) &&
hp_itr->x > (*horz_bound)->current_edge->top.x) {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), *hp_itr, rings);
}
++hp_itr;
}
if ((*bnd)->current_x < static_cast<double>((*horz_bound)->current_edge->top.x)) {
break;
}
// Also break if we've got to the end of an intermediate horizontal edge ...
// nb: Smaller Dx's are to the right of larger Dx's ABOVE the horizontal.
if (std::llround((*bnd)->current_x) == (*horz_bound)->current_edge->top.x &&
(*horz_bound)->next_edge != (*horz_bound)->edges.end() &&
(*horz_bound)->current_edge->dx < (*horz_bound)->next_edge->dx) {
break;
}
// note: may be done multiple times
if ((*horz_bound)->ring) {
add_point_to_ring(
*(*horz_bound),
mapbox::geometry::point<T>(std::llround((*bnd)->current_x), scanline_y), rings);
}
auto bnd_forward = --(bnd.base());
// OK, so far we're still in range of the horizontal Edge but make sure
// we're at the last of consec. horizontals when matching with eMaxPair
if (is_maxima_edge && bnd_forward == bound_max_pair) {
if ((*horz_bound)->ring) {
add_local_maximum_point(horz_bound, bound_max_pair,
(*horz_bound)->current_edge->top, rings, active_bounds);
}
active_bounds.erase(bound_max_pair);
return active_bounds.erase(horz_bound);
}
intersect_bounds(bnd_forward, horz_bound,
mapbox::geometry::point<T>(std::llround((*bnd)->current_x), scanline_y),
cliptype, subject_fill_type, clip_fill_type, rings, active_bounds);
swap_positions_in_ABL(horz_bound, bnd_forward, active_bounds);
// Why are we not incrementing the bnd iterator here?
// Because a reverse iterator points at a `base()` forward iterator that is
// one element ahead of it. That forward position will always be the horizontal
// bound, so what the reverse iterator points to has already changed.
} // end while (bnd != active_bounds.rend())
if ((*horz_bound)->ring) {
while (hp_itr != rings.hot_pixels.rend() && hp_itr->y == scanline_y &&
hp_itr->x > (*horz_bound)->current_edge->top.x) {
add_point_to_ring(*(*horz_bound), *hp_itr, rings);
++hp_itr;
}
}
if ((*horz_bound)->next_edge != (*horz_bound)->edges.end()) {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), (*horz_bound)->current_edge->top, rings);
next_edge_in_bound(horz_bound, scanbeam);
} else {
next_edge_in_bound(horz_bound, scanbeam);
}
return std::next(horz_bound);
} else {
if ((*horz_bound)->ring) {
add_point_to_ring(*(*horz_bound), (*horz_bound)->current_edge->top, rings);
}
return active_bounds.erase(horz_bound);
}
}
template <typename T>
active_bound_list_itr<T> process_horizontal(T scanline_y,
active_bound_list_itr<T>& horz_bound,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
if ((*horz_bound)->current_edge->bot.x < (*horz_bound)->current_edge->top.x) {
return process_horizontal_left_to_right(scanline_y, horz_bound, active_bounds, rings,
scanbeam, cliptype, subject_fill_type,
clip_fill_type);
} else {
return process_horizontal_right_to_left(scanline_y, horz_bound, active_bounds, rings,
scanbeam, cliptype, subject_fill_type,
clip_fill_type);
}
}
template <typename T>
void process_horizontals(T scanline_y,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
for (auto bnd_itr = active_bounds.begin(); bnd_itr != active_bounds.end();) {
if (current_edge_is_horizontal<T>(bnd_itr)) {
bnd_itr = process_horizontal(scanline_y, bnd_itr, active_bounds, rings, scanbeam,
cliptype, subject_fill_type, clip_fill_type);
} else {
++bnd_itr;
}
}
}
}
}
}
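
The right-to-left pass above leans on the relationship between a reverse iterator and its base(): `--(bnd.base())` recovers the forward iterator to the very bound the reverse iterator dereferences. A self-contained sketch of that relationship on a stand-in std::list of ints (the values are arbitrary):

#include <cassert>
#include <list>

int main() {
    std::list<int> bounds{10, 20, 30};
    auto fwd = std::next(bounds.begin());      // points at 20
    std::list<int>::reverse_iterator rev(fwd); // reverse iterator built from it
    // A reverse iterator dereferences the element *before* its base()...
    assert(*rev == 10);
    // ...so --(rev.base()) is the forward iterator to that same element,
    // which is what `auto bnd_forward = --(bnd.base());` relies on above.
    auto bnd_forward = --(rev.base());
    assert(*bnd_forward == 10);
    return 0;
}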

View File

@ -0,0 +1,125 @@
#pragma once
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/intersect_util.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/local_minimum_util.hpp>
#include <mapbox/geometry/wagyu/process_horizontal.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/ring_util.hpp>
#include <mapbox/geometry/wagyu/topology_correction.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
active_bound_list_itr<T> do_maxima(active_bound_list_itr<T>& bnd,
active_bound_list_itr<T>& bndMaxPair,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type,
ring_manager<T>& rings,
active_bound_list<T>& active_bounds) {
if (bndMaxPair == active_bounds.end()) {
if ((*bnd)->ring) {
add_point_to_ring(*(*bnd), (*bnd)->current_edge->top, rings);
}
return active_bounds.erase(bnd);
}
auto bnd_next = std::next(bnd);
auto return_bnd = bnd_next;
bool skipped = false;
while (bnd_next != active_bounds.end() && bnd_next != bndMaxPair) {
skipped = true;
intersect_bounds(bnd, bnd_next, (*bnd)->current_edge->top, cliptype, subject_fill_type,
clip_fill_type, rings, active_bounds);
swap_positions_in_ABL(bnd, bnd_next, active_bounds);
bnd_next = std::next(bnd);
}
if (!(*bnd)->ring && !(*bndMaxPair)->ring) {
active_bounds.erase(bndMaxPair);
} else if ((*bnd)->ring && (*bndMaxPair)->ring) {
add_local_maximum_point(bnd, bndMaxPair, (*bnd)->current_edge->top, rings, active_bounds);
active_bounds.erase(bndMaxPair);
} else {
throw std::runtime_error("DoMaxima error");
}
auto prev_itr = active_bounds.erase(bnd);
if (skipped) {
return return_bnd;
} else {
return prev_itr;
}
}
template <typename T>
void process_edges_at_top_of_scanbeam(T top_y,
active_bound_list<T>& active_bounds,
scanbeam_list<T>& scanbeam,
local_minimum_ptr_list<T> const& minima_sorted,
local_minimum_ptr_list_itr<T>& current_lm,
ring_manager<T>& rings,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
for (auto bnd = active_bounds.begin(); bnd != active_bounds.end();) {
// 1. Process maxima, treating them as if they are "bent" horizontal edges,
// but exclude maxima with horizontal edges.
bool is_maxima_edge = is_maxima(bnd, top_y);
if (is_maxima_edge) {
auto bnd_max_pair = get_maxima_pair(bnd, active_bounds);
is_maxima_edge = ((bnd_max_pair == active_bounds.end() ||
!current_edge_is_horizontal<T>(bnd_max_pair)) &&
is_maxima(bnd_max_pair, top_y));
if (is_maxima_edge) {
bnd = do_maxima(bnd, bnd_max_pair, cliptype, subject_fill_type, clip_fill_type,
rings, active_bounds);
continue;
}
}
// 2. Promote horizontal edges.
if (is_intermediate(bnd, top_y) && next_edge_is_horizontal<T>(bnd)) {
if ((*bnd)->ring) {
insert_hot_pixels_in_path(*(*bnd), (*bnd)->current_edge->top, rings, false);
}
next_edge_in_bound(bnd, scanbeam);
if ((*bnd)->ring) {
add_point_to_ring(*(*bnd), (*bnd)->current_edge->bot, rings);
}
} else {
(*bnd)->current_x = get_current_x(*((*bnd)->current_edge), top_y);
}
++bnd;
}
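// 3. Process horizontals at the top of the scanbeam (first inserting any horizontal local minima).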
insert_horizontal_local_minima_into_ABL(top_y, minima_sorted, current_lm, active_bounds, rings,
scanbeam, cliptype, subject_fill_type, clip_fill_type);
process_horizontals(top_y, active_bounds, rings, scanbeam, cliptype, subject_fill_type,
clip_fill_type);
// 4. Promote intermediate vertices
for (auto bnd = active_bounds.begin(); bnd != active_bounds.end(); ++bnd) {
if (is_intermediate(bnd, top_y)) {
if ((*bnd)->ring) {
add_point_to_ring(*(*bnd), (*bnd)->current_edge->top, rings);
insert_hot_pixels_in_path(*(*bnd), (*bnd)->current_edge->top, rings, false);
}
next_edge_in_bound(bnd, scanbeam);
}
}
}
}
}
}

View File

@ -0,0 +1,133 @@
#pragma once
#include <mapbox/geometry/box.hpp>
#include <mapbox/geometry/multi_polygon.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/wagyu/wagyu.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
namespace quick_clip {
template <typename T>
mapbox::geometry::point<T> intersect(mapbox::geometry::point<T> a,
mapbox::geometry::point<T> b,
size_t edge,
mapbox::geometry::box<T> const& box) {
switch (edge) {
case 0:
return mapbox::geometry::point<T>(
static_cast<T>(a.x + static_cast<double>(b.x - a.x) * (box.min.y - a.y) / (b.y - a.y)),
box.min.y);
case 1:
return mapbox::geometry::point<T>(
box.max.x,
static_cast<T>(a.y + static_cast<double>(b.y - a.y) * (box.max.x - a.x) / (b.x - a.x)));
case 2:
return mapbox::geometry::point<T>(
static_cast<T>(a.x + static_cast<double>(b.x - a.x) * (box.max.y - a.y) / (b.y - a.y)),
box.max.y);
default: // case 3
return mapbox::geometry::point<T>(
box.min.x,
static_cast<T>(a.y + static_cast<double>(b.y - a.y) * (box.min.x - a.x) / (b.x - a.x)));
}
}
template <typename T>
bool inside(mapbox::geometry::point<T> p, size_t edge, mapbox::geometry::box<T> const& b) {
switch (edge) {
case 0:
return p.y > b.min.y;
case 1:
return p.x < b.max.x;
case 2:
return p.y < b.max.y;
default: // case 3
return p.x > b.min.x;
}
}
template <typename T>
mapbox::geometry::linear_ring<T> quick_lr_clip(mapbox::geometry::linear_ring<T> const& ring,
mapbox::geometry::box<T> const& b) {
mapbox::geometry::linear_ring<T> out = ring;
for (size_t edge = 0; edge < 4; edge++) {
if (out.size() > 0) {
mapbox::geometry::linear_ring<T> in = out;
mapbox::geometry::point<T> S = in[in.size() - 1];
out.resize(0);
for (size_t e = 0; e < in.size(); e++) {
mapbox::geometry::point<T> E = in[e];
if (inside(E, edge, b)) {
if (!inside(S, edge, b)) {
out.push_back(intersect(S, E, edge, b));
}
out.push_back(E);
} else if (inside(S, edge, b)) {
out.push_back(intersect(S, E, edge, b));
}
S = E;
}
}
}
if (out.size() < 3) {
out.clear();
return out;
}
// Close the ring if the first/last point was outside
if (out[0] != out[out.size() - 1]) {
out.push_back(out[0]);
}
return out;
}
}
template <typename T>
mapbox::geometry::multi_polygon<T> clip(mapbox::geometry::polygon<T> const& poly,
mapbox::geometry::box<T> const& b,
fill_type subject_fill_type) {
mapbox::geometry::multi_polygon<T> result;
wagyu<T> clipper;
for (auto const& lr : poly) {
auto new_lr = quick_clip::quick_lr_clip(lr, b);
if (!new_lr.empty()) {
clipper.add_ring(new_lr, polygon_type_subject);
}
}
clipper.execute(clip_type_union, result, subject_fill_type, fill_type_even_odd);
return result;
}
template <typename T>
mapbox::geometry::multi_polygon<T> clip(mapbox::geometry::multi_polygon<T> const& mp,
mapbox::geometry::box<T> const& b,
fill_type subject_fill_type) {
mapbox::geometry::multi_polygon<T> result;
wagyu<T> clipper;
for (auto const& poly : mp) {
for (auto const& lr : poly) {
auto new_lr = quick_clip::quick_lr_clip(lr, b);
if (!new_lr.empty()) {
clipper.add_ring(new_lr, polygon_type_subject);
}
}
}
clipper.execute(clip_type_union, result, subject_fill_type, fill_type_even_odd);
return result;
}
}
}
}
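
quick_lr_clip() above is a Sutherland-Hodgman style pass: the ring is clipped against the four box sides in turn (bottom, right, top, left per the inside() tests), and intersect() finds each crossing by linear interpolation. A worked check of the edge-0 (y = box.min.y) interpolation with assumed numbers:

#include <cassert>

int main() {
    // Segment from a = (0, -10) to b = (10, 10) clipped against the line y = 0
    // (edge 0 above): x = a.x + (b.x - a.x) * (min.y - a.y) / (b.y - a.y).
    double ax = 0.0, ay = -10.0, bx = 10.0, by = 10.0;
    double min_y = 0.0;
    double x = ax + (bx - ax) * (min_y - ay) / (by - ay);
    assert(x == 5.0); // the segment crosses y = 0 halfway along, at (5, 0)
    return 0;
}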

View File

@ -0,0 +1,475 @@
#pragma once
#include <assert.h>
#include <cmath>
#include <deque>
#include <list>
#include <map>
#include <mapbox/geometry/wagyu/point.hpp>
#include <set>
#include <sstream>
#include <vector>
#ifdef DEBUG
#include <execinfo.h>
#include <iostream>
#include <sstream>
#include <stdio.h>
//
// void* callstack[128];
// int i, frames = backtrace(callstack, 128);
// char** strs = backtrace_symbols(callstack, frames);
// for (i = 0; i < frames; ++i) {
// printf("%s\n", strs[i]);
// }
// free(strs);
#endif
namespace mapbox {
namespace geometry {
namespace wagyu {
// NOTE: ring and ring_ptr are forward declared in wagyu/point.hpp
template <typename T>
using ring_vector = std::vector<ring_ptr<T>>;
template <typename T>
using ring_list = std::list<ring_ptr<T>>;
template <typename T>
struct ring {
std::size_t ring_index; // To support unset 0 is undefined and indexes offset by 1
std::size_t size;
double area;
ring_ptr<T> parent;
ring_list<T> children;
point_ptr<T> points;
point_ptr<T> bottom_point;
ring(ring const&) = delete;
ring& operator=(ring const&) = delete;
ring()
: ring_index(0),
size(0),
area(std::numeric_limits<double>::quiet_NaN()),
parent(nullptr),
children(),
points(nullptr),
bottom_point(nullptr) {
}
};
template <typename T>
using hot_pixel_vector = std::vector<mapbox::geometry::point<T>>;
template <typename T>
using hot_pixel_itr = typename hot_pixel_vector<T>::iterator;
template <typename T>
using hot_pixel_rev_itr = typename hot_pixel_vector<T>::reverse_iterator;
template <typename T>
struct ring_manager {
ring_list<T> children;
std::vector<point_ptr<T>> all_points;
hot_pixel_vector<T> hot_pixels;
hot_pixel_itr<T> current_hp_itr;
std::deque<point<T>> points;
std::deque<ring<T>> rings;
std::vector<point<T>> storage;
std::size_t index;
ring_manager(ring_manager const&) = delete;
ring_manager& operator=(ring_manager const&) = delete;
ring_manager()
: children(),
all_points(),
hot_pixels(),
current_hp_itr(hot_pixels.end()),
points(),
rings(),
storage(),
index(0) {
}
};
template <typename T>
void preallocate_point_memory(ring_manager<T>& rings, std::size_t size) {
rings.storage.reserve(size);
rings.all_points.reserve(size);
}
template <typename T>
ring_ptr<T> create_new_ring(ring_manager<T>& rings) {
rings.rings.emplace_back();
ring_ptr<T> result = &rings.rings.back();
result->ring_index = rings.index++;
return result;
}
template <typename T>
point_ptr<T>
create_new_point(ring_ptr<T> r, mapbox::geometry::point<T> const& pt, ring_manager<T>& rings) {
point_ptr<T> point;
if (rings.storage.size() < rings.storage.capacity()) {
rings.storage.emplace_back(r, pt);
point = &rings.storage.back();
} else {
rings.points.emplace_back(r, pt);
point = &rings.points.back();
}
rings.all_points.push_back(point);
return point;
}
template <typename T>
point_ptr<T> create_new_point(ring_ptr<T> r,
mapbox::geometry::point<T> const& pt,
point_ptr<T> before_this_point,
ring_manager<T>& rings) {
point_ptr<T> point;
if (rings.storage.size() < rings.storage.capacity()) {
rings.storage.emplace_back(r, pt, before_this_point);
point = &rings.storage.back();
} else {
rings.points.emplace_back(r, pt, before_this_point);
point = &rings.points.back();
}
rings.all_points.push_back(point);
return point;
}
template <typename T>
void ring1_child_of_ring2(ring_ptr<T> ring1, ring_ptr<T> ring2, ring_manager<T>& manager) {
assert(ring1 != ring2);
if (ring1->parent == ring2) {
return;
}
if (ring1->parent == nullptr) {
manager.children.remove(ring1);
} else {
ring1->parent->children.remove(ring1);
}
if (ring2 == nullptr) {
ring1->parent = nullptr;
manager.children.push_back(ring1);
} else {
ring1->parent = ring2;
ring2->children.push_back(ring1);
}
}
template <typename T>
void ring1_sibling_of_ring2(ring_ptr<T> ring1, ring_ptr<T> ring2, ring_manager<T>& manager) {
assert(ring1 != ring2);
if (ring1->parent == ring2->parent) {
return;
}
if (ring1->parent == nullptr) {
manager.children.remove(ring1);
} else {
ring1->parent->children.remove(ring1);
}
if (ring2->parent == nullptr) {
manager.children.push_back(ring1);
} else {
ring2->parent->children.push_back(ring1);
}
ring1->parent = ring2->parent;
}
template <typename T>
void ring1_replaces_ring2(ring_ptr<T> ring1, ring_ptr<T> ring2, ring_manager<T>& manager) {
assert(ring1 != ring2);
if (ring2->parent == nullptr) {
manager.children.remove(ring2);
} else {
ring2->parent->children.remove(ring2);
}
for (auto& c : ring2->children) {
c->parent = ring1;
}
if (ring1 == nullptr) {
manager.children.splice(manager.children.end(), ring2->children);
} else {
ring1->children.splice(ring1->children.end(), ring2->children);
}
ring2->parent = nullptr;
}
template <typename T>
void remove_ring(ring_ptr<T> r, ring_manager<T>& manager) {
if (r->parent == nullptr) {
manager.children.remove(r);
for (auto& c : r->children) {
c->parent = nullptr;
}
manager.children.splice(manager.children.end(), r->children);
} else {
r->parent->children.remove(r);
for (auto& c : r->children) {
c->parent = r->parent;
}
r->parent->children.splice(r->parent->children.end(), r->children);
r->parent = nullptr;
}
}
template <typename T>
inline std::size_t ring_depth(ring_ptr<T> r) {
std::size_t depth = 0;
if (!r) {
return depth;
}
while (r->parent) {
depth++;
r = r->parent;
}
return depth;
}
template <typename T>
inline bool ring_is_hole(ring_ptr<T> r) {
return ring_depth(r) & 1;
}
template <typename T>
void set_next(const_point_ptr<T>& node, const const_point_ptr<T>& next_node) {
node->next = next_node;
}
template <typename T>
point_ptr<T> get_next(const_point_ptr<T>& node) {
return node->next;
}
template <typename T>
point_ptr<T> get_prev(const_point_ptr<T>& node) {
return node->prev;
}
template <typename T>
void set_prev(const_point_ptr<T>& node, const const_point_ptr<T>& prev_node) {
node->prev = prev_node;
}
template <typename T>
void init(const_point_ptr<T>& node) {
set_next(node, node);
set_prev(node, node);
}
template <typename T>
std::size_t point_count(const const_point_ptr<T>& orig_node) {
std::size_t size = 0;
point_ptr<T> n = orig_node;
do {
n = get_next(n);
++size;
} while (n != orig_node);
return size;
}
template <typename T>
void link_before(point_ptr<T>& node, point_ptr<T>& new_node) {
point_ptr<T> prev_node = get_prev(node);
set_prev(new_node, prev_node);
set_next(new_node, node);
set_prev(node, new_node);
set_next(prev_node, new_node);
}
template <typename T>
void link_after(point_ptr<T>& node, point_ptr<T>& new_node) {
point_ptr<T> next_node = get_next(node);
set_prev(new_node, node);
set_next(new_node, next_node);
set_next(node, new_node);
set_prev(next_node, new_node);
}
template <typename T>
void transfer_point(point_ptr<T>& p, point_ptr<T>& b, point_ptr<T>& e) {
if (b != e) {
point_ptr<T> prev_p = get_prev(p);
point_ptr<T> prev_b = get_prev(b);
point_ptr<T> prev_e = get_prev(e);
set_next(prev_e, p);
set_prev(p, prev_e);
set_next(prev_b, e);
set_prev(e, prev_b);
set_next(prev_p, b);
set_prev(b, prev_p);
} else {
link_before(p, b);
}
}
template <typename T>
void reverse_ring(point_ptr<T> pp) {
if (!pp) {
return;
}
point_ptr<T> pp1;
point_ptr<T> pp2;
pp1 = pp;
do {
pp2 = pp1->next;
pp1->next = pp1->prev;
pp1->prev = pp2;
pp1 = pp2;
} while (pp1 != pp);
}
template <typename T>
double area_from_point(point_ptr<T> op, std::size_t& size) {
point_ptr<T> startOp = op;
size = 1;
double a = 0.0;
do {
++size;
a += static_cast<double>(op->prev->x + op->x) * static_cast<double>(op->prev->y - op->y);
op = op->next;
} while (op != startOp);
return a * 0.5;
}
template <typename T>
double area(ring_ptr<T> r) {
assert(r != nullptr);
if (std::isnan(r->area)) {
r->area = area_from_point(r->points, r->size);
}
return r->area;
}
#ifdef DEBUG
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const ring<T>& r) {
out << " ring_index: " << r.ring_index << std::endl;
if (!r.parent) {
// out << " parent_ring ptr: nullptr" << std::endl;
out << " parent_index: -----" << std::endl;
} else {
// out << " parent_ring ptr: " << r.parent << std::endl;
out << " parent_ring idx: " << r.parent->ring_index << std::endl;
}
ring_ptr<T> n = const_cast<ring_ptr<T>>(&r);
if (ring_is_hole(n)) {
out << " is_hole: true " << std::endl;
} else {
out << " is_hole: false " << std::endl;
}
auto pt_itr = r.points;
if (pt_itr) {
out << " area: " << r.area << std::endl;
out << " points:" << std::endl;
out << " [[[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
while (pt_itr != r.points) {
out << "[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
}
out << "[" << pt_itr->x << "," << pt_itr->y << "]]]" << std::endl;
} else {
out << " area: NONE" << std::endl;
out << " points: NONE" << std::endl;
}
return out;
}
template <typename T>
std::string output_as_polygon(ring_ptr<T> r) {
std::ostringstream out;
auto pt_itr = r->points;
if (pt_itr) {
out << "[";
out << "[[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
while (pt_itr != r->points) {
out << "[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
}
out << "[" << pt_itr->x << "," << pt_itr->y << "]]";
for (auto const& c : r->children) {
pt_itr = c->points;
if (pt_itr) {
out << ",[[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
while (pt_itr != c->points) {
out << "[" << pt_itr->x << "," << pt_itr->y << "],";
pt_itr = pt_itr->next;
}
out << "[" << pt_itr->x << "," << pt_itr->y << "]]";
}
}
out << "]" << std::endl;
} else {
out << "[]" << std::endl;
}
return out.str();
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const ring_list<T>& rings) {
out << "START RING LIST" << std::endl;
for (auto& r : rings) {
out << " ring: " << r->ring_index << " - " << r << std::endl;
out << *r;
}
out << "END RING LIST" << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const ring_vector<T>& rings) {
out << "START RING VECTOR" << std::endl;
for (auto& r : rings) {
if (!r->points) {
continue;
}
out << " ring: " << r->ring_index << " - " << r << std::endl;
out << *r;
}
out << "END RING VECTOR" << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const std::deque<ring<T>>& rings) {
out << "START RING VECTOR" << std::endl;
for (auto& r : rings) {
if (!r.points) {
continue;
}
out << " ring: " << r.ring_index << std::endl;
out << r;
}
out << "END RING VECTOR" << std::endl;
return out;
}
template <class charT, class traits, typename T>
inline std::basic_ostream<charT, traits>& operator<<(std::basic_ostream<charT, traits>& out,
const hot_pixel_vector<T>& hp_vec) {
out << "Hot Pixels: " << std::endl;
for (auto& hp : hp_vec) {
out << hp << std::endl;
}
return out;
}
#endif
}
}
}
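
area_from_point() above accumulates 0.5 * sum of (x_prev + x) * (y_prev - y) around the ring, a shoelace-formula variant whose sign encodes winding. A standalone sketch of the same accumulation over a plain vector (the unit square below is an assumed example; with this formula a clockwise traversal in a y-up frame comes out positive):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Same accumulation as area_from_point(), but over a plain vector of (x, y).
double shoelace(std::vector<std::pair<double, double>> const& ring) {
    double a = 0.0;
    std::size_t n = ring.size();
    for (std::size_t i = 0; i < n; ++i) {
        auto const& prev = ring[(i + n - 1) % n];
        auto const& cur = ring[i];
        a += (prev.first + cur.first) * (prev.second - cur.second);
    }
    return a * 0.5;
}

int main() {
    // Unit square, clockwise in a y-up frame: (0,0) -> (0,1) -> (1,1) -> (1,0).
    std::vector<std::pair<double, double>> square{{0, 0}, {0, 1}, {1, 1}, {1, 0}};
    assert(shoelace(square) == 1.0);
    return 0;
}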

View File

@ -0,0 +1,906 @@
#pragma once
#ifdef DEBUG
#include <iostream>
// Example debug print for backtrace - only works on IOS
#include <execinfo.h>
#include <stdio.h>
//
// void* callstack[128];
// int i, frames = backtrace(callstack, 128);
// char** strs = backtrace_symbols(callstack, frames);
// for (i = 0; i < frames; ++i) {
// printf("%s\n", strs[i]);
// }
// free(strs);
#endif
#include <queue>
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
void set_hole_state(active_bound_list_itr<T>& bnd,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings) {
auto bnd2 = active_bound_list_rev_itr<T>(bnd);
bound_ptr<T> bndTmp = nullptr;
// Find first non line ring to the left of current bound.
while (bnd2 != active_bounds.rend()) {
if ((*bnd2)->ring && (*bnd2)->winding_delta != 0) {
if (!bndTmp) {
bndTmp = (*bnd2);
} else if (bndTmp->ring == (*bnd2)->ring) {
bndTmp = nullptr;
}
}
++bnd2;
}
if (!bndTmp) {
(*bnd)->ring->parent = nullptr;
rings.children.push_back((*bnd)->ring);
} else {
(*bnd)->ring->parent = bndTmp->ring;
bndTmp->ring->children.push_back((*bnd)->ring);
}
}
template <typename T>
void set_hole_state(active_bound_list_rev_itr<T>& bnd,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings) {
auto bnd2 = std::next(bnd);
bound_ptr<T> bndTmp = nullptr;
// Find first non line ring to the left of current bound.
while (bnd2 != active_bounds.rend()) {
if ((*bnd2)->ring && (*bnd2)->winding_delta != 0) {
if (!bndTmp) {
bndTmp = (*bnd2);
} else if (bndTmp->ring == (*bnd2)->ring) {
bndTmp = nullptr;
}
}
++bnd2;
}
if (!bndTmp) {
(*bnd)->ring->parent = nullptr;
rings.children.push_back((*bnd)->ring);
} else {
(*bnd)->ring->parent = bndTmp->ring;
bndTmp->ring->children.push_back((*bnd)->ring);
}
}
template <typename T>
void update_current_hp_itr(T scanline_y, ring_manager<T>& rings) {
while (rings.current_hp_itr->y > scanline_y) {
++rings.current_hp_itr;
}
}
template <typename T>
struct hot_pixel_sorter {
inline bool operator()(mapbox::geometry::point<T> const& pt1,
mapbox::geometry::point<T> const& pt2) {
if (pt1.y == pt2.y) {
return pt1.x < pt2.x;
} else {
return pt1.y > pt2.y;
}
}
};
// Due to the nature of floating point calculations
// and the high likelihood of values around X.5, we
// need to fudge what counts as X.5 slightly for our rounding.
const double rounding_offset = 1e-12;
const double rounding_offset_y = 5e-13;
template <typename T>
T round_towards_min(double val) {
// 0.5 rounds to 0
// 0.0 rounds to 0
// -0.5 rounds to -1
return static_cast<T>(std::ceil(val - 0.5 + rounding_offset));
}
template <typename T>
T round_towards_max(double val) {
// 0.5 rounds to 1
// 0.0 rounds to 0
// -0.5 rounds to 0
return static_cast<T>(std::floor(val + 0.5 + rounding_offset));
}
template <typename T>
inline T get_edge_min_x(edge<T> const& edge, const T current_y) {
if (is_horizontal(edge)) {
if (edge.bot.x < edge.top.x) {
return edge.bot.x;
} else {
return edge.top.x;
}
} else if (edge.dx > 0.0) {
if (current_y == edge.top.y) {
return edge.top.x;
} else {
double lower_range_y = static_cast<double>(current_y - edge.bot.y) - 0.5;
double return_val = static_cast<double>(edge.bot.x) + edge.dx * lower_range_y;
T value = round_towards_min<T>(return_val);
return value;
}
} else {
if (current_y == edge.bot.y) {
return edge.bot.x;
} else {
double return_val =
static_cast<double>(edge.bot.x) +
edge.dx * (static_cast<double>(current_y - edge.bot.y) + 0.5 - rounding_offset_y);
T value = round_towards_min<T>(return_val);
return value;
}
}
}
template <typename T>
inline T get_edge_max_x(edge<T> const& edge, const T current_y) {
if (is_horizontal(edge)) {
if (edge.bot.x > edge.top.x) {
return edge.bot.x;
} else {
return edge.top.x;
}
} else if (edge.dx < 0.0) {
if (current_y == edge.top.y) {
return edge.top.x;
} else {
double lower_range_y = static_cast<double>(current_y - edge.bot.y) - 0.5;
double return_val = static_cast<double>(edge.bot.x) + edge.dx * lower_range_y;
T value = round_towards_max<T>(return_val);
return value;
}
} else {
if (current_y == edge.bot.y) {
return edge.bot.x;
} else {
double return_val =
static_cast<double>(edge.bot.x) +
edge.dx * (static_cast<double>(current_y - edge.bot.y) + 0.5 - rounding_offset_y);
T value = round_towards_max<T>(return_val);
return value;
}
}
}
template <typename T>
void hot_pixel_set_left_to_right(T y,
T start_x,
T end_x,
bound<T>& bnd,
ring_manager<T>& rings,
hot_pixel_itr<T>& itr,
hot_pixel_itr<T>& end,
bool add_end_point) {
T x_min = get_edge_min_x(*(bnd.current_edge), y);
x_min = std::max(x_min, start_x);
T x_max = get_edge_max_x(*(bnd.current_edge), y);
x_max = std::min(x_max, end_x);
for (; itr != end; ++itr) {
if (itr->x < x_min) {
continue;
}
if (itr->x > x_max) {
break;
}
if (!add_end_point && itr->x == end_x) {
continue;
}
point_ptr<T> op = bnd.ring->points;
bool to_front = (bnd.side == edge_left);
if (to_front && (*itr == *op)) {
continue;
} else if (!to_front && (*itr == *op->prev)) {
continue;
}
point_ptr<T> new_point = create_new_point(bnd.ring, *itr, op, rings);
if (to_front) {
bnd.ring->points = new_point;
}
}
}
template <typename T>
void hot_pixel_set_right_to_left(T y,
T start_x,
T end_x,
bound<T>& bnd,
ring_manager<T>& rings,
hot_pixel_rev_itr<T>& itr,
hot_pixel_rev_itr<T>& end,
bool add_end_point) {
T x_min = get_edge_min_x(*(bnd.current_edge), y);
x_min = std::max(x_min, end_x);
T x_max = get_edge_max_x(*(bnd.current_edge), y);
x_max = std::min(x_max, start_x);
for (; itr != end; ++itr) {
if (itr->x > x_max) {
continue;
}
if (itr->x < x_min) {
break;
}
if (!add_end_point && itr->x == end_x) {
continue;
}
point_ptr<T> op = bnd.ring->points;
bool to_front = (bnd.side == edge_left);
if (to_front && (*itr == *op)) {
continue;
} else if (!to_front && (*itr == *op->prev)) {
continue;
}
point_ptr<T> new_point = create_new_point(bnd.ring, *itr, op, rings);
if (to_front) {
bnd.ring->points = new_point;
}
}
}
template <typename T>
void sort_hot_pixels(ring_manager<T>& rings) {
std::sort(rings.hot_pixels.begin(), rings.hot_pixels.end(), hot_pixel_sorter<T>());
auto last = std::unique(rings.hot_pixels.begin(), rings.hot_pixels.end());
rings.hot_pixels.erase(last, rings.hot_pixels.end());
}
template <typename T>
void insert_hot_pixels_in_path(bound<T>& bnd,
mapbox::geometry::point<T> const& end_pt,
ring_manager<T>& rings,
bool add_end_point) {
if (end_pt == bnd.last_point) {
return;
}
T start_y = bnd.last_point.y;
T start_x = bnd.last_point.x;
T end_y = end_pt.y;
T end_x = end_pt.x;
auto itr = rings.current_hp_itr;
while (itr->y <= start_y && itr != rings.hot_pixels.begin()) {
--itr;
}
if (start_x > end_x) {
for (; itr != rings.hot_pixels.end();) {
if (itr->y > start_y) {
++itr;
continue;
}
if (itr->y < end_y) {
break;
}
T y = itr->y;
auto last_itr = hot_pixel_rev_itr<T>(itr);
while (itr != rings.hot_pixels.end() && itr->y == y) {
++itr;
}
auto first_itr = hot_pixel_rev_itr<T>(itr);
bool add_end_point_itr = (y != end_pt.y || add_end_point);
hot_pixel_set_right_to_left(y, start_x, end_x, bnd, rings, first_itr, last_itr,
add_end_point_itr);
}
} else {
for (; itr != rings.hot_pixels.end();) {
if (itr->y > start_y) {
++itr;
continue;
}
if (itr->y < end_y) {
break;
}
T y = itr->y;
auto first_itr = itr;
while (itr != rings.hot_pixels.end() && itr->y == y) {
++itr;
}
auto last_itr = itr;
bool add_end_point_itr = (y != end_pt.y || add_end_point);
hot_pixel_set_left_to_right(y, start_x, end_x, bnd, rings, first_itr, last_itr,
add_end_point_itr);
}
}
bnd.last_point = end_pt;
}
template <typename T>
void add_to_hot_pixels(mapbox::geometry::point<T> const& pt, ring_manager<T>& rings) {
rings.hot_pixels.push_back(pt);
}
template <typename T>
void add_first_point(active_bound_list_itr<T>& bnd,
active_bound_list<T>& active_bounds,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
ring_ptr<T> r = create_new_ring(rings);
(*bnd)->ring = r;
r->points = create_new_point(r, pt, rings);
set_hole_state(bnd, active_bounds, rings);
(*bnd)->last_point = pt;
}
template <typename T>
void add_first_point(active_bound_list_rev_itr<T>& bnd,
active_bound_list<T>& active_bounds,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
ring_ptr<T> r = create_new_ring(rings);
// no ring currently set!
(*bnd)->ring = r;
r->points = create_new_point(r, pt, rings);
set_hole_state(bnd, active_bounds, rings);
(*bnd)->last_point = pt;
}
template <typename T>
void add_point_to_ring(bound<T>& bnd,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
assert(bnd.ring);
// Handle hot pixels
insert_hot_pixels_in_path(bnd, pt, rings, false);
// bnd.ring->points is the 'Left-most' point & bnd.ring->points->prev is the
// 'Right-most'
point_ptr<T> op = bnd.ring->points;
bool to_front = (bnd.side == edge_left);
if (to_front && (pt == *op)) {
return;
} else if (!to_front && (pt == *op->prev)) {
return;
}
point_ptr<T> new_point = create_new_point(bnd.ring, pt, bnd.ring->points, rings);
if (to_front) {
bnd.ring->points = new_point;
}
}
template <typename T>
void add_point(active_bound_list_itr<T>& bnd,
active_bound_list<T>& active_bounds,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
if (!(*bnd)->ring) {
add_first_point(bnd, active_bounds, pt, rings);
} else {
add_point_to_ring(*(*bnd), pt, rings);
}
}
template <typename T>
void add_point(active_bound_list_rev_itr<T>& bnd,
active_bound_list<T>& active_bounds,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
if (!(*bnd)->ring) {
add_first_point(bnd, active_bounds, pt, rings);
} else {
add_point_to_ring(*(*bnd), pt, rings);
}
}
template <typename T>
void add_local_minimum_point(active_bound_list_itr<T> b1,
active_bound_list_itr<T> b2,
active_bound_list<T>& active_bounds,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings) {
active_bound_list_itr<T> b;
active_bound_list_rev_itr<T> prev_bound;
active_bound_list_rev_itr<T> prev_b1(b1);
active_bound_list_rev_itr<T> prev_b2(b2);
if (is_horizontal(*((*b2)->current_edge)) ||
((*b1)->current_edge->dx > (*b2)->current_edge->dx)) {
add_point(b1, active_bounds, pt, rings);
(*b2)->last_point = pt;
(*b2)->ring = (*b1)->ring;
(*b1)->side = edge_left;
(*b2)->side = edge_right;
b = b1;
if (prev_b1 != active_bounds.rend() && std::prev(b) == b2) {
prev_bound = prev_b2;
} else {
prev_bound = prev_b1;
}
} else {
add_point(b2, active_bounds, pt, rings);
(*b1)->last_point = pt;
(*b1)->ring = (*b2)->ring;
(*b1)->side = edge_right;
(*b2)->side = edge_left;
b = b2;
if (prev_b2 != active_bounds.rend() && std::prev(b) == b1) {
prev_bound = prev_b1;
} else {
prev_bound = prev_b2;
}
}
}
template <typename T>
inline double get_dx(point<T> const& pt1, point<T> const& pt2) {
if (pt1.y == pt2.y) {
return std::numeric_limits<double>::infinity();
} else {
return static_cast<double>(pt2.x - pt1.x) / static_cast<double>(pt2.y - pt1.y);
}
}
template <typename T>
bool first_is_bottom_point(const_point_ptr<T> btmPt1, const_point_ptr<T> btmPt2) {
point_ptr<T> p = btmPt1->prev;
while ((*p == *btmPt1) && (p != btmPt1)) {
p = p->prev;
}
double dx1p = std::fabs(get_dx(*btmPt1, *p));
p = btmPt1->next;
while ((*p == *btmPt1) && (p != btmPt1)) {
p = p->next;
}
double dx1n = std::fabs(get_dx(*btmPt1, *p));
p = btmPt2->prev;
while ((*p == *btmPt2) && (p != btmPt2)) {
p = p->prev;
}
double dx2p = std::fabs(get_dx(*btmPt2, *p));
p = btmPt2->next;
while ((*p == *btmPt2) && (p != btmPt2)) {
p = p->next;
}
double dx2n = std::fabs(get_dx(*btmPt2, *p));
if (values_are_equal(std::max(dx1p, dx1n), std::max(dx2p, dx2n)) &&
values_are_equal(std::min(dx1p, dx1n), std::min(dx2p, dx2n))) {
std::size_t s = 0;
return area_from_point(btmPt1, s) > 0.0; // if otherwise identical use orientation
} else {
return (greater_than_or_equal(dx1p, dx2p) && greater_than_or_equal(dx1p, dx2n)) ||
(greater_than_or_equal(dx1n, dx2p) && greater_than_or_equal(dx1n, dx2n));
}
}
template <typename T>
point_ptr<T> get_bottom_point(point_ptr<T> pp) {
point_ptr<T> dups = nullptr;
point_ptr<T> p = pp->next;
while (p != pp) {
if (p->y > pp->y) {
pp = p;
dups = nullptr;
} else if (p->y == pp->y && p->x <= pp->x) {
if (p->x < pp->x) {
dups = nullptr;
pp = p;
} else {
if (p->next != pp && p->prev != pp) {
dups = p;
}
}
}
p = p->next;
}
if (dups) {
// there appear to be at least 2 vertices at bottom_point so ...
while (dups != p) {
if (!first_is_bottom_point(p, dups)) {
pp = dups;
}
dups = dups->next;
while (*dups != *pp) {
dups = dups->next;
}
}
}
return pp;
}
template <typename T>
ring_ptr<T> get_lower_most_ring(ring_ptr<T> outRec1, ring_ptr<T> outRec2) {
// work out which polygon fragment has the correct hole state ...
if (!outRec1->bottom_point) {
outRec1->bottom_point = get_bottom_point(outRec1->points);
}
if (!outRec2->bottom_point) {
outRec2->bottom_point = get_bottom_point(outRec2->points);
}
point_ptr<T> OutPt1 = outRec1->bottom_point;
point_ptr<T> OutPt2 = outRec2->bottom_point;
if (OutPt1->y > OutPt2->y) {
return outRec1;
} else if (OutPt1->y < OutPt2->y) {
return outRec2;
} else if (OutPt1->x < OutPt2->x) {
return outRec1;
} else if (OutPt1->x > OutPt2->x) {
return outRec2;
} else if (OutPt1->next == OutPt1) {
return outRec2;
} else if (OutPt2->next == OutPt2) {
return outRec1;
} else if (first_is_bottom_point(OutPt1, OutPt2)) {
return outRec1;
} else {
return outRec2;
}
}
template <typename T>
bool ring1_child_below_ring2(ring_ptr<T> ring1, ring_ptr<T> ring2) {
do {
ring1 = ring1->parent;
if (ring1 == ring2) {
return true;
}
} while (ring1);
return false;
}
template <typename T>
void update_points_ring(ring_ptr<T> ring) {
point_ptr<T> op = ring->points;
do {
op->ring = ring;
op = op->prev;
} while (op != ring->points);
}
template <typename T>
void append_ring(active_bound_list_itr<T>& b1,
active_bound_list_itr<T>& b2,
active_bound_list<T>& active_bounds,
ring_manager<T>& manager) {
// get the start and ends of both output polygons ...
ring_ptr<T> outRec1 = (*b1)->ring;
ring_ptr<T> outRec2 = (*b2)->ring;
ring_ptr<T> keep_ring;
bound_ptr<T> keep_bound;
ring_ptr<T> remove_ring;
bound_ptr<T> remove_bound;
if (ring1_child_below_ring2(outRec1, outRec2)) {
keep_ring = outRec2;
keep_bound = *b2;
remove_ring = outRec1;
remove_bound = *b1;
} else if (ring1_child_below_ring2(outRec2, outRec1)) {
keep_ring = outRec1;
keep_bound = *b1;
remove_ring = outRec2;
remove_bound = *b2;
} else if (outRec1 == get_lower_most_ring(outRec1, outRec2)) {
keep_ring = outRec1;
keep_bound = *b1;
remove_ring = outRec2;
remove_bound = *b2;
} else {
keep_ring = outRec2;
keep_bound = *b2;
remove_ring = outRec1;
remove_bound = *b1;
}
// get the start and ends of both output polygons and
// join b2 poly onto b1 poly and delete pointers to b2 ...
point_ptr<T> p1_lft = keep_ring->points;
point_ptr<T> p1_rt = p1_lft->prev;
point_ptr<T> p2_lft = remove_ring->points;
point_ptr<T> p2_rt = p2_lft->prev;
// join b2 poly onto b1 poly and delete pointers to b2 ...
if (keep_bound->side == edge_left) {
if (remove_bound->side == edge_left) {
// z y x a b c
reverse_ring(p2_lft);
p2_lft->next = p1_lft;
p1_lft->prev = p2_lft;
p1_rt->next = p2_rt;
p2_rt->prev = p1_rt;
keep_ring->points = p2_rt;
} else {
// x y z a b c
p2_rt->next = p1_lft;
p1_lft->prev = p2_rt;
p2_lft->prev = p1_rt;
p1_rt->next = p2_lft;
keep_ring->points = p2_lft;
}
} else {
if (remove_bound->side == edge_right) {
// a b c z y x
reverse_ring(p2_lft);
p1_rt->next = p2_rt;
p2_rt->prev = p1_rt;
p2_lft->next = p1_lft;
p1_lft->prev = p2_lft;
} else {
// a b c x y z
p1_rt->next = p2_lft;
p2_lft->prev = p1_rt;
p1_lft->prev = p2_rt;
p2_rt->next = p1_lft;
}
}
keep_ring->bottom_point = nullptr;
bool keep_is_hole = ring_is_hole(keep_ring);
bool remove_is_hole = ring_is_hole(remove_ring);
remove_ring->points = nullptr;
remove_ring->bottom_point = nullptr;
if (keep_is_hole != remove_is_hole) {
ring1_replaces_ring2(keep_ring->parent, remove_ring, manager);
} else {
ring1_replaces_ring2(keep_ring, remove_ring, manager);
}
update_points_ring(keep_ring);
// nb: safe because we only get here via AddLocalMaxPoly
keep_bound->ring = nullptr;
remove_bound->ring = nullptr;
for (auto& b : active_bounds) {
if (b->ring == remove_ring) {
b->ring = keep_ring;
b->side = keep_bound->side;
break; // Not sure why there is a break here, but the logic was transferred from Angus
}
}
}
template <typename T>
void add_local_maximum_point(active_bound_list_itr<T>& b1,
active_bound_list_itr<T>& b2,
mapbox::geometry::point<T> const& pt,
ring_manager<T>& rings,
active_bound_list<T>& active_bounds) {
insert_hot_pixels_in_path(*(*b2), pt, rings, false);
add_point(b1, active_bounds, pt, rings);
if ((*b1)->ring == (*b2)->ring) {
(*b1)->ring = nullptr;
(*b2)->ring = nullptr;
// I am not certain that order is important here?
} else if ((*b1)->ring->ring_index < (*b2)->ring->ring_index) {
append_ring(b1, b2, active_bounds, rings);
} else {
append_ring(b2, b1, active_bounds, rings);
}
}
enum point_in_polygon_result : std::int8_t {
point_on_polygon = -1,
point_inside_polygon = 0,
point_outside_polygon = 1
};
template <typename T>
point_in_polygon_result point_in_polygon(point<T> const& pt, point_ptr<T> op) {
// returns 0 if false, +1 if true, -1 if pt ON polygon boundary
point_in_polygon_result result = point_outside_polygon;
point_ptr<T> startOp = op;
do {
if (op->next->y == pt.y) {
if ((op->next->x == pt.x) ||
(op->y == pt.y && ((op->next->x > pt.x) == (op->x < pt.x)))) {
return point_on_polygon;
}
}
if ((op->y < pt.y) != (op->next->y < pt.y)) {
if (op->x >= pt.x) {
if (op->next->x > pt.x) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
} else {
double d =
static_cast<double>(op->x - pt.x) *
static_cast<double>(op->next->y - pt.y) -
static_cast<double>(op->next->x - pt.x) * static_cast<double>(op->y - pt.y);
if (value_is_zero(d)) {
return point_on_polygon;
}
if ((d > 0) == (op->next->y > op->y)) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
}
}
} else {
if (op->next->x > pt.x) {
double d =
static_cast<double>(op->x - pt.x) *
static_cast<double>(op->next->y - pt.y) -
static_cast<double>(op->next->x - pt.x) * static_cast<double>(op->y - pt.y);
if (value_is_zero(d)) {
return point_on_polygon;
}
if ((d > 0) == (op->next->y > op->y)) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
}
}
}
}
op = op->next;
} while (startOp != op);
return result;
}
template <typename T>
point_in_polygon_result point_in_polygon(mapbox::geometry::point<double> const& pt,
point_ptr<T> op) {
// returns 0 if false, +1 if true, -1 if pt ON polygon boundary
point_in_polygon_result result = point_outside_polygon;
point_ptr<T> startOp = op;
do {
double op_x = static_cast<double>(op->x);
double op_y = static_cast<double>(op->y);
double op_next_x = static_cast<double>(op->next->x);
double op_next_y = static_cast<double>(op->next->y);
if (values_are_equal(op_next_y, pt.y)) {
if (values_are_equal(op_next_x, pt.x) ||
(values_are_equal(op_y, pt.y) && ((op_next_x > pt.x) == (op_x < pt.x)))) {
return point_on_polygon;
}
}
if ((op_y < pt.y) != (op_next_y < pt.y)) {
if (greater_than_or_equal(op_x, pt.x)) {
if (op_next_x > pt.x) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
} else {
double d =
(op_x - pt.x) * (op_next_y - pt.y) - (op_next_x - pt.x) * (op_y - pt.y);
if (value_is_zero(d)) {
return point_on_polygon;
}
if ((d > 0.0) == (op_next_y > op->y)) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
}
}
} else {
if (op_next_x > pt.x) {
double d =
(op_x - pt.x) * (op_next_y - pt.y) - (op_next_x - pt.x) * (op_y - pt.y);
if (value_is_zero(d)) {
return point_on_polygon;
}
if ((d > 0.0) == (op_next_y > op->y)) {
// Switch between point outside polygon and point inside
// polygon
if (result == point_outside_polygon) {
result = point_inside_polygon;
} else {
result = point_outside_polygon;
}
}
}
}
}
op = op->next;
} while (startOp != op);
return result;
}
template <typename T>
point_in_polygon_result inside_or_outside_special(point_ptr<T> first_pt, point_ptr<T> other_poly) {
if (value_is_zero(area(first_pt->ring))) {
return point_inside_polygon;
}
if (value_is_zero(area(other_poly->ring))) {
return point_outside_polygon;
}
point_ptr<T> pt = first_pt;
do {
if (*pt == *(pt->prev) || *pt == *(pt->next) || *(pt->next) == *(pt->prev) ||
slopes_equal(*(pt->prev), *pt, *(pt->next))) {
pt = pt->next;
continue;
}
double dx = ((pt->prev->x - pt->x) / 3.0) + ((pt->next->x - pt->x) / 3.0);
double dy = ((pt->prev->y - pt->y) / 3.0) + ((pt->next->y - pt->y) / 3.0);
mapbox::geometry::point<double> offset_pt(pt->x + dx, pt->y + dy);
point_in_polygon_result res = point_in_polygon(offset_pt, pt);
if (res != point_inside_polygon) {
offset_pt.x = pt->x - dx;
offset_pt.y = pt->y - dy;
res = point_in_polygon(offset_pt, pt);
if (res != point_inside_polygon) {
pt = pt->next;
continue;
}
}
res = point_in_polygon(offset_pt, other_poly);
if (res == point_on_polygon) {
pt = pt->next;
continue;
}
return res;
} while (pt != first_pt);
return point_inside_polygon;
}
template <typename T>
bool poly2_contains_poly1(ring_ptr<T> ring1, ring_ptr<T> ring2) {
point_ptr<T> outpt1 = ring1->points->next;
point_ptr<T> outpt2 = ring2->points->next;
point_ptr<T> op = outpt1;
do {
// nb: PointInPolygon returns 0 if false, +1 if true, -1 if pt on polygon
point_in_polygon_result res = point_in_polygon(*op, outpt2);
if (res != point_on_polygon) {
return res == point_inside_polygon;
}
op = op->next;
} while (op != outpt1);
point_in_polygon_result res = inside_or_outside_special(outpt1, outpt2);
return res == point_inside_polygon;
}
template <typename T>
void dispose_out_points(point_ptr<T>& pp) {
if (pp == nullptr) {
return;
}
pp->prev->next = nullptr;
while (pp) {
point_ptr<T> tmpPp = pp;
pp = pp->next;
tmpPp->next = tmpPp;
tmpPp->prev = tmpPp;
tmpPp->ring = nullptr;
}
}
}
}
}


@@ -0,0 +1,37 @@
#pragma once
#include <queue>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
using scanbeam_list = std::priority_queue<T>;
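// std::priority_queue is a max-heap, so pop_from_scanbeam yields the queued scanline
// Y values in descending order, discarding duplicates as it goes.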
template <typename T>
bool pop_from_scanbeam(T& Y, scanbeam_list<T>& scanbeam) {
if (scanbeam.empty()) {
return false;
}
Y = scanbeam.top();
scanbeam.pop();
while (!scanbeam.empty() && Y == scanbeam.top()) {
scanbeam.pop();
} // Pop duplicates.
return true;
}
template <typename T>
void setup_scanbeam(local_minimum_list<T>& minima_list, scanbeam_list<T>& scanbeam) {
for (auto lm = minima_list.begin(); lm != minima_list.end(); ++lm) {
scanbeam.push(lm->y);
}
}
}
}
}


@@ -0,0 +1,172 @@
#pragma once
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#include <mapbox/geometry/wagyu/bound.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/edge.hpp>
#include <mapbox/geometry/wagyu/intersect.hpp>
#include <mapbox/geometry/wagyu/intersect_util.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/ring_util.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
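// Preliminary sweep that collects "hot pixels": the rounded integer points at edge
// intersections and edge tops. The main clipping pass later snaps its output to these
// points (snap rounding).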
template <typename T>
void process_hot_pixel_intersections(T top_y,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings) {
if (active_bounds.empty()) {
return;
}
update_current_x(active_bounds, top_y);
// bubblesort ...
bool isModified;
do {
isModified = false;
auto bnd = active_bounds.begin();
auto bnd_next = std::next(bnd);
while (bnd_next != active_bounds.end()) {
if ((*bnd)->current_x > (*bnd_next)->current_x &&
!slopes_equal(*(*bnd)->current_edge, *(*bnd_next)->current_edge)) {
mapbox::geometry::point<double> pt;
if (!get_edge_intersection<T, double>(*((*bnd)->current_edge),
*((*bnd_next)->current_edge), pt)) {
// LCOV_EXCL_START
throw std::runtime_error("Edges do not intersect!");
// LCOV_EXCL_END
}
add_to_hot_pixels(round_point<T>(pt), rings);
swap_positions_in_ABL(bnd, bnd_next, active_bounds);
bnd_next = std::next(bnd);
isModified = true;
} else {
bnd = bnd_next;
++bnd_next;
}
}
} while (isModified);
}
template <typename T>
void process_hot_pixel_edges_at_top_of_scanbeam(T top_y,
scanbeam_list<T>& scanbeam,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings) {
for (auto bnd = active_bounds.begin(); bnd != active_bounds.end();) {
auto bnd_2 = std::next(bnd);
while ((*bnd)->current_edge != (*bnd)->edges.end() &&
(*bnd)->current_edge->top.y == top_y) {
add_to_hot_pixels((*bnd)->current_edge->top, rings);
if (current_edge_is_horizontal<T>(bnd)) {
(*bnd)->current_x = static_cast<double>((*bnd)->current_edge->top.x);
if ((*bnd)->current_edge->bot.x < (*bnd)->current_edge->top.x) {
// left to right
auto bnd_next = std::next(bnd);
while (bnd_next != active_bounds.end() &&
(*bnd_next)->current_x < (*bnd)->current_x) {
if (std::llround((*bnd_next)->current_edge->top.y) != top_y &&
std::llround((*bnd_next)->current_edge->bot.y) != top_y) {
mapbox::geometry::point<T> pt(std::llround((*bnd_next)->current_x),
top_y);
add_to_hot_pixels(pt, rings);
}
swap_positions_in_ABL(bnd, bnd_next, active_bounds);
bnd_next = std::next(bnd);
}
} else {
// right to left
if (bnd != active_bounds.begin()) {
auto bnd_prev = std::prev(bnd);
while (bnd != active_bounds.begin() &&
(*bnd_prev)->current_x > (*bnd)->current_x) {
if (std::llround((*bnd_prev)->current_edge->top.y) != top_y &&
std::llround((*bnd_prev)->current_edge->bot.y) != top_y) {
mapbox::geometry::point<T> pt(std::llround((*bnd_prev)->current_x),
top_y);
add_to_hot_pixels(pt, rings);
}
swap_positions_in_ABL(bnd, bnd_prev, active_bounds);
bnd_prev = std::prev(bnd);
}
}
}
}
next_edge_in_bound(bnd, scanbeam);
}
if ((*bnd)->current_edge == (*bnd)->edges.end()) {
active_bounds.erase(bnd);
}
bnd = bnd_2;
}
}
template <typename T>
void insert_local_minima_into_ABL_hot_pixel(T top_y,
local_minimum_ptr_list<T>& minima_sorted,
local_minimum_ptr_list_itr<T>& lm,
active_bound_list<T>& active_bounds,
ring_manager<T>& rings,
scanbeam_list<T>& scanbeam) {
while (lm != minima_sorted.end() && (*lm)->y == top_y) {
add_to_hot_pixels((*lm)->left_bound.edges.front().bot, rings);
auto& left_bound = (*lm)->left_bound;
left_bound.current_edge = left_bound.edges.begin();
left_bound.current_x = static_cast<double>(left_bound.current_edge->bot.x);
auto lb_abl_itr = insert_bound_into_ABL(left_bound, active_bounds);
if (!current_edge_is_horizontal<T>(lb_abl_itr)) {
scanbeam.push((*lb_abl_itr)->current_edge->top.y);
}
auto& right_bound = (*lm)->right_bound;
right_bound.current_edge = right_bound.edges.begin();
right_bound.current_x = static_cast<double>(right_bound.current_edge->bot.x);
auto rb_abl_itr = insert_bound_into_ABL(right_bound, lb_abl_itr, active_bounds);
if (!current_edge_is_horizontal<T>(rb_abl_itr)) {
scanbeam.push((*rb_abl_itr)->current_edge->top.y);
}
++lm;
}
}
template <typename T>
void build_hot_pixels(local_minimum_list<T>& minima_list, ring_manager<T>& rings) {
active_bound_list<T> active_bounds;
scanbeam_list<T> scanbeam;
T scanline_y = std::numeric_limits<T>::max();
local_minimum_ptr_list<T> minima_sorted;
minima_sorted.reserve(minima_list.size());
for (auto& lm : minima_list) {
minima_sorted.push_back(&lm);
}
std::stable_sort(minima_sorted.begin(), minima_sorted.end(), local_minimum_sorter<T>());
local_minimum_ptr_list_itr<T> current_lm = minima_sorted.begin();
setup_scanbeam(minima_list, scanbeam);
// Estimate size for reserving hot pixels
std::size_t reserve = 0;
for (auto& lm : minima_list) {
reserve += lm.left_bound.edges.size() + 2;
reserve += lm.right_bound.edges.size() + 2;
}
rings.hot_pixels.reserve(reserve);
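// Drive the sweep from the scanbeam: at each scanline, record crossings between
// active bounds, insert bounds for local minima that start here, and walk the edges
// that end at this scanline.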
while (pop_from_scanbeam(scanline_y, scanbeam) || current_lm != minima_sorted.end()) {
process_hot_pixel_intersections(scanline_y, active_bounds, rings);
insert_local_minima_into_ABL_hot_pixel(scanline_y, minima_sorted, current_lm, active_bounds,
rings, scanbeam);
process_hot_pixel_edges_at_top_of_scanbeam(scanline_y, scanbeam, active_bounds, rings);
}
preallocate_point_memory(rings, rings.hot_pixels.size());
sort_hot_pixels(rings);
}
}
}
}

File diff suppressed because it is too large


@@ -0,0 +1,79 @@
#pragma once
#include <cmath>
#include <mapbox/geometry/point.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/wagyu/point.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
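// Signed area of a linear ring via the shoelace formula; the sign encodes the ring's
// winding direction, and value_is_zero(area(...)) detects degenerate rings.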
template <typename T>
double area(mapbox::geometry::linear_ring<T> const& poly) {
std::size_t size = poly.size();
if (size < 3) {
return 0.0;
}
double a = 0.0;
auto itr = poly.begin();
auto itr_prev = poly.end();
--itr_prev;
a += static_cast<double>(itr_prev->x + itr->x) * static_cast<double>(itr_prev->y - itr->y);
++itr;
itr_prev = poly.begin();
for (; itr != poly.end(); ++itr, ++itr_prev) {
a += static_cast<double>(itr_prev->x + itr->x) * static_cast<double>(itr_prev->y - itr->y);
}
return -a * 0.5;
}
inline bool value_is_zero(double val) {
return std::fabs(val) < std::numeric_limits<double>::epsilon();
}
inline bool values_are_equal(double x, double y) {
return value_is_zero(x - y);
}
inline bool values_near_equal(double x, double y) {
return std::fabs(x - y) < (5.0 * std::numeric_limits<double>::epsilon());
}
inline bool greater_than_or_equal(double x, double y) {
return x > y || values_are_equal(x, y);
}
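// Collinearity / equal-slope tests done by cross-multiplying, which avoids division
// (and division by zero for vertical segments).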
template <typename T>
bool slopes_equal(mapbox::geometry::point<T> const& pt1,
mapbox::geometry::point<T> const& pt2,
mapbox::geometry::point<T> const& pt3) {
return (pt1.y - pt2.y) * (pt2.x - pt3.x) == (pt1.x - pt2.x) * (pt2.y - pt3.y);
}
template <typename T>
bool slopes_equal(mapbox::geometry::wagyu::point<T> const& pt1,
mapbox::geometry::wagyu::point<T> const& pt2,
mapbox::geometry::point<T> const& pt3) {
return (pt1.y - pt2.y) * (pt2.x - pt3.x) == (pt1.x - pt2.x) * (pt2.y - pt3.y);
}
template <typename T>
bool slopes_equal(mapbox::geometry::wagyu::point<T> const& pt1,
mapbox::geometry::wagyu::point<T> const& pt2,
mapbox::geometry::wagyu::point<T> const& pt3) {
return (pt1.y - pt2.y) * (pt2.x - pt3.x) == (pt1.x - pt2.x) * (pt2.y - pt3.y);
}
template <typename T>
bool slopes_equal(mapbox::geometry::point<T> const& pt1,
mapbox::geometry::point<T> const& pt2,
mapbox::geometry::point<T> const& pt3,
mapbox::geometry::point<T> const& pt4) {
return (pt1.y - pt2.y) * (pt3.x - pt4.x) == (pt1.x - pt2.x) * (pt3.y - pt4.y);
}
}
}
}


@@ -0,0 +1,74 @@
#pragma once
#include <algorithm>
#include <set>
#include <mapbox/geometry/wagyu/active_bound_list.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/intersect_util.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/local_minimum_util.hpp>
#include <mapbox/geometry/wagyu/process_horizontal.hpp>
#include <mapbox/geometry/wagyu/process_maxima.hpp>
#include <mapbox/geometry/wagyu/ring.hpp>
#include <mapbox/geometry/wagyu/ring_util.hpp>
#include <mapbox/geometry/wagyu/util.hpp>
namespace mapbox {
namespace geometry {
namespace wagyu {
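// Core Vatti scanline sweep over the previously built local-minima list; returns
// false when there is nothing to clip.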
template <typename T>
bool execute_vatti(local_minimum_list<T>& minima_list,
ring_manager<T>& rings,
clip_type cliptype,
fill_type subject_fill_type,
fill_type clip_fill_type) {
if (minima_list.empty()) {
return false;
}
active_bound_list<T> active_bounds;
scanbeam_list<T> scanbeam;
T scanline_y = std::numeric_limits<T>::max();
local_minimum_ptr_list<T> minima_sorted;
minima_sorted.reserve(minima_list.size());
for (auto& lm : minima_list) {
minima_sorted.push_back(&lm);
}
std::stable_sort(minima_sorted.begin(), minima_sorted.end(), local_minimum_sorter<T>());
local_minimum_ptr_list_itr<T> current_lm = minima_sorted.begin();
// std::clog << output_all_edges(minima_sorted) << std::endl;
setup_scanbeam(minima_list, scanbeam);
rings.current_hp_itr = rings.hot_pixels.begin();
while (pop_from_scanbeam(scanline_y, scanbeam) || current_lm != minima_sorted.end()) {
process_intersections(scanline_y, active_bounds, cliptype, subject_fill_type,
clip_fill_type, rings);
update_current_hp_itr(scanline_y, rings);
// First we process bounds that have already been added to the active bound list --
// if the active bound list is empty, local minima that are at this scanline_y and
// have a horizontal edge at the local minimum will be processed
process_edges_at_top_of_scanbeam(scanline_y, active_bounds, scanbeam, minima_sorted,
current_lm, rings, cliptype, subject_fill_type,
clip_fill_type);
// Next we add bounds to the active bound list for any local minima that are on the
// local minima queue at this scanline_y
insert_local_minima_into_ABL(scanline_y, minima_sorted, current_lm, active_bounds, rings,
scanbeam, cliptype, subject_fill_type, clip_fill_type);
}
// std::clog << rings.rings << std::endl;
// std::clog << output_as_polygon(rings.all_rings[0]);
return true;
}
}
}
}


@@ -0,0 +1,137 @@
#pragma once
#include <list>
#include <mapbox/geometry/box.hpp>
#include <mapbox/geometry/line_string.hpp>
#include <mapbox/geometry/multi_polygon.hpp>
#include <mapbox/geometry/polygon.hpp>
#include <mapbox/geometry/wagyu/build_local_minima_list.hpp>
#include <mapbox/geometry/wagyu/build_result.hpp>
#include <mapbox/geometry/wagyu/config.hpp>
#include <mapbox/geometry/wagyu/local_minimum.hpp>
#include <mapbox/geometry/wagyu/snap_rounding.hpp>
#include <mapbox/geometry/wagyu/topology_correction.hpp>
#include <mapbox/geometry/wagyu/vatti.hpp>
#define WAGYU_MAJOR_VERSION 0
#define WAGYU_MINOR_VERSION 3
#define WAGYU_PATCH_VERSION 0
#define WAGYU_VERSION (WAGYU_MAJOR_VERSION * 100000) + (WAGYU_MINOR_VERSION * 100) + (WAGYU_PATCH_VERSION)
namespace mapbox {
namespace geometry {
namespace wagyu {
template <typename T>
class wagyu {
private:
using value_type = T;
local_minimum_list<value_type> minima_list;
bool reverse_output;
wagyu(wagyu const&) = delete;
wagyu& operator=(wagyu const&) = delete;
public:
wagyu() : minima_list(), reverse_output(false) {
}
~wagyu() {
clear();
}
bool add_ring(mapbox::geometry::linear_ring<value_type> const& pg,
polygon_type p_type = polygon_type_subject) {
return add_linear_ring(pg, minima_list, p_type);
}
bool add_polygon(mapbox::geometry::polygon<value_type> const& ppg,
polygon_type p_type = polygon_type_subject) {
bool result = false;
for (auto const& r : ppg) {
if (add_ring(r, p_type)) {
result = true;
}
}
return result;
}
void reverse_rings(bool value) {
reverse_output = value;
}
void clear() {
minima_list.clear();
}
mapbox::geometry::box<value_type> get_bounds() {
mapbox::geometry::point<value_type> min = { 0, 0 };
mapbox::geometry::point<value_type> max = { 0, 0 };
if (minima_list.empty()) {
return mapbox::geometry::box<value_type>(min, max);
}
bool first_set = false;
for (auto const& lm : minima_list) {
if (!lm.left_bound.edges.empty()) {
if (!first_set) {
min = lm.left_bound.edges.front().top;
max = lm.left_bound.edges.back().bot;
first_set = true;
} else {
min.y = std::min(min.y, lm.left_bound.edges.front().top.y);
max.y = std::max(max.y, lm.left_bound.edges.back().bot.y);
max.x = std::max(max.x, lm.left_bound.edges.back().top.x);
min.x = std::min(min.x, lm.left_bound.edges.back().top.x);
}
for (auto const& e : lm.left_bound.edges) {
max.x = std::max(max.x, e.bot.x);
min.x = std::min(min.x, e.bot.x);
}
}
if (!lm.right_bound.edges.empty()) {
if (!first_set) {
min = lm.right_bound.edges.front().top;
max = lm.right_bound.edges.back().bot;
first_set = true;
} else {
min.y = std::min(min.y, lm.right_bound.edges.front().top.y);
max.y = std::max(max.y, lm.right_bound.edges.back().bot.y);
max.x = std::max(max.x, lm.right_bound.edges.back().top.x);
min.x = std::min(min.x, lm.right_bound.edges.back().top.x);
}
for (auto const& e : lm.right_bound.edges) {
max.x = std::max(max.x, e.bot.x);
min.x = std::min(min.x, e.bot.x);
}
}
}
return mapbox::geometry::box<value_type>(min, max);
}
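// Snap-round the input to hot pixels, run the Vatti sweep, correct ring topology,
// and assemble the result into a multi_polygon.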
bool execute(clip_type cliptype,
mapbox::geometry::multi_polygon<value_type>& solution,
fill_type subject_fill_type,
fill_type clip_fill_type) {
ring_manager<T> rings;
build_hot_pixels(minima_list, rings);
if (!execute_vatti(minima_list, rings, cliptype, subject_fill_type, clip_fill_type)) {
return false;
}
do_simple_polygons(rings);
build_result(solution, rings, reverse_output);
return true;
}
};
}
}
}

mapbox/optional.hpp Normal file (74 lines)

@@ -0,0 +1,74 @@
#ifndef MAPBOX_UTIL_OPTIONAL_HPP
#define MAPBOX_UTIL_OPTIONAL_HPP
#pragma message("This implementation of optional is deprecated. See https://github.com/mapbox/variant/issues/64.")
#include <type_traits>
#include <utility>
#include <mapbox/variant.hpp>
namespace mapbox {
namespace util {
template <typename T>
class optional
{
static_assert(!std::is_reference<T>::value, "optional doesn't support references");
struct none_type
{
};
variant<none_type, T> variant_;
public:
optional() = default;
optional(optional const& rhs)
{
if (this != &rhs)
{ // protect against invalid self-assignment
variant_ = rhs.variant_;
}
}
optional(T const& v) { variant_ = v; }
explicit operator bool() const noexcept { return variant_.template is<T>(); }
T const& get() const { return variant_.template get<T>(); }
T& get() { return variant_.template get<T>(); }
T const& operator*() const { return this->get(); }
T operator*() { return this->get(); }
optional& operator=(T const& v)
{
variant_ = v;
return *this;
}
optional& operator=(optional const& rhs)
{
if (this != &rhs)
{
variant_ = rhs.variant_;
}
return *this;
}
template <typename... Args>
void emplace(Args&&... args)
{
variant_ = T{std::forward<Args>(args)...};
}
void reset() { variant_ = none_type{}; }
}; // class optional
} // namespace util
} // namespace mapbox
#endif // MAPBOX_UTIL_OPTIONAL_HPP


@@ -0,0 +1,122 @@
#ifndef MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
#define MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP
// Based on variant/recursive_wrapper.hpp from boost.
//
// Original license:
//
// Copyright (c) 2002-2003
// Eric Friedman, Itay Maman
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cassert>
#include <utility>
namespace mapbox {
namespace util {
template <typename T>
class recursive_wrapper
{
T* p_;
void assign(T const& rhs)
{
this->get() = rhs;
}
public:
using type = T;
/**
* Default constructor default initializes the internally stored value.
* For POD types this means nothing is done and the storage is
* uninitialized.
*
* @throws std::bad_alloc if there is insufficient memory for an object
* of type T.
* @throws any exception thrown by the default constructor of T.
*/
recursive_wrapper()
: p_(new T){}
~recursive_wrapper() noexcept { delete p_; }
recursive_wrapper(recursive_wrapper const& operand)
: p_(new T(operand.get())) {}
recursive_wrapper(T const& operand)
: p_(new T(operand)) {}
recursive_wrapper(recursive_wrapper&& operand)
: p_(new T(std::move(operand.get()))) {}
recursive_wrapper(T&& operand)
: p_(new T(std::move(operand))) {}
inline recursive_wrapper& operator=(recursive_wrapper const& rhs)
{
assign(rhs.get());
return *this;
}
inline recursive_wrapper& operator=(T const& rhs)
{
assign(rhs);
return *this;
}
inline void swap(recursive_wrapper& operand) noexcept
{
T* temp = operand.p_;
operand.p_ = p_;
p_ = temp;
}
recursive_wrapper& operator=(recursive_wrapper&& rhs) noexcept
{
swap(rhs);
return *this;
}
recursive_wrapper& operator=(T&& rhs)
{
get() = std::move(rhs);
return *this;
}
T& get()
{
assert(p_);
return *get_pointer();
}
T const& get() const
{
assert(p_);
return *get_pointer();
}
T* get_pointer() { return p_; }
const T* get_pointer() const { return p_; }
operator T const&() const { return this->get(); }
operator T&() { return this->get(); }
}; // class recursive_wrapper
template <typename T>
inline void swap(recursive_wrapper<T>& lhs, recursive_wrapper<T>& rhs) noexcept
{
lhs.swap(rhs);
}
} // namespace util
} // namespace mapbox
#endif // MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP

mapbox/variant.hpp Normal file (1013 lines)

File diff suppressed because it is too large

mapbox/variant_io.hpp Normal file (45 lines)

@@ -0,0 +1,45 @@
#ifndef MAPBOX_UTIL_VARIANT_IO_HPP
#define MAPBOX_UTIL_VARIANT_IO_HPP
#include <iosfwd>
#include <mapbox/variant.hpp>
namespace mapbox {
namespace util {
namespace detail {
// operator<< helper
template <typename Out>
class printer
{
public:
explicit printer(Out& out)
: out_(out) {}
printer& operator=(printer const&) = delete;
// visitor
template <typename T>
void operator()(T const& operand) const
{
out_ << operand;
}
private:
Out& out_;
};
}
// operator<<
template <typename CharT, typename Traits, typename... Types>
VARIANT_INLINE std::basic_ostream<CharT, Traits>&
operator<<(std::basic_ostream<CharT, Traits>& out, variant<Types...> const& rhs)
{
detail::printer<std::basic_ostream<CharT, Traits>> visitor(out);
apply_visitor(visitor, rhs);
return out;
}
} // namespace util
} // namespace mapbox
#endif // MAPBOX_UTIL_VARIANT_IO_HPP


@@ -0,0 +1,38 @@
#ifndef MAPBOX_UTIL_VARIANT_VISITOR_HPP
#define MAPBOX_UTIL_VARIANT_VISITOR_HPP
namespace mapbox {
namespace util {
template <typename... Fns>
struct visitor;
template <typename Fn>
struct visitor<Fn> : Fn
{
using type = Fn;
using Fn::operator();
visitor(Fn fn) : Fn(fn) {}
};
template <typename Fn, typename... Fns>
struct visitor<Fn, Fns...> : Fn, visitor<Fns...>
{
using type = visitor;
using Fn::operator();
using visitor<Fns...>::operator();
visitor(Fn fn, Fns... fns) : Fn(fn), visitor<Fns...>(fns...) {}
};
template <typename... Fns>
visitor<Fns...> make_visitor(Fns... fns)
{
return visitor<Fns...>(fns...);
}
} // namespace util
} // namespace mapbox
#endif // MAPBOX_UTIL_VARIANT_VISITOR_HPP

mbtiles.c (238 lines)

@@ -1,238 +0,0 @@
// for vasprintf() on Linux
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sqlite3.h>
#include "pool.h"
#include "tile.h"
sqlite3 *mbtiles_open(char *dbname, char **argv) {
sqlite3 *outdb;
if (sqlite3_open(dbname, &outdb) != SQLITE_OK) {
fprintf(stderr, "%s: %s: %s\n", argv[0], dbname, sqlite3_errmsg(outdb));
exit(EXIT_FAILURE);
}
char *err = NULL;
if (sqlite3_exec(outdb, "PRAGMA synchronous=0", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "PRAGMA locking_mode=EXCLUSIVE", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "PRAGMA journal_mode=DELETE", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "CREATE TABLE metadata (name text, value text);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: create metadata table: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: create tiles table: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "create unique index name on metadata (name);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: index metadata: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "create unique index tile_index on tiles (zoom_level, tile_column, tile_row);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: index tiles: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
return outdb;
}
void mbtiles_write_tile(sqlite3 *outdb, int z, int tx, int ty, const char *data, int size) {
sqlite3_stmt *stmt;
const char *query = "insert into tiles (zoom_level, tile_column, tile_row, tile_data) values (?, ?, ?, ?)";
if (sqlite3_prepare_v2(outdb, query, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "sqlite3 insert prep failed\n");
exit(EXIT_FAILURE);
}
sqlite3_bind_int(stmt, 1, z);
sqlite3_bind_int(stmt, 2, tx);
sqlite3_bind_int(stmt, 3, (1 << z) - 1 - ty);
sqlite3_bind_blob(stmt, 4, data, size, NULL);
if (sqlite3_step(stmt) != SQLITE_DONE) {
fprintf(stderr, "sqlite3 insert failed: %s\n", sqlite3_errmsg(outdb));
}
if (sqlite3_finalize(stmt) != SQLITE_OK) {
fprintf(stderr, "sqlite3 finalize failed: %s\n", sqlite3_errmsg(outdb));
}
}
static void quote(char **buf, const char *s) {
char tmp[strlen(s) * 8 + 1];
char *out = tmp;
for (; *s != '\0'; s++) {
unsigned char ch = (unsigned char) *s;
if (ch == '\\' || ch == '\"') {
*out++ = '\\';
*out++ = ch;
} else if (ch < ' ') {
sprintf(out, "\\u%04x", ch);
out = out + strlen(out);
} else {
*out++ = ch;
}
}
*out = '\0';
*buf = realloc(*buf, strlen(*buf) + strlen(tmp) + 1);
if (*buf == NULL) {
perror("realloc");
exit(EXIT_FAILURE);
}
strcat(*buf, tmp);
}
static void aprintf(char **buf, const char *format, ...) {
va_list ap;
char *tmp;
va_start(ap, format);
if (vasprintf(&tmp, format, ap) < 0) {
fprintf(stderr, "memory allocation failure\n");
exit(EXIT_FAILURE);
}
va_end(ap);
*buf = realloc(*buf, strlen(*buf) + strlen(tmp) + 1);
strcat(*buf, tmp);
free(tmp);
}
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, char **layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool **file_keys, int nlayers) {
char *sql, *err;
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('name', %Q);", fname);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set name in metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('description', %Q);", fname);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set description in metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('version', %d);", 1);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('minzoom', %d);", minzoom);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('maxzoom', %d);", maxzoom);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('center', '%f,%f,%d');", midlon, midlat, maxzoom);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('bounds', '%f,%f,%f,%f');", minlon, minlat, maxlon, maxlat);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('type', %Q);", "overlay");
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('format', %Q);", "pbf");
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
char *buf = strdup("{");
aprintf(&buf, "\"vector_layers\": [ ");
int i;
for (i = 0; i < nlayers; i++) {
if (i != 0) {
aprintf(&buf, ", ");
}
aprintf(&buf, "{ \"id\": \"");
quote(&buf, layername[i]);
aprintf(&buf, "\", \"description\": \"\", \"minzoom\": %d, \"maxzoom\": %d, \"fields\": {", minzoom, maxzoom);
struct pool_val *pv;
for (pv = file_keys[i]->head; pv != NULL; pv = pv->next) {
aprintf(&buf, "\"");
quote(&buf, pv->s);
if (pv->type == VT_NUMBER) {
aprintf(&buf, "\": \"Number\"");
} else if (pv->type == VT_BOOLEAN) {
aprintf(&buf, "\": \"Boolean\"");
} else {
aprintf(&buf, "\": \"String\"");
}
if (pv->next != NULL) {
aprintf(&buf, ", ");
}
}
aprintf(&buf, "} }");
}
aprintf(&buf, " ] }");
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('json', %Q);", buf);
if (sqlite3_exec(outdb, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set metadata: %s\n", err);
exit(EXIT_FAILURE);
}
sqlite3_free(sql);
free(buf);
}
void mbtiles_close(sqlite3 *outdb, char **argv) {
char *err;
if (sqlite3_exec(outdb, "ANALYZE;", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: ANALYZE failed: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_close(outdb) != SQLITE_OK) {
fprintf(stderr, "%s: could not close database: %s\n", argv[0], sqlite3_errmsg(outdb));
exit(EXIT_FAILURE);
}
}

mbtiles.cpp Normal file (661 lines)

@@ -0,0 +1,661 @@
// for vasprintf() on Linux
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sqlite3.h>
#include <vector>
#include <string>
#include <set>
#include <map>
#include "mvt.hpp"
#include "mbtiles.hpp"
#include "text.hpp"
#include "milo/dtoa_milo.h"
sqlite3 *mbtiles_open(char *dbname, char **argv, int forcetable) {
sqlite3 *outdb;
if (sqlite3_open(dbname, &outdb) != SQLITE_OK) {
fprintf(stderr, "%s: %s: %s\n", argv[0], dbname, sqlite3_errmsg(outdb));
exit(EXIT_FAILURE);
}
char *err = NULL;
if (sqlite3_exec(outdb, "PRAGMA synchronous=0", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "PRAGMA locking_mode=EXCLUSIVE", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "PRAGMA journal_mode=DELETE", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: async: %s\n", argv[0], err);
exit(EXIT_FAILURE);
}
if (sqlite3_exec(outdb, "CREATE TABLE metadata (name text, value text);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: create metadata table: %s\n", argv[0], err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
if (sqlite3_exec(outdb, "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: create tiles table: %s\n", argv[0], err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
if (sqlite3_exec(outdb, "create unique index name on metadata (name);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: index metadata: %s\n", argv[0], err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
if (sqlite3_exec(outdb, "create unique index tile_index on tiles (zoom_level, tile_column, tile_row);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: index tiles: %s\n", argv[0], err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
return outdb;
}
void mbtiles_write_tile(sqlite3 *outdb, int z, int tx, int ty, const char *data, int size) {
sqlite3_stmt *stmt;
const char *query = "insert into tiles (zoom_level, tile_column, tile_row, tile_data) values (?, ?, ?, ?)";
if (sqlite3_prepare_v2(outdb, query, -1, &stmt, NULL) != SQLITE_OK) {
fprintf(stderr, "sqlite3 insert prep failed\n");
exit(EXIT_FAILURE);
}
sqlite3_bind_int(stmt, 1, z);
sqlite3_bind_int(stmt, 2, tx);
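// mbtiles uses the TMS tiling scheme, so the row is flipped relative to the usual
// XYZ y coordinate.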
sqlite3_bind_int(stmt, 3, (1 << z) - 1 - ty);
sqlite3_bind_blob(stmt, 4, data, size, NULL);
if (sqlite3_step(stmt) != SQLITE_DONE) {
fprintf(stderr, "sqlite3 insert failed: %s\n", sqlite3_errmsg(outdb));
}
if (sqlite3_finalize(stmt) != SQLITE_OK) {
fprintf(stderr, "sqlite3 finalize failed: %s\n", sqlite3_errmsg(outdb));
}
}
static void quote(std::string &buf, std::string const &s) {
for (size_t i = 0; i < s.size(); i++) {
unsigned char ch = s[i];
if (ch == '\\' || ch == '\"') {
buf.push_back('\\');
buf.push_back(ch);
} else if (ch < ' ') {
char tmp[7];
sprintf(tmp, "\\u%04x", ch);
buf.append(std::string(tmp));
} else {
buf.push_back(ch);
}
}
}
void aprintf(std::string *buf, const char *format, ...) {
va_list ap;
char *tmp;
va_start(ap, format);
if (vasprintf(&tmp, format, ap) < 0) {
fprintf(stderr, "memory allocation failure\n");
exit(EXIT_FAILURE);
}
va_end(ap);
buf->append(tmp, strlen(tmp));
free(tmp);
}
bool type_and_string::operator<(const type_and_string &o) const {
if (string < o.string) {
return true;
}
if (string == o.string && type < o.type) {
return true;
}
return false;
}
bool type_and_string::operator!=(const type_and_string &o) const {
if (type != o.type) {
return true;
}
if (string != o.string) {
return true;
}
return false;
}
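// Builds the "tilestats" object (layer, attribute, and sample-value summaries) that
// is embedded alongside "vector_layers" in the metadata "json" value.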
std::string tilestats(std::map<std::string, layermap_entry> const &layermap1, size_t elements) {
// Consolidate layers/attributes whose names are truncated
std::vector<std::map<std::string, layermap_entry>> lmv;
lmv.push_back(layermap1);
std::map<std::string, layermap_entry> layermap = merge_layermaps(lmv, true);
std::string out = "{\n";
out.append("\t\"layerCount\": ");
out.append(std::to_string(layermap.size()));
out.append(",\n");
out.append("\t\"layers\": [\n");
bool first = true;
for (auto layer : layermap) {
if (!first) {
out.append(",\n");
}
first = false;
out.append("\t\t{\n");
out.append("\t\t\t\"layer\": \"");
quote(out, layer.first.c_str());
out.append("\",\n");
out.append("\t\t\t\"count\": ");
out.append(std::to_string(layer.second.points + layer.second.lines + layer.second.polygons));
out.append(",\n");
std::string geomtype = "Polygon";
if (layer.second.points >= layer.second.lines && layer.second.points >= layer.second.polygons) {
geomtype = "Point";
} else if (layer.second.lines >= layer.second.polygons && layer.second.lines >= layer.second.points) {
geomtype = "LineString";
}
out.append("\t\t\t\"geometry\": \"");
quote(out, geomtype.c_str());
out.append("\",\n");
size_t attrib_count = layer.second.file_keys.size();
if (attrib_count > 1000) {
attrib_count = 1000;
}
out.append("\t\t\t\"attributeCount\": ");
out.append(std::to_string(attrib_count));
out.append(",\n");
out.append("\t\t\t\"attributes\": [\n");
size_t attrs = 0;
for (auto attribute : layer.second.file_keys) {
if (attrs == elements) {
break;
}
if (attrs != 0) {
out.append(",\n");
}
attrs++;
out.append("\t\t\t\t{\n");
out.append("\t\t\t\t\t\"attribute\": \"");
quote(out, attribute.first.c_str());
out.append("\",\n");
size_t val_count = attribute.second.sample_values.size();
if (val_count > 1000) {
val_count = 1000;
}
out.append("\t\t\t\t\t\"count\": ");
out.append(std::to_string(val_count));
out.append(",\n");
int type = 0;
for (auto s : attribute.second.sample_values) {
type |= (1 << s.type);
}
std::string type_str;
// No "null" because null attributes are dropped
if (type == (1 << mvt_double)) {
type_str = "number";
} else if (type == (1 << mvt_bool)) {
type_str = "boolean";
} else if (type == (1 << mvt_string)) {
type_str = "string";
} else {
type_str = "mixed";
}
out.append("\t\t\t\t\t\"type\": \"");
quote(out, type_str.c_str());
out.append("\",\n");
out.append("\t\t\t\t\t\"values\": [\n");
size_t vals = 0;
for (auto value : attribute.second.sample_values) {
if (vals == elements) {
break;
}
if (value.type == mvt_double || value.type == mvt_bool) {
if (vals != 0) {
out.append(",\n");
}
vals++;
out.append("\t\t\t\t\t\t");
out.append(value.string);
} else {
std::string trunc = truncate16(value.string, 256);
if (trunc.size() == value.string.size()) {
if (vals != 0) {
out.append(",\n");
}
vals++;
out.append("\t\t\t\t\t\t\"");
quote(out, value.string.c_str());
out.append("\"");
}
}
}
out.append("\n");
out.append("\t\t\t\t\t]");
if ((type & (1 << mvt_double)) != 0) {
out.append(",\n");
out.append("\t\t\t\t\t\"min\": ");
out.append(milo::dtoa_milo(attribute.second.min));
out.append(",\n");
out.append("\t\t\t\t\t\"max\": ");
out.append(milo::dtoa_milo(attribute.second.max));
}
out.append("\n");
out.append("\t\t\t\t}");
}
out.append("\n\t\t\t]\n");
out.append("\t\t}");
}
out.append("\n");
out.append("\t]\n");
out.append("}");
std::string out2;
for (size_t i = 0; i < out.size(); i++) {
if (out[i] != '\t' && out[i] != '\n') {
out2.push_back(out[i]);
}
}
return out2;
}
void mbtiles_write_metadata(sqlite3 *outdb, const char *outdir, const char *fname, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, int forcetable, const char *attribution, std::map<std::string, layermap_entry> const &layermap, bool vector, const char *description, bool do_tilestats) {
char *sql, *err;
sqlite3 *db = outdb;
if (outdb == NULL) {
if (sqlite3_open("", &db) != SQLITE_OK) {
fprintf(stderr, "Temporary db: %s\n", sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
if (sqlite3_exec(db, "CREATE TABLE metadata (name text, value text);", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "Create metadata table: %s\n", err);
exit(EXIT_FAILURE);
}
}
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('name', %Q);", fname);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set name in metadata: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('description', %Q);", description != NULL ? description : fname);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set description in metadata: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('version', %d);", 2);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set version : %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('minzoom', %d);", minzoom);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set minzoom: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('maxzoom', %d);", maxzoom);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set maxzoom: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('center', '%f,%f,%d');", midlon, midlat, maxzoom);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set center: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('bounds', '%f,%f,%f,%f');", minlon, minlat, maxlon, maxlat);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set bounds: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('type', %Q);", "overlay");
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set type: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
if (attribution != NULL) {
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('attribution', %Q);", attribution);
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set type: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
}
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('format', %Q);", vector ? "pbf" : "png");
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set format: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
if (vector) {
size_t elements = 100;
std::string buf;
{
buf = "{";
aprintf(&buf, "\"vector_layers\": [ ");
std::vector<std::string> lnames;
for (auto ai = layermap.begin(); ai != layermap.end(); ++ai) {
lnames.push_back(ai->first);
}
for (size_t i = 0; i < lnames.size(); i++) {
if (i != 0) {
aprintf(&buf, ", ");
}
auto fk = layermap.find(lnames[i]);
aprintf(&buf, "{ \"id\": \"");
quote(buf, lnames[i]);
aprintf(&buf, "\", \"description\": \"\", \"minzoom\": %d, \"maxzoom\": %d, \"fields\": {", fk->second.minzoom, fk->second.maxzoom);
bool first = true;
for (auto j = fk->second.file_keys.begin(); j != fk->second.file_keys.end(); ++j) {
if (first) {
first = false;
} else {
aprintf(&buf, ", ");
}
aprintf(&buf, "\"");
quote(buf, j->first.c_str());
int type = 0;
for (auto s : j->second.sample_values) {
type |= (1 << s.type);
}
if (type == (1 << mvt_double)) {
aprintf(&buf, "\": \"Number\"");
} else if (type == (1 << mvt_bool)) {
aprintf(&buf, "\": \"Boolean\"");
} else if (type == (1 << mvt_string)) {
aprintf(&buf, "\": \"String\"");
} else {
aprintf(&buf, "\": \"Mixed\"");
}
}
aprintf(&buf, "} }");
}
aprintf(&buf, " ]");
if (do_tilestats && elements > 0) {
aprintf(&buf, ",\"tilestats\": %s", tilestats(layermap, elements).c_str());
}
aprintf(&buf, "}");
}
sql = sqlite3_mprintf("INSERT INTO metadata (name, value) VALUES ('json', %Q);", buf.c_str());
if (sqlite3_exec(db, sql, NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "set json: %s\n", err);
if (!forcetable) {
exit(EXIT_FAILURE);
}
}
sqlite3_free(sql);
}
if (outdir != NULL) {
std::string metadata = std::string(outdir) + "/metadata.json";
FILE *fp = fopen(metadata.c_str(), "w");
if (fp == NULL) {
perror(metadata.c_str());
exit(EXIT_FAILURE);
}
fprintf(fp, "{\n");
sqlite3_stmt *stmt;
bool first = true;
if (sqlite3_prepare_v2(db, "SELECT name, value from metadata;", -1, &stmt, NULL) == SQLITE_OK) {
while (sqlite3_step(stmt) == SQLITE_ROW) {
std::string key, value;
const char *k = (const char *) sqlite3_column_text(stmt, 0);
const char *v = (const char *) sqlite3_column_text(stmt, 1);
if (k == NULL || v == NULL) {
fprintf(stderr, "Corrupt mbtiles file: null metadata\n");
exit(EXIT_FAILURE);
}
quote(key, k);
quote(value, v);
if (!first) {
fprintf(fp, ",\n");
}
fprintf(fp, " \"%s\": \"%s\"", key.c_str(), value.c_str());
first = false;
}
sqlite3_finalize(stmt);
}
fprintf(fp, "\n}\n");
fclose(fp);
}
if (outdb == NULL) {
if (sqlite3_close(db) != SQLITE_OK) {
fprintf(stderr, "Could not close temp database: %s\n", sqlite3_errmsg(db));
exit(EXIT_FAILURE);
}
}
}
void mbtiles_close(sqlite3 *outdb, const char *pgm) {
char *err;
if (sqlite3_exec(outdb, "ANALYZE;", NULL, NULL, &err) != SQLITE_OK) {
fprintf(stderr, "%s: ANALYZE failed: %s\n", pgm, err);
exit(EXIT_FAILURE);
}
if (sqlite3_close(outdb) != SQLITE_OK) {
fprintf(stderr, "%s: could not close database: %s\n", pgm, sqlite3_errmsg(outdb));
exit(EXIT_FAILURE);
}
}
std::map<std::string, layermap_entry> merge_layermaps(std::vector<std::map<std::string, layermap_entry>> const &maps) {
return merge_layermaps(maps, false);
}
std::map<std::string, layermap_entry> merge_layermaps(std::vector<std::map<std::string, layermap_entry>> const &maps, bool trunc) {
std::map<std::string, layermap_entry> out;
for (size_t i = 0; i < maps.size(); i++) {
for (auto map = maps[i].begin(); map != maps[i].end(); ++map) {
if (map->second.points + map->second.lines + map->second.polygons == 0) {
continue;
}
std::string layername = map->first;
if (trunc) {
layername = truncate16(layername, 256);
}
if (out.count(layername) == 0) {
out.insert(std::pair<std::string, layermap_entry>(layername, layermap_entry(out.size())));
auto out_entry = out.find(layername);
out_entry->second.minzoom = map->second.minzoom;
out_entry->second.maxzoom = map->second.maxzoom;
}
auto out_entry = out.find(layername);
if (out_entry == out.end()) {
fprintf(stderr, "Internal error merging layers\n");
exit(EXIT_FAILURE);
}
for (auto fk = map->second.file_keys.begin(); fk != map->second.file_keys.end(); ++fk) {
std::string attribname = fk->first;
if (trunc) {
attribname = truncate16(attribname, 256);
}
auto fk2 = out_entry->second.file_keys.find(attribname);
if (fk2 == out_entry->second.file_keys.end()) {
out_entry->second.file_keys.insert(std::pair<std::string, type_and_string_stats>(attribname, fk->second));
} else {
for (auto val : fk->second.sample_values) {
auto pt = std::lower_bound(fk2->second.sample_values.begin(), fk2->second.sample_values.end(), val);
if (pt == fk2->second.sample_values.end() || *pt != val) { // not found
fk2->second.sample_values.insert(pt, val);
if (fk2->second.sample_values.size() > 1000) {
fk2->second.sample_values.pop_back();
}
}
}
fk2->second.type |= fk->second.type;
if (fk->second.min < fk2->second.min) {
fk2->second.min = fk->second.min;
}
if (fk->second.max > fk2->second.max) {
fk2->second.max = fk->second.max;
}
}
}
if (map->second.minzoom < out_entry->second.minzoom) {
out_entry->second.minzoom = map->second.minzoom;
}
if (map->second.maxzoom > out_entry->second.maxzoom) {
out_entry->second.maxzoom = map->second.maxzoom;
}
out_entry->second.points += map->second.points;
out_entry->second.lines += map->second.lines;
out_entry->second.polygons += map->second.polygons;
}
}
return out;
}
void add_to_file_keys(std::map<std::string, type_and_string_stats> &file_keys, std::string const &attrib, type_and_string const &val) {
auto fka = file_keys.find(attrib);
if (fka == file_keys.end()) {
file_keys.insert(std::pair<std::string, type_and_string_stats>(attrib, type_and_string_stats()));
fka = file_keys.find(attrib);
}
if (fka == file_keys.end()) {
fprintf(stderr, "Can't happen (tilestats)\n");
exit(EXIT_FAILURE);
}
if (val.type == mvt_double) {
double d = atof(val.string.c_str());
if (d < fka->second.min) {
fka->second.min = d;
}
if (d > fka->second.max) {
fka->second.max = d;
}
}
auto pt = std::lower_bound(fka->second.sample_values.begin(), fka->second.sample_values.end(), val);
if (pt == fka->second.sample_values.end() || *pt != val) { // not found
fka->second.sample_values.insert(pt, val);
if (fka->second.sample_values.size() > 1000) {
fka->second.sample_values.pop_back();
}
}
fka->second.type |= (1 << val.type);
}


@@ -1,7 +0,0 @@
sqlite3 *mbtiles_open(char *dbname, char **argv);
void mbtiles_write_tile(sqlite3 *outdb, int z, int tx, int ty, const char *data, int size);
void mbtiles_write_metadata(sqlite3 *outdb, const char *fname, char **layername, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, struct pool **file_keys, int nlayers);
void mbtiles_close(sqlite3 *outdb, char **argv);

mbtiles.hpp Normal file (53 lines)

@@ -0,0 +1,53 @@
#ifndef MBTILES_HPP
#define MBTILES_HPP
#include <math.h>
#include <map>
#include "mvt.hpp"
struct type_and_string {
int type;
std::string string;
bool operator<(const type_and_string &o) const;
bool operator!=(const type_and_string &o) const;
};
struct type_and_string_stats {
std::vector<type_and_string> sample_values; // sorted
double min = INFINITY;
double max = -INFINITY;
int type = 0;
};
struct layermap_entry {
size_t id;
std::map<std::string, type_and_string_stats> file_keys;
int minzoom;
int maxzoom;
size_t points = 0;
size_t lines = 0;
size_t polygons = 0;
layermap_entry(size_t _id) {
id = _id;
}
};
sqlite3 *mbtiles_open(char *dbname, char **argv, int forcetable);
void mbtiles_write_tile(sqlite3 *outdb, int z, int tx, int ty, const char *data, int size);
void mbtiles_write_metadata(sqlite3 *outdb, const char *outdir, const char *fname, int minzoom, int maxzoom, double minlat, double minlon, double maxlat, double maxlon, double midlat, double midlon, int forcetable, const char *attribution, std::map<std::string, layermap_entry> const &layermap, bool vector, const char *description, bool do_tilestats);
void mbtiles_close(sqlite3 *outdb, const char *pgm);
void aprintf(std::string *buf, const char *format, ...);
std::map<std::string, layermap_entry> merge_layermaps(std::vector<std::map<std::string, layermap_entry> > const &maps);
std::map<std::string, layermap_entry> merge_layermaps(std::vector<std::map<std::string, layermap_entry> > const &maps, bool trunc);
void add_to_file_keys(std::map<std::string, type_and_string_stats> &file_keys, std::string const &layername, type_and_string const &val);
#endif


@@ -2,30 +2,32 @@
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include "memfile.h"
#include "memfile.hpp"
#define INCREMENT 131072
#define INITIAL 256
struct memfile *memfile_open(int fd) {
if (ftruncate(fd, INCREMENT) != 0) {
if (ftruncate(fd, INITIAL) != 0) {
return NULL;
}
char *map = mmap(NULL, INCREMENT, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
char *map = (char *) mmap(NULL, INITIAL, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (map == MAP_FAILED) {
return NULL;
}
struct memfile *mf = malloc(sizeof(struct memfile));
struct memfile *mf = new memfile;
if (mf == NULL) {
munmap(map, INCREMENT);
munmap(map, INITIAL);
return NULL;
}
mf->fd = fd;
mf->map = map;
mf->len = INCREMENT;
mf->len = INITIAL;
mf->off = 0;
mf->tree = 0;
return mf;
}
@@ -41,7 +43,7 @@ int memfile_close(struct memfile *file) {
}
}
free(file);
delete file;
return 0;
}
@@ -51,13 +53,13 @@ int memfile_write(struct memfile *file, void *s, long long len) {
return -1;
}
file->len += INCREMENT;
file->len += (len + INCREMENT + 1) / INCREMENT * INCREMENT;
if (ftruncate(file->fd, file->len) != 0) {
return -1;
}
file->map = mmap(NULL, file->len, PROT_READ | PROT_WRITE, MAP_SHARED, file->fd, 0);
file->map = (char *) mmap(NULL, file->len, PROT_READ | PROT_WRITE, MAP_SHARED, file->fd, 0);
if (file->map == MAP_FAILED) {
return -1;
}


@@ -1,10 +1,16 @@
#ifndef MEMFILE_HPP
#define MEMFILE_HPP
struct memfile {
int fd;
char *map;
long long len;
long long off;
unsigned long tree;
};
struct memfile *memfile_open(int fd);
int memfile_close(struct memfile *file);
int memfile_write(struct memfile *file, void *s, long long len);
#endif

milo/LICENSE.txt Normal file (19 lines)

@@ -0,0 +1,19 @@
Copyright (C) 2014 Milo Yip
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

milo/dtoa_milo.h Normal file (414 lines)

@@ -0,0 +1,414 @@
#pragma once
#include <assert.h>
#include <math.h>
#include <cmath>
#if defined(_MSC_VER)
#include "msinttypes/stdint.h"
#include <intrin.h>
#else
#include <stdint.h>
#endif
namespace milo {
#define UINT64_C2(h, l) ((static_cast<uint64_t>(h) << 32) | static_cast<uint64_t>(l))
struct DiyFp {
DiyFp() {}
DiyFp(uint64_t ff, int ee) : f(ff), e(ee) {}
DiyFp(double d) {
union {
double d;
uint64_t u64;
} u = { d };
int biased_e = (u.u64 & kDpExponentMask) >> kDpSignificandSize;
uint64_t significand = (u.u64 & kDpSignificandMask);
if (biased_e != 0) {
f = significand + kDpHiddenBit;
e = biased_e - kDpExponentBias;
}
else {
f = significand;
e = kDpMinExponent + 1;
}
}
DiyFp operator-(const DiyFp& rhs) const {
assert(e == rhs.e);
assert(f >= rhs.f);
return DiyFp(f - rhs.f, e);
}
DiyFp operator*(const DiyFp& rhs) const {
#if defined(_MSC_VER) && defined(_M_AMD64)
uint64_t h;
uint64_t l = _umul128(f, rhs.f, &h);
if (l & (uint64_t(1) << 63)) // rounding
h++;
return DiyFp(h, e + rhs.e + 64);
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
unsigned __int128 p = static_cast<unsigned __int128>(f) * static_cast<unsigned __int128>(rhs.f);
uint64_t h = p >> 64;
uint64_t l = static_cast<uint64_t>(p);
if (l & (uint64_t(1) << 63)) // rounding
h++;
return DiyFp(h, e + rhs.e + 64);
#else
const uint64_t M32 = 0xFFFFFFFF;
const uint64_t a = f >> 32;
const uint64_t b = f & M32;
const uint64_t c = rhs.f >> 32;
const uint64_t d = rhs.f & M32;
const uint64_t ac = a * c;
const uint64_t bc = b * c;
const uint64_t ad = a * d;
const uint64_t bd = b * d;
uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
tmp += 1U << 31; /// mult_round
return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
#endif
}
DiyFp Normalize() const {
#if defined(_MSC_VER) && defined(_M_AMD64)
unsigned long index;
_BitScanReverse64(&index, f);
return DiyFp(f << (63 - index), e - (63 - index));
#elif defined(__GNUC__)
int s = __builtin_clzll(f);
return DiyFp(f << s, e - s);
#else
DiyFp res = *this;
while (!(res.f & kDpHiddenBit)) {
res.f <<= 1;
res.e--;
}
res.f <<= (kDiySignificandSize - kDpSignificandSize - 1);
res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 1);
return res;
#endif
}
DiyFp NormalizeBoundary() const {
#if defined(_MSC_VER) && defined(_M_AMD64)
unsigned long index;
_BitScanReverse64(&index, f);
return DiyFp (f << (63 - index), e - (63 - index));
#else
DiyFp res = *this;
while (!(res.f & (kDpHiddenBit << 1))) {
res.f <<= 1;
res.e--;
}
res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
return res;
#endif
}
void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
mi.f <<= mi.e - pl.e;
mi.e = pl.e;
*plus = pl;
*minus = mi;
}
static const int kDiySignificandSize = 64;
static const int kDpSignificandSize = 52;
static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
static const int kDpMinExponent = -kDpExponentBias;
static const uint64_t kDpExponentMask = UINT64_C2(0x7FF00000, 0x00000000);
static const uint64_t kDpSignificandMask = UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
static const uint64_t kDpHiddenBit = UINT64_C2(0x00100000, 0x00000000);
uint64_t f;
int e;
};
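// Returns a cached power of ten (as a normalized DiyFp) whose exponent brings the
// scaled value into the range Grisu2 needs; *K receives the matching decimal exponent.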
inline DiyFp GetCachedPower(int e, int* K) {
// 10^-348, 10^-340, ..., 10^340
static const uint64_t kCachedPowers_F[] = {
UINT64_C2(0xfa8fd5a0, 0x081c0288), UINT64_C2(0xbaaee17f, 0xa23ebf76),
UINT64_C2(0x8b16fb20, 0x3055ac76), UINT64_C2(0xcf42894a, 0x5dce35ea),
UINT64_C2(0x9a6bb0aa, 0x55653b2d), UINT64_C2(0xe61acf03, 0x3d1a45df),
UINT64_C2(0xab70fe17, 0xc79ac6ca), UINT64_C2(0xff77b1fc, 0xbebcdc4f),
UINT64_C2(0xbe5691ef, 0x416bd60c), UINT64_C2(0x8dd01fad, 0x907ffc3c),
UINT64_C2(0xd3515c28, 0x31559a83), UINT64_C2(0x9d71ac8f, 0xada6c9b5),
UINT64_C2(0xea9c2277, 0x23ee8bcb), UINT64_C2(0xaecc4991, 0x4078536d),
UINT64_C2(0x823c1279, 0x5db6ce57), UINT64_C2(0xc2109436, 0x4dfb5637),
UINT64_C2(0x9096ea6f, 0x3848984f), UINT64_C2(0xd77485cb, 0x25823ac7),
UINT64_C2(0xa086cfcd, 0x97bf97f4), UINT64_C2(0xef340a98, 0x172aace5),
UINT64_C2(0xb23867fb, 0x2a35b28e), UINT64_C2(0x84c8d4df, 0xd2c63f3b),
UINT64_C2(0xc5dd4427, 0x1ad3cdba), UINT64_C2(0x936b9fce, 0xbb25c996),
UINT64_C2(0xdbac6c24, 0x7d62a584), UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
UINT64_C2(0xf3e2f893, 0xdec3f126), UINT64_C2(0xb5b5ada8, 0xaaff80b8),
UINT64_C2(0x87625f05, 0x6c7c4a8b), UINT64_C2(0xc9bcff60, 0x34c13053),
UINT64_C2(0x964e858c, 0x91ba2655), UINT64_C2(0xdff97724, 0x70297ebd),
UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), UINT64_C2(0xf8a95fcf, 0x88747d94),
UINT64_C2(0xb9447093, 0x8fa89bcf), UINT64_C2(0x8a08f0f8, 0xbf0f156b),
UINT64_C2(0xcdb02555, 0x653131b6), UINT64_C2(0x993fe2c6, 0xd07b7fac),
UINT64_C2(0xe45c10c4, 0x2a2b3b06), UINT64_C2(0xaa242499, 0x697392d3),
UINT64_C2(0xfd87b5f2, 0x8300ca0e), UINT64_C2(0xbce50864, 0x92111aeb),
UINT64_C2(0x8cbccc09, 0x6f5088cc), UINT64_C2(0xd1b71758, 0xe219652c),
UINT64_C2(0x9c400000, 0x00000000), UINT64_C2(0xe8d4a510, 0x00000000),
UINT64_C2(0xad78ebc5, 0xac620000), UINT64_C2(0x813f3978, 0xf8940984),
UINT64_C2(0xc097ce7b, 0xc90715b3), UINT64_C2(0x8f7e32ce, 0x7bea5c70),
UINT64_C2(0xd5d238a4, 0xabe98068), UINT64_C2(0x9f4f2726, 0x179a2245),
UINT64_C2(0xed63a231, 0xd4c4fb27), UINT64_C2(0xb0de6538, 0x8cc8ada8),
UINT64_C2(0x83c7088e, 0x1aab65db), UINT64_C2(0xc45d1df9, 0x42711d9a),
UINT64_C2(0x924d692c, 0xa61be758), UINT64_C2(0xda01ee64, 0x1a708dea),
UINT64_C2(0xa26da399, 0x9aef774a), UINT64_C2(0xf209787b, 0xb47d6b85),
UINT64_C2(0xb454e4a1, 0x79dd1877), UINT64_C2(0x865b8692, 0x5b9bc5c2),
UINT64_C2(0xc83553c5, 0xc8965d3d), UINT64_C2(0x952ab45c, 0xfa97a0b3),
UINT64_C2(0xde469fbd, 0x99a05fe3), UINT64_C2(0xa59bc234, 0xdb398c25),
UINT64_C2(0xf6c69a72, 0xa3989f5c), UINT64_C2(0xb7dcbf53, 0x54e9bece),
UINT64_C2(0x88fcf317, 0xf22241e2), UINT64_C2(0xcc20ce9b, 0xd35c78a5),
UINT64_C2(0x98165af3, 0x7b2153df), UINT64_C2(0xe2a0b5dc, 0x971f303a),
UINT64_C2(0xa8d9d153, 0x5ce3b396), UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
UINT64_C2(0xbb764c4c, 0xa7a44410), UINT64_C2(0x8bab8eef, 0xb6409c1a),
UINT64_C2(0xd01fef10, 0xa657842c), UINT64_C2(0x9b10a4e5, 0xe9913129),
UINT64_C2(0xe7109bfb, 0xa19c0c9d), UINT64_C2(0xac2820d9, 0x623bf429),
UINT64_C2(0x80444b5e, 0x7aa7cf85), UINT64_C2(0xbf21e440, 0x03acdd2d),
UINT64_C2(0x8e679c2f, 0x5e44ff8f), UINT64_C2(0xd433179d, 0x9c8cb841),
UINT64_C2(0x9e19db92, 0xb4e31ba9), UINT64_C2(0xeb96bf6e, 0xbadf77d9),
UINT64_C2(0xaf87023b, 0x9bf0ee6b)
};
static const int16_t kCachedPowers_E[] = {
-1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
-954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
-688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
-422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
-157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
907, 933, 960, 986, 1013, 1039, 1066
};
//int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so the ceiling can be taken in the positive range
int k = static_cast<int>(dk);
if (k != dk)
k++;
unsigned index = static_cast<unsigned>((k >> 3) + 1);
*K = -(-348 + static_cast<int>(index << 3)); // decimal exponent; no lookup table needed
assert(index < sizeof(kCachedPowers_F) / sizeof(kCachedPowers_F[0]));
return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
}
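// Added commentary (not in the original source): 0.30102999566398114 is log10(2),
// so dk estimates the decimal exponent needed to scale the binary exponent e into
// the target range. The cached powers are spaced 8 apart in decimal exponent
// (entry i is 10^(-348 + 8*i)), which is why the index is derived with a shift by 3,
// and K records the negated decimal exponent of the chosen power.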
inline void GrisuRound(std::string &buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
while (rest < wp_w && delta - rest >= ten_kappa &&
(rest + ten_kappa < wp_w || /// closer
wp_w - rest > rest + ten_kappa - wp_w)) {
buffer[len - 1]--;
rest += ten_kappa;
}
}
inline unsigned CountDecimalDigit32(uint32_t n) {
// Simple pure C++ implementation was faster than __builtin_clz version in this situation.
if (n < 10) return 1;
if (n < 100) return 2;
if (n < 1000) return 3;
if (n < 10000) return 4;
if (n < 100000) return 5;
if (n < 1000000) return 6;
if (n < 10000000) return 7;
if (n < 100000000) return 8;
if (n < 1000000000) return 9;
return 10;
}
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, std::string &buffer, int* len, int* K) {
static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
const DiyFp wp_w = Mp - W;
uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
uint64_t p2 = Mp.f & (one.f - 1);
int kappa = static_cast<int>(CountDecimalDigit32(p1));
*len = 0;
while (kappa > 0) {
uint32_t d;
switch (kappa) {
case 10: d = p1 / 1000000000; p1 %= 1000000000; break;
case 9: d = p1 / 100000000; p1 %= 100000000; break;
case 8: d = p1 / 10000000; p1 %= 10000000; break;
case 7: d = p1 / 1000000; p1 %= 1000000; break;
case 6: d = p1 / 100000; p1 %= 100000; break;
case 5: d = p1 / 10000; p1 %= 10000; break;
case 4: d = p1 / 1000; p1 %= 1000; break;
case 3: d = p1 / 100; p1 %= 100; break;
case 2: d = p1 / 10; p1 %= 10; break;
case 1: d = p1; p1 = 0; break;
default:
#if defined(_MSC_VER)
__assume(0);
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
__builtin_unreachable();
#else
d = 0;
#endif
}
if (d || *len) {
buffer.push_back('0' + static_cast<char>(d));
(*len)++;
}
kappa--;
uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
if (tmp <= delta) {
*K += kappa;
GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
return;
}
}
// kappa = 0
for (;;) {
p2 *= 10;
delta *= 10;
char d = static_cast<char>(p2 >> -one.e);
if (d || *len) {
buffer.push_back('0' + d);
(*len)++;
}
p2 &= one.f - 1;
kappa--;
if (p2 < delta) {
*K += kappa;
GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * kPow10[-kappa]);
return;
}
}
}
inline void Grisu2(double value, std::string &buffer, int* length, int* K) {
const DiyFp v(value);
DiyFp w_m, w_p;
v.NormalizedBoundaries(&w_m, &w_p);
const DiyFp c_mk = GetCachedPower(w_p.e, K);
const DiyFp W = v.Normalize() * c_mk;
DiyFp Wp = w_p * c_mk;
DiyFp Wm = w_m * c_mk;
Wm.f++;
Wp.f--;
DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
}
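// Added commentary (not in the original source): Grisu2 normalizes the value and
// its two neighboring-representation boundaries, scales all three by the same
// cached power of ten so the exponent falls in a fixed range, then lets DigitGen
// emit the shortest digit string that stays strictly inside the (Wm, Wp) interval;
// K carries the accumulated decimal exponent on to Prettify.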
inline const char* GetDigitsLut() {
static const char cDigitsLut[200] = {
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
'1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
'2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
'3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
'4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
'5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
'6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
'7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
'8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
'9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
};
return cDigitsLut;
}
inline void WriteExponent(int K, std::string &buffer) {
if (K < 0) {
buffer.push_back('-');
K = -K;
} else {
buffer.push_back('+');
}
if (K >= 100) {
buffer.push_back('0' + static_cast<char>(K / 100));
K %= 100;
const char* d = GetDigitsLut() + K * 2;
buffer.push_back(d[0]);
buffer.push_back(d[1]);
}
else if (K >= 10) {
const char* d = GetDigitsLut() + K * 2;
buffer.push_back(d[0]);
buffer.push_back(d[1]);
}
else
buffer.push_back('0' + static_cast<char>(K));
}
inline void Prettify(std::string &buffer, int length, int k) {
const int kk = length + k; // 10^(kk-1) <= v < 10^kk
if (length <= kk && kk <= 21) {
// 1234e7 -> 12340000000
for (int i = length; i < kk; i++)
buffer.push_back('0');
}
else if (0 < kk && kk <= 21) {
// 1234e-2 -> 12.34
buffer.insert(buffer.begin() + kk, '.');
}
else if (-6 < kk && kk <= 0) {
// 1234e-6 -> 0.001234
const int offset = 2 - kk;
buffer.insert(buffer.begin(), '0');
buffer.insert(buffer.begin() + 1, '.');
for (int i = 2; i < offset; i++)
buffer.insert(buffer.begin() + 2, '0');
}
else if (length == 1) {
// 1e30
buffer.push_back('e');
WriteExponent(kk - 1, buffer);
}
else {
// 1234e30 -> 1.234e33
buffer.insert(buffer.begin() + 1, '.');
buffer.push_back('e');
WriteExponent(kk - 1, buffer);
}
}
inline std::string dtoa_milo(double value) {
std::string buffer;
if (std::isnan(value)) {
return "nan";
}
if (std::isinf(value)) {
if (value < 0) {
return "-inf";
} else {
return "inf";
}
}
if (value == 0) {
buffer = "0";
}
else {
bool minus = false;
if (value < 0) {
minus = true;
value = -value;
}
int length, K;
Grisu2(value, buffer, &length, &K);
Prettify(buffer, length, K);
if (minus) {
buffer.insert(buffer.begin(), '-');
}
}
return buffer;
}
}
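// Illustrative usage (added commentary, not in the original header):
//   milo::dtoa_milo(0.1)   == "0.1"     shortest round-trippable form
//   milo::dtoa_milo(-2.5)  == "-2.5"
//   milo::dtoa_milo(1e30)  == "1e+30"   large magnitudes switch to exponent form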

541
mvt.cpp Normal file

@@ -0,0 +1,541 @@
#include <stdio.h>
#include <string.h>
#include <string>
#include <vector>
#include <map>
#include <zlib.h>
#include <errno.h>
#include <limits.h>
#include "mvt.hpp"
#include "geometry.hpp"
#include "protozero/varint.hpp"
#include "protozero/pbf_reader.hpp"
#include "protozero/pbf_writer.hpp"
#include "milo/dtoa_milo.h"
mvt_geometry::mvt_geometry(int nop, long long nx, long long ny) {
this->op = nop;
this->x = nx;
this->y = ny;
}
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
bool is_compressed(std::string const &data) {
return data.size() > 2 && (((uint8_t) data[0] == 0x78 && (uint8_t) data[1] == 0x9C) || ((uint8_t) data[0] == 0x1F && (uint8_t) data[1] == 0x8B));
}
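// Added commentary (not in the original source): 0x78 0x9C is the standard zlib
// header (deflate, 32K window, default level) and 0x1F 0x8B is the gzip magic;
// decompress() below passes 32 + 15 to inflateInit2 so either framing is accepted.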
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
int decompress(std::string const &input, std::string &output) {
z_stream inflate_s;
inflate_s.zalloc = Z_NULL;
inflate_s.zfree = Z_NULL;
inflate_s.opaque = Z_NULL;
inflate_s.avail_in = 0;
inflate_s.next_in = Z_NULL;
if (inflateInit2(&inflate_s, 32 + 15) != Z_OK) {
fprintf(stderr, "error: %s\n", inflate_s.msg);
}
inflate_s.next_in = (Bytef *) input.data();
inflate_s.avail_in = input.size();
size_t length = 0;
do {
output.resize(length + 2 * input.size());
inflate_s.avail_out = 2 * input.size();
inflate_s.next_out = (Bytef *) (output.data() + length);
int ret = inflate(&inflate_s, Z_FINISH);
if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
fprintf(stderr, "error: %s\n", inflate_s.msg);
return 0;
}
length += (2 * input.size() - inflate_s.avail_out);
} while (inflate_s.avail_out == 0);
inflateEnd(&inflate_s);
output.resize(length);
return 1;
}
// https://github.com/mapbox/mapnik-vector-tile/blob/master/src/vector_tile_compression.hpp
int compress(std::string const &input, std::string &output) {
z_stream deflate_s;
deflate_s.zalloc = Z_NULL;
deflate_s.zfree = Z_NULL;
deflate_s.opaque = Z_NULL;
deflate_s.avail_in = 0;
deflate_s.next_in = Z_NULL;
deflateInit2(&deflate_s, Z_BEST_COMPRESSION, Z_DEFLATED, 31, 8, Z_DEFAULT_STRATEGY);
deflate_s.next_in = (Bytef *) input.data();
deflate_s.avail_in = input.size();
size_t length = 0;
do {
size_t increase = input.size() / 2 + 1024;
output.resize(length + increase);
deflate_s.avail_out = increase;
deflate_s.next_out = (Bytef *) (output.data() + length);
int ret = deflate(&deflate_s, Z_FINISH);
if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR) {
return -1;
}
length += (increase - deflate_s.avail_out);
} while (deflate_s.avail_out == 0);
deflateEnd(&deflate_s);
output.resize(length);
return 0;
}
bool mvt_tile::decode(std::string &message, bool &was_compressed) {
layers.clear();
std::string src;
if (is_compressed(message)) {
std::string uncompressed;
decompress(message, uncompressed);
src = uncompressed;
was_compressed = true;
} else {
src = message;
was_compressed = false;
}
protozero::pbf_reader reader(src);
while (reader.next()) {
switch (reader.tag()) {
case 3: /* layer */
{
protozero::pbf_reader layer_reader(reader.get_message());
mvt_layer layer;
while (layer_reader.next()) {
switch (layer_reader.tag()) {
case 1: /* name */
layer.name = layer_reader.get_string();
break;
case 3: /* key */
layer.keys.push_back(layer_reader.get_string());
break;
case 4: /* value */
{
protozero::pbf_reader value_reader(layer_reader.get_message());
mvt_value value;
while (value_reader.next()) {
switch (value_reader.tag()) {
case 1: /* string */
value.type = mvt_string;
value.string_value = value_reader.get_string();
break;
case 2: /* float */
value.type = mvt_float;
value.numeric_value.float_value = value_reader.get_float();
break;
case 3: /* double */
value.type = mvt_double;
value.numeric_value.double_value = value_reader.get_double();
break;
case 4: /* int */
value.type = mvt_int;
value.numeric_value.int_value = value_reader.get_int64();
break;
case 5: /* uint */
value.type = mvt_uint;
value.numeric_value.uint_value = value_reader.get_uint64();
break;
case 6: /* sint */
value.type = mvt_sint;
value.numeric_value.sint_value = value_reader.get_sint64();
break;
case 7: /* bool */
value.type = mvt_bool;
value.numeric_value.bool_value = value_reader.get_bool();
break;
default:
value_reader.skip();
break;
}
}
layer.values.push_back(value);
break;
}
case 5: /* extent */
layer.extent = layer_reader.get_uint32();
break;
case 15: /* version */
layer.version = layer_reader.get_uint32();
break;
case 2: /* feature */
{
protozero::pbf_reader feature_reader(layer_reader.get_message());
mvt_feature feature;
std::vector<uint32_t> geoms;
while (feature_reader.next()) {
switch (feature_reader.tag()) {
case 1: /* id */
feature.id = feature_reader.get_uint64();
feature.has_id = true;
break;
case 2: /* tag */
{
auto pi = feature_reader.get_packed_uint32();
for (auto it = pi.first; it != pi.second; ++it) {
feature.tags.push_back(*it);
}
break;
}
case 3: /* feature type */
feature.type = feature_reader.get_enum();
break;
case 4: /* geometry */
{
auto pi = feature_reader.get_packed_uint32();
for (auto it = pi.first; it != pi.second; ++it) {
geoms.push_back(*it);
}
break;
}
default:
feature_reader.skip();
break;
}
}
long long px = 0, py = 0;
for (size_t g = 0; g < geoms.size(); g++) {
uint32_t geom = geoms[g];
uint32_t op = geom & 7;
uint32_t count = geom >> 3;
if (op == mvt_moveto || op == mvt_lineto) {
for (size_t k = 0; k < count && g + 2 < geoms.size(); k++) {
px += protozero::decode_zigzag32(geoms[g + 1]);
py += protozero::decode_zigzag32(geoms[g + 2]);
g += 2;
feature.geometry.push_back(mvt_geometry(op, px, py));
}
} else {
feature.geometry.push_back(mvt_geometry(op, 0, 0));
}
}
layer.features.push_back(feature);
break;
}
default:
layer_reader.skip();
break;
}
}
for (size_t i = 0; i < layer.keys.size(); i++) {
layer.key_map.insert(std::pair<std::string, size_t>(layer.keys[i], i));
}
for (size_t i = 0; i < layer.values.size(); i++) {
layer.value_map.insert(std::pair<mvt_value, size_t>(layer.values[i], i));
}
layers.push_back(layer);
break;
}
default:
reader.skip();
break;
}
}
return true;
}
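// Worked example of the packed geometry handled above (added commentary, not in
// the original source): a MoveTo to (25,17) followed by a two-point LineTo to
// (28,20) and (28,23) arrives as the integers
//   9, 50, 34,        (1 << 3) | 1 = MoveTo with count 1; zigzag(25) = 50, zigzag(17) = 34
//   18, 6, 6, 0, 6    (2 << 3) | 2 = LineTo with count 2; zigzag deltas (+3,+3) and (0,+3)
// and the loop above accumulates the deltas back into absolute mvt_geometry points.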
std::string mvt_tile::encode() {
std::string data;
protozero::pbf_writer writer(data);
for (size_t i = 0; i < layers.size(); i++) {
std::string layer_string;
protozero::pbf_writer layer_writer(layer_string);
layer_writer.add_uint32(15, layers[i].version); /* version */
layer_writer.add_string(1, layers[i].name); /* name */
layer_writer.add_uint32(5, layers[i].extent); /* extent */
for (size_t j = 0; j < layers[i].keys.size(); j++) {
layer_writer.add_string(3, layers[i].keys[j]); /* key */
}
for (size_t v = 0; v < layers[i].values.size(); v++) {
std::string value_string;
protozero::pbf_writer value_writer(value_string);
mvt_value &pbv = layers[i].values[v];
if (pbv.type == mvt_string) {
value_writer.add_string(1, pbv.string_value);
} else if (pbv.type == mvt_float) {
value_writer.add_float(2, pbv.numeric_value.float_value);
} else if (pbv.type == mvt_double) {
value_writer.add_double(3, pbv.numeric_value.double_value);
} else if (pbv.type == mvt_int) {
value_writer.add_int64(4, pbv.numeric_value.int_value);
} else if (pbv.type == mvt_uint) {
value_writer.add_uint64(5, pbv.numeric_value.uint_value);
} else if (pbv.type == mvt_sint) {
value_writer.add_sint64(6, pbv.numeric_value.sint_value);
} else if (pbv.type == mvt_bool) {
value_writer.add_bool(7, pbv.numeric_value.bool_value);
}
layer_writer.add_message(4, value_string);
}
for (size_t f = 0; f < layers[i].features.size(); f++) {
std::string feature_string;
protozero::pbf_writer feature_writer(feature_string);
feature_writer.add_enum(3, layers[i].features[f].type);
feature_writer.add_packed_uint32(2, std::begin(layers[i].features[f].tags), std::end(layers[i].features[f].tags));
if (layers[i].features[f].has_id) {
feature_writer.add_uint64(1, layers[i].features[f].id);
}
std::vector<uint32_t> geometry;
int px = 0, py = 0;
int cmd_idx = -1;
int cmd = -1;
int length = 0;
std::vector<mvt_geometry> &geom = layers[i].features[f].geometry;
for (size_t g = 0; g < geom.size(); g++) {
int op = geom[g].op;
if (op != cmd) {
if (cmd_idx >= 0) {
geometry[cmd_idx] = (length << 3) | (cmd & ((1 << 3) - 1));
}
cmd = op;
length = 0;
cmd_idx = geometry.size();
geometry.push_back(0);
}
if (op == mvt_moveto || op == mvt_lineto) {
long long wwx = geom[g].x;
long long wwy = geom[g].y;
int dx = wwx - px;
int dy = wwy - py;
geometry.push_back(protozero::encode_zigzag32(dx));
geometry.push_back(protozero::encode_zigzag32(dy));
px = wwx;
py = wwy;
length++;
} else if (op == mvt_closepath) {
length++;
} else {
fprintf(stderr, "\nInternal error: corrupted geometry\n");
exit(EXIT_FAILURE);
}
}
if (cmd_idx >= 0) {
geometry[cmd_idx] = (length << 3) | (cmd & ((1 << 3) - 1));
}
feature_writer.add_packed_uint32(4, std::begin(geometry), std::end(geometry));
layer_writer.add_message(2, feature_string);
}
writer.add_message(3, layer_string);
}
return data;
}
bool mvt_value::operator<(const mvt_value &o) const {
if (type < o.type) {
return true;
}
if (type == o.type) {
if ((type == mvt_string && string_value < o.string_value) ||
(type == mvt_float && numeric_value.float_value < o.numeric_value.float_value) ||
(type == mvt_double && numeric_value.double_value < o.numeric_value.double_value) ||
(type == mvt_int && numeric_value.int_value < o.numeric_value.int_value) ||
(type == mvt_uint && numeric_value.uint_value < o.numeric_value.uint_value) ||
(type == mvt_sint && numeric_value.sint_value < o.numeric_value.sint_value) ||
(type == mvt_bool && numeric_value.bool_value < o.numeric_value.bool_value)) {
return true;
}
}
return false;
}
static std::string quote(std::string const &s) {
std::string buf;
for (size_t i = 0; i < s.size(); i++) {
unsigned char ch = s[i];
if (ch == '\\' || ch == '\"') {
buf.push_back('\\');
buf.push_back(ch);
} else if (ch < ' ') {
char tmp[7];
sprintf(tmp, "\\u%04x", ch);
buf.append(std::string(tmp));
} else {
buf.push_back(ch);
}
}
return buf;
}
std::string mvt_value::toString() {
if (type == mvt_string) {
return quote(string_value);
} else if (type == mvt_int) {
return std::to_string((long long) numeric_value.int_value);
} else if (type == mvt_double) {
double v = numeric_value.double_value;
if (v == (long long) v) {
return std::to_string((long long) v);
} else {
return milo::dtoa_milo(v);
}
} else if (type == mvt_float) {
double v = numeric_value.float_value;
if (v == (long long) v) {
return std::to_string((long long) v);
} else {
return milo::dtoa_milo(v);
}
} else if (type == mvt_sint) {
return std::to_string((long long) numeric_value.sint_value);
} else if (type == mvt_uint) {
return std::to_string((long long) numeric_value.uint_value);
} else if (type == mvt_bool) {
return numeric_value.bool_value ? "true" : "false";
} else {
return "unknown";
}
}
void mvt_layer::tag(mvt_feature &feature, std::string key, mvt_value value) {
size_t ko, vo;
std::map<std::string, size_t>::iterator ki = key_map.find(key);
std::map<mvt_value, size_t>::iterator vi = value_map.find(value);
if (ki == key_map.end()) {
ko = keys.size();
keys.push_back(key);
key_map.insert(std::pair<std::string, size_t>(key, ko));
} else {
ko = ki->second;
}
if (vi == value_map.end()) {
vo = values.size();
values.push_back(value);
value_map.insert(std::pair<mvt_value, size_t>(value, vo));
} else {
vo = vi->second;
}
feature.tags.push_back(ko);
feature.tags.push_back(vo);
}
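// Illustrative usage (added commentary, not in the original source; "layer" and
// "feature" are hypothetical locals): keys and values are deduplicated through the
// layer's pools, and feature.tags holds alternating key/value indices into them.
//   mvt_value v;
//   v.type = mvt_string;
//   v.string_value = "residential";
//   layer.tag(feature, "class", v);  // appends the index of "class" and the index of the value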
static int is_integer(const char *s, long long *v) {
errno = 0;
char *endptr;
*v = strtoll(s, &endptr, 0);
if (*v == 0 && errno != 0) {
return 0;
}
if ((*v == LLONG_MIN || *v == LLONG_MAX) && (errno == ERANGE)) {
return 0;
}
if (*endptr != '\0') {
// Special case: If it is an integer followed by .0000 or similar,
// it is still an integer
if (*endptr != '.') {
return 0;
}
endptr++;
for (; *endptr != '\0'; endptr++) {
if (*endptr != '0') {
return 0;
}
}
return 1;
}
return 1;
}
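// Examples of the rule above (added commentary, not in the original source),
// with long long v:
//   is_integer("1500", &v)     -> 1, v == 1500
//   is_integer("1500.000", &v) -> 1, v == 1500   (trailing zeros after '.' still count as integral)
//   is_integer("1500.25", &v)  -> 0              (genuine fractional part)
//   is_integer("0x1F", &v)     -> 1, v == 31     (strtoll with base 0 also accepts hex)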
mvt_value stringified_to_mvt_value(int type, const char *s) {
mvt_value tv;
if (type == mvt_double) {
long long v;
if (is_integer(s, &v)) {
if (v >= 0) {
tv.type = mvt_int;
tv.numeric_value.int_value = v;
} else {
tv.type = mvt_sint;
tv.numeric_value.sint_value = v;
}
} else {
double d = atof(s);
if (d == (float) d) {
tv.type = mvt_float;
tv.numeric_value.float_value = d;
} else {
tv.type = mvt_double;
tv.numeric_value.double_value = d;
}
}
} else if (type == mvt_bool) {
tv.type = mvt_bool;
tv.numeric_value.bool_value = (s[0] == 't');
} else if (type == mvt_null) {
tv.type = mvt_null;
} else {
tv.type = mvt_string;
tv.string_value = s;
}
return tv;
}
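// Illustrative behavior of the mvt_double branch above (added commentary, not in
// the original source): stringified numbers are re-typed as compactly as possible.
//   stringified_to_mvt_value(mvt_double, "123")  -> mvt_int    123
//   stringified_to_mvt_value(mvt_double, "-7")   -> mvt_sint   -7
//   stringified_to_mvt_value(mvt_double, "1.5")  -> mvt_float  1.5   (exact as a float)
//   stringified_to_mvt_value(mvt_double, "1.1")  -> mvt_double 1.1   (not exact as a float)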

114
mvt.hpp Normal file

@@ -0,0 +1,114 @@
#ifndef MVT_HPP
#define MVT_HPP
#include <sqlite3.h>
#include <string>
#include <map>
#include <set>
#include <vector>
struct mvt_value;
struct mvt_layer;
enum mvt_operation {
mvt_moveto = 1,
mvt_lineto = 2,
mvt_closepath = 7
};
struct mvt_geometry {
long long x;
long long y;
int /* mvt_operation */ op;
mvt_geometry(int op, long long x, long long y);
bool operator<(mvt_geometry const &s) const {
if (y < s.y || (y == s.y && x < s.x)) {
return true;
} else {
return false;
}
}
bool operator==(mvt_geometry const &s) const {
return y == s.y && x == s.x;
}
};
enum mvt_geometry_type {
mvt_point = 1,
mvt_linestring = 2,
mvt_polygon = 3
};
struct mvt_feature {
std::vector<unsigned> tags;
std::vector<mvt_geometry> geometry;
int /* mvt_geometry_type */ type;
unsigned long long id;
bool has_id;
mvt_feature() {
has_id = false;
id = 0;
}
};
enum mvt_value_type {
mvt_string,
mvt_float,
mvt_double,
mvt_int,
mvt_uint,
mvt_sint,
mvt_bool,
mvt_null,
};
struct mvt_value {
mvt_value_type type;
std::string string_value;
union {
float float_value;
double double_value;
long long int_value;
unsigned long long uint_value;
long long sint_value;
bool bool_value;
} numeric_value;
bool operator<(const mvt_value &o) const;
std::string toString();
};
struct mvt_layer {
int version;
std::string name;
std::vector<mvt_feature> features;
std::vector<std::string> keys;
std::vector<mvt_value> values;
long long extent;
// Add a key-value pair to a feature, using this layer's constant pool
void tag(mvt_feature &feature, std::string key, mvt_value value);
// For tracking the key-value constants already used in this layer
std::map<std::string, size_t> key_map;
std::map<mvt_value, size_t> value_map;
};
struct mvt_tile {
std::vector<mvt_layer> layers;
std::string encode();
bool decode(std::string &message, bool &was_compressed);
};
bool is_compressed(std::string const &data);
int decompress(std::string const &input, std::string &output);
int compress(std::string const &input, std::string &output);
int dezig(unsigned n);
mvt_value stringified_to_mvt_value(int type, const char *s);
#endif

39
options.hpp Normal file

@@ -0,0 +1,39 @@
#ifndef OPTIONS_HPP
#define OPTIONS_HPP
#define A_COALESCE ((int) 'c')
#define A_REVERSE ((int) 'r')
#define A_REORDER ((int) 'o')
#define A_LINE_DROP ((int) 'l')
#define A_DEBUG_POLYGON ((int) 'D')
#define A_POLYGON_DROP ((int) 'p')
#define A_DETECT_SHARED_BORDERS ((int) 'b')
#define A_PREFER_RADIX_SORT ((int) 'R')
#define A_CALCULATE_FEATURE_DENSITY ((int) 'g')
#define A_INCREASE_GAMMA_AS_NEEDED ((int) 'G')
#define A_MERGE_POLYGONS_AS_NEEDED ((int) 'm')
#define A_DROP_DENSEST_AS_NEEDED ((int) 's')
#define A_DROP_FRACTION_AS_NEEDED ((int) 'd')
#define A_DROP_SMALLEST_AS_NEEDED ((int) 'n')
#define A_COALESCE_SMALLEST_AS_NEEDED ((int) 'N')
#define A_GRID_LOW_ZOOMS ((int) 'L')
#define A_DETECT_WRAPAROUND ((int) 'w')
#define A_EXTEND_ZOOMS ((int) 'e')
#define P_SIMPLIFY ((int) 's')
#define P_SIMPLIFY_LOW ((int) 'S')
#define P_FEATURE_LIMIT ((int) 'f')
#define P_KILOBYTE_LIMIT ((int) 'k')
#define P_DYNAMIC_DROP ((int) 'd')
#define P_INPUT_ORDER ((int) 'i')
#define P_POLYGON_SPLIT ((int) 'p')
#define P_CLIPPING ((int) 'c')
#define P_DUPLICATION ((int) 'D')
#define P_TINY_POLYGON_REDUCTION ((int) 't')
#define P_TILE_COMPRESSION ((int) 'C')
#define P_TILE_STATS ((int) 'g')
extern int prevent[256];
extern int additional[256];
#endif

648
plugin.cpp Normal file

@@ -0,0 +1,648 @@
#ifdef __APPLE__
#define _DARWIN_UNLIMITED_STREAMS
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <string>
#include <map>
#include <set>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <cmath>
#include <sys/types.h>
#include <sys/wait.h>
#include <sqlite3.h>
#include <limits.h>
#include "main.hpp"
#include "mvt.hpp"
#include "mbtiles.hpp"
#include "projection.hpp"
#include "geometry.hpp"
#include "serial.hpp"
extern "C" {
#include "jsonpull/jsonpull.h"
}
#include "plugin.hpp"
#include "write_json.hpp"
#include "read_json.hpp"
struct writer_arg {
int write_to;
std::vector<mvt_layer> *layers;
unsigned z;
unsigned x;
unsigned y;
int extent;
};
void *run_writer(void *a) {
writer_arg *wa = (writer_arg *) a;
FILE *fp = fdopen(wa->write_to, "w");
if (fp == NULL) {
perror("fdopen (pipe writer)");
exit(EXIT_FAILURE);
}
for (size_t i = 0; i < wa->layers->size(); i++) {
layer_to_geojson(fp, (*(wa->layers))[i], wa->z, wa->x, wa->y, false, true, false, 0, 0, 0, true);
}
if (fclose(fp) != 0) {
if (errno == EPIPE) {
static bool warned = false;
if (!warned) {
fprintf(stderr, "Warning: broken pipe in postfilter\n");
warned = true;
}
} else {
perror("fclose output to filter");
exit(EXIT_FAILURE);
}
}
return NULL;
}
// XXX deduplicate
static std::vector<mvt_geometry> to_feature(drawvec &geom) {
std::vector<mvt_geometry> out;
for (size_t i = 0; i < geom.size(); i++) {
out.push_back(mvt_geometry(geom[i].op, geom[i].x, geom[i].y));
}
return out;
}
// Reads from the postfilter
std::vector<mvt_layer> parse_layers(int fd, int z, unsigned x, unsigned y, std::vector<std::map<std::string, layermap_entry>> *layermaps, size_t tiling_seg, std::vector<std::vector<std::string>> *layer_unmaps, int extent) {
std::map<std::string, mvt_layer> ret;
FILE *f = fdopen(fd, "r");
if (f == NULL) {
perror("fdopen filter output");
exit(EXIT_FAILURE);
}
json_pull *jp = json_begin_file(f);
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "Filter output:%d: %s\n", jp->line, jp->error);
if (jp->root != NULL) {
json_context(jp->root);
}
exit(EXIT_FAILURE);
}
json_free(jp->root);
break;
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING) {
continue;
}
if (strcmp(type->string, "Feature") != 0) {
continue;
}
json_object *geometry = json_hash_get(j, "geometry");
if (geometry == NULL) {
fprintf(stderr, "Filter output:%d: filtered feature with no geometry\n", jp->line);
json_context(j);
json_free(j);
exit(EXIT_FAILURE);
}
json_object *properties = json_hash_get(j, "properties");
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
fprintf(stderr, "Filter output:%d: feature without properties hash\n", jp->line);
json_context(j);
json_free(j);
exit(EXIT_FAILURE);
}
json_object *geometry_type = json_hash_get(geometry, "type");
if (geometry_type == NULL) {
fprintf(stderr, "Filter output:%d: null geometry (additional not reported)\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
if (geometry_type->type != JSON_STRING) {
fprintf(stderr, "Filter output:%d: geometry type is not a string\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
json_object *coordinates = json_hash_get(geometry, "coordinates");
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
fprintf(stderr, "Filter output:%d: feature without coordinates array\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
int t;
for (t = 0; t < GEOM_TYPES; t++) {
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
break;
}
}
if (t >= GEOM_TYPES) {
fprintf(stderr, "Filter output:%d: Can't handle geometry type %s\n", jp->line, geometry_type->string);
json_context(j);
exit(EXIT_FAILURE);
}
std::string layername = "unknown";
json_object *tippecanoe = json_hash_get(j, "tippecanoe");
json_object *layer = NULL;
if (tippecanoe != NULL) {
layer = json_hash_get(tippecanoe, "layer");
if (layer != NULL && layer->type == JSON_STRING) {
layername = std::string(layer->string);
}
}
if (ret.count(layername) == 0) {
mvt_layer l;
l.name = layername;
l.version = 2;
l.extent = extent;
ret.insert(std::pair<std::string, mvt_layer>(layername, l));
}
auto l = ret.find(layername);
drawvec dv;
parse_geometry(t, coordinates, dv, VT_MOVETO, "Filter output", jp->line, j);
if (mb_geometry[t] == VT_POLYGON) {
dv = fix_polygon(dv);
}
// Scale and offset geometry from global to tile
for (size_t i = 0; i < dv.size(); i++) {
long long scale = 1LL << (32 - z);
dv[i].x = std::round((dv[i].x - scale * x) * extent / (double) scale);
dv[i].y = std::round((dv[i].y - scale * y) * extent / (double) scale);
}
if (mb_geometry[t] == VT_POLYGON) {
dv = clean_or_clip_poly(dv, 0, 0, 0, false);
if (dv.size() < 3) {
dv.clear();
}
}
dv = remove_noop(dv, mb_geometry[t], 0);
if (mb_geometry[t] == VT_POLYGON) {
dv = close_poly(dv);
}
if (dv.size() > 0) {
mvt_feature feature;
feature.type = mb_geometry[t];
feature.geometry = to_feature(dv);
json_object *id = json_hash_get(j, "id");
if (id != NULL) {
feature.id = atoll(id->string);
feature.has_id = true;
}
std::map<std::string, layermap_entry> &layermap = (*layermaps)[tiling_seg];
if (layermap.count(layername) == 0) {
layermap_entry lme = layermap_entry(layermap.size());
lme.minzoom = z;
lme.maxzoom = z;
layermap.insert(std::pair<std::string, layermap_entry>(layername, lme));
if (lme.id >= (*layer_unmaps)[tiling_seg].size()) {
(*layer_unmaps)[tiling_seg].resize(lme.id + 1);
(*layer_unmaps)[tiling_seg][lme.id] = layername;
}
}
auto fk = layermap.find(layername);
if (fk == layermap.end()) {
fprintf(stderr, "Internal error: layer %s not found\n", layername.c_str());
exit(EXIT_FAILURE);
}
if (z < fk->second.minzoom) {
fk->second.minzoom = z;
}
if (z > fk->second.maxzoom) {
fk->second.maxzoom = z;
}
if (feature.type == mvt_point) {
fk->second.points++;
} else if (feature.type == mvt_linestring) {
fk->second.lines++;
} else if (feature.type == mvt_polygon) {
fk->second.polygons++;
}
for (size_t i = 0; i < properties->length; i++) {
int tp = -1;
std::string s;
stringify_value(properties->values[i], tp, s, "Filter output", jp->line, j, "");
if (tp >= 0) {
mvt_value v = stringified_to_mvt_value(tp, s.c_str());
l->second.tag(feature, std::string(properties->keys[i]->string), v);
type_and_string attrib;
attrib.type = tp;
attrib.string = s;
add_to_file_keys(fk->second.file_keys, std::string(properties->keys[i]->string), attrib);
}
}
l->second.features.push_back(feature);
}
json_free(j);
}
json_end(jp);
if (fclose(f) != 0) {
perror("fclose postfilter output");
exit(EXIT_FAILURE);
}
std::vector<mvt_layer> final;
for (auto a : ret) {
final.push_back(a.second);
}
return final;
}
// Reads from the prefilter
serial_feature parse_feature(json_pull *jp, int z, unsigned x, unsigned y, std::vector<std::map<std::string, layermap_entry>> *layermaps, size_t tiling_seg, std::vector<std::vector<std::string>> *layer_unmaps, bool postfilter) {
serial_feature sf;
while (1) {
json_object *j = json_read(jp);
if (j == NULL) {
if (jp->error != NULL) {
fprintf(stderr, "Filter output:%d: %s\n", jp->line, jp->error);
if (jp->root != NULL) {
json_context(jp->root);
}
exit(EXIT_FAILURE);
}
json_free(jp->root);
sf.t = -1;
return sf;
}
json_object *type = json_hash_get(j, "type");
if (type == NULL || type->type != JSON_STRING) {
continue;
}
if (strcmp(type->string, "Feature") != 0) {
continue;
}
json_object *geometry = json_hash_get(j, "geometry");
if (geometry == NULL) {
fprintf(stderr, "Filter output:%d: filtered feature with no geometry\n", jp->line);
json_context(j);
json_free(j);
exit(EXIT_FAILURE);
}
json_object *properties = json_hash_get(j, "properties");
if (properties == NULL || (properties->type != JSON_HASH && properties->type != JSON_NULL)) {
fprintf(stderr, "Filter output:%d: feature without properties hash\n", jp->line);
json_context(j);
json_free(j);
exit(EXIT_FAILURE);
}
json_object *geometry_type = json_hash_get(geometry, "type");
if (geometry_type == NULL) {
fprintf(stderr, "Filter output:%d: null geometry (additional not reported)\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
if (geometry_type->type != JSON_STRING) {
fprintf(stderr, "Filter output:%d: geometry type is not a string\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
json_object *coordinates = json_hash_get(geometry, "coordinates");
if (coordinates == NULL || coordinates->type != JSON_ARRAY) {
fprintf(stderr, "Filter output:%d: feature without coordinates array\n", jp->line);
json_context(j);
exit(EXIT_FAILURE);
}
int t;
for (t = 0; t < GEOM_TYPES; t++) {
if (strcmp(geometry_type->string, geometry_names[t]) == 0) {
break;
}
}
if (t >= GEOM_TYPES) {
fprintf(stderr, "Filter output:%d: Can't handle geometry type %s\n", jp->line, geometry_type->string);
json_context(j);
exit(EXIT_FAILURE);
}
drawvec dv;
parse_geometry(t, coordinates, dv, VT_MOVETO, "Filter output", jp->line, j);
if (mb_geometry[t] == VT_POLYGON) {
dv = fix_polygon(dv);
}
// Scale and offset geometry from global to tile
double scale = 1LL << geometry_scale;
for (size_t i = 0; i < dv.size(); i++) {
unsigned sx = 0, sy = 0;
if (z != 0) {
sx = x << (32 - z);
sy = y << (32 - z);
}
dv[i].x = std::round(dv[i].x / scale) * scale - sx;
dv[i].y = std::round(dv[i].y / scale) * scale - sy;
}
if (dv.size() > 0) {
sf.t = mb_geometry[t];
sf.segment = tiling_seg;
sf.geometry = dv;
sf.seq = 0;
sf.index = 0;
sf.bbox[0] = sf.bbox[1] = LLONG_MAX;
sf.bbox[2] = sf.bbox[3] = LLONG_MIN;
sf.extent = 0;
sf.m = 0;
sf.metapos = 0;
sf.has_id = false;
std::string layername = "unknown";
json_object *tippecanoe = json_hash_get(j, "tippecanoe");
if (tippecanoe != NULL) {
json_object *layer = json_hash_get(tippecanoe, "layer");
if (layer != NULL && layer->type == JSON_STRING) {
layername = std::string(layer->string);
}
json_object *index = json_hash_get(tippecanoe, "index");
if (index != NULL && index->type == JSON_NUMBER) {
sf.index = index->number;
}
json_object *sequence = json_hash_get(tippecanoe, "sequence");
if (sequence != NULL && sequence->type == JSON_NUMBER) {
sf.seq = sequence->number;
}
json_object *extent = json_hash_get(tippecanoe, "extent");
if (extent != NULL && extent->type == JSON_NUMBER) {
sf.extent = extent->number;
}
}
for (size_t i = 0; i < dv.size(); i++) {
if (dv[i].op == VT_MOVETO || dv[i].op == VT_LINETO) {
if (dv[i].x < sf.bbox[0]) {
sf.bbox[0] = dv[i].x;
}
if (dv[i].y < sf.bbox[1]) {
sf.bbox[1] = dv[i].y;
}
if (dv[i].x > sf.bbox[2]) {
sf.bbox[2] = dv[i].x;
}
if (dv[i].y > sf.bbox[3]) {
sf.bbox[3] = dv[i].y;
}
}
}
json_object *id = json_hash_get(j, "id");
if (id != NULL) {
sf.id = atoll(id->string);
sf.has_id = true;
}
std::map<std::string, layermap_entry> &layermap = (*layermaps)[tiling_seg];
if (layermap.count(layername) == 0) {
layermap_entry lme = layermap_entry(layermap.size());
lme.minzoom = z;
lme.maxzoom = z;
layermap.insert(std::pair<std::string, layermap_entry>(layername, lme));
if (lme.id >= (*layer_unmaps)[tiling_seg].size()) {
(*layer_unmaps)[tiling_seg].resize(lme.id + 1);
(*layer_unmaps)[tiling_seg][lme.id] = layername;
}
}
auto fk = layermap.find(layername);
if (fk == layermap.end()) {
fprintf(stderr, "Internal error: layer %s not found\n", layername.c_str());
exit(EXIT_FAILURE);
}
sf.layer = fk->second.id;
if (z < fk->second.minzoom) {
fk->second.minzoom = z;
}
if (z > fk->second.maxzoom) {
fk->second.maxzoom = z;
}
if (!postfilter) {
if (sf.t == mvt_point) {
fk->second.points++;
} else if (sf.t == mvt_linestring) {
fk->second.lines++;
} else if (sf.t == mvt_polygon) {
fk->second.polygons++;
}
}
for (size_t i = 0; i < properties->length; i++) {
serial_val v;
v.type = -1;
stringify_value(properties->values[i], v.type, v.s, "Filter output", jp->line, j, "");
if (v.type >= 0) {
sf.full_keys.push_back(std::string(properties->keys[i]->string));
sf.full_values.push_back(v);
type_and_string attrib;
attrib.string = v.s;
attrib.type = v.type;
if (!postfilter) {
add_to_file_keys(fk->second.file_keys, std::string(properties->keys[i]->string), attrib);
}
}
}
json_free(j);
return sf;
}
json_free(j);
}
}
static pthread_mutex_t pipe_lock = PTHREAD_MUTEX_INITIALIZER;
void setup_filter(const char *filter, int *write_to, int *read_from, pid_t *pid, unsigned z, unsigned x, unsigned y) {
// This will create two pipes, a new thread, and a new process.
//
// The new process will read from one pipe and write to the other, and execute the filter.
// The new thread will write the GeoJSON to the pipe that leads to the filter.
// The original thread will read the GeoJSON from the filter and convert it back into vector tiles.
if (pthread_mutex_lock(&pipe_lock) != 0) {
perror("pthread_mutex_lock (pipe)");
exit(EXIT_FAILURE);
}
int pipe_orig[2], pipe_filtered[2];
if (pipe(pipe_orig) < 0) {
perror("pipe (original features)");
exit(EXIT_FAILURE);
}
if (pipe(pipe_filtered) < 0) {
perror("pipe (filtered features)");
exit(EXIT_FAILURE);
}
std::string z_str = std::to_string(z);
std::string x_str = std::to_string(x);
std::string y_str = std::to_string(y);
*pid = fork();
if (*pid < 0) {
perror("fork");
exit(EXIT_FAILURE);
} else if (*pid == 0) {
// child
if (dup2(pipe_orig[0], 0) < 0) {
perror("dup child stdin");
exit(EXIT_FAILURE);
}
if (dup2(pipe_filtered[1], 1) < 0) {
perror("dup child stdout");
exit(EXIT_FAILURE);
}
if (close(pipe_orig[1]) != 0) {
perror("close output to filter");
exit(EXIT_FAILURE);
}
if (close(pipe_filtered[0]) != 0) {
perror("close input from filter");
exit(EXIT_FAILURE);
}
if (close(pipe_orig[0]) != 0) {
perror("close dup input of filter");
exit(EXIT_FAILURE);
}
if (close(pipe_filtered[1]) != 0) {
perror("close dup output of filter");
exit(EXIT_FAILURE);
}
// XXX close other fds?
if (execlp("sh", "sh", "-c", filter, "sh", z_str.c_str(), x_str.c_str(), y_str.c_str(), NULL) != 0) {
perror("exec");
exit(EXIT_FAILURE);
}
} else {
// parent
if (close(pipe_orig[0]) != 0) {
perror("close filter-side reader");
exit(EXIT_FAILURE);
}
if (close(pipe_filtered[1]) != 0) {
perror("close filter-side writer");
exit(EXIT_FAILURE);
}
if (fcntl(pipe_orig[1], F_SETFD, FD_CLOEXEC) != 0) {
perror("cloxec output to filter");
exit(EXIT_FAILURE);
}
if (fcntl(pipe_filtered[0], F_SETFD, FD_CLOEXEC) != 0) {
perror("cloxec input from filter");
exit(EXIT_FAILURE);
}
if (pthread_mutex_unlock(&pipe_lock) != 0) {
perror("pthread_mutex_unlock (pipe_lock)");
exit(EXIT_FAILURE);
}
*write_to = pipe_orig[1];
*read_from = pipe_filtered[0];
}
}
std::vector<mvt_layer> filter_layers(const char *filter, std::vector<mvt_layer> &layers, unsigned z, unsigned x, unsigned y, std::vector<std::map<std::string, layermap_entry>> *layermaps, size_t tiling_seg, std::vector<std::vector<std::string>> *layer_unmaps, int extent) {
int write_to, read_from;
pid_t pid;
setup_filter(filter, &write_to, &read_from, &pid, z, x, y);
writer_arg wa;
wa.write_to = write_to;
wa.layers = &layers;
wa.z = z;
wa.x = x;
wa.y = y;
wa.extent = extent;
pthread_t writer;
if (pthread_create(&writer, NULL, run_writer, &wa) != 0) {
perror("pthread_create (filter writer)");
exit(EXIT_FAILURE);
}
std::vector<mvt_layer> nlayers = parse_layers(read_from, z, x, y, layermaps, tiling_seg, layer_unmaps, extent);
while (1) {
int stat_loc;
if (waitpid(pid, &stat_loc, 0) < 0) {
perror("waitpid for filter\n");
exit(EXIT_FAILURE);
}
if (WIFEXITED(stat_loc) || WIFSIGNALED(stat_loc)) {
break;
}
}
void *ret;
if (pthread_join(writer, &ret) != 0) {
perror("pthread_join filter writer");
exit(EXIT_FAILURE);
}
return nlayers;
}
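// Illustrative note (added commentary, not in the original source): the filter is
// run as `sh -c '<filter>' sh <z> <x> <y>`, so the command sees the tile
// coordinates as $1, $2, $3, reads the tile's features as GeoJSON on stdin, and
// must write GeoJSON Feature objects to stdout. A hypothetical filter string like
//   jq -c 'select(.properties.population > 100000)'
// (assuming features carry a "population" attribute) would keep only the matching
// features in each tile.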

3
plugin.hpp Normal file

@@ -0,0 +1,3 @@
std::vector<mvt_layer> filter_layers(const char *filter, std::vector<mvt_layer> &layer, unsigned z, unsigned x, unsigned y, std::vector<std::map<std::string, layermap_entry>> *layermaps, size_t tiling_seg, std::vector<std::vector<std::string>> *layer_unmaps, int extent);
void setup_filter(const char *filter, int *write_to, int *read_from, pid_t *pid, unsigned z, unsigned x, unsigned y);
serial_feature parse_feature(json_pull *jp, int z, unsigned x, unsigned y, std::vector<std::map<std::string, layermap_entry>> *layermaps, size_t tiling_seg, std::vector<std::vector<std::string>> *layer_unmaps, bool filters);

119
pool.c

@@ -1,119 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pool.h"
#define POOL_WIDTH 256
static int hash(const char *s) {
int h = 0;
for (; *s; s++) {
h = h * 37 + *s;
}
h = h & 0xFF;
return h;
}
struct pool_val *pool(struct pool *p, const char *s, int type) {
int h = hash(s);
struct pool_val **v = &(p->vals[h]);
while (*v != NULL) {
int cmp = strcmp(s, (*v)->s);
if (cmp == 0) {
cmp = type - (*v)->type;
}
if (cmp == 0) {
return *v;
} else if (cmp < 0) {
v = &((*v)->left);
} else {
v = &((*v)->right);
}
}
struct pool_val *nv = malloc(sizeof(struct pool_val));
if (nv == NULL) {
fprintf(stderr, "out of memory making string pool\n");
exit(EXIT_FAILURE);
}
nv->left = NULL;
nv->right = NULL;
nv->next = NULL;
nv->s = s;
nv->type = type;
nv->n = p->n++;
if (p->tail != NULL) {
p->tail->next = nv;
}
p->tail = nv;
if (p->head == NULL) {
p->head = nv;
}
*v = nv;
return *v;
}
int is_pooled(struct pool *p, const char *s, int type) {
int h = hash(s);
struct pool_val **v = &(p->vals[h]);
while (*v != NULL) {
int cmp = strcmp(s, (*v)->s);
if (cmp == 0) {
cmp = type - (*v)->type;
}
if (cmp == 0) {
return 1;
} else if (cmp < 0) {
v = &((*v)->left);
} else {
v = &((*v)->right);
}
}
return 0;
}
void pool_free1(struct pool *p, void (*func)(void *)) {
while (p->head != NULL) {
if (func != NULL) {
func((void *) p->head->s);
}
struct pool_val *next = p->head->next;
free(p->head);
p->head = next;
}
p->head = NULL;
p->tail = NULL;
free(p->vals);
p->vals = NULL;
}
void pool_free(struct pool *p) {
pool_free1(p, NULL);
}
void pool_free_strings(struct pool *p) {
pool_free1(p, free);
}
void pool_init(struct pool *p, int n) {
p->n = n;
p->vals = calloc(POOL_WIDTH, sizeof(struct pool_val *));
if (p->vals == NULL) {
fprintf(stderr, "out of memory creating string pool\n");
exit(EXIT_FAILURE);
}
p->head = NULL;
p->tail = NULL;
}

121
pool.cpp Normal file

@@ -0,0 +1,121 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include "memfile.hpp"
#include "pool.hpp"
int swizzlecmp(const char *a, const char *b) {
ssize_t alen = strlen(a);
ssize_t blen = strlen(b);
if (strcmp(a, b) == 0) {
return 0;
}
long long hash1 = 0, hash2 = 0;
for (ssize_t i = alen - 1; i >= 0; i--) {
hash1 = (hash1 * 37 + a[i]) & INT_MAX;
}
for (ssize_t i = blen - 1; i >= 0; i--) {
hash2 = (hash2 * 37 + b[i]) & INT_MAX;
}
int h1 = hash1, h2 = hash2;
if (h1 == h2) {
return strcmp(a, b);
}
return h1 - h2;
}
long long addpool(struct memfile *poolfile, struct memfile *treefile, const char *s, char type) {
unsigned long *sp = &treefile->tree;
size_t depth = 0;
// In typical data, traversal depth generally stays under 2.5x log2(n)
size_t max = 3 * log(treefile->off / sizeof(struct stringpool)) / log(2);
if (max < 30) {
max = 30;
}
while (*sp != 0) {
int cmp = swizzlecmp(s, poolfile->map + ((struct stringpool *) (treefile->map + *sp))->off + 1);
if (cmp == 0) {
cmp = type - (poolfile->map + ((struct stringpool *) (treefile->map + *sp))->off)[0];
}
if (cmp < 0) {
sp = &(((struct stringpool *) (treefile->map + *sp))->left);
} else if (cmp > 0) {
sp = &(((struct stringpool *) (treefile->map + *sp))->right);
} else {
return ((struct stringpool *) (treefile->map + *sp))->off;
}
depth++;
if (depth > max) {
// Search is very deep, so string is probably unique.
// Add it to the pool without adding it to the search tree.
long long off = poolfile->off;
if (memfile_write(poolfile, &type, 1) < 0) {
perror("memfile write");
exit(EXIT_FAILURE);
}
if (memfile_write(poolfile, (void *) s, strlen(s) + 1) < 0) {
perror("memfile write");
exit(EXIT_FAILURE);
}
return off;
}
}
// *sp is probably in the memory-mapped file, and will move if the file grows.
long long ssp;
if (sp == &treefile->tree) {
ssp = -1;
} else {
ssp = ((char *) sp) - treefile->map;
}
long long off = poolfile->off;
if (memfile_write(poolfile, &type, 1) < 0) {
perror("memfile write");
exit(EXIT_FAILURE);
}
if (memfile_write(poolfile, (void *) s, strlen(s) + 1) < 0) {
perror("memfile write");
exit(EXIT_FAILURE);
}
if (off >= LONG_MAX || treefile->off >= LONG_MAX) {
// Tree or pool is bigger than 2GB
static bool warned = false;
if (!warned) {
fprintf(stderr, "Warning: string pool is very large.\n");
warned = true;
}
return off;
}
struct stringpool tsp;
tsp.left = 0;
tsp.right = 0;
tsp.off = off;
long long p = treefile->off;
if (memfile_write(treefile, &tsp, sizeof(struct stringpool)) < 0) {
perror("memfile write");
exit(EXIT_FAILURE);
}
if (ssp == -1) {
treefile->tree = p;
} else {
*((long long *) (treefile->map + ssp)) = p;
}
return off;
}
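// Added commentary (not in the original source): each pool entry is a single type
// byte followed by the NUL-terminated string, and addpool returns that entry's byte
// offset in poolfile as a compact reference to the deduplicated string. The search
// tree's stringpool nodes live in the separate treefile, so the pool itself remains
// a flat, append-only byte stream.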

24
pool.h

@@ -1,24 +0,0 @@
struct pool_val {
const char *s;
int type;
int n;
struct pool_val *left;
struct pool_val *right;
struct pool_val *next;
};
struct pool {
struct pool_val **vals;
struct pool_val *head;
struct pool_val *tail;
int n;
};
struct pool_val *pool(struct pool *p, const char *s, int type);
void pool_free(struct pool *p);
void pool_free_strings(struct pool *p);
void pool_init(struct pool *p, int n);
int is_pooled(struct pool *p, const char *s, int type);

Some files were not shown because too many files have changed in this diff.