diff --git a/main.cpp b/main.cpp
index ae41d82..898bb5b 100644
--- a/main.cpp
+++ b/main.cpp
@@ -74,6 +74,7 @@ int geometry_scale = 0;
 double simplification = 1;
 size_t max_tile_size = 500000;
 size_t max_tile_features = 200000;
+size_t min_cluster_features = 0;
 int cluster_distance = 0;
 long justx = -1, justy = -1;
 std::string attribute_for_id = "";
@@ -2605,6 +2606,7 @@ int main(int argc, char **argv) {
 		{"maximum-tile-bytes", required_argument, 0, 'M'},
 		{"maximum-tile-features", required_argument, 0, 'O'},
 		{"no-feature-limit", no_argument, &prevent[P_FEATURE_LIMIT], 1},
+		{"minimum-cluster-features", required_argument, 0, 'i'},
 		{"no-tile-size-limit", no_argument, &prevent[P_KILOBYTE_LIMIT], 1},
 		{"no-tile-compression", no_argument, &prevent[P_TILE_COMPRESSION], 1},
 		{"no-tile-stats", no_argument, &prevent[P_TILE_STATS], 1},
@@ -2990,6 +2992,10 @@ int main(int argc, char **argv) {
 			max_tile_features = atoll_require(optarg, "Max tile features");
 			break;
 
+		case 'i':
+			min_cluster_features = atoll_require(optarg, "Min cluster features");
+			break;
+
 		case 'c':
 			postfilter = optarg;
 			break;
diff --git a/main.hpp b/main.hpp
index 8d89ab6..5508df9 100644
--- a/main.hpp
+++ b/main.hpp
@@ -46,6 +46,7 @@ extern size_t TEMP_FILES;
 
 extern size_t max_tile_size;
 extern size_t max_tile_features;
+extern size_t min_cluster_features;
 extern int cluster_distance;
 
 extern std::string attribute_for_id;
diff --git a/tile.cpp b/tile.cpp
index cf41982..e1dc3c7 100644
--- a/tile.cpp
+++ b/tile.cpp
@@ -385,6 +385,7 @@ struct partial {
 	long long clustered = 0;
 	std::set<std::string> need_tilestats;
 	std::map<std::string, accum_state> attribute_accum_state;
+	std::vector<partial> clustered_features;
 };
 
 struct partial_arg {
@@ -1715,6 +1716,32 @@ static bool line_is_too_small(drawvec const &geometry, int z, int detail) {
 	return true;
 }
 
+static partial create_partial(const serial_feature &sf, bool reduced, int z, int line_detail, int maxzoom, double spacing, double simplification) {
+	partial p;
+	p.geoms.push_back(sf.geometry);
+	p.layer = sf.layer;
+	p.t = sf.t;
+	p.segment = sf.segment;
+	p.original_seq = sf.seq;
+	p.reduced = reduced;
+	p.z = z;
+	p.line_detail = line_detail;
+	p.maxzoom = maxzoom;
+	p.keys = sf.keys;
+	p.values = sf.values;
+	p.full_keys = sf.full_keys;
+	p.full_values = sf.full_values;
+	p.spacing = spacing;
+	p.simplification = simplification;
+	p.id = sf.id;
+	p.has_id = sf.has_id;
+	p.index = sf.index;
+	p.renamed = -1;
+	p.extent = sf.extent;
+	p.clustered = 0;
+	return p;
+}
+
 long long write_tile(FILE *geoms, std::atomic<long long> *geompos_in, char *metabase, char *stringpool, int z, unsigned tx, unsigned ty, int detail, int min_detail, sqlite3 *outdb, const char *outdir, int buffer, const char *fname, FILE **geomfile, int minzoom, int maxzoom, double todo, std::atomic<long long> *along, long long alongminus, double gamma, int child_shards, long long *meta_off, long long *pool_off, unsigned *initial_x, unsigned *initial_y, std::atomic<int> *running, double simplification, std::vector<std::map<std::string, layermap_entry>> *layermaps, std::vector<std::vector<std::string>> *layer_unmaps, size_t tiling_seg, size_t pass, size_t passes, unsigned long long mingap, long long minextent, double fraction, const char *prefilter, const char *postfilter, struct json_object *filter, write_tile_args *arg) {
 	int line_detail;
 	double merge_fraction = 1;
@@ -1890,6 +1917,24 @@ long long write_tile(FILE *geoms, std::atomic<long long> *geompos_in, char *meta
 			    partials[which_partial].geoms.size() == 1 &&
 			    partials[which_partial].geoms[0].size() == 1 &&
 			    sf.geometry.size() == 1) {
+				if (min_cluster_features > 0) {
+					if (partials[which_partial].clustered + 1 < (long long int) min_cluster_features) {
+						if (partials[which_partial].clustered_features.empty()) {
+							// First feature added to this cluster: save a copy of the cluster's original location
+							partial copy;
+							copy.geoms.push_back(partials[which_partial].geoms[0]);
+							partials[which_partial].clustered_features.push_back(copy);
+						}
+						// Save the incoming feature so the cluster can be backed out if it never reaches the minimum size
+						partial p = create_partial(sf, false, z, line_detail, maxzoom, spacing, simplification);
+						partials[which_partial].clustered_features.push_back(p);
+					} else {
+						// The cluster has reached the minimum size, so it will not be backed out
+						if (!partials[which_partial].clustered_features.empty()) {
+							partials[which_partial].clustered_features.clear();
+						}
+					}
+				}
 				double x = (double) partials[which_partial].geoms[0][0].x * partials[which_partial].clustered;
 				double y = (double) partials[which_partial].geoms[0][0].y * partials[which_partial].clustered;
 				x += sf.geometry[0].x;
@@ -1974,28 +2019,7 @@ long long write_tile(FILE *geoms, std::atomic<long long> *geompos_in, char *meta
 			}
 		}
 
-		partial p;
-		p.geoms.push_back(sf.geometry);
-		p.layer = sf.layer;
-		p.t = sf.t;
-		p.segment = sf.segment;
-		p.original_seq = sf.seq;
-		p.reduced = reduced;
-		p.z = z;
-		p.line_detail = line_detail;
-		p.maxzoom = maxzoom;
-		p.keys = sf.keys;
-		p.values = sf.values;
-		p.full_keys = sf.full_keys;
-		p.full_values = sf.full_values;
-		p.spacing = spacing;
-		p.simplification = simplification;
-		p.id = sf.id;
-		p.has_id = sf.has_id;
-		p.index = sf.index;
-		p.renamed = -1;
-		p.extent = sf.extent;
-		p.clustered = 0;
+		partial p = create_partial(sf, reduced, z, line_detail, maxzoom, spacing, simplification);
 		partials.push_back(p);
 	}
 
@@ -2022,6 +2046,33 @@ long long write_tile(FILE *geoms, std::atomic<long long> *geompos_in, char *meta
 		shared_nodes = just_shared_nodes;
 	}
 
+	{
+		std::vector<partial> backed_out_partials;
+		for (size_t i = 0; i < partials.size(); i++) {
+			partial &p = partials[i];
+
+			if (p.clustered > 0 && p.clustered + 1 < (long long int) min_cluster_features) {
+				// Not enough features were clustered here; back the cluster out
+				for (size_t j = 0; j < p.clustered_features.size(); j++) {
+					partial copy = p.clustered_features[j];
+					if (j == 0 &&
+					    p.t == VT_POINT &&
+					    p.geoms.size() == 1 &&
+					    p.geoms[0].size() == 1) {
+						// Entry 0 is the saved copy of this feature's original location; restore its location
+						p.geoms.clear();
+						p.geoms.push_back(copy.geoms.front());
+						p.clustered = 0;
+					} else {
+						// One of the features clustered into this feature; restore it
+						backed_out_partials.push_back(copy);
+					}
+				}
+			}
+		}
+		partials.insert(partials.end(), backed_out_partials.begin(), backed_out_partials.end());
+	}
+
 	for (size_t i = 0; i < partials.size(); i++) {
 		partial &p = partials[i];