Merge pull request #10 from protomaps/flatgeobuf

FlatGeobuf input support [#2]
This commit is contained in:
Brandon Liu 2022-03-27 19:22:49 +08:00 committed by GitHub
commit 35b576d0fd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 6418 additions and 1 deletion

View File

@ -47,7 +47,7 @@ C = $(wildcard *.c) $(wildcard *.cpp)
INCLUDES = -I/usr/local/include -I.
LIBS = -L/usr/local/lib
tippecanoe: geojson.o jsonpull/jsonpull.o tile.o pool.o mbtiles.o geometry.o projection.o memfile.o mvt.o serial.o main.o text.o dirtiles.o plugin.o read_json.o write_json.o geobuf.o evaluator.o geocsv.o csv.o geojson-loop.o
tippecanoe: geojson.o jsonpull/jsonpull.o tile.o pool.o mbtiles.o geometry.o projection.o memfile.o mvt.o serial.o main.o text.o dirtiles.o plugin.o read_json.o write_json.o geobuf.o flatgeobuf.o evaluator.o geocsv.o csv.o geojson-loop.o
$(CXX) $(PG) $(LIBS) $(FINAL_FLAGS) $(CXXFLAGS) -o $@ $^ $(LDFLAGS) -lm -lz -lsqlite3 -lpthread
tippecanoe-enumerate: enumerate.o

68
flatbuffers/allocator.h Normal file
View File

@ -0,0 +1,68 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ALLOCATOR_H_
#define FLATBUFFERS_ALLOCATOR_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Allocator interface. This is flatbuffers-specific and meant only for
// `vector_downward` usage.
class Allocator {
 public:
  virtual ~Allocator() {}

  // Allocate `size` bytes of memory.
  virtual uint8_t *allocate(size_t size) = 0;

  // Deallocate `size` bytes of memory at `p` allocated by this allocator.
  virtual void deallocate(uint8_t *p, size_t size) = 0;

  // Reallocate `new_size` bytes of memory, replacing the old region of size
  // `old_size` at `p`. In contrast to a normal realloc, this grows downwards,
  // and is intended specifically for `vector_downward` use.
  // `in_use_back` and `in_use_front` indicate how much of `old_size` is
  // actually in use at each end, and needs to be copied.
  // Default implementation: allocate a fresh region, copy both live ends,
  // then free the old region.
  virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size,
                                       size_t new_size, size_t in_use_back,
                                       size_t in_use_front) {
    FLATBUFFERS_ASSERT(new_size > old_size);  // vector_downward only grows
    uint8_t *new_p = allocate(new_size);
    memcpy_downward(old_p, old_size, new_p, new_size, in_use_back,
                    in_use_front);
    deallocate(old_p, old_size);
    return new_p;
  }

 protected:
  // Called by `reallocate_downward` to copy memory from `old_p` of `old_size`
  // to `new_p` of `new_size`. Only memory of size `in_use_front` and
  // `in_use_back` will be copied from the front and back of the old memory
  // allocation.
  void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p,
                       size_t new_size, size_t in_use_back,
                       size_t in_use_front) {
    // The back region is anchored to the end of each buffer; the front
    // region to the start — the gap in the middle is intentionally skipped.
    memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back,
           in_use_back);
    memcpy(new_p, old_p, in_use_front);
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_ALLOCATOR_H_

243
flatbuffers/array.h Normal file
View File

@ -0,0 +1,243 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// This is used as a helper type for accessing arrays.
// Unlike Vector, an Array has a fixed element count known at compile time
// and its elements are stored inline (no length prefix on the wire).
template<typename T, uint16_t length> class Array {
  // Array<T> can carry only POD data types (scalars or structs).
  typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
      scalar_tag;
  typedef
      typename flatbuffers::conditional<scalar_tag::value, T, const T *>::type
          IndirectHelperType;

 public:
  typedef uint16_t size_type;
  typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
  typedef VectorIterator<T, return_type> const_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  // If T is a LE-scalar or a struct (!scalar_tag::value).
  // When true, the in-memory bytes may be viewed directly as a span of T
  // without any endian conversion.
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      (scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1)) ||
      !scalar_tag::value;

  FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; }

  // Read element `i`, converting from wire (little-endian) order for scalars.
  return_type Get(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<IndirectHelperType>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
    return static_cast<E>(Get(i));
  }

  const_iterator begin() const { return const_iterator(Data(), 0); }
  const_iterator end() const { return const_iterator(Data(), size()); }

  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  const_reverse_iterator crbegin() const { return rbegin(); }
  const_reverse_iterator crend() const { return rend(); }

  // Get a mutable pointer to elements inside this array.
  // This method used to mutate arrays of structs followed by a @p Mutate
  // operation. For primitive types use @p Mutate directly.
  // @warning Assignments and reads to/from the dereferenced pointer are not
  // automatically converted to the correct endianness.
  typename flatbuffers::conditional<scalar_tag::value, void, T *>::type
  GetMutablePointer(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<T *>(&data()[i]);
  }

  // Change elements if you have a non-const pointer to this object.
  void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); }

  // The raw data in little endian format. Use with care.
  const uint8_t *Data() const { return data_; }

  uint8_t *Data() { return data_; }

  // Similarly, but typed, much like std::vector::data
  const T *data() const { return reinterpret_cast<const T *>(Data()); }
  T *data() { return reinterpret_cast<T *>(Data()); }

  // Copy data from a span with endian conversion.
  // If this Array and the span overlap, the behavior is undefined.
  void CopyFromSpan(flatbuffers::span<const T, length> src) {
    const auto p1 = reinterpret_cast<const uint8_t *>(src.data());
    const auto p2 = Data();
    // Debug-only overlap check; p1/p2 are otherwise unused in release builds.
    FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) &&
                       !(p2 >= p1 && p2 < (p1 + length)));
    (void)p1;
    (void)p2;
    CopyFromSpanImpl(flatbuffers::bool_constant<is_span_observable>(), src);
  }

 protected:
  // Scalar path: write with endian conversion.
  void MutateImpl(flatbuffers::true_type, uoffset_t i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

  // Struct path: plain assignment through the mutable pointer.
  void MutateImpl(flatbuffers::false_type, uoffset_t i, const T &val) {
    *(GetMutablePointer(i)) = val;
  }

  // Fast path (bytes already observable as T): single memcpy.
  void CopyFromSpanImpl(flatbuffers::true_type,
                        flatbuffers::span<const T, length> src) {
    // Use std::memcpy() instead of std::copy() to avoid performance degradation
    // due to aliasing if T is char or unsigned char.
    // The size is known at compile time, so memcpy would be inlined.
    std::memcpy(data(), src.data(), length * sizeof(T));
  }

  // Copy data from flatbuffers::span with endian conversion.
  void CopyFromSpanImpl(flatbuffers::false_type,
                        flatbuffers::span<const T, length> src) {
    for (size_type k = 0; k < length; k++) { Mutate(k, src[k]); }
  }

  // This class is only used to access pre-existing data. Don't ever
  // try to construct these manually.
  // 'constexpr' allows us to use 'size()' at compile time.
  // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on
  // a constructor.
#if defined(__cpp_constexpr)
  constexpr Array();
#else
  Array();
#endif

  uint8_t data_[length * sizeof(T)];

 private:
  // This class is a pointer. Copying will therefore create an invalid object.
  // Private and unimplemented copy constructor.
  Array(const Array &);
  Array &operator=(const Array &);
};
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
  static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");

 public:
  typedef const void *return_type;

  const uint8_t *Data() const { return data_; }

  // Make idl_gen_text.cpp::PrintContainer happy.
  // Element access is deliberately unsupported on this specialization.
  return_type operator[](uoffset_t) const {
    FLATBUFFERS_ASSERT(false);
    return nullptr;
  }

 private:
  // This class is only used to access pre-existing data.
  Array();
  Array(const Array &);
  Array &operator=(const Array &);

  uint8_t data_[1];
};
// View a mutable Array<U, N> as a span of its N elements.
// Only valid when the array bytes are directly observable as U
// (see Array::is_span_observable); enforced at compile time.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U, N> make_span(Array<U, N> &arr)
    FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<U, N>(arr.data(), N);
}

// Const overload of the above.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U, N> make_span(
    const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(
      Array<U, N>::is_span_observable,
      "wrong type U, only plain struct, LE-scalar, or byte types are allowed");
  return span<const U, N>(arr.data(), N);
}
// View a mutable Array<U, N> as a span over its raw (little-endian) bytes.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t, sizeof(U) * N>
make_bytes_span(Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}

// Const overload of the above.
template<class U, uint16_t N>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t, sizeof(U) * N>
make_bytes_span(const Array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  static_assert(Array<U, N>::is_span_observable,
                "internal error, Array<T> might hold only scalars or structs");
  return span<const uint8_t, sizeof(U) * N>(arr.Data(), sizeof(U) * N);
}
// Cast a raw T[length] to a raw flatbuffers::Array<T, length>
// without endian conversion. Use with care.
// TODO: move these Cast-methods to `internal` namespace.
template<typename T, uint16_t length>
Array<T, length> &CastToArray(T (&arr)[length]) {
  return *reinterpret_cast<Array<T, length> *>(arr);
}

template<typename T, uint16_t length>
const Array<T, length> &CastToArray(const T (&arr)[length]) {
  return *reinterpret_cast<const Array<T, length> *>(arr);
}

// Same, but additionally reinterpreting the storage type T as enum type E.
// E and T must have identical size (checked at compile time).
template<typename E, typename T, uint16_t length>
Array<E, length> &CastToArrayOfEnum(T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<Array<E, length> *>(arr);
}

template<typename E, typename T, uint16_t length>
const Array<E, length> &CastToArrayOfEnum(const T (&arr)[length]) {
  static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
  return *reinterpret_cast<const Array<E, length> *>(arr);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_ARRAY_H_

474
flatbuffers/base.h Normal file
View File

@ -0,0 +1,474 @@
#ifndef FLATBUFFERS_BASE_H_
#define FLATBUFFERS_BASE_H_
// clang-format off
// If enabled, memory-leak tracking must be declared and included first.
#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \
defined(_MSC_VER) && defined(_DEBUG)
// The _CRTDBG_MAP_ALLOC inside <crtdbg.h> will replace
// calloc/free (etc) to its debug version using #define directives.
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
// Replace operator new by trace-enabled version.
#define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__)
#define new DEBUG_NEW
#endif
#if !defined(FLATBUFFERS_ASSERT)
#include <assert.h>
#define FLATBUFFERS_ASSERT assert
#elif defined(FLATBUFFERS_ASSERT_INCLUDE)
// Include file with forward declaration
#include FLATBUFFERS_ASSERT_INCLUDE
#endif
#ifndef ARDUINO
#include <cstdint>
#endif
#include <cstddef>
#include <cstdlib>
#include <cstring>
#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H)
#include <utility.h>
#else
#include <utility>
#endif
#include <string>
#include <type_traits>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <memory>
#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
#include <unistd.h>
#endif
#ifdef __ANDROID__
#include <android/api-level.h>
#endif
#if defined(__ICCARM__)
#include <intrinsics.h>
#endif
// Note the __clang__ check is needed, because clang presents itself
// as an older GNUC compiler (4.2).
// Clang 3.3 and later implement all of the ISO C++ 2011 standard.
// Clang 3.4 and later implement all of the ISO C++ 2014 standard.
// http://clang.llvm.org/cxx_status.html
// Note the MSVC value '__cplusplus' may be incorrect:
// The '__cplusplus' predefined macro in the MSVC stuck at the value 199711L,
// indicating (erroneously!) that the compiler conformed to the C++98 Standard.
// This value should be correct starting from MSVC2017-15.7-Preview-3.
// The '__cplusplus' will be valid only if MSVC2017-15.7-P3 and the `/Zc:__cplusplus` switch is set.
// Workaround (for details see MSDN):
// Use the _MSC_VER and _MSVC_LANG definition instead of the __cplusplus for compatibility.
// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch.
#if defined(__GNUC__) && !defined(__clang__)
#define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#else
#define FLATBUFFERS_GCC 0
#endif
#if defined(__clang__)
#define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
#else
#define FLATBUFFERS_CLANG 0
#endif
/// @cond FLATBUFFERS_INTERNAL
#if __cplusplus <= 199711L && \
(!defined(_MSC_VER) || _MSC_VER < 1600) && \
(!defined(__GNUC__) || \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
#error A C++11 compatible compiler with support for the auto typing is \
required for FlatBuffers.
#error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
#endif
#if !defined(__clang__) && \
defined(__GNUC__) && \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600)
// Backwards compatibility for g++ 4.4, and 4.5 which don't have the nullptr
// and constexpr keywords. Note the __clang__ check is needed, because clang
// presents itself as an older GNUC compiler.
#ifndef nullptr_t
const class nullptr_t {
public:
template<class T> inline operator T*() const { return 0; }
private:
void operator&() const;
} nullptr = {};
#endif
#ifndef constexpr
#define constexpr const
#endif
#endif
// The wire format uses a little endian encoding (since that's efficient for
// the common platforms).
#if defined(__s390x__)
#define FLATBUFFERS_LITTLEENDIAN 0
#endif // __s390x__
#if !defined(FLATBUFFERS_LITTLEENDIAN)
#if defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
#if (defined(__BIG_ENDIAN__) || \
(defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define FLATBUFFERS_LITTLEENDIAN 0
#else
#define FLATBUFFERS_LITTLEENDIAN 1
#endif // __BIG_ENDIAN__
#elif defined(_MSC_VER)
#if defined(_M_PPC)
#define FLATBUFFERS_LITTLEENDIAN 0
#else
#define FLATBUFFERS_LITTLEENDIAN 1
#endif
#else
#error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
#endif
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 2
#define FLATBUFFERS_VERSION_MINOR 0
#define FLATBUFFERS_VERSION_REVISION 5
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
// Returns version as string "MAJOR.MINOR.REVISION".
const char* FLATBUFFERS_VERSION();
}
#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \
defined(__clang__)
#define FLATBUFFERS_FINAL_CLASS final
#define FLATBUFFERS_OVERRIDE override
#define FLATBUFFERS_EXPLICIT_CPP11 explicit
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
#else
#define FLATBUFFERS_FINAL_CLASS
#define FLATBUFFERS_OVERRIDE
#define FLATBUFFERS_EXPLICIT_CPP11
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
#endif
#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
#define FLATBUFFERS_CONSTEXPR constexpr
#define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
#define FLATBUFFERS_CONSTEXPR_DEFINED
#else
#define FLATBUFFERS_CONSTEXPR const
#define FLATBUFFERS_CONSTEXPR_CPP11
#endif
#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
(defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
#define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11
#else
#define FLATBUFFERS_CONSTEXPR_CPP14
#endif
#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
(defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \
defined(__clang__)
#define FLATBUFFERS_NOEXCEPT noexcept
#else
#define FLATBUFFERS_NOEXCEPT
#endif
// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to
// private, so be sure to put it at the end or reset access mode explicitly.
#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \
defined(__clang__)
#define FLATBUFFERS_DELETE_FUNC(func) func = delete
#else
#define FLATBUFFERS_DELETE_FUNC(func) private: func
#endif
#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
defined(__clang__)
#define FLATBUFFERS_DEFAULT_DECLARATION
#endif
// Check if we can use template aliases
// Not possible if Microsoft Compiler before 2012
// Possible is the language feature __cpp_alias_templates is defined well
// Or possible if the C++ std is C+11 or newer
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|| (defined(__cplusplus) && __cplusplus >= 201103L)
#define FLATBUFFERS_TEMPLATES_ALIASES
#endif
#ifndef FLATBUFFERS_HAS_STRING_VIEW
// Only provide flatbuffers::string_view if __has_include can be used
// to detect a header that provides an implementation
#if defined(__has_include)
// Check for std::string_view (in c++17)
#if __has_include(<string_view>) && (__cplusplus >= 201606 || (defined(_HAS_CXX17) && _HAS_CXX17))
#include <string_view>
namespace flatbuffers {
typedef std::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for std::experimental::string_view (in c++14, compiler-dependent)
#elif __has_include(<experimental/string_view>) && (__cplusplus >= 201411)
#include <experimental/string_view>
namespace flatbuffers {
typedef std::experimental::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for absl::string_view
#elif __has_include("absl/strings/string_view.h")
#include "absl/strings/string_view.h"
namespace flatbuffers {
typedef absl::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
#endif
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
// Allow heap allocations to be used
#define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1
#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
#ifndef FLATBUFFERS_HAS_NEW_STRTOD
// Modern (C++11) strtod and strtof functions are available for use.
// 1) nan/inf strings as argument of strtod;
// 2) hex-float as argument of strtod/strtof.
#if (defined(_MSC_VER) && _MSC_VER >= 1900) || \
(defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
(defined(__clang__))
#define FLATBUFFERS_HAS_NEW_STRTOD 1
#endif
#endif // !FLATBUFFERS_HAS_NEW_STRTOD
#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}.
#if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION>=700)) && (!defined(__ANDROID_API__) || (defined(__ANDROID_API__) && (__ANDROID_API__>=21))))
#define FLATBUFFERS_LOCALE_INDEPENDENT 1
#else
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
#endif
#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
// - __supress_ubsan__("undefined")
// - __supress_ubsan__("signed-integer-overflow")
#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
#define __supress_ubsan__(type) __attribute__((no_sanitize(type)))
#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
#define __supress_ubsan__(type) __attribute__((no_sanitize_undefined))
#else
#define __supress_ubsan__(type)
#endif
// This is constexpr function used for checking compile-time constants.
// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`.
template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
  return t ? true : false;
}
// Enable C++ attribute [[]] if std:c++17 or higher.
#if ((__cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
// All attributes unknown to an implementation are ignored without causing an error.
#define FLATBUFFERS_ATTRIBUTE(attr) attr
#define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
#else
#define FLATBUFFERS_ATTRIBUTE(attr)
#if FLATBUFFERS_CLANG >= 30800
#define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]]
#elif FLATBUFFERS_GCC >= 70300
#define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]]
#else
#define FLATBUFFERS_FALLTHROUGH()
#endif
#endif
/// @endcond
/// @file
namespace flatbuffers {
/// @cond FLATBUFFERS_INTERNAL
// Our default offset / size type, 32bit on purpose on 64bit systems.
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;
// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;
// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
typedef uint16_t voffset_t;
typedef uintmax_t largest_scalar_t;
// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
// We support aligning the contents of buffers up to this size.
#define FLATBUFFERS_MAX_ALIGNMENT 16
/// @brief The length of a FlatBuffer file header.
static const size_t kFileIdentifierLength = 4;
// True iff `align` is a power of two in [min_align, FLATBUFFERS_MAX_ALIGNMENT].
inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
  if (align < min_align) return false;
  if (align > (FLATBUFFERS_MAX_ALIGNMENT)) return false;
  return (align & (align - 1)) == 0;  // must be power of 2
}
#if defined(_MSC_VER)
#pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
#pragma warning(push)
#pragma warning(disable: 4127) // C4127: conditional expression is constant
#endif
// Swap the byte order of a scalar value of width 1, 2, 4, or 8 bytes.
// The byteswap intrinsic macros are selected per-compiler below.
template<typename T> T EndianSwap(T t) {
  #if defined(_MSC_VER)
    #define FLATBUFFERS_BYTESWAP16 _byteswap_ushort
    #define FLATBUFFERS_BYTESWAP32 _byteswap_ulong
    #define FLATBUFFERS_BYTESWAP64 _byteswap_uint64
  #elif defined(__ICCARM__)
    #define FLATBUFFERS_BYTESWAP16 __REV16
    #define FLATBUFFERS_BYTESWAP32 __REV
    // IAR has no 64-bit reverse; compose it from two 32-bit reverses.
    #define FLATBUFFERS_BYTESWAP64(x) \
       ((__REV(static_cast<uint32_t>(x >> 32U))) | (static_cast<uint64_t>(__REV(static_cast<uint32_t>(x)))) << 32U)
  #else
    #if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__)
      // __builtin_bswap16 was missing prior to GCC 4.8.
      #define FLATBUFFERS_BYTESWAP16(x) \
        static_cast<uint16_t>(__builtin_bswap32(static_cast<uint32_t>(x) << 16))
    #else
      #define FLATBUFFERS_BYTESWAP16 __builtin_bswap16
    #endif
    #define FLATBUFFERS_BYTESWAP32 __builtin_bswap32
    #define FLATBUFFERS_BYTESWAP64 __builtin_bswap64
  #endif
  // Unions are used to view the scalar's bytes as an unsigned integer of the
  // same width without strict-aliasing violations.
  if (sizeof(T) == 1) {  // Compile-time if-then's.
    return t;
  } else if (sizeof(T) == 2) {
    union { T t; uint16_t i; } u = { t };
    u.i = FLATBUFFERS_BYTESWAP16(u.i);
    return u.t;
  } else if (sizeof(T) == 4) {
    union { T t; uint32_t i; } u = { t };
    u.i = FLATBUFFERS_BYTESWAP32(u.i);
    return u.t;
  } else if (sizeof(T) == 8) {
    union { T t; uint64_t i; } u = { t };
    u.i = FLATBUFFERS_BYTESWAP64(u.i);
    return u.t;
  } else {
    FLATBUFFERS_ASSERT(0);  // Unsupported scalar width.
    return t;
  }
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
// Convert a scalar between native and little-endian (wire) byte order.
// On little-endian targets this compiles to the identity function.
template<typename T> T EndianScalar(T t) {
  #if FLATBUFFERS_LITTLEENDIAN
    return t;
  #else
    return EndianSwap(t);
  #endif
}
// Read a scalar of type T stored in little-endian order at buffer position
// `p` (which may be unaligned — hence the UBSAN suppression).
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
T ReadScalar(const void *p) {
  return EndianScalar(*reinterpret_cast<const T *>(p));
}
// See https://github.com/google/flatbuffers/issues/5950
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
// Write scalar `t` at buffer position `p` in little-endian (wire) order.
template<typename T>
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
__supress_ubsan__("alignment")
void WriteScalar(void *p, T t) {
  *reinterpret_cast<T *>(p) = EndianScalar(t);
}

template<typename T> struct Offset;
// Overload for Offset<T>: only the raw uoffset_t value is serialized.
template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
  *reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
}
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
#pragma GCC diagnostic pop
#endif
// Computes how many bytes of padding must be written so that a scalar of
// `scalar_size` bytes can be stored once the buffer has grown (downwards in
// memory) to `buf_size` bytes. `scalar_size` must be a power of two.
__supress_ubsan__("unsigned-integer-overflow")
inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
  // Unsigned (two's-complement) negation of buf_size, masked to alignment.
  const size_t alignment_mask = scalar_size - 1;
  return (0 - buf_size) & alignment_mask;
}
// Generic 'operator==' with conditional specialisations.
// Compares a scalar field value `e` against its compile-time default `def`.
template<typename T> inline bool IsTheSameAs(T e, T def) {
  const bool matches_default = (e == def);
  return matches_default;
}
#if defined(FLATBUFFERS_NAN_DEFAULTS) && \
    defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Like `operator==(e, def)` with weak NaN if T=(float|double).
// Two NaNs compare equal here so a NaN default is treated as "unchanged".
template<typename T> inline bool IsFloatTheSameAs(T e, T def) {
  // (x != x) holds only when x is NaN.
  return (e == def) || ((def != def) && (e != e));
}
template<> inline bool IsTheSameAs<float>(float e, float def) {
  return IsFloatTheSameAs(e, def);
}
template<> inline bool IsTheSameAs<double>(double e, double def) {
  return IsFloatTheSameAs(e, def);
}
#endif
// Check 'v' is out of closed range [low; high].
// Deliberately written with only operator< (no <=/>=) so it works for types
// defining just that, and to sidestep the GCC [-Werror=type-limits] warning:
// comparison is always true due to limited range of data type.
template<typename T>
inline bool IsOutRange(const T &v, const T &low, const T &high) {
  if (v < low) return true;
  return high < v;
}
// Check 'v' is in closed range [low; high].
template<typename T>
inline bool IsInRange(const T &v, const T &low, const T &high) {
  const bool outside = IsOutRange(v, low, high);
  return !outside;
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BASE_H_

142
flatbuffers/buffer.h Normal file
View File

@ -0,0 +1,142 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_H_
#define FLATBUFFERS_BUFFER_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
  uoffset_t o;  // Raw offset value as stored on the wire.
  Offset() : o(0) {}  // Default-constructs as the null offset.
  Offset(uoffset_t _o) : o(_o) {}
  // Type-erase to Offset<void>, e.g. for storing in a union field.
  Offset<void> Union() const { return Offset<void>(o); }
  bool IsNull() const { return !o; }
};
// Runtime sanity check that the compile-time FLATBUFFERS_LITTLEENDIAN
// setting matches the byte order of the machine we are actually running on.
inline void EndianCheck() {
  int probe = 1;
  // The first byte of the int is 1 exactly on a little-endian machine.
  // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
  FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&probe) ==
                     FLATBUFFERS_LITTLEENDIAN);
  (void)probe;
}
// Portable alignof(T): MSVC's __alignof, GCC/Clang's __alignof__, or the
// C++11 alignof keyword where available.
template<typename T> FLATBUFFERS_CONSTEXPR size_t AlignOf() {
  // clang-format off
  #ifdef _MSC_VER
    return __alignof(T);
  #else
    #ifndef alignof
      return __alignof__(T);
    #else
      return alignof(T);
    #endif
  #endif
  // clang-format on
}
// Lexicographically compare two strings (possibly containing nulls), and
// return true if the first is less than the second.
static inline bool StringLessThan(const char *a_data, uoffset_t a_size,
                                  const char *b_data, uoffset_t b_size) {
  const auto common_len = (std::min)(a_size, b_size);
  const int rel = memcmp(a_data, b_data, common_len);
  if (rel != 0) return rel < 0;
  // Shared prefix is identical: the shorter string orders first.
  return a_size < b_size;
}
// When we read serialized data from memory, in the case of most scalars,
// we want to just read T, but in the case of Offset, we want to actually
// perform the indirection and return a pointer.
// The template specialization below does just that.
// It is wrapped in a struct since function templates can't overload on the
// return type like this.
// The typedef is for the convenience of callers of this function
// (avoiding the need for a trailing return decltype)
template<typename T> struct IndirectHelper {
  typedef T return_type;
  typedef T mutable_return_type;
  static const size_t element_stride = sizeof(T);
  // Scalars are stored inline in little-endian wire order.
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return EndianScalar((reinterpret_cast<const T *>(p))[i]);
  }
};
// Offsets: follow the stored relative offset and return a pointer to the
// referenced object.
template<typename T> struct IndirectHelper<Offset<T>> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(uoffset_t);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    p += i * sizeof(uoffset_t);
    // The offset is relative to its own location in the buffer.
    return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
  }
};
// Structs: stored inline; return a pointer directly into the buffer.
template<typename T> struct IndirectHelper<const T *> {
  typedef const T *return_type;
  typedef T *mutable_return_type;
  static const size_t element_stride = sizeof(T);
  static return_type Read(const uint8_t *p, uoffset_t i) {
    return reinterpret_cast<const T *>(p + i * sizeof(T));
  }
};
/// @brief Get a pointer to the file_identifier section of the buffer.
/// @return Returns a const char pointer to the start of the file_identifier
/// characters in the buffer. The returned char * has length
/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'.
/// This function is UNDEFINED for FlatBuffers whose schema does not include
/// a file_identifier (likely points at padding or the start of the root
/// vtable).
inline const char *GetBufferIdentifier(const void *buf,
                                       bool size_prefixed = false) {
  // Skip the root uoffset, plus the size field when the buffer is prefixed.
  const size_t header_bytes =
      size_prefixed ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t);
  return reinterpret_cast<const char *>(buf) + header_bytes;
}
// Helper to see if the identifier in a buffer has the expected value.
inline bool BufferHasIdentifier(const void *buf, const char *identifier,
                                bool size_prefixed = false) {
  const char *found = GetBufferIdentifier(buf, size_prefixed);
  return strncmp(found, identifier, flatbuffers::kFileIdentifierLength) == 0;
}
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
EndianCheck();
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(buf) +
EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}
template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
  // Skip the leading size field, then resolve the root as usual.
  uint8_t *after_size = reinterpret_cast<uint8_t *>(buf) + sizeof(uoffset_t);
  return GetMutableRoot<T>(after_size);
}
template<typename T> const T *GetRoot(const void *buf) {
  // Const overload, implemented in terms of the mutable variant.
  void *mutable_buf = const_cast<void *>(buf);
  return GetMutableRoot<T>(mutable_buf);
}
template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
  // Skip the leading size field before resolving the root.
  const uint8_t *after_size =
      reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t);
  return GetRoot<T>(after_size);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_H_

53
flatbuffers/buffer_ref.h Normal file
View File

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_BUFFER_REF_H_
#define FLATBUFFERS_BUFFER_REF_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// Convenient way to bundle a buffer and its length, to pass it around
// typed by its root.
// A BufferRef does not own its buffer.
struct BufferRefBase {};  // for std::is_base_of
// A buffer plus its length, typed by the root table T.
// NOTE(review): although the class comment says a BufferRef does not own its
// buffer, the destructor frees buf when must_free is set, yet copy operations
// are not deleted — copying a BufferRef whose must_free is true would lead to
// a double free. Confirm callers never copy owning instances.
template<typename T> struct BufferRef : BufferRefBase {
  // Default: empty, non-owning reference.
  BufferRef() : buf(nullptr), len(0), must_free(false) {}
  // Wrap an existing buffer without taking ownership.
  BufferRef(uint8_t *_buf, uoffset_t _len)
      : buf(_buf), len(_len), must_free(false) {}
  ~BufferRef() {
    if (must_free) free(buf);
  }
  // Typed access to the root table of the buffer.
  const T *GetRoot() const { return flatbuffers::GetRoot<T>(buf); }
  // Run the flatbuffers verifier over the whole buffer.
  bool Verify() {
    Verifier verifier(buf, len);
    return verifier.VerifyBuffer<T>(nullptr);
  }
  uint8_t *buf;
  uoffset_t len;
  bool must_free;  // set externally when this ref should free buf on destroy
};
} // namespace flatbuffers
#endif // FLATBUFFERS_BUFFER_REF_H_

View File

@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#define FLATBUFFERS_DEFAULT_ALLOCATOR_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
namespace flatbuffers {
// DefaultAllocator uses new/delete to allocate memory regions.
class DefaultAllocator : public Allocator {
 public:
  // Hand out `size` bytes backed by operator new[].
  uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE {
    uint8_t *region = new uint8_t[size];
    return region;
  }
  // Release a region previously returned by allocate().
  void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE {
    delete[] p;
  }
  // Static variant usable as a plain function-pointer deleter.
  static void dealloc(void *p, size_t) {
    delete[] static_cast<uint8_t *>(p);
  }
};
// These functions allow for a null allocator to mean use the default allocator,
// as used by DetachedBuffer and vector_downward below.
// This is to avoid having a statically or dynamically allocated default
// allocator, or having to move it between the classes that may own it.
inline uint8_t *Allocate(Allocator *allocator, size_t size) {
  // A temporary DefaultAllocator carries no state, so constructing one
  // on the fly is free.
  if (allocator) return allocator->allocate(size);
  return DefaultAllocator().allocate(size);
}
inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) {
  // Fall back to a temporary DefaultAllocator when none was supplied.
  if (!allocator) {
    DefaultAllocator().deallocate(p, size);
    return;
  }
  allocator->deallocate(p, size);
}
inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p,
                                   size_t old_size, size_t new_size,
                                   size_t in_use_back, size_t in_use_front) {
  // Route to the caller's allocator if provided, else a temporary default.
  if (allocator) {
    return allocator->reallocate_downward(old_p, old_size, new_size,
                                          in_use_back, in_use_front);
  }
  return DefaultAllocator().reallocate_downward(old_p, old_size, new_size,
                                                in_use_back, in_use_front);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_DEFAULT_ALLOCATOR_H_

View File

@ -0,0 +1,114 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_DETACHED_BUFFER_H_
#define FLATBUFFERS_DETACHED_BUFFER_H_
#include "flatbuffers/allocator.h"
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
namespace flatbuffers {
// DetachedBuffer is a finished flatbuffer memory region, detached from its
// builder. The original memory region and allocator are also stored so that
// the DetachedBuffer can manage the memory lifetime.
class DetachedBuffer {
 public:
  // Construct an empty buffer that owns nothing.
  DetachedBuffer()
      : allocator_(nullptr),
        own_allocator_(false),
        buf_(nullptr),
        reserved_(0),
        cur_(nullptr),
        size_(0) {}
  // Adopt an existing region: `buf` spans `reserved` allocated bytes; the
  // finished flatbuffer is the `sz` bytes starting at `cur` (a pointer into
  // that region). If `own_allocator` is true, `allocator` is deleted when
  // this buffer is destroyed.
  DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf,
                 size_t reserved, uint8_t *cur, size_t sz)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        buf_(buf),
        reserved_(reserved),
        cur_(cur),
        size_(sz) {}
  // Move constructor: steals other's state and leaves it empty.
  DetachedBuffer(DetachedBuffer &&other)
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        buf_(other.buf_),
        reserved_(other.reserved_),
        cur_(other.cur_),
        size_(other.size_) {
    other.reset();
  }
  // Move assignment: frees any currently held region before stealing.
  DetachedBuffer &operator=(DetachedBuffer &&other) {
    if (this == &other) return *this;
    destroy();
    allocator_ = other.allocator_;
    own_allocator_ = other.own_allocator_;
    buf_ = other.buf_;
    reserved_ = other.reserved_;
    cur_ = other.cur_;
    size_ = other.size_;
    other.reset();
    return *this;
  }
  ~DetachedBuffer() { destroy(); }
  // Pointer to the finished flatbuffer (not necessarily the allocation start).
  const uint8_t *data() const { return cur_; }
  uint8_t *data() { return cur_; }
  // Size of the finished flatbuffer in bytes (not the reserved capacity).
  size_t size() const { return size_; }
  // These may change access mode, leave these at end of public section
  FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other));
  FLATBUFFERS_DELETE_FUNC(
      DetachedBuffer &operator=(const DetachedBuffer &other));

 protected:
  Allocator *allocator_;  // used to free buf_; null means DefaultAllocator
  bool own_allocator_;    // if true, allocator_ is deleted in destroy()
  uint8_t *buf_;          // start of the reserved allocation
  size_t reserved_;       // total bytes allocated at buf_
  uint8_t *cur_;          // start of the finished buffer within buf_
  size_t size_;           // bytes of finished buffer at cur_
  // Free the region (and the allocator if owned), then clear all members.
  inline void destroy() {
    if (buf_) Deallocate(allocator_, buf_, reserved_);
    if (own_allocator_ && allocator_) { delete allocator_; }
    reset();
  }
  // Null out members without freeing anything (used after a move-out).
  inline void reset() {
    allocator_ = nullptr;
    own_allocator_ = false;
    buf_ = nullptr;
    reserved_ = 0;
    cur_ = nullptr;
    size_ = 0;
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_DETACHED_BUFFER_H_

File diff suppressed because it is too large Load Diff

284
flatbuffers/flatbuffers.h Normal file
View File

@ -0,0 +1,284 @@
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_H_
#define FLATBUFFERS_H_
// TODO: These includes are for mitigating the pains of users editing their
// source because they relied on flatbuffers.h to include everything for them.
#include "flatbuffers/array.h"
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/buffer_ref.h"
#include "flatbuffers/detached_buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/stl_emulation.h"
#include "flatbuffers/string.h"
#include "flatbuffers/struct.h"
#include "flatbuffers/table.h"
#include "flatbuffers/vector.h"
#include "flatbuffers/vector_downward.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
/// @brief This can compute the start of a FlatBuffer from a root pointer, i.e.
/// it is the opposite transformation of GetRoot().
/// This may be useful if you want to pass on a root and have the recipient
/// delete the buffer afterwards.
inline const uint8_t *GetBufferStartFromRootPointer(const void *root) {
  auto table = reinterpret_cast<const Table *>(root);
  auto vtable = table->GetVTable();
  // Either the vtable is before the root or after the root.
  auto start = (std::min)(vtable, reinterpret_cast<const uint8_t *>(root));
  // Align to at least sizeof(uoffset_t) by masking off the low bits.
  start = reinterpret_cast<const uint8_t *>(reinterpret_cast<uintptr_t>(start) &
                                            ~(sizeof(uoffset_t) - 1));
  // Additionally, there may be a file_identifier in the buffer, and the root
  // offset. The buffer may have been aligned to any size between
  // sizeof(uoffset_t) and FLATBUFFERS_MAX_ALIGNMENT (see "force_align").
  // Sadly, the exact alignment is only known when constructing the buffer,
  // since it depends on the presence of values with said alignment properties.
  // So instead, we simply look at the next uoffset_t values (root,
  // file_identifier, and alignment padding) to see which points to the root.
  // None of the other values can "impersonate" the root since they will either
  // be 0 or four ASCII characters.
  static_assert(flatbuffers::kFileIdentifierLength == sizeof(uoffset_t),
                "file_identifier is assumed to be the same size as uoffset_t");
  for (auto possible_roots = FLATBUFFERS_MAX_ALIGNMENT / sizeof(uoffset_t) + 1;
       possible_roots; possible_roots--) {
    start -= sizeof(uoffset_t);
    // The root offset is the uoffset_t whose value, added to its own
    // address, lands exactly on `root`.
    if (ReadScalar<uoffset_t>(start) + start ==
        reinterpret_cast<const uint8_t *>(root))
      return start;
  }
  // We didn't find the root, either the "root" passed isn't really a root,
  // or the buffer is corrupt.
  // Assert, because calling this function with bad data may cause reads
  // outside of buffer boundaries.
  FLATBUFFERS_ASSERT(false);
  return nullptr;
}
/// @brief Returns the prefixed size of a size-prefixed FlatBuffer.
inline uoffset_t GetPrefixedSize(const uint8_t *buf) {
  // The size prefix is stored as the first uoffset_t of the buffer.
  const uoffset_t prefixed_size = ReadScalar<uoffset_t>(buf);
  return prefixed_size;
}
// Base class for native objects (FlatBuffer data de-serialized into native
// C++ data structures).
// Contains no functionality, purely documentative.
struct NativeTable {};

/// @brief Function types to be used with resolving hashes into objects and
/// back again. The resolver gets a pointer to a field inside an object API
/// object that is of the type specified in the schema using the attribute
/// `cpp_type` (it is thus important whatever you write to this address
/// matches that type). The value of this field is initially null, so you
/// may choose to implement a delayed binding lookup using this function
/// if you wish. The resolver does the opposite lookup, for when the object
/// is being serialized again.
typedef uint64_t hash_value_t;
// Invoked during de-serialization to turn a stored hash back into a pointer.
typedef std::function<void(void **pointer_adr, hash_value_t hash)>
    resolver_function_t;
// Invoked during serialization to turn a pointer back into a stored hash.
typedef std::function<hash_value_t(void *pointer)> rehasher_function_t;
// Helper function to test if a field is present, using any of the field
// enums in the generated code.
// `table` must be a generated table type. Since this is a template parameter,
// this is not typechecked to be a subclass of Table, so beware!
// Note: this function will return false for fields equal to the default
// value, since they're not stored in the buffer (unless force_defaults was
// used).
template<typename T>
bool IsFieldPresent(const T *table, typename T::FlatBuffersVTableOffset field) {
// Cast, since Table is a private baseclass of any table types.
return reinterpret_cast<const Table *>(table)->CheckField(
static_cast<voffset_t>(field));
}
// Utility function for reverse lookups on the EnumNames*() functions
// (in the generated C++ code)
// names must be NULL terminated.
inline int LookupEnum(const char **names, const char *name) {
  // Scan the NULL-terminated name table; the match index is the enum value.
  int index = 0;
  for (const char **entry = names; *entry; ++entry, ++index) {
    if (strcmp(*entry, name) == 0) return index;
  }
  return -1;
}
// These macros allow us to layout a struct with a guarantee that they'll end
// up looking the same on different compilers and platforms.
// It does this by disallowing the compiler to do any padding, and then
// does padding itself by inserting extra padding fields that make every
// element aligned to its own size.
// Additionally, it manually sets the alignment of the struct as a whole,
// which is typically its largest element, or a custom size set in the schema
// by the force_align attribute.
// These are used in the generated code only.
// clang-format off
#if defined(_MSC_VER)
  #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
    __pragma(pack(1)) \
    struct __declspec(align(alignment))
  #define FLATBUFFERS_STRUCT_END(name, size) \
    __pragma(pack()) \
    static_assert(sizeof(name) == size, "compiler breaks packing rules")
#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
  #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
    _Pragma("pack(1)") \
    struct __attribute__((aligned(alignment)))
  #define FLATBUFFERS_STRUCT_END(name, size) \
    _Pragma("pack()") \
    static_assert(sizeof(name) == size, "compiler breaks packing rules")
#else
  #error Unknown compiler, please define structure alignment macros
#endif
// clang-format on

// Minimal reflection via code generation.
// Besides full-fat reflection (see reflection.h) and parsing/printing by
// loading schemas (see idl.h), we can also have code generation for minimal
// reflection data which allows pretty-printing and other uses without needing
// a schema or a parser.
// Generate code with --reflect-types (types only) or --reflect-names (names
// also) to enable.
// See minireflect.h for utilities using this functionality.

// These types are organized slightly differently as the ones in idl.h.
enum SequenceType { ST_TABLE, ST_STRUCT, ST_UNION, ST_ENUM };

// Scalars have the same order as in idl.h
// X-macro: expands ET(...) once per elementary type, used below to generate
// both the enum and its name table in lockstep.
// clang-format off
#define FLATBUFFERS_GEN_ELEMENTARY_TYPES(ET) \
  ET(ET_UTYPE) \
  ET(ET_BOOL) \
  ET(ET_CHAR) \
  ET(ET_UCHAR) \
  ET(ET_SHORT) \
  ET(ET_USHORT) \
  ET(ET_INT) \
  ET(ET_UINT) \
  ET(ET_LONG) \
  ET(ET_ULONG) \
  ET(ET_FLOAT) \
  ET(ET_DOUBLE) \
  ET(ET_STRING) \
  ET(ET_SEQUENCE) // See SequenceType.

enum ElementaryType {
  #define FLATBUFFERS_ET(E) E,
    FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET)
  #undef FLATBUFFERS_ET
};

// Returns the NULL-free array of elementary-type name strings, indexed by
// ElementaryType (generated from the same X-macro as the enum above).
inline const char * const *ElementaryTypeNames() {
  static const char * const names[] = {
    #define FLATBUFFERS_ET(E) #E,
      FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET)
    #undef FLATBUFFERS_ET
  };
  return names;
}
// clang-format on
// clang-format on

// Basic type info cost just 16bits per field!
// We're explicitly defining the signedness since the signedness of integer
// bitfields is otherwise implementation-defined and causes warnings on older
// GCC compilers.
struct TypeCode {
  // ElementaryType
  unsigned short base_type : 4;
  // Either vector (in table) or array (in struct)
  unsigned short is_repeating : 1;
  // Index into type_refs below, or -1 for none.
  signed short sequence_ref : 11;
};

static_assert(sizeof(TypeCode) == 2, "TypeCode");

struct TypeTable;

// Signature of the static method present in each type.
typedef const TypeTable *(*TypeFunction)();

// Minimal reflection data for one generated type; filled in by the code
// generator when --reflect-types / --reflect-names is enabled.
struct TypeTable {
  SequenceType st;
  size_t num_elems;  // of type_codes, values, names (but not type_refs).
  const TypeCode *type_codes;     // num_elems count
  const TypeFunction *type_refs;  // less than num_elems entries (see TypeCode).
  const int16_t *array_sizes;     // less than num_elems entries (see TypeCode).
  const int64_t *values;  // Only set for non-consecutive enum/union or structs.
  const char *const *names;  // Only set if compiled with --reflect-names.
};
// String which identifies the current version of FlatBuffers.
// flatbuffer_version_string is used by Google developers to identify which
// applications uploaded to Google Play are using this library. This allows
// the development team at Google to determine the popularity of the library.
// How it works: Applications that are uploaded to the Google Play Store are
// scanned for this version string. We track which applications are using it
// to measure popularity. You are free to remove it (of course) but we would
// appreciate if you left it in.

// Weak linkage is culled by VS & doesn't work on cygwin.
// clang-format off
#if !defined(_WIN32) && !defined(__CYGWIN__)

// Declared weak so that multiple translation units (or the application
// itself) can define this symbol without duplicate-symbol link errors.
extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
volatile __attribute__((weak)) const char *flatbuffer_version_string =
  "FlatBuffers "
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
  FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);

#endif // !defined(_WIN32) && !defined(__CYGWIN__)

// Defines the bitwise operators (|, &, ^, ~ and their compound forms) plus
// logical ! for a scoped enum E with underlying integer type T.
#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\
    inline E operator | (E lhs, E rhs){\
        return E(T(lhs) | T(rhs));\
    }\
    inline E operator & (E lhs, E rhs){\
        return E(T(lhs) & T(rhs));\
    }\
    inline E operator ^ (E lhs, E rhs){\
        return E(T(lhs) ^ T(rhs));\
    }\
    inline E operator ~ (E lhs){\
        return E(~T(lhs));\
    }\
    inline E operator |= (E &lhs, E rhs){\
        lhs = lhs | rhs;\
        return lhs;\
    }\
    inline E operator &= (E &lhs, E rhs){\
        lhs = lhs & rhs;\
        return lhs;\
    }\
    inline E operator ^= (E &lhs, E rhs){\
        lhs = lhs ^ rhs;\
        return lhs;\
    }\
    inline bool operator !(E rhs) \
    {\
        return !bool(T(rhs)); \
    }
/// @endcond
}  // namespace flatbuffers

// clang-format on

#endif  // FLATBUFFERS_H_

509
flatbuffers/stl_emulation.h Normal file
View File

@ -0,0 +1,509 @@
/*
* Copyright 2017 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STL_EMULATION_H_
#define FLATBUFFERS_STL_EMULATION_H_
// clang-format off
#include "flatbuffers/base.h"
#include <string>
#include <type_traits>
#include <vector>
#include <memory>
#include <limits>
// Detect C++17 compatible compiler.
// __cplusplus >= 201703L - a compiler has support of 'static inline' variables.
#if defined(FLATBUFFERS_USE_STD_OPTIONAL) \
|| (defined(__cplusplus) && __cplusplus >= 201703L) \
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))
#include <optional>
#ifndef FLATBUFFERS_USE_STD_OPTIONAL
#define FLATBUFFERS_USE_STD_OPTIONAL
#endif
#endif // defined(FLATBUFFERS_USE_STD_OPTIONAL) ...
// The __cpp_lib_span is the predefined feature macro.
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <span>
#elif defined(__cpp_lib_span) && defined(__has_include)
#if __has_include(<span>)
#include <span>
#define FLATBUFFERS_USE_STD_SPAN
#endif
#else
// Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined.
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES)
#define FLATBUFFERS_SPAN_MINIMAL
#else
// Enable implicit construction of a span<T,N> from a std::array<T,N>.
#include <array>
#endif
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
// This header provides backwards compatibility for older versions of the STL.
namespace flatbuffers {
// Pull the relevant std type traits into the flatbuffers namespace, either as
// C++11 alias templates or (when alias templates are unavailable, e.g. old
// MSVC) as thin derived classes with the same names.
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
  template <typename T>
  using numeric_limits = std::numeric_limits<T>;
#else
  template <typename T> class numeric_limits :
    public std::numeric_limits<T> {};
#endif  // defined(FLATBUFFERS_TEMPLATES_ALIASES)

#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
  template <typename T> using is_scalar = std::is_scalar<T>;
  template <typename T, typename U> using is_same = std::is_same<T,U>;
  template <typename T> using is_floating_point = std::is_floating_point<T>;
  template <typename T> using is_unsigned = std::is_unsigned<T>;
  template <typename T> using is_enum = std::is_enum<T>;
  template <typename T> using make_unsigned = std::make_unsigned<T>;
  template<bool B, class T, class F>
  using conditional = std::conditional<B, T, F>;
  template<class T, T v>
  using integral_constant = std::integral_constant<T, v>;
  template <bool B>
  using bool_constant = integral_constant<bool, B>;
  using true_type  = std::true_type;
  using false_type = std::false_type;
#else
  // MSVC 2010 doesn't support C++11 aliases.
  template <typename T> struct is_scalar : public std::is_scalar<T> {};
  template <typename T, typename U> struct is_same : public std::is_same<T,U> {};
  template <typename T> struct is_floating_point :
        public std::is_floating_point<T> {};
  template <typename T> struct is_unsigned : public std::is_unsigned<T> {};
  template <typename T> struct is_enum : public std::is_enum<T> {};
  template <typename T> struct make_unsigned : public std::make_unsigned<T> {};
  template<bool B, class T, class F>
  struct conditional : public std::conditional<B, T, F> {};
  template<class T, T v>
  struct integral_constant : public std::integral_constant<T, v> {};
  template <bool B>
  struct bool_constant : public integral_constant<bool, B> {};
  typedef bool_constant<true>  true_type;
  typedef bool_constant<false> false_type;
#endif  // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
  template <class T> using unique_ptr = std::unique_ptr<T>;
#else
  // MSVC 2010 doesn't support C++11 aliases.
  // We're manually "aliasing" the class here as we want to bring unique_ptr
  // into the flatbuffers namespace. We have unique_ptr in the flatbuffers
  // namespace we have a completely independent implementation (see below)
  // for C++98 STL implementations.
  template <class T> class unique_ptr : public std::unique_ptr<T> {
   public:
    unique_ptr() {}
    explicit unique_ptr(T* p) : std::unique_ptr<T>(p) {}
    // Transfer ownership from a std::unique_ptr (or another flatbuffers
    // unique_ptr) via release()/reset().
    unique_ptr(std::unique_ptr<T>&& u) { *this = std::move(u); }
    unique_ptr(unique_ptr&& u) { *this = std::move(u); }
    unique_ptr& operator=(std::unique_ptr<T>&& u) {
      std::unique_ptr<T>::reset(u.release());
      return *this;
    }
    unique_ptr& operator=(unique_ptr&& u) {
      std::unique_ptr<T>::reset(u.release());
      return *this;
    }
    // Takes ownership of a raw pointer.
    unique_ptr& operator=(T* p) {
      return std::unique_ptr<T>::operator=(p);
    }
  };
#endif  // defined(FLATBUFFERS_TEMPLATES_ALIASES)
#ifdef FLATBUFFERS_USE_STD_OPTIONAL
template<class T>
using Optional = std::optional<T>;
using nullopt_t = std::nullopt_t;
inline constexpr nullopt_t nullopt = std::nullopt;

#else
// Limited implementation of Optional<T> type for a scalar T.
// This implementation limited by trivial types compatible with
// std::is_arithmetic<T> or std::is_enum<T> type traits.

// A tag to indicate an empty flatbuffers::optional<T>.
struct nullopt_t {
  explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {}
};

// The holder-template trick below gives a single shared `nullopt` instance
// defined in a header without C++17 inline variables (a static data member
// of a class template may be defined in a header without ODR violations).
#if defined(FLATBUFFERS_CONSTEXPR_DEFINED)
namespace internal {
template <class> struct nullopt_holder {
  static constexpr nullopt_t instance_ = nullopt_t(0);
};
template<class Dummy>
constexpr nullopt_t nullopt_holder<Dummy>::instance_;
}
static constexpr const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;

#else
namespace internal {
template <class> struct nullopt_holder {
  static const nullopt_t instance_;
};
template<class Dummy>
const nullopt_t nullopt_holder<Dummy>::instance_ = nullopt_t(0);
}
static const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;

#endif
template<class T>
class Optional FLATBUFFERS_FINAL_CLASS {
  // Non-scalar 'T' would make Optional<T> extremely complicated.
  // Use is_scalar<T> checking because flatbuffers::is_arithmetic<T>
  // isn't implemented.
  static_assert(flatbuffers::is_scalar<T>::value, "unexpected type T");

 public:
  ~Optional() {}

  // Disengaged optional; value_ is value-initialized (zero for scalars).
  FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT
    : value_(), has_value_(false) {}

  FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT
    : value_(), has_value_(false) {}

  // Engaged optional holding val (T is scalar, so pass-by-value is cheap).
  FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT
    : value_(val), has_value_(true) {}

  FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT
    : value_(other.value_), has_value_(other.has_value_) {}

  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT {
    value_ = other.value_;
    has_value_ = other.has_value_;
    return *this;
  }

  // Assigning nullopt disengages and resets the stored value to T().
  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT {
    value_ = T();
    has_value_ = false;
    return *this;
  }

  FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT {
    value_ = val;
    has_value_ = true;
    return *this;
  }

  void reset() FLATBUFFERS_NOEXCEPT {
    *this = nullopt;
  }

  void swap(Optional &other) FLATBUFFERS_NOEXCEPT {
    std::swap(value_, other.value_);
    std::swap(has_value_, other.has_value_);
  }

  FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT {
    return has_value_;
  }

  FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT {
    return has_value_;
  }

  // Unchecked access: returns value_ even when disengaged (then it is T()).
  FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT {
    return value_;
  }

  // Checked access: asserts the optional is engaged.
  const T& value() const {
    FLATBUFFERS_ASSERT(has_value());
    return value_;
  }

  T value_or(T default_value) const FLATBUFFERS_NOEXCEPT {
    return has_value() ? value_ : default_value;
  }

 private:
  T value_;         // T() when disengaged
  bool has_value_;
};
// An empty optional compares equal to nullopt.
template<class T>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& opt, nullopt_t) FLATBUFFERS_NOEXCEPT {
  return !opt;
}
template<class T>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional<T>& opt) FLATBUFFERS_NOEXCEPT {
  return !opt;
}

// Optional vs. plain value: equal only when engaged and holding an equal value.
template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(lhs) && (*lhs == rhs);
}

template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(rhs) && (lhs == *rhs);
}

// Optional vs. optional.
// NOTE(review): when BOTH sides are disengaged, the second branch yields
// false — unlike std::optional, where two empty optionals compare equal.
// Confirm this asymmetry is intentional before relying on it.
template<class T, class U>
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
  return static_cast<bool>(lhs) != static_cast<bool>(rhs)
         ? false
         : !static_cast<bool>(lhs) ? false : (*lhs == *rhs);
}
#endif // FLATBUFFERS_USE_STD_OPTIONAL
// Very limited and naive partial implementation of C++20 std::span<T,Extent>.
#if defined(FLATBUFFERS_USE_STD_SPAN)
  inline constexpr std::size_t dynamic_extent = std::dynamic_extent;
  template<class T, std::size_t Extent = std::dynamic_extent>
  using span = std::span<T, Extent>;

#else // !defined(FLATBUFFERS_USE_STD_SPAN)
// Sentinel meaning "extent determined at run time"; mirrors std::dynamic_extent.
FLATBUFFERS_CONSTEXPR std::size_t dynamic_extent = static_cast<std::size_t>(-1);

// Exclude this code if MSVC2010 or non-STL Android is active.
// The non-STL Android doesn't have `std::is_convertible` required for SFINAE.
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
namespace internal {
  // This is SFINAE helper class for checking of a common condition:
  // > This overload only participates in overload resolution
  // > Check whether a pointer to an array of U can be converted
  // > to a pointer to an array of E.
  // This helper is used for checking of 'U -> const U'.
  // `type` is `int` when convertible (and extents compatible), `void` otherwise.
  template<class E, std::size_t Extent, class U, std::size_t N>
  struct is_span_convertable {
    using type =
      typename std::conditional<std::is_convertible<U (*)[], E (*)[]>::value
                                && (Extent == dynamic_extent || N == Extent),
                                int, void>::type;
  };

  // Minimal forward iterator over a contiguous range of T.
  template<typename T>
  struct SpanIterator {
    // TODO: upgrade to std::random_access_iterator_tag.
    using iterator_category = std::forward_iterator_tag;
    using difference_type   = std::ptrdiff_t;
    using value_type = typename std::remove_cv<T>::type;
    using reference  = T&;
    using pointer    = T*;

    // Convince MSVC compiler that this iterator is trusted (it is verified).
    #ifdef _MSC_VER
    using _Unchecked_type = pointer;
    #endif // _MSC_VER

    SpanIterator(pointer ptr) : ptr_(ptr) {}

    reference operator*() const { return *ptr_; }
    pointer operator->() { return ptr_; }
    SpanIterator& operator++() { ptr_++; return *this; }
    SpanIterator  operator++(int) { auto tmp = *this; ++(*this); return tmp; }

    friend bool operator== (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ == rhs.ptr_; }
    friend bool operator!= (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ != rhs.ptr_; }

   private:
    pointer ptr_;
  };
} // namespace internal
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
// T - element type; must be a complete type that is not an abstract
// class type.
// Extent - the number of elements in the sequence, or dynamic.
template<class T, std::size_t Extent = dynamic_extent>
class span FLATBUFFERS_FINAL_CLASS {
 public:
  typedef T element_type;
  typedef T &reference;
  typedef const T &const_reference;
  typedef T *pointer;
  typedef const T *const_pointer;
  typedef std::size_t size_type;

  static FLATBUFFERS_CONSTEXPR size_type extent = Extent;

  // Returns the number of elements in the span.
  FLATBUFFERS_CONSTEXPR_CPP11 size_type size() const FLATBUFFERS_NOEXCEPT {
    return count_;
  }

  // Returns the size of the sequence in bytes.
  FLATBUFFERS_CONSTEXPR_CPP11
  size_type size_bytes() const FLATBUFFERS_NOEXCEPT {
    return size() * sizeof(element_type);
  }

  // Checks if the span is empty.
  FLATBUFFERS_CONSTEXPR_CPP11 bool empty() const FLATBUFFERS_NOEXCEPT {
    return size() == 0;
  }

  // Returns a pointer to the beginning of the sequence.
  FLATBUFFERS_CONSTEXPR_CPP11 pointer data() const FLATBUFFERS_NOEXCEPT {
    return data_;
  }

#if !defined(FLATBUFFERS_SPAN_MINIMAL)
  using Iterator = internal::SpanIterator<T>;
  using ConstIterator = internal::SpanIterator<const T>;

  Iterator begin() const { return Iterator(data()); }
  Iterator end() const { return Iterator(data() + size()); }

  ConstIterator cbegin() const { return ConstIterator(data()); }
  ConstIterator cend() const { return ConstIterator(data() + size()); }
#endif

  // Returns a reference to the idx-th element of the sequence.
  // The behavior is undefined if the idx is greater than or equal to size().
  FLATBUFFERS_CONSTEXPR_CPP11 reference operator[](size_type idx) const {
    return data()[idx];
  }

  FLATBUFFERS_CONSTEXPR_CPP11 span(const span &other) FLATBUFFERS_NOEXCEPT
      : data_(other.data_), count_(other.count_) {}

  // BUGFIX: the original assignment operator assigned to const-qualified
  // members (ill-formed if ever instantiated) and fell off the end of a
  // value-returning function (undefined behavior). The members are now
  // non-const and the operator returns *this.
  FLATBUFFERS_CONSTEXPR_CPP14 span &operator=(const span &other)
      FLATBUFFERS_NOEXCEPT {
    data_ = other.data_;
    count_ = other.count_;
    return *this;
  }

  // Limited implementation of
  // `template <class It> constexpr std::span(It first, size_type count);`.
  //
  // Constructs a span that is a view over the range [first, first + count);
  // the resulting span has: data() == first and size() == count.
  // The behavior is undefined if [first, first + count) is not a valid range,
  // or if (extent != flatbuffers::dynamic_extent && count != extent).
  FLATBUFFERS_CONSTEXPR_CPP11
  explicit span(pointer first, size_type count) FLATBUFFERS_NOEXCEPT
    : data_ (Extent == dynamic_extent ? first : (Extent == count ? first : nullptr)),
      count_(Extent == dynamic_extent ? count : (Extent == count ? Extent : 0)) {
      // Make span empty if the count argument is incompatible with span<T,N>.
  }

  // Exclude this code if MSVC2010 is active. The MSVC2010 isn't C++11
  // compliant, it doesn't support default template arguments for functions.
#if defined(FLATBUFFERS_SPAN_MINIMAL)
  FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
                                                            count_(0) {
    static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
  }
#else
  // Constructs an empty span whose data() == nullptr and size() == 0.
  // This overload only participates in overload resolution if
  // extent == 0 || extent == flatbuffers::dynamic_extent.
  // A dummy template argument N is needed as a dependency for SFINAE.
  template<std::size_t N = 0,
    typename internal::is_span_convertable<element_type, Extent, element_type, (N - N)>::type = 0>
  FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
                                                            count_(0) {
    static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
  }

  // Constructs a span that is a view over the array arr; the resulting span
  // has size() == N and data() == std::data(arr). These overloads only
  // participate in overload resolution if
  // extent == std::dynamic_extent || N == extent is true and
  // std::remove_pointer_t<decltype(std::data(arr))>(*)[]
  // is convertible to element_type (*)[].
  template<std::size_t N,
    typename internal::is_span_convertable<element_type, Extent, element_type, N>::type = 0>
  FLATBUFFERS_CONSTEXPR_CPP11 span(element_type (&arr)[N]) FLATBUFFERS_NOEXCEPT
      : data_(arr), count_(N) {}

  template<class U, std::size_t N,
    typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
  FLATBUFFERS_CONSTEXPR_CPP11 span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
     : data_(arr.data()), count_(N) {}

  template<class U, std::size_t N,
    typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
  FLATBUFFERS_CONSTEXPR_CPP11 span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
    : data_(arr.data()), count_(N) {}

  // Converting constructor from another span s;
  // the resulting span has size() == s.size() and data() == s.data().
  // This overload only participates in overload resolution
  // if extent == std::dynamic_extent || N == extent is true and U (*)[]
  // is convertible to element_type (*)[].
  template<class U, std::size_t N,
    typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
  FLATBUFFERS_CONSTEXPR_CPP11 span(const flatbuffers::span<U, N> &s) FLATBUFFERS_NOEXCEPT
      : span(s.data(), s.size()) {
  }
#endif  // !defined(FLATBUFFERS_SPAN_MINIMAL)

 private:
  // This is a naive implementation with 'count_' member even if (Extent != dynamic_extent).
  // Non-const so that the copy-assignment operator above is well-formed.
  pointer data_;
  size_type count_;
};
#endif // defined(FLATBUFFERS_USE_STD_SPAN)
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
// Constructs a fixed-extent span viewing a C array; deduces U and N.
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, N> make_span(U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
  return span<U, N>(arr);
}
// Const C-array overload; element type becomes const U.
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, N> make_span(const U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
  return span<const U, N>(arr);
}
// Fixed-extent span over a mutable std::array.
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, N> make_span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  return span<U, N>(arr);
}
// Fixed-extent span over a const std::array; element type becomes const U.
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, N> make_span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
  return span<const U, N>(arr);
}
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<U, dynamic_extent> make_span(U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<U, dynamic_extent>(first, count);
}
template<class U, std::size_t N>
FLATBUFFERS_CONSTEXPR_CPP11
flatbuffers::span<const U, dynamic_extent> make_span(const U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
return span<const U, dynamic_extent>(first, count);
}
#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
} // namespace flatbuffers
#endif // FLATBUFFERS_STL_EMULATION_H_

64
flatbuffers/string.h Normal file
View File

@ -0,0 +1,64 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRING_H_
#define FLATBUFFERS_STRING_H_
#include "flatbuffers/base.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// A serialized flatbuffers string: a Vector<char> viewed as character data.
struct String : public Vector<char> {
  // Raw character data. NOTE(review): usable as a C string only if the
  // serialized buffer stores a trailing NUL — confirm against the writer.
  const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
  // Copies size() bytes into an owning std::string.
  std::string str() const { return std::string(c_str(), size()); }
  // clang-format off
  #ifdef FLATBUFFERS_HAS_STRING_VIEW
  // Non-owning view over the string's bytes (valid while the buffer lives).
  flatbuffers::string_view string_view() const {
    return flatbuffers::string_view(c_str(), size());
  }
  #endif // FLATBUFFERS_HAS_STRING_VIEW
  // clang-format on
  // Ordering via StringLessThan on the raw bytes and lengths.
  bool operator<(const String &o) const {
    return StringLessThan(this->data(), this->size(), o.data(), o.size());
  }
};
// Convenience function to obtain an owning std::string from a String;
// a null pointer yields an empty string.
static inline std::string GetString(const String *str) {
  if (str == nullptr) return std::string();
  return str->str();
}
// Convenience function to obtain a C string from a String;
// a null pointer yields "".
static inline const char *GetCstring(const String *str) {
  if (str == nullptr) return "";
  return str->c_str();
}
#ifdef FLATBUFFERS_HAS_STRING_VIEW
// Convenience function to obtain a string_view from a String;
// a null pointer yields an empty string_view.
static inline flatbuffers::string_view GetStringView(const String *str) {
  if (str == nullptr) return flatbuffers::string_view();
  return str->string_view();
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
} // namespace flatbuffers
#endif // FLATBUFFERS_STRING_H_

53
flatbuffers/struct.h Normal file
View File

@ -0,0 +1,53 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_STRUCT_H_
#define FLATBUFFERS_STRUCT_H_
#include "flatbuffers/base.h"
namespace flatbuffers {
// "structs" are flat structures that do not have an offset table, thus
// always have all members present and do not support forwards/backwards
// compatible extensions.
// "structs" are flat fixed-layout records; all accessors take a byte offset
// `o` into this object's storage.
class Struct FLATBUFFERS_FINAL_CLASS {
 public:
  // Reads a scalar of type T at byte offset o.
  template<typename T> T GetField(uoffset_t o) const {
    return ReadScalar<T>(&data_[o]);
  }
  // Reinterprets the bytes at offset o as T (T is expected to be a pointer
  // type, e.g. a nested struct pointer).
  template<typename T> T GetStruct(uoffset_t o) const {
    return reinterpret_cast<T>(&data_[o]);
  }
  // Raw address of the field at byte offset o.
  const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
  uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
 private:
  // private constructor & copy constructor: you obtain instances of this
  // class by pointing to existing data only
  Struct();
  Struct(const Struct &);
  Struct &operator=(const Struct &);
  // Placeholder for the struct's bytes; real instances overlay buffer data.
  uint8_t data_[1];
};
} // namespace flatbuffers
#endif // FLATBUFFERS_STRUCT_H_

166
flatbuffers/table.h Normal file
View File

@ -0,0 +1,166 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_TABLE_H_
#define FLATBUFFERS_TABLE_H_
#include "flatbuffers/base.h"
#include "flatbuffers/verifier.h"
namespace flatbuffers {
// "tables" use an offset table (possibly shared) that allows fields to be
// omitted and added at will, but uses an extra indirection to read.
class Table {
 public:
  // A table starts with a signed offset pointing back to its vtable.
  const uint8_t *GetVTable() const {
    return data_ - ReadScalar<soffset_t>(data_);
  }
  // This gets the field offset for any of the functions below it, or 0
  // if the field was not present.
  voffset_t GetOptionalFieldOffset(voffset_t field) const {
    // The vtable offset is always at the start.
    auto vtable = GetVTable();
    // The first element is the size of the vtable (fields + type id + itself).
    auto vtsize = ReadScalar<voffset_t>(vtable);
    // If the field we're accessing is outside the vtable, we're reading older
    // data, so it's the same as if the offset was 0 (not present).
    return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
  }
  // Reads an inline scalar field, or `defaultval` when the field is absent.
  template<typename T> T GetField(voffset_t field, T defaultval) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
  }
  // Follows an indirect (uoffset-valued) field to its target, or nullptr
  // when the field is absent.
  template<typename P> P GetPointer(voffset_t field) {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
                        : nullptr;
  }
  template<typename P> P GetPointer(voffset_t field) const {
    return const_cast<Table *>(this)->GetPointer<P>(field);
  }
  // Address of an inline struct field reinterpreted as P, or nullptr when
  // the field is absent.
  template<typename P> P GetStruct(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = const_cast<uint8_t *>(data_ + field_offset);
    return field_offset ? reinterpret_cast<P>(p) : nullptr;
  }
  // Reads a scalar stored as Raw and exposes it as Optional<Face>;
  // empty optional when the field is absent.
  template<typename Raw, typename Face>
  flatbuffers::Optional<Face> GetOptional(voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    auto p = data_ + field_offset;
    return field_offset ? Optional<Face>(static_cast<Face>(ReadScalar<Raw>(p)))
                        : Optional<Face>();
  }
  // Writes `val` in place. When the field is absent nothing is stored and
  // the call succeeds only if `val` equals the default `def`.
  template<typename T> bool SetField(voffset_t field, T val, T def) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return IsTheSameAs(val, def);
    WriteScalar(data_ + field_offset, val);
    return true;
  }
  // Writes `val` in place; fails when the field is absent.
  template<typename T> bool SetField(voffset_t field, T val) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return false;
    WriteScalar(data_ + field_offset, val);
    return true;
  }
  // Re-points an indirect field at `val`, stored as an offset relative to
  // the field's own location; fails when the field is absent.
  bool SetPointer(voffset_t field, const uint8_t *val) {
    auto field_offset = GetOptionalFieldOffset(field);
    if (!field_offset) return false;
    WriteScalar(data_ + field_offset,
                static_cast<uoffset_t>(val - (data_ + field_offset)));
    return true;
  }
  // Address of a field's inline storage, or nullptr when absent.
  uint8_t *GetAddressOf(voffset_t field) {
    auto field_offset = GetOptionalFieldOffset(field);
    return field_offset ? data_ + field_offset : nullptr;
  }
  const uint8_t *GetAddressOf(voffset_t field) const {
    return const_cast<Table *>(this)->GetAddressOf(field);
  }
  // True if the field is present in this table's vtable.
  bool CheckField(voffset_t field) const {
    return GetOptionalFieldOffset(field) != 0;
  }
  // Verify the vtable of this table.
  // Call this once per table, followed by VerifyField once per field.
  bool VerifyTableStart(Verifier &verifier) const {
    return verifier.VerifyTableStart(data_);
  }
  // Verify a particular field.
  template<typename T>
  bool VerifyField(const Verifier &verifier, voffset_t field) const {
    // Calling GetOptionalFieldOffset should be safe now thanks to
    // VerifyTable().
    auto field_offset = GetOptionalFieldOffset(field);
    // Check the actual field (absent fields verify trivially).
    return !field_offset || verifier.Verify<T>(data_, field_offset);
  }
  // VerifyField for required fields: absence is a verification failure.
  template<typename T>
  bool VerifyFieldRequired(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.Verify<T>(data_, field_offset);
  }
  // Versions for offsets (indirect fields).
  bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return !field_offset || verifier.VerifyOffset(data_, field_offset);
  }
  bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
    auto field_offset = GetOptionalFieldOffset(field);
    return verifier.Check(field_offset != 0) &&
           verifier.VerifyOffset(data_, field_offset);
  }
 private:
  // private constructor & copy constructor: you obtain instances of this
  // class by pointing to existing data only
  Table();
  Table(const Table &other);
  Table &operator=(const Table &);
  // Placeholder for the table's bytes; real instances overlay buffer data.
  uint8_t data_[1];
};
// This specialization allows avoiding warnings like:
// MSVC C4800: type: forcing value to bool 'true' or 'false'.
template<>
inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(
    voffset_t field) const {
  auto field_offset = GetOptionalFieldOffset(field);
  auto p = data_ + field_offset;
  // Compare against 0 explicitly so the uint8_t -> bool conversion does not
  // trigger warnings such as MSVC C4800.
  return field_offset ? Optional<bool>(ReadScalar<uint8_t>(p) != 0)
                      : Optional<bool>();
}
} // namespace flatbuffers
#endif // FLATBUFFERS_TABLE_H_

689
flatbuffers/util.h Normal file
View File

@ -0,0 +1,689 @@
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_UTIL_H_
#define FLATBUFFERS_UTIL_H_
#include <errno.h>
#include "flatbuffers/base.h"
#include "flatbuffers/stl_emulation.h"
#ifndef FLATBUFFERS_PREFER_PRINTF
# include <sstream>
#else // FLATBUFFERS_PREFER_PRINTF
# include <float.h>
# include <stdio.h>
#endif // FLATBUFFERS_PREFER_PRINTF
#include <iomanip>
#include <string>
namespace flatbuffers {
// @locale-independent functions for ASCII characters set.
// Fast checking that character lies in closed range: [a <= x <= b]
// using one compare (conditional branch) operator.
inline bool check_ascii_range(char x, char a, char b) {
  FLATBUFFERS_ASSERT(a <= b);
  // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`.
  // The x, a, b will be promoted to int and subtracted without overflow.
  // If x < a the subtraction wraps to a huge unsigned value, failing the test.
  return static_cast<unsigned int>(x - a) <= static_cast<unsigned int>(b - a);
}
// Case-insensitive isalpha
inline bool is_alpha(char c) {
  // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
  return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF);
}
// Check for uppercase alpha
inline bool is_alpha_upper(char c) { return check_ascii_range(c, 'A', 'Z'); }
// Check (case-insensitive) that `c` is equal to alpha.
inline bool is_alpha_char(char c, char alpha) {
  FLATBUFFERS_ASSERT(is_alpha(alpha));
  // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
  return ((c & 0xDF) == (alpha & 0xDF));
}
// https://en.cppreference.com/w/cpp/string/byte/isxdigit
// isdigit and isxdigit are the only standard narrow character classification
// functions that are not affected by the currently installed C locale. although
// some implementations (e.g. Microsoft in 1252 codepage) may classify
// additional single-byte characters as digits.
inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); }
// Hexadecimal digit test: 0-9, a-f, A-F.
inline bool is_xdigit(char c) {
  // Replace by look-up table.
  return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF);
}
// Case-insensitive isalnum
inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); }
// Uppercases a single character via ::toupper; the unsigned char cast keeps
// negative char values out of undefined-behavior territory.
inline char CharToUpper(char c) {
  const unsigned char uc = static_cast<unsigned char>(c);
  return static_cast<char>(::toupper(uc));
}
// Lowercases a single character via ::tolower; the unsigned char cast keeps
// negative char values out of undefined-behavior territory.
inline char CharToLower(char c) {
  const unsigned char uc = static_cast<unsigned char>(c);
  return static_cast<char>(::tolower(uc));
}
// @end-locale-independent functions for ASCII character set
#ifdef FLATBUFFERS_PREFER_PRINTF
// Returns the number of characters needed for the integral part of `t`,
// counting a leading '-' for negatives and a single '0' left of the dot for
// values in (-1, 1).
template<typename T> size_t IntToDigitCount(T t) {
  size_t digit_count = 0;
  // Count the sign for negative numbers
  if (t < 0) digit_count++;
  // Count a single 0 left of the dot for fractional numbers
  if (-1 < t && t < 1) digit_count++;
  // Count digits until fractional part
  // (float epsilon guards the loop bound against rounding at +/-1).
  T eps = std::numeric_limits<float>::epsilon();
  while (t <= (-1 + eps) || (1 - eps) <= t) {
    t /= 10;
    digit_count++;
  }
  return digit_count;
}
// Total buffer width needed to print `t`: integral digits plus, when a
// fractional precision is requested, the '.' and the fractional digits.
template<typename T> size_t NumToStringWidth(T t, int precision = 0) {
  size_t string_width = IntToDigitCount(t);
  // Count the dot for floating point numbers
  if (precision) string_width += (precision + 1);
  return string_width;
}
// Formats `t` with snprintf into an exactly-sized std::string. `fmt` must
// contain a "%.*" conversion; the computed width is supplied as its precision
// argument so the output fills the buffer.
template<typename T>
std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) {
  size_t string_width = NumToStringWidth(t, precision);
  std::string s(string_width, 0x00);
  // Allow snprintf to use std::string trailing null to detect buffer overflow
  // Fix: the "%.*" precision consumed from the varargs list must be an int;
  // passing size_t directly is undefined behavior where the widths differ
  // (e.g. LP64), so cast explicitly.
  snprintf(const_cast<char *>(s.data()), (s.size() + 1), fmt,
           static_cast<int>(string_width), t);
  return s;
}
#endif // FLATBUFFERS_PREFER_PRINTF
// Convert an integer or floating point value to a string.
// In contrast to std::stringstream, "char" values are
// converted to a string of digits, and we don't use scientific notation.
template<typename T> std::string NumToString(T t) {
  // clang-format off
  #ifndef FLATBUFFERS_PREFER_PRINTF
    std::stringstream ss;
    ss << t;
    return ss.str();
  #else // FLATBUFFERS_PREFER_PRINTF
    auto v = static_cast<long long>(t);
    return NumToStringImplWrapper(v, "%.*lld");
  #endif // FLATBUFFERS_PREFER_PRINTF
  // clang-format on
}
// Avoid char types used as character data: widen to int first so "65"
// is produced instead of "A".
template<> inline std::string NumToString<signed char>(signed char t) {
  return NumToString(static_cast<int>(t));
}
template<> inline std::string NumToString<unsigned char>(unsigned char t) {
  return NumToString(static_cast<int>(t));
}
template<> inline std::string NumToString<char>(char t) {
  return NumToString(static_cast<int>(t));
}
// Special versions for floats/doubles.
// Converts a float/double to a fixed-notation string with the requested
// precision, then trims trailing zeroes; whole numbers keep a single zero
// after the dot (e.g. "1.000000" becomes "1.0").
template<typename T> std::string FloatToString(T t, int precision) {
  // clang-format off
  #ifndef FLATBUFFERS_PREFER_PRINTF
    // to_string() prints different numbers of digits for floats depending on
    // platform and isn't available on Android, so we use stringstream.
    // std::fixed suppresses scientific notation.
    std::stringstream stream;
    stream << std::fixed << std::setprecision(precision) << t;
    std::string text = stream.str();
  #else // FLATBUFFERS_PREFER_PRINTF
    auto v = static_cast<double>(t);
    std::string text = NumToStringImplWrapper(v, "%0.*f", precision);
  #endif // FLATBUFFERS_PREFER_PRINTF
  // clang-format on
  // std::fixed turns "1" into "1.000000"; undo that here.
  const auto last_significant = text.find_last_not_of('0');
  if (last_significant != std::string::npos) {
    // Strip trailing zeroes; if the value is whole, keep one zero.
    const bool ends_at_dot = (text[last_significant] == '.');
    text.resize(last_significant + (ends_at_dot ? 2 : 1));
  }
  return text;
}
// Doubles are printed with higher precision (12) than floats (6).
template<> inline std::string NumToString<double>(double t) {
  return FloatToString(t, 12);
}
template<> inline std::string NumToString<float>(float t) {
  return FloatToString(t, 6);
}
// Convert an integer value to a hexadecimal string.
// The returned string length is always xdigits long, prefixed by 0 digits.
// For example, IntToStringHex(0x23, 8) returns the string "00000023".
inline std::string IntToStringHex(int i, int xdigits) {
  FLATBUFFERS_ASSERT(i >= 0);
  // clang-format off
  #ifndef FLATBUFFERS_PREFER_PRINTF
    std::stringstream ss;
    ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase
       << i;
    return ss.str();
  #else // FLATBUFFERS_PREFER_PRINTF
    return NumToStringImplWrapper(i, "%.*X", xdigits);
  #endif // FLATBUFFERS_PREFER_PRINTF
  // clang-format on
}
// clang-format off
// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}.
#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0)
// Singleton owning a "C" (classic) locale object, so the strto*_l wrappers
// above can parse numbers independently of the global locale.
class ClassicLocale {
  #ifdef _MSC_VER
  typedef _locale_t locale_type;
  #else
  typedef locale_t locale_type;  // POSIX.1-2008 locale_t type
  #endif
  // Construction/destruction happen once for the static instance
  // (defined elsewhere).
  ClassicLocale();
  ~ClassicLocale();
  locale_type locale_;
  static ClassicLocale instance_;
 public:
  static locale_type Get() { return instance_.locale_; }
};
#ifdef _MSC_VER
#define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get())
#define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get())
#define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get())
#define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get())
#else
#define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get())
#define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get())
#define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get())
#define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get())
#endif
#else
#define __strtod_impl(s, pe) strtod(s, pe)
#define __strtof_impl(s, pe) static_cast<float>(strtod(s, pe))
#ifdef _MSC_VER
#define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b)
#define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b)
#else
#define __strtoull_impl(s, pe, b) strtoull(s, pe, b)
#define __strtoll_impl(s, pe, b) strtoll(s, pe, b)
#endif
#endif
// Locale-independent strto* wrappers dispatching on the output type;
// the __strto*_impl macros above pick the platform/locale variant.
inline void strtoval_impl(int64_t *val, const char *str, char **endptr,
                          int base) {
  *val = __strtoll_impl(str, endptr, base);
}
inline void strtoval_impl(uint64_t *val, const char *str, char **endptr,
                          int base) {
  *val = __strtoull_impl(str, endptr, base);
}
inline void strtoval_impl(double *val, const char *str, char **endptr) {
  *val = __strtod_impl(str, endptr);
}
// UBSAN: double to float is safe if numeric_limits<float>::is_iec559 is true.
__supress_ubsan__("float-cast-overflow")
inline void strtoval_impl(float *val, const char *str, char **endptr) {
  *val = __strtof_impl(str, endptr);
}
#undef __strtoull_impl
#undef __strtoll_impl
#undef __strtod_impl
#undef __strtof_impl
// clang-format on
// Adaptor for strtoull()/strtoll().
// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9),
// while strtoll with base=0 interprets first leading zero as octal prefix.
// In future, it is possible to add prefixed 0b0101.
// 1) Checks errno code for overflow condition (out of range).
// 2) If base <= 0, function try to detect base of number by prefix.
//
// Return value (like strtoull and strtoll, but reject partial result):
// - If successful, an integer value corresponding to the str is returned.
// - If full string conversion can't be performed, 0 is returned.
// - If the converted value falls out of range of corresponding return type, a
// range error occurs. In this case value MAX(T)/MIN(T) is returned.
template<typename T>
inline bool StringToIntegerImpl(T *val, const char *const str,
                                const int base = 0,
                                const bool check_errno = true) {
  // T is int64_t or uint64_t
  FLATBUFFERS_ASSERT(str);
  if (base <= 0) {
    // Detect the base ourselves instead of using strtoll's base-0 mode,
    // which would treat a leading zero as an octal prefix.
    auto s = str;
    while (*s && !is_digit(*s)) s++;
    if (s[0] == '0' && is_alpha_char(s[1], 'X'))
      return StringToIntegerImpl(val, str, 16, check_errno);
    // if a prefix not match, try base=10
    return StringToIntegerImpl(val, str, 10, check_errno);
  } else {
    if (check_errno) errno = 0;  // clear thread-local errno
    auto endptr = str;
    strtoval_impl(val, str, const_cast<char **>(&endptr), base);
    if ((*endptr != '\0') || (endptr == str)) {
      *val = 0;      // erase partial result
      return false;  // invalid string
    }
    // errno is out-of-range, return MAX/MIN
    if (check_errno && errno) return false;
    return true;
  }
}
// Parses `str` fully into *val (T is float or double). Returns false — and
// zeroes *val — unless the entire string converted.
template<typename T>
inline bool StringToFloatImpl(T *val, const char *const str) {
  // Type T must be either float or double.
  FLATBUFFERS_ASSERT(str && val);
  auto endptr = str;
  strtoval_impl(val, str, const_cast<char **>(&endptr));
  if (endptr == str || *endptr != '\0') {
    *val = 0;  // erase partial result
    return false;
  }
  return true;
}
// Convert a string to an instance of T.
// Return value (matched with StringToInteger64Impl and strtod):
// - If successful, a numeric value corresponding to the str is returned.
// - If full string conversion can't be performed, 0 is returned.
// - If the converted value falls out of range of corresponding return type, a
// range error occurs. In this case value MAX(T)/MIN(T) is returned.
template<typename T> inline bool StringToNumber(const char *s, T *val) {
  // Assert on `unsigned long` and `signed long` on LP64.
  // If it is necessary, it could be solved with flatbuffers::enable_if<B,T>.
  static_assert(sizeof(T) < sizeof(int64_t), "unexpected type T");
  FLATBUFFERS_ASSERT(s && val);
  int64_t i64;
  // The errno check isn't needed, will return MAX/MIN on overflow.
  if (StringToIntegerImpl(&i64, s, 0, false)) {
    const int64_t max = (flatbuffers::numeric_limits<T>::max)();
    const int64_t min = flatbuffers::numeric_limits<T>::lowest();
    if (i64 > max) {
      // Clamp to T's maximum and report the out-of-range condition.
      *val = static_cast<T>(max);
      return false;
    }
    if (i64 < min) {
      // For unsigned types return max to distinguish from
      // "no conversion can be performed" when 0 is returned.
      *val = static_cast<T>(flatbuffers::is_unsigned<T>::value ? max : min);
      return false;
    }
    *val = static_cast<T>(i64);
    return true;
  }
  *val = 0;
  return false;
}
template<> inline bool StringToNumber<int64_t>(const char *str, int64_t *val) {
  return StringToIntegerImpl(val, str);
}
template<>
inline bool StringToNumber<uint64_t>(const char *str, uint64_t *val) {
  if (!StringToIntegerImpl(val, str)) return false;
  // The strtoull accepts negative numbers:
  // If the minus sign was part of the input sequence, the numeric value
  // calculated from the sequence of digits is negated as if by unary minus
  // in the result type, which applies unsigned integer wraparound rules.
  // Fix this behaviour (except -0).
  if (*val) {
    auto s = str;
    while (*s && !is_digit(*s)) s++;
    s = (s > str) ? (s - 1) : s;  // step back to one symbol
    if (*s == '-') {
      // For unsigned types return the max to distinguish from
      // "no conversion can be performed".
      *val = (flatbuffers::numeric_limits<uint64_t>::max)();
      return false;
    }
  }
  return true;
}
// Floating-point versions delegate to strtof/strtod via StringToFloatImpl.
template<> inline bool StringToNumber(const char *s, float *val) {
  return StringToFloatImpl(val, s);
}
template<> inline bool StringToNumber(const char *s, double *val) {
  return StringToFloatImpl(val, s);
}
// Parses a signed 64-bit integer from `s`; returns 0 when the whole string
// cannot be converted.
inline int64_t StringToInt(const char *s, int base = 10) {
  int64_t result;
  if (StringToIntegerImpl(&result, s, base)) return result;
  return 0;
}
// Parses an unsigned 64-bit integer from `s`; returns 0 when the whole
// string cannot be converted.
inline uint64_t StringToUInt(const char *s, int base = 10) {
  uint64_t result;
  if (StringToIntegerImpl(&result, s, base)) return result;
  return 0;
}
typedef bool (*LoadFileFunction)(const char *filename, bool binary,
std::string *dest);
typedef bool (*FileExistsFunction)(const char *filename);
LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function);
FileExistsFunction SetFileExistsFunction(
FileExistsFunction file_exists_function);
// Check if file "name" exists.
bool FileExists(const char *name);
// Check if "name" exists and it is also a directory.
bool DirExists(const char *name);
// Load file "name" into "buf" returning true if successful
// false otherwise. If "binary" is false data is read
// using ifstream's text mode, otherwise data is read with
// no transcoding.
bool LoadFile(const char *name, bool binary, std::string *buf);
// Save data "buf" of length "len" bytes into a file
// "name" returning true if successful, false otherwise.
// If "binary" is false data is written using ifstream's
// text mode, otherwise data is written with no
// transcoding.
bool SaveFile(const char *name, const char *buf, size_t len, bool binary);
// Save the contents of `buf` into file "name", returning true if
// successful, false otherwise. If "binary" is false data is written
// using ifstream's text mode, otherwise data is written with no
// transcoding. Forwards to the pointer/length overload declared above.
inline bool SaveFile(const char *name, const std::string &buf, bool binary) {
  const char *data = buf.c_str();
  const size_t length = buf.size();
  return SaveFile(name, data, length, binary);
}
// Functionality for minimalistic portable path handling.
// The functions below behave correctly regardless of whether posix ('/') or
// Windows ('/' or '\\') separators are used.
// Any new separators inserted are always posix.
FLATBUFFERS_CONSTEXPR char kPathSeparator = '/';
// Returns the path with the extension, if any, removed.
std::string StripExtension(const std::string &filepath);
// Returns the extension, if any.
std::string GetExtension(const std::string &filepath);
// Return the last component of the path, after the last separator.
std::string StripPath(const std::string &filepath);
// Strip the last component of the path + separator.
std::string StripFileName(const std::string &filepath);
// Concatenates a path with a filename, regardless of whether the path
// ends in a separator or not.
std::string ConCatPathFileName(const std::string &path,
const std::string &filename);
// Replaces any '\\' separators with '/'
std::string PosixPath(const char *path);
std::string PosixPath(const std::string &path);
// This function ensure a directory exists, by recursively
// creating dirs for any parts of the path that don't exist yet.
void EnsureDirExists(const std::string &filepath);
// Obtains the absolute path from any other path.
// Returns the input path if the absolute path couldn't be resolved.
std::string AbsolutePath(const std::string &filepath);
// Returns files relative to the --project_root path, prefixed with `//`.
std::string RelativeToRootPath(const std::string &project,
const std::string &filepath);
// To and from UTF-8 unicode conversion functions
// Convert a unicode code point into a UTF-8 representation by appending it
// to a string. Returns the number of bytes generated.
inline int ToUTF8(uint32_t ucc, std::string *out) {
  FLATBUFFERS_ASSERT(!(ucc & 0x80000000));  // Top bit can't be set.
  // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8
  for (int i = 0; i < 6; i++) {
    // Max bits this encoding can represent.
    uint32_t max_bits = 6 + i * 5 + static_cast<int>(!i);
    if (ucc < (1u << max_bits)) {  // does it fit?
      // Remaining bits not encoded in the first byte, store 6 bits each
      uint32_t remain_bits = i * 6;
      // Store first byte: shifting 0xFE left produces the UTF-8 length
      // marker bits (0xxxxxxx, 110xxxxx, 1110xxxx, ...) for i + 1 bytes.
      (*out) += static_cast<char>((0xFE << (max_bits - remain_bits)) |
                                  (ucc >> remain_bits));
      // Store remaining bytes: 6 payload bits each, tagged with 10xxxxxx.
      for (int j = i - 1; j >= 0; j--) {
        (*out) += static_cast<char>(((ucc >> (j * 6)) & 0x3F) | 0x80);
      }
      return i + 1;  // Return the number of bytes added.
    }
  }
  FLATBUFFERS_ASSERT(0);  // Impossible to arrive here.
  return -1;
}
// Converts whatever prefix of the incoming string corresponds to a valid
// UTF-8 sequence into a unicode code. The incoming pointer will have been
// advanced past all bytes parsed.
// returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in
// this case).
inline int FromUTF8(const char **in) {
  int len = 0;
  // Count leading 1 bits to determine the sequence length.
  for (int mask = 0x80; mask >= 0x04; mask >>= 1) {
    if (**in & mask) {
      len++;
    } else {
      break;
    }
  }
  if ((static_cast<unsigned char>(**in) << len) & 0x80)
    return -1;  // Bit after leading 1's must be 0.
  // A single byte with no leading 1s is plain ASCII: return it directly.
  if (!len) return *(*in)++;
  // UTF-8 encoded values with a length are between 2 and 4 bytes.
  if (len < 2 || len > 4) { return -1; }
  // Grab initial bits of the code.
  int ucc = *(*in)++ & ((1 << (7 - len)) - 1);
  for (int i = 0; i < len - 1; i++) {
    if ((**in & 0xC0) != 0x80) return -1;  // Upper bits must be 10.
    ucc <<= 6;
    ucc |= *(*in)++ & 0x3F;  // Grab 6 more bits of the code.
  }
  // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for
  // UTF-16 surrogate pairs).
  if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; }
  // UTF-8 must represent code points in their shortest possible encoding.
  switch (len) {
    case 2:
      // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF.
      if (ucc < 0x0080 || ucc > 0x07FF) { return -1; }
      break;
    case 3:
      // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF.
      if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; }
      break;
    case 4:
      // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF.
      if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; }
      break;
  }
  return ucc;
}
#ifndef FLATBUFFERS_PREFER_PRINTF
// Wraps `in` to at most `max_length` characters per line, inserting newlines
// between whitespace-separated words. Runs of whitespace collapse to a single
// space. Each wrapped line is prefixed with `wrapped_line_prefix`, and each
// line that gets wrapped receives `wrapped_line_suffix` before its newline.
inline std::string WordWrap(const std::string in, size_t max_length,
                            const std::string wrapped_line_prefix,
                            const std::string wrapped_line_suffix) {
  std::istringstream tokens(in);
  std::string result;
  std::string current_line;
  std::string token;
  tokens >> token;
  current_line = token;
  while (tokens >> token) {
    // Would appending " " + token (plus the suffix) still fit?
    const size_t projected = current_line.length() + 1 + token.length() +
                             wrapped_line_suffix.length();
    if (projected < max_length) {
      current_line.append(" ").append(token);
    } else {
      result.append(current_line).append(wrapped_line_suffix).append("\n");
      current_line = wrapped_line_prefix + token;
    }
  }
  result += current_line;
  return result;
}
#endif // !FLATBUFFERS_PREFER_PRINTF
// JSON-escapes `length` bytes of `s`, appending the quoted result to *_text.
// Valid UTF-8 is emitted either verbatim (natural_utf8) or as \uXXXX escapes
// (with surrogate pairs above the BMP). Invalid UTF-8 is emitted as \xNN when
// allow_non_utf8 is set; otherwise the function fails and returns false.
inline bool EscapeString(const char *s, size_t length, std::string *_text,
                         bool allow_non_utf8, bool natural_utf8) {
  std::string &text = *_text;
  text += "\"";
  for (uoffset_t i = 0; i < length; i++) {
    char c = s[i];
    switch (c) {
      case '\n': text += "\\n"; break;
      case '\t': text += "\\t"; break;
      case '\r': text += "\\r"; break;
      case '\b': text += "\\b"; break;
      case '\f': text += "\\f"; break;
      case '\"': text += "\\\""; break;
      case '\\': text += "\\\\"; break;
      default:
        if (c >= ' ' && c <= '~') {
          // Printable ASCII passes through unchanged.
          text += c;
        } else {
          // Not printable ASCII data. Let's see if it's valid UTF-8 first:
          const char *utf8 = s + i;
          int ucc = FromUTF8(&utf8);
          if (ucc < 0) {
            if (allow_non_utf8) {
              text += "\\x";
              text += IntToStringHex(static_cast<uint8_t>(c), 2);
            } else {
              // There are two cases here:
              //
              // 1) We reached here by parsing an IDL file. In that case,
              // we previously checked for non-UTF-8, so we shouldn't reach
              // here.
              //
              // 2) We reached here by someone calling GenerateText()
              // on a previously-serialized flatbuffer. The data might have
              // non-UTF-8 Strings, or might be corrupt.
              //
              // In both cases, we have to give up and inform the caller
              // they have no JSON.
              return false;
            }
          } else {
            if (natural_utf8) {
              // utf8 points to past all utf-8 bytes parsed
              text.append(s + i, static_cast<size_t>(utf8 - s - i));
            } else if (ucc <= 0xFFFF) {
              // Parses as Unicode within JSON's \uXXXX range, so use that.
              text += "\\u";
              text += IntToStringHex(ucc, 4);
            } else if (ucc <= 0x10FFFF) {
              // Encode Unicode SMP values to a surrogate pair using two \u
              // escapes.
              uint32_t base = ucc - 0x10000;
              auto high_surrogate = (base >> 10) + 0xD800;
              auto low_surrogate = (base & 0x03FF) + 0xDC00;
              text += "\\u";
              text += IntToStringHex(high_surrogate, 4);
              text += "\\u";
              text += IntToStringHex(low_surrogate, 4);
            }
            // Skip past characters recognized.
            i = static_cast<uoffset_t>(utf8 - s - 1);
          }
        }
        break;
    }
  }
  text += "\"";
  return true;
}
// Renders `buffer_size` bytes of `buffer` as a comma-separated list of
// "0xNN" literals, wrapping with suffix + "\n" + prefix whenever a line
// reaches `max_length`. A null buffer yields just prefix + suffix.
inline std::string BufferToHexText(const void *buffer, size_t buffer_size,
                                   size_t max_length,
                                   const std::string &wrapped_line_prefix,
                                   const std::string &wrapped_line_suffix) {
  const char *bytes = reinterpret_cast<const char *>(buffer);
  std::string out = wrapped_line_prefix;
  size_t line_start = 0;
  for (size_t idx = 0; bytes && idx < buffer_size; idx++) {
    const bool not_last = idx + 1 < buffer_size;
    out += "0x";
    out += IntToStringHex(static_cast<uint8_t>(bytes[idx]), 2);
    if (not_last) {
      out += ',';
      // Wrap once the current line (plus the pending suffix) hits the limit.
      if (out.size() + wrapped_line_suffix.size() >= line_start + max_length) {
        out += wrapped_line_suffix;
        out += '\n';
        line_start = out.size();
        out += wrapped_line_prefix;
      }
    }
  }
  out += wrapped_line_suffix;
  return out;
}
// Remove paired quotes in a string: "text"|'text' -> text.
std::string RemoveStringQuotes(const std::string &s);
// Change the global C-locale to locale with name <locale_name>.
// Returns an actual locale name in <_value>, useful if locale_name is "" or
// null.
bool SetGlobalTestLocale(const char *locale_name,
std::string *_value = nullptr);
// Read (or test) a value of environment variable.
bool ReadEnvironmentVariable(const char *var_name,
std::string *_value = nullptr);
// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs.
void SetupDefaultCRTReportMode();
} // namespace flatbuffers
#endif // FLATBUFFERS_UTIL_H_

370
flatbuffers/vector.h Normal file
View File

@ -0,0 +1,370 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_H_
#define FLATBUFFERS_VECTOR_H_
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
namespace flatbuffers {
struct String;
// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
// T is the (possibly indirect) element type stored in the vector; IT is the
// type Get() hands back (a scalar value, or a pointer for tables/strings).
// The iterator holds a raw byte pointer and steps it by
// IndirectHelper<T>::element_stride, decoding on dereference.
template<typename T, typename IT> struct VectorIterator {
  typedef std::random_access_iterator_tag iterator_category;
  typedef IT value_type;
  typedef ptrdiff_t difference_type;
  typedef IT *pointer;
  typedef IT &reference;

  // Position the iterator at element `i` of the buffer starting at `data`.
  VectorIterator(const uint8_t *data, uoffset_t i)
      : data_(data + IndirectHelper<T>::element_stride * i) {}
  VectorIterator(const VectorIterator &other) : data_(other.data_) {}
  VectorIterator() : data_(nullptr) {}

  VectorIterator &operator=(const VectorIterator &other) {
    data_ = other.data_;
    return *this;
  }

  VectorIterator &operator=(VectorIterator &&other) {
    data_ = other.data_;
    return *this;
  }

  bool operator==(const VectorIterator &other) const {
    return data_ == other.data_;
  }

  bool operator<(const VectorIterator &other) const {
    return data_ < other.data_;
  }

  bool operator!=(const VectorIterator &other) const {
    return data_ != other.data_;
  }

  // Distance in elements, not bytes.
  difference_type operator-(const VectorIterator &other) const {
    return (data_ - other.data_) / IndirectHelper<T>::element_stride;
  }

  // Note: return type is incompatible with the standard
  // `reference operator*()`.
  IT operator*() const { return IndirectHelper<T>::Read(data_, 0); }

  // Note: return type is incompatible with the standard
  // `pointer operator->()`.
  IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }

  VectorIterator &operator++() {
    data_ += IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator operator++(int) {
    VectorIterator temp(data_, 0);
    data_ += IndirectHelper<T>::element_stride;
    return temp;
  }

  VectorIterator operator+(const uoffset_t &offset) const {
    return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
                          0);
  }

  VectorIterator &operator+=(const uoffset_t &offset) {
    data_ += offset * IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator &operator--() {
    data_ -= IndirectHelper<T>::element_stride;
    return *this;
  }

  VectorIterator operator--(int) {
    VectorIterator temp(data_, 0);
    data_ -= IndirectHelper<T>::element_stride;
    return temp;
  }

  VectorIterator operator-(const uoffset_t &offset) const {
    return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
                          0);
  }

  VectorIterator &operator-=(const uoffset_t &offset) {
    data_ -= offset * IndirectHelper<T>::element_stride;
    return *this;
  }

 private:
  // Raw pointer into the buffer at the current element; all arithmetic above
  // moves it in element_stride steps.
  const uint8_t *data_;
};
// Reverse adapter over VectorIterator. std::reverse_iterator's own
// operator*/operator-> return references, but the wrapped iterator yields
// values, so both are re-declared here with matching value-returning
// signatures (copy `current`, step back one, dereference).
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
  explicit VectorReverseIterator(Iterator iter)
      : std::reverse_iterator<Iterator>(iter) {}

  // Note: return type is incompatible with the standard
  // `reference operator*()`.
  typename Iterator::value_type operator*() const {
    auto it = std::reverse_iterator<Iterator>::current;
    --it;
    return *it;
  }

  // Note: return type is incompatible with the standard
  // `pointer operator->()`.
  typename Iterator::value_type operator->() const {
    auto it = std::reverse_iterator<Iterator>::current;
    --it;
    return *it;
  }
};
// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
// Read-only accessor over a serialized FlatBuffers vector. Instances are
// never constructed; a Vector<T>* is a reinterpreted pointer at the vector's
// little-endian length field, with the elements laid out immediately after.
template<typename T> class Vector {
 public:
  typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
      iterator;
  typedef VectorIterator<T, typename IndirectHelper<T>::return_type>
      const_iterator;
  typedef VectorReverseIterator<iterator> reverse_iterator;
  typedef VectorReverseIterator<const_iterator> const_reverse_iterator;

  typedef typename flatbuffers::bool_constant<flatbuffers::is_scalar<T>::value>
      scalar_tag;

  // A span can alias the raw bytes only for scalars stored in host order
  // (little-endian machine, or single-byte elements).
  static FLATBUFFERS_CONSTEXPR bool is_span_observable =
      scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);

  // Number of elements (length field is stored little-endian).
  uoffset_t size() const { return EndianScalar(length_); }

  // Deprecated: use size(). Here for backwards compatibility.
  FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
  uoffset_t Length() const { return size(); }

  typedef typename IndirectHelper<T>::return_type return_type;
  typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
  typedef return_type value_type;

  // Element access; asserts i is in range.
  return_type Get(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return IndirectHelper<T>::Read(Data(), i);
  }

  return_type operator[](uoffset_t i) const { return Get(i); }

  // If this is a Vector of enums, T will be its storage type, not the enum
  // type. This function makes it convenient to retrieve value with enum
  // type E.
  template<typename E> E GetEnum(uoffset_t i) const {
    return static_cast<E>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is the right type!
  template<typename U> const U *GetAs(uoffset_t i) const {
    return reinterpret_cast<const U *>(Get(i));
  }

  // If this a vector of unions, this does the cast for you. There's no check
  // to make sure this is actually a string!
  const String *GetAsString(uoffset_t i) const {
    return reinterpret_cast<const String *>(Get(i));
  }

  // Raw byte-offset access into the element storage (for structs).
  const void *GetStructFromOffset(size_t o) const {
    return reinterpret_cast<const void *>(Data() + o);
  }

  iterator begin() { return iterator(Data(), 0); }
  const_iterator begin() const { return const_iterator(Data(), 0); }

  iterator end() { return iterator(Data(), size()); }
  const_iterator end() const { return const_iterator(Data(), size()); }

  reverse_iterator rbegin() { return reverse_iterator(end()); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }

  reverse_iterator rend() { return reverse_iterator(begin()); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const { return begin(); }

  const_iterator cend() const { return end(); }

  const_reverse_iterator crbegin() const { return rbegin(); }

  const_reverse_iterator crend() const { return rend(); }

  // Change elements if you have a non-const pointer to this object.
  // Scalars only. See reflection.h, and the documentation.
  void Mutate(uoffset_t i, const T &val) {
    FLATBUFFERS_ASSERT(i < size());
    WriteScalar(data() + i, val);
  }

  // Change an element of a vector of tables (or strings).
  // "val" points to the new table/string, as you can obtain from
  // e.g. reflection::AddFlatBuffer().
  void MutateOffset(uoffset_t i, const uint8_t *val) {
    FLATBUFFERS_ASSERT(i < size());
    static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
    // Offsets are stored relative to their own location.
    WriteScalar(data() + i,
                static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
  }

  // Get a mutable pointer to tables/strings inside this vector.
  mutable_return_type GetMutableObject(uoffset_t i) const {
    FLATBUFFERS_ASSERT(i < size());
    return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
  }

  // The raw data in little endian format. Use with care.
  const uint8_t *Data() const {
    return reinterpret_cast<const uint8_t *>(&length_ + 1);
  }

  uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }

  // Similarly, but typed, much like std::vector::data
  const T *data() const { return reinterpret_cast<const T *>(Data()); }
  T *data() { return reinterpret_cast<T *>(Data()); }

  // Binary-searches for `key`; the vector must be sorted by key
  // (std::bsearch precondition). Returns nullptr when not found.
  template<typename K> return_type LookupByKey(K key) const {
    void *search_result = std::bsearch(
        &key, Data(), size(), IndirectHelper<T>::element_stride, KeyCompare<K>);

    if (!search_result) {
      return nullptr;  // Key not found.
    }

    const uint8_t *element = reinterpret_cast<const uint8_t *>(search_result);

    return IndirectHelper<T>::Read(element, 0);
  }

  template<typename K> mutable_return_type MutableLookupByKey(K key) {
    return const_cast<mutable_return_type>(LookupByKey(key));
  }

 protected:
  // This class is only used to access pre-existing data. Don't ever
  // try to construct these manually.
  Vector();

  uoffset_t length_;

 private:
  // This class is a pointer. Copying will therefore create an invalid object.
  // Private and unimplemented copy constructor.
  Vector(const Vector &);
  Vector &operator=(const Vector &);

  // Adapter so std::bsearch can call T::KeyCompareWithValue.
  template<typename K> static int KeyCompare(const void *ap, const void *bp) {
    const K *key = reinterpret_cast<const K *>(ap);
    const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
    auto table = IndirectHelper<T>::Read(data, 0);

    // std::bsearch compares with the operands transposed, so we negate the
    // result here.
    return -table->KeyCompareWithValue(*key);
  }
};
// View a Vector's element storage as a mutable span. Only legal when the
// bytes can be aliased directly (see Vector::is_span_observable).
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
    FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<U>(vec.data(), vec.size());
}

// Const-qualified overload of the above.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const U> make_span(
    const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::is_span_observable,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<const U>(vec.data(), vec.size());
}

// View a scalar Vector's storage as raw bytes (length in bytes, not
// elements).
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<uint8_t> make_bytes_span(
    Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::scalar_tag::value,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<uint8_t>(vec.Data(), vec.size() * sizeof(U));
}

// Const-qualified overload of the above.
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<const uint8_t> make_bytes_span(
    const Vector<U> &vec) FLATBUFFERS_NOEXCEPT {
  static_assert(Vector<U>::scalar_tag::value,
                "wrong type U, only LE-scalar, or byte types are allowed");
  return span<const uint8_t>(vec.Data(), vec.size() * sizeof(U));
}
// Represent a vector much like the template above, but in this case we
// don't know what the element types are (used with reflection.h).
class VectorOfAny {
 public:
  // Number of elements (length field is stored little-endian).
  uoffset_t size() const { return EndianScalar(length_); }

  // Raw element bytes start immediately after the length field.
  const uint8_t *Data() const {
    return reinterpret_cast<const uint8_t *>(&length_ + 1);
  }
  uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }

 protected:
  // Never constructed; only reinterpreted over serialized data.
  VectorOfAny();

  uoffset_t length_;

 private:
  // This class is a pointer; copying would create an invalid object.
  VectorOfAny(const VectorOfAny &);
  VectorOfAny &operator=(const VectorOfAny &);
};
// Up-cast a vector of table offsets to a vector of base-class offsets; the
// static_assert restricts it to genuine base/derived pairs.
template<typename T, typename U>
Vector<Offset<T>> *VectorCast(Vector<Offset<U>> *ptr) {
  static_assert(std::is_base_of<T, U>::value, "Unrelated types");
  return reinterpret_cast<Vector<Offset<T>> *>(ptr);
}

// Const-qualified overload of the above.
template<typename T, typename U>
const Vector<Offset<T>> *VectorCast(const Vector<Offset<U>> *ptr) {
  static_assert(std::is_base_of<T, U>::value, "Unrelated types");
  return reinterpret_cast<const Vector<Offset<T>> *>(ptr);
}
// Convenient helper function to get the length of any vector, regardless
// of whether it is null or not (the field is not set).
template<typename T> static inline size_t VectorLength(const Vector<T> *v) {
  if (v == nullptr) return 0;
  return v->size();
}
} // namespace flatbuffers
#endif  // FLATBUFFERS_VECTOR_H_

View File

@ -0,0 +1,271 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
#include "flatbuffers/base.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"
namespace flatbuffers {
// This is a minimal replication of std::vector<uint8_t> functionality,
// except growing from higher to lower addresses. i.e push_back() inserts data
// in the lowest address in the vector.
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
 public:
  // Nothing is allocated up front; the first make_space()/ensure_space()
  // triggers the initial allocation of at least `initial_size` bytes,
  // rounded up to `buffer_minalign`.
  explicit vector_downward(size_t initial_size, Allocator *allocator,
                           bool own_allocator, size_t buffer_minalign)
      : allocator_(allocator),
        own_allocator_(own_allocator),
        initial_size_(initial_size),
        buffer_minalign_(buffer_minalign),
        reserved_(0),
        size_(0),
        buf_(nullptr),
        cur_(nullptr),
        scratch_(nullptr) {}

  // Move constructor: steals the buffer and (if owned) the allocator,
  // leaving `other` empty but still usable.
  vector_downward(vector_downward &&other)
      // clang-format on
      : allocator_(other.allocator_),
        own_allocator_(other.own_allocator_),
        initial_size_(other.initial_size_),
        buffer_minalign_(other.buffer_minalign_),
        reserved_(other.reserved_),
        size_(other.size_),
        buf_(other.buf_),
        cur_(other.cur_),
        scratch_(other.scratch_) {
    // No change in other.allocator_
    // No change in other.initial_size_
    // No change in other.buffer_minalign_
    other.own_allocator_ = false;
    other.reserved_ = 0;
    other.buf_ = nullptr;
    other.cur_ = nullptr;
    other.scratch_ = nullptr;
  }

  vector_downward &operator=(vector_downward &&other) {
    // Move construct a temporary and swap idiom
    vector_downward temp(std::move(other));
    swap(temp);
    return *this;
  }

  ~vector_downward() {
    clear_buffer();
    clear_allocator();
  }

  // Deallocates the buffer and resets to the freshly-constructed state.
  void reset() {
    clear_buffer();
    clear();
  }

  // Empties the vector (and scratchpad) but keeps the allocation.
  void clear() {
    if (buf_) {
      cur_ = buf_ + reserved_;
    } else {
      reserved_ = 0;
      cur_ = nullptr;
    }
    size_ = 0;
    clear_scratch();
  }

  void clear_scratch() { scratch_ = buf_; }

  void clear_allocator() {
    if (own_allocator_ && allocator_) { delete allocator_; }
    allocator_ = nullptr;
    own_allocator_ = false;
  }

  void clear_buffer() {
    if (buf_) Deallocate(allocator_, buf_, reserved_);
    buf_ = nullptr;
  }

  // Relinquish the pointer to the caller.
  // On return, `allocated_bytes` is the allocation size and `offset` is
  // where the downward-grown data begins inside it.
  uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
    auto *buf = buf_;
    allocated_bytes = reserved_;
    offset = static_cast<size_t>(cur_ - buf_);

    // release_raw only relinquishes the buffer ownership.
    // Does not deallocate or reset the allocator. Destructor will do that.
    buf_ = nullptr;
    clear();
    return buf;
  }

  // Relinquish the pointer to the caller.
  DetachedBuffer release() {
    // allocator ownership (if any) is transferred to DetachedBuffer.
    DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
                      size());
    if (own_allocator_) {
      allocator_ = nullptr;
      own_allocator_ = false;
    }
    buf_ = nullptr;
    clear();
    return fb;
  }

  // Guarantees at least `len` free bytes between scratch_ and cur_,
  // reallocating if necessary. Returns `len` unchanged.
  size_t ensure_space(size_t len) {
    FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
    if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
    // Beyond this, signed offsets may not have enough range:
    // (FlatBuffers > 2GB not supported).
    FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
    return len;
  }

  // Reserves `len` bytes at the (downward-growing) front and returns a
  // pointer to them. make_space(0) returns cur_ without allocating.
  inline uint8_t *make_space(size_t len) {
    if (len) {
      ensure_space(len);
      cur_ -= len;
      size_ += static_cast<uoffset_t>(len);
    }
    return cur_;
  }

  // Returns nullptr if using the DefaultAllocator.
  Allocator *get_custom_allocator() { return allocator_; }

  inline uoffset_t size() const { return size_; }

  uoffset_t scratch_size() const {
    return static_cast<uoffset_t>(scratch_ - buf_);
  }

  size_t capacity() const { return reserved_; }

  uint8_t *data() const {
    FLATBUFFERS_ASSERT(cur_);
    return cur_;
  }

  uint8_t *scratch_data() const {
    FLATBUFFERS_ASSERT(buf_);
    return buf_;
  }

  uint8_t *scratch_end() const {
    FLATBUFFERS_ASSERT(scratch_);
    return scratch_;
  }

  // Address `offset` bytes back from the high end of the allocation.
  uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }

  void push(const uint8_t *bytes, size_t num) {
    if (num > 0) { memcpy(make_space(num), bytes, num); }
  }

  // Specialized version of push() that avoids memcpy call for small data.
  template<typename T> void push_small(const T &little_endian_t) {
    make_space(sizeof(T));
    *reinterpret_cast<T *>(cur_) = little_endian_t;
  }

  // Appends to the upward-growing scratchpad at the low end of the buffer.
  template<typename T> void scratch_push_small(const T &t) {
    ensure_space(sizeof(T));
    *reinterpret_cast<T *>(scratch_) = t;
    scratch_ += sizeof(T);
  }

  // fill() is most frequently called with small byte counts (<= 4),
  // which is why we're using loops rather than calling memset.
  void fill(size_t zero_pad_bytes) {
    make_space(zero_pad_bytes);
    for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
  }

  // Version for when we know the size is larger.
  // Precondition: zero_pad_bytes > 0
  void fill_big(size_t zero_pad_bytes) {
    memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
  }

  // Discards the most recently pushed bytes (the vector grows downward,
  // so popping moves cur_ up).
  void pop(size_t bytes_to_remove) {
    cur_ += bytes_to_remove;
    size_ -= static_cast<uoffset_t>(bytes_to_remove);
  }

  void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }

  void swap(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
    swap(initial_size_, other.initial_size_);
    swap(buffer_minalign_, other.buffer_minalign_);
    swap(reserved_, other.reserved_);
    swap(size_, other.size_);
    swap(buf_, other.buf_);
    swap(cur_, other.cur_);
    swap(scratch_, other.scratch_);
  }

  void swap_allocator(vector_downward &other) {
    using std::swap;
    swap(allocator_, other.allocator_);
    swap(own_allocator_, other.own_allocator_);
  }

 private:
  // You shouldn't really be copying instances of this class.
  FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
  FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));

  Allocator *allocator_;
  bool own_allocator_;
  size_t initial_size_;
  size_t buffer_minalign_;
  size_t reserved_;
  uoffset_t size_;
  uint8_t *buf_;
  uint8_t *cur_;  // Points at location between empty (below) and used (above).
  uint8_t *scratch_;  // Points to the end of the scratchpad in use.

  // Grows the allocation so at least `len` more bytes fit, preserving the
  // main data at the high end and the scratchpad at the low end.
  void reallocate(size_t len) {
    auto old_reserved = reserved_;
    auto old_size = size();
    auto old_scratch_size = scratch_size();
    // Grow by at least 50% (or initial_size_ on first allocation), rounded
    // up to the configured alignment.
    reserved_ +=
        (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
    reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
    if (buf_) {
      buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
                                old_size, old_scratch_size);
    } else {
      buf_ = Allocate(allocator_, reserved_);
    }
    cur_ = buf_ + reserved_ - old_size;
    scratch_ = buf_ + old_scratch_size;
  }
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_

270
flatbuffers/verifier.h Normal file
View File

@ -0,0 +1,270 @@
/*
* Copyright 2021 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLATBUFFERS_VERIFIER_H_
#define FLATBUFFERS_VERIFIER_H_
#include "flatbuffers/base.h"
#include "flatbuffers/util.h"
#include "flatbuffers/vector.h"
namespace flatbuffers {
// Helper class to verify the integrity of a FlatBuffer
class Verifier FLATBUFFERS_FINAL_CLASS {
 public:
  // `_max_depth` bounds table nesting and `_max_tables` bounds the total
  // number of tables visited, to stop maliciously deep/self-referential
  // buffers from exhausting the stack or CPU.
  Verifier(const uint8_t *buf, size_t buf_len, uoffset_t _max_depth = 64,
           uoffset_t _max_tables = 1000000, bool _check_alignment = true)
      : buf_(buf),
        size_(buf_len),
        depth_(0),
        max_depth_(_max_depth),
        num_tables_(0),
        max_tables_(_max_tables),
        upper_bound_(0),
        check_alignment_(_check_alignment) {
    FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
  }

  // Central location where any verification failures register.
  bool Check(bool ok) const {
    // clang-format off
    #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
      FLATBUFFERS_ASSERT(ok);
    #endif
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      if (!ok)
        upper_bound_ = 0;
    #endif
    // clang-format on
    return ok;
  }

  // Verify any range within the buffer.
  bool Verify(size_t elem, size_t elem_len) const {
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      auto upper_bound = elem + elem_len;
      if (upper_bound_ < upper_bound)
        upper_bound_ =  upper_bound;
    #endif
    // clang-format on
    // Overflow-safe form of `elem + elem_len <= size_`.
    return Check(elem_len < size_ && elem <= size_ - elem_len);
  }

  template<typename T> bool VerifyAlignment(size_t elem) const {
    return Check((elem & (sizeof(T) - 1)) == 0 || !check_alignment_);
  }

  // Verify a range indicated by sizeof(T).
  template<typename T> bool Verify(size_t elem) const {
    return VerifyAlignment<T>(elem) && Verify(elem, sizeof(T));
  }

  bool VerifyFromPointer(const uint8_t *p, size_t len) {
    auto o = static_cast<size_t>(p - buf_);
    return Verify(o, len);
  }

  // Verify relative to a known-good base pointer.
  bool Verify(const uint8_t *base, voffset_t elem_off, size_t elem_len) const {
    return Verify(static_cast<size_t>(base - buf_) + elem_off, elem_len);
  }

  template<typename T>
  bool Verify(const uint8_t *base, voffset_t elem_off) const {
    return Verify(static_cast<size_t>(base - buf_) + elem_off, sizeof(T));
  }

  // Verify a pointer (may be NULL) of a table type.
  template<typename T> bool VerifyTable(const T *table) {
    return !table || table->Verify(*this);
  }

  // Verify a pointer (may be NULL) of any vector type.
  template<typename T> bool VerifyVector(const Vector<T> *vec) const {
    return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
                                        sizeof(T));
  }

  // Verify a pointer (may be NULL) of a vector to struct.
  template<typename T> bool VerifyVector(const Vector<const T *> *vec) const {
    return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
  }

  // Verify a pointer (may be NULL) to string.
  bool VerifyString(const String *str) const {
    size_t end;
    return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
                                         1, &end) &&
                    Verify(end, 1) &&           // Must have terminator
                    Check(buf_[end] == '\0'));  // Terminating byte must be 0.
  }

  // Common code between vectors and strings.
  bool VerifyVectorOrString(const uint8_t *vec, size_t elem_size,
                            size_t *end = nullptr) const {
    auto veco = static_cast<size_t>(vec - buf_);
    // Check we can read the size field.
    if (!Verify<uoffset_t>(veco)) return false;
    // Check the whole array. If this is a string, the byte past the array
    // must be 0.
    auto size = ReadScalar<uoffset_t>(vec);
    auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
    if (!Check(size < max_elems))
      return false;  // Protect against byte_size overflowing.
    auto byte_size = sizeof(size) + elem_size * size;
    if (end) *end = veco + byte_size;
    return Verify(veco, byte_size);
  }

  // Special case for string contents, after the above has been called.
  bool VerifyVectorOfStrings(const Vector<Offset<String>> *vec) const {
    if (vec) {
      for (uoffset_t i = 0; i < vec->size(); i++) {
        if (!VerifyString(vec->Get(i))) return false;
      }
    }
    return true;
  }

  // Special case for table contents, after the above has been called.
  template<typename T> bool VerifyVectorOfTables(const Vector<Offset<T>> *vec) {
    if (vec) {
      for (uoffset_t i = 0; i < vec->size(); i++) {
        if (!vec->Get(i)->Verify(*this)) return false;
      }
    }
    return true;
  }

  // Verifies a table's vtable pointer and the vtable itself, plus the
  // depth/table-count budget.
  __supress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart(
      const uint8_t *table) {
    // Check the vtable offset.
    auto tableo = static_cast<size_t>(table - buf_);
    if (!Verify<soffset_t>(tableo)) return false;
    // This offset may be signed, but doing the subtraction unsigned always
    // gives the result we want.
    auto vtableo = tableo - static_cast<size_t>(ReadScalar<soffset_t>(table));
    // Check the vtable size field, then check vtable fits in its entirety.
    return VerifyComplexity() && Verify<voffset_t>(vtableo) &&
           VerifyAlignment<voffset_t>(ReadScalar<voffset_t>(buf_ + vtableo)) &&
           Verify(vtableo, ReadScalar<voffset_t>(buf_ + vtableo));
  }

  // Verifies a root object at `start`, optionally checking the 4-byte file
  // identifier first.
  template<typename T>
  bool VerifyBufferFromStart(const char *identifier, size_t start) {
    if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
                              BufferHasIdentifier(buf_ + start, identifier)))) {
      return false;
    }

    // Call T::Verify, which must be in the generated code for this type.
    auto o = VerifyOffset(start);
    return o && reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
           && GetComputedSize()
    #endif
        ;
    // clang-format on
  }

  template<typename T>
  bool VerifyNestedFlatBuffer(const Vector<uint8_t> *buf,
                              const char *identifier) {
    // A null nested buffer (field absent) is trivially valid.
    if (!buf) return true;
    Verifier nested_verifier(buf->data(), buf->size());
    return nested_verifier.VerifyBuffer<T>(identifier);
  }

  // Verify this whole buffer, starting with root type T.
  template<typename T> bool VerifyBuffer() { return VerifyBuffer<T>(nullptr); }

  template<typename T> bool VerifyBuffer(const char *identifier) {
    return VerifyBufferFromStart<T>(identifier, 0);
  }

  template<typename T> bool VerifySizePrefixedBuffer(const char *identifier) {
    return Verify<uoffset_t>(0U) &&
           ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t) &&
           VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
  }

  // Reads and validates the uoffset_t at `start`; returns it, or 0 when
  // anything about it is invalid (0 is never a legal offset).
  uoffset_t VerifyOffset(size_t start) const {
    if (!Verify<uoffset_t>(start)) return 0;
    auto o = ReadScalar<uoffset_t>(buf_ + start);
    // May not point to itself.
    if (!Check(o != 0)) return 0;
    // Can't wrap around / buffers are max 2GB.
    if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
    // Must be inside the buffer to create a pointer from it (pointer outside
    // buffer is UB).
    if (!Verify(start + o, 1)) return 0;
    return o;
  }

  uoffset_t VerifyOffset(const uint8_t *base, voffset_t start) const {
    return VerifyOffset(static_cast<size_t>(base - buf_) + start);
  }

  // Called at the start of a table to increase counters measuring data
  // structure depth and amount, and possibly bails out with false if
  // limits set by the constructor have been hit. Needs to be balanced
  // with EndTable().
  bool VerifyComplexity() {
    depth_++;
    num_tables_++;
    return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_);
  }

  // Called at the end of a table to pop the depth count.
  bool EndTable() {
    depth_--;
    return true;
  }

  // Returns the message size in bytes
  size_t GetComputedSize() const {
    // clang-format off
    #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
      uintptr_t size = upper_bound_;
      // Align the size to uoffset_t
      size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1);
      return (size > size_) ?  0 : size;
    #else
      // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work.
      (void)upper_bound_;
      FLATBUFFERS_ASSERT(false);
      return 0;
    #endif
    // clang-format on
  }

 private:
  const uint8_t *buf_;
  size_t size_;
  uoffset_t depth_;
  uoffset_t max_depth_;
  uoffset_t num_tables_;
  uoffset_t max_tables_;
  mutable size_t upper_bound_;  // Highest byte touched (when size tracking is on).
  bool check_alignment_;
};
} // namespace flatbuffers
#endif // FLATBUFFERS_VERIFIER_H_

326
flatgeobuf.cpp Normal file
View File

@ -0,0 +1,326 @@
#include <stdio.h>
#include "serial.hpp"
#include <iostream>
#include "projection.hpp"
#include "flatgeobuf/feature_generated.h"
#include "flatgeobuf/header_generated.h"
#include "milo/dtoa_milo.h"
#include "main.hpp"
// FlatGeobuf magic bytes: "fgb" 0x03 "fgb" 0x01.
static constexpr uint8_t magicbytes[8] = { 0x66, 0x67, 0x62, 0x03, 0x66, 0x67, 0x62, 0x01 };

// One node of the packed R-tree index: a bounding box plus a byte offset.
// The member layout must stay as-is; sizeof(NodeItem) is used to compute how
// many bytes of index to skip over.
struct NodeItem {
	double minX;
	double minY;
	double maxX;
	double maxY;
	uint64_t offset;
};

// copied from https://github.com/flatgeobuf/flatgeobuf/blob/master/src/cpp/packedrtree.cpp#L365
// Total size in bytes of a packed R-tree over `numItems` leaf items with
// branching factor `nodeSize`. Throws std::invalid_argument for a node size
// below 2 or zero items, std::overflow_error for more than 2^56 items.
uint64_t PackedRTreeSize(const uint64_t numItems, const uint16_t nodeSize)
{
	if (nodeSize < 2)
		throw std::invalid_argument("Node size must be at least 2");
	if (numItems == 0)
		throw std::invalid_argument("Number of items must be greater than 0");
	const uint16_t branching = std::min(std::max(nodeSize, static_cast<uint16_t>(2)), static_cast<uint16_t>(65535));
	// limit so that resulting size in bytes can be represented by uint64_t
	if (numItems > static_cast<uint64_t>(1) << 56)
		throw std::overflow_error("Number of items must be less than 2^56");
	uint64_t levelCount = numItems;
	uint64_t totalNodes = numItems;
	// Walk up the tree, adding the node count of each level until the root.
	// (Runs at least once so a single item still gets its root node.)
	for (;;) {
		levelCount = (levelCount + branching - 1) / branching;
		totalNodes += levelCount;
		if (levelCount == 1) break;
	}
	return totalNodes * sizeof(NodeItem);
}
// Converts a FlatGeobuf Point/MultiPoint flat coordinate array (x0,y0,x1,y1,...)
// into a drawvec of VT_MOVETO points, projecting each pair into 32-bit
// world coordinates.
drawvec readPoints(const FlatGeobuf::Geometry *geometry) {
	auto coords = geometry->xy();
	drawvec out;
	for (unsigned int i = 0; i < coords->size(); i += 2) {
		long long wx, wy;
		projection->project(coords->Get(i), coords->Get(i + 1), 32, &wx, &wy);
		out.push_back(draw(VT_MOVETO, wx, wy));
	}
	return out;
}
// Converts a FlatGeobuf LineString/MultiLineString/Polygon flat coordinate
// array into a drawvec: each part begins with a VT_MOVETO and continues with
// VT_LINETOs. Part boundaries come from the optional `ends` array —
// presumably cumulative coordinate-pair end indices per the FlatGeobuf spec
// (the code matches a boundary when i == ends->Get(current_end) * 2).
drawvec readLinePart(const FlatGeobuf::Geometry *geometry) {
	auto xy = geometry->xy();
	auto ends = geometry->ends();
	size_t current_end = 0;
	drawvec dv;
	for (unsigned int i = 0; i < xy->size(); i+=2) {
		long long x, y;
		projection->project(xy->Get(i), xy->Get(i+1), 32, &x, &y);
		// Start a new part at the first coordinate or at an `ends` boundary.
		if (i == 0 || (ends != NULL && current_end < ends->size() && i == ends->Get(current_end)*2)) {
			dv.push_back(draw(VT_MOVETO, x, y));
			// Advance past the boundary just matched (not at i == 0, which
			// is the implicit start of the first part).
			if (i > 0) current_end++;
		} else {
			dv.push_back(draw(VT_LINETO, x, y));
		}
	}
	return dv;
}
// Converts a FlatGeobuf geometry into tippecanoe's drawvec representation,
// dispatching on the geometry type declared in the file header.
// MultiPolygons arrive as a Parts vector of Polygon geometries; for those
// the top-level XY array is ignored and each part is closed with
// VT_CLOSEPATH.  Exits on geometry types tippecanoe cannot represent.
drawvec readGeometry(const FlatGeobuf::Geometry *geometry, FlatGeobuf::GeometryType h_geometry_type) {
	// Point was previously missing here even though readFeature() treats it
	// as a supported (point-typed) geometry, so single-Point files errored
	// out as "unsupported".
	if (h_geometry_type == FlatGeobuf::GeometryType::Point ||
	    h_geometry_type == FlatGeobuf::GeometryType::MultiPoint) {
		return readPoints(geometry);
	} else if (h_geometry_type == FlatGeobuf::GeometryType::LineString ||
		   h_geometry_type == FlatGeobuf::GeometryType::MultiLineString ||
		   h_geometry_type == FlatGeobuf::GeometryType::Polygon) {
		// All three encode their rings/parts via the `ends` array.
		return readLinePart(geometry);
	} else if (h_geometry_type == FlatGeobuf::GeometryType::MultiPolygon) {
		// Parse the nested Parts; the top-level XY array is unused.
		drawvec dv;
		for (size_t part = 0; part < geometry->parts()->size(); part++) {
			drawvec dv2 = readLinePart(geometry->parts()->Get(part));
			for (size_t k = 0; k < dv2.size(); k++) {
				dv.push_back(dv2[k]);
			}
			dv.push_back(draw(VT_CLOSEPATH, 0, 0));
		}
		return dv;
	} else {
		fprintf(stderr, "flatgeobuf has unsupported geometry type %u\n", (unsigned int)h_geometry_type);
		exit(EXIT_FAILURE);
	}
}
// Serializes one FlatGeobuf feature: converts its geometry to a drawvec,
// decodes its packed property table against the column schema declared in
// the file header, and hands the result to serialize_feature().
// Exits on geometry or column types tippecanoe cannot represent.
void readFeature(const FlatGeobuf::Feature *feature, FlatGeobuf::GeometryType h_geometry_type, const std::vector<std::string> &h_column_names, const std::vector<FlatGeobuf::ColumnType> &h_column_types, struct serialization_state *sst, int layer, std::string layername) {
	drawvec dv = readGeometry(feature->geometry(), h_geometry_type);

	// Map the FlatGeobuf geometry type onto tippecanoe's
	// point(1)/line(2)/polygon(3) feature types.
	int drawvec_type = -1;

	switch (h_geometry_type) {
		case FlatGeobuf::GeometryType::Point :
		case FlatGeobuf::GeometryType::MultiPoint :
			drawvec_type = 1;
			break;
		case FlatGeobuf::GeometryType::LineString :
		case FlatGeobuf::GeometryType::MultiLineString :
			drawvec_type = 2;
			break;
		case FlatGeobuf::GeometryType::Polygon :
		case FlatGeobuf::GeometryType::MultiPolygon :
			drawvec_type = 3;
			break;
		case FlatGeobuf::GeometryType::Unknown :
		case FlatGeobuf::GeometryType::GeometryCollection :
		default:
			fprintf(stderr, "flatgeobuf has unsupported geometry type %u\n", (unsigned int)h_geometry_type);
			exit(EXIT_FAILURE);
	}

	serial_feature sf;

	sf.layer = layer;
	sf.layername = layername;
	sf.segment = sst->segment;
	sf.has_id = false;
	sf.has_tippecanoe_minzoom = false;
	sf.has_tippecanoe_maxzoom = false;
	sf.feature_minzoom = false;
	sf.seq = (*sst->layer_seq);
	sf.geometry = dv;
	sf.t = drawvec_type;

	std::vector<std::string> full_keys;
	std::vector<serial_val> full_values;

	// Properties are a packed byte array: a little-endian uint16 column
	// index followed by a value whose encoding depends on the column type
	// from the header (assumes a tabular schema).  A feature with no
	// properties table is legal; previously this dereferenced a null
	// vector.
	size_t p_pos = 0;
	while (feature->properties() != NULL && p_pos < feature->properties()->size()) {
		uint16_t col_idx;
		memcpy(&col_idx, feature->properties()->data() + p_pos, sizeof(col_idx));
		// Guard against a corrupt or inconsistent column index before
		// indexing into the schema vectors.
		if (col_idx >= h_column_types.size()) {
			fprintf(stderr, "flatgeobuf column index out of range\n");
			exit(EXIT_FAILURE);
		}
		FlatGeobuf::ColumnType col_type = h_column_types[col_idx];
		serial_val sv;
		if (col_type == FlatGeobuf::ColumnType::Int) {
			sv.type = mvt_double; // this is quirky
			int32_t int_val;
			memcpy(&int_val, feature->properties()->data() + p_pos + sizeof(uint16_t), sizeof(int_val));
			sv.s = std::to_string(int_val);
			p_pos += sizeof(uint16_t) + sizeof(int_val);
		} else if (col_type == FlatGeobuf::ColumnType::Long) {
			sv.type = mvt_double; // this is quirky
			// FlatGeobuf Long is a signed 64-bit integer; reading it as
			// uint64_t mangled negative values.
			int64_t long_val;
			memcpy(&long_val, feature->properties()->data() + p_pos + sizeof(uint16_t), sizeof(long_val));
			sv.s = std::to_string(long_val);
			p_pos += sizeof(uint16_t) + sizeof(long_val);
		} else if (col_type == FlatGeobuf::ColumnType::Double) {
			sv.type = mvt_double;
			double double_val;
			memcpy(&double_val, feature->properties()->data() + p_pos + sizeof(uint16_t), sizeof(double_val));
			sv.s = milo::dtoa_milo(double_val);
			p_pos += sizeof(uint16_t) + sizeof(double_val);
		} else if (col_type == FlatGeobuf::ColumnType::String) {
			sv.type = mvt_string;
			// Strings carry their own little-endian uint32 length prefix.
			uint32_t str_len;
			memcpy(&str_len, feature->properties()->data() + p_pos + sizeof(uint16_t), sizeof(str_len));
			std::string s{reinterpret_cast<const char*>(feature->properties()->data() + p_pos + sizeof(uint16_t) + sizeof(uint32_t)), str_len};
			sv.s = s;
			p_pos += sizeof(uint16_t) + sizeof(uint32_t) + str_len;
		} else {
			fprintf(stderr, "flatgeobuf has unsupported column type %u\n", (unsigned int)col_type);
			exit(EXIT_FAILURE);
		}
		full_keys.push_back(h_column_names[col_idx]);
		full_values.push_back(sv);
	}

	sf.full_keys = full_keys;
	sf.full_values = full_values;

	serialize_feature(sst, sf);
}
// One deferred feature: everything readFeature() needs, captured by
// pointer so the queue stays cheap to fill.  The pointed-to feature
// buffer and schema vectors must stay alive until fgbRunQueue() drains
// the queue.
struct queued_feature {
	const FlatGeobuf::Feature *feature = NULL;
	FlatGeobuf::GeometryType h_geometry_type = FlatGeobuf::GeometryType::Unknown;
	const std::vector<std::string> *h_column_names = NULL;
	const std::vector<FlatGeobuf::ColumnType> *h_column_types = NULL;
	std::vector<struct serialization_state> *sst = NULL;
	int layer = 0;
	std::string layername = "";
};

// Features accumulated by queueFeature() until fgbRunQueue() drains them.
static std::vector<queued_feature> feature_queue;

// Work slice for one parser thread: the [start, end) range of
// feature_queue it owns, and the serialization segment it writes into.
struct queue_run_arg {
	size_t start;
	size_t end;
	size_t segment;

	queue_run_arg(size_t start1, size_t end1, size_t segment1)
	    : start(start1), end(end1), segment(segment1) {
	}
};
// Thread entry point: parses the half-open range [start, end) of the
// global feature_queue into the serializer slot for this thread's segment.
void *fgb_run_parse_feature(void *v) {
	struct queue_run_arg *arg = (struct queue_run_arg *) v;

	for (size_t idx = arg->start; idx < arg->end; idx++) {
		struct queued_feature &item = feature_queue[idx];
		readFeature(item.feature, item.h_geometry_type, *item.h_column_names, *item.h_column_types, &(*item.sst)[arg->segment], item.layer, item.layername);
	}

	return NULL;
}
// Drains the queued features by splitting them evenly across CPUS worker
// threads.  Before the workers start, each thread's serializer gets its
// sequence counter pre-seeded at the offset of its slice so features keep
// stable sequence numbers; afterward the primary serializer's counter is
// advanced past everything that was consumed.
void fgbRunQueue() {
	if (feature_queue.size() == 0) {
		return;
	}

	std::vector<struct queue_run_arg> qra;

	std::vector<pthread_t> pthreads;
	pthreads.resize(CPUS);

	for (size_t i = 0; i < CPUS; i++) {
		// Seed thread i's sequence counter at the start of its slice
		// (relative to the current counter of serializer 0).
		*((*(feature_queue[0].sst))[i].layer_seq) = *((*(feature_queue[0].sst))[0].layer_seq) + feature_queue.size() * i / CPUS;

		qra.push_back(queue_run_arg(
			feature_queue.size() * i / CPUS,
			feature_queue.size() * (i + 1) / CPUS,
			i));
	}

	for (size_t i = 0; i < CPUS; i++) {
		if (pthread_create(&pthreads[i], NULL, fgb_run_parse_feature, &qra[i]) != 0) {
			perror("pthread_create");
			exit(EXIT_FAILURE);
		}
	}

	for (size_t i = 0; i < CPUS; i++) {
		void *retval;

		if (pthread_join(pthreads[i], &retval) != 0) {
			perror("pthread_join");
		}
	}

	// Lack of atomicity is OK, since we are single-threaded again here
	// The last thread's counter ends past every queued feature; carry it
	// over into serializer 0 for the next batch.
	long long was = *((*(feature_queue[0].sst))[CPUS - 1].layer_seq);
	*((*(feature_queue[0].sst))[0].layer_seq) = was;

	feature_queue.clear();
}
// Appends one feature to the pending batch; once the batch exceeds
// CPUS * 500 entries it is flushed through the multithreaded parser.
// The schema vectors are captured by address, so callers must keep them
// alive until fgbRunQueue() has run.
void queueFeature(const FlatGeobuf::Feature *feature, FlatGeobuf::GeometryType h_geometry_type, const std::vector<std::string> &h_column_names, const std::vector<FlatGeobuf::ColumnType> &h_column_types, std::vector<struct serialization_state> *sst, int layer, std::string layername) {
	struct queued_feature entry;
	entry.layer = layer;
	entry.layername = layername;
	entry.sst = sst;
	entry.feature = feature;
	entry.h_geometry_type = h_geometry_type;
	entry.h_column_names = &h_column_names;
	entry.h_column_types = &h_column_types;
	feature_queue.push_back(entry);

	if (feature_queue.size() > CPUS * 500) {
		fgbRunQueue();
	}
}
// Parses a complete FlatGeobuf buffer (`src`, `len` bytes): verifies the
// magic bytes, reads the size-prefixed header to learn the geometry type
// and column schema, skips the optional packed R-tree index, then queues
// every feature for multithreaded serialization.
void parse_flatgeobuf(std::vector<struct serialization_state> *sst, const char *src, size_t len, int layer, std::string layername) {
	// Check the "fgb" signature and major version (first 4 bytes only, so
	// newer patch versions are still accepted).  The magic bytes were
	// previously declared but never actually verified.
	if (len < sizeof(magicbytes) + sizeof(uint32_t) || memcmp(src, magicbytes, 4) != 0) {
		fprintf(stderr, "flatgeobuf magic byte verification failed\n");
		exit(EXIT_FAILURE);
	}

	auto header_size = flatbuffers::GetPrefixedSize((const uint8_t *)src + sizeof(magicbytes));
	flatbuffers::Verifier v((const uint8_t *)src + sizeof(magicbytes), header_size + sizeof(uint32_t));
	const auto ok = FlatGeobuf::VerifySizePrefixedHeaderBuffer(v);
	if (!ok) {
		fprintf(stderr, "flatgeobuf header verification failed\n");
		exit(EXIT_FAILURE);
	}

	auto header = FlatGeobuf::GetSizePrefixedHeader(src + sizeof(magicbytes));
	auto features_count = header->features_count();
	auto node_size = header->index_node_size();

	std::vector<std::string> h_column_names;
	std::vector<FlatGeobuf::ColumnType> h_column_types;

	if (header->columns() != NULL) {
		for (size_t i = 0; i < header->columns()->size(); i++) {
			h_column_names.push_back(header->columns()->Get(i)->name()->c_str());
			h_column_types.push_back(header->columns()->Get(i)->type());
		}
	}

	auto h_geometry_type = header->geometry_type();

	// Size of the optional packed R-tree index sitting between the header
	// and the first feature.  uint64_t: the previous int could truncate
	// very large index sizes.  The features_count > 0 guard keeps
	// PackedRTreeSize() from throwing when the count is unknown (0).
	uint64_t index_size = 0;
	if (node_size > 0 && features_count > 0) {
		index_size = PackedRTreeSize(features_count, node_size);
	}

	const char *start = src + sizeof(magicbytes) + sizeof(uint32_t) + header_size + index_size;
	while (start < src + len) {
		auto feature_size = flatbuffers::GetPrefixedSize((const uint8_t *)start);
		flatbuffers::Verifier v2((const uint8_t *)start, feature_size + sizeof(uint32_t));
		const auto ok2 = FlatGeobuf::VerifySizePrefixedFeatureBuffer(v2);
		if (!ok2) {
			fprintf(stderr, "flatgeobuf feature buffer verification failed\n");
			exit(EXIT_FAILURE);
		}
		auto feature = FlatGeobuf::GetSizePrefixedFeature(start);

		queueFeature(feature, h_geometry_type, h_column_names, h_column_types, sst, layer, layername);

		start += sizeof(uint32_t) + feature_size;
	}

	fgbRunQueue();
}

6
flatgeobuf.hpp Normal file
View File

@ -0,0 +1,6 @@
#ifndef FLATGEOBUF_HPP
#define FLATGEOBUF_HPP

// The declaration below uses std::vector and std::string; include them so
// the header is self-contained instead of relying on the includer having
// pulled them in first.
#include <string>
#include <vector>

// Parses a complete FlatGeobuf buffer (`s`, `len` bytes) and serializes
// every feature it contains into the given layer.
void parse_flatgeobuf(std::vector<struct serialization_state> *sst, const char *s, size_t len, int layer, std::string layername);

#endif

View File

@ -0,0 +1,278 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_FEATURE_FLATGEOBUF_H_
#define FLATBUFFERS_GENERATED_FEATURE_FLATGEOBUF_H_
#include "flatbuffers/flatbuffers.h"
#include "header_generated.h"
namespace FlatGeobuf {
struct Geometry;
struct GeometryBuilder;
struct Feature;
struct FeatureBuilder;
// Read-only accessor table for a FlatGeobuf Geometry record: flat
// coordinate arrays (xy, plus optional z/m/t/tm dimensions), ring/part
// boundaries in `ends`, and nested `parts` for multi/collection
// geometries.  Generated by flatc from the FlatGeobuf schema — do not
// edit by hand; regenerate instead.
struct Geometry FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef GeometryBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ENDS = 4,
    VT_XY = 6,
    VT_Z = 8,
    VT_M = 10,
    VT_T = 12,
    VT_TM = 14,
    VT_TYPE = 16,
    VT_PARTS = 18
  };
  const flatbuffers::Vector<uint32_t> *ends() const {
    return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_ENDS);
  }
  const flatbuffers::Vector<double> *xy() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_XY);
  }
  const flatbuffers::Vector<double> *z() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_Z);
  }
  const flatbuffers::Vector<double> *m() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_M);
  }
  const flatbuffers::Vector<double> *t() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_T);
  }
  const flatbuffers::Vector<uint64_t> *tm() const {
    return GetPointer<const flatbuffers::Vector<uint64_t> *>(VT_TM);
  }
  FlatGeobuf::GeometryType type() const {
    return static_cast<FlatGeobuf::GeometryType>(GetField<uint8_t>(VT_TYPE, 0));
  }
  const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Geometry>> *parts() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Geometry>> *>(VT_PARTS);
  }
  // Bounds-checks every field so the raw buffer can be trusted before use.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ENDS) &&
           verifier.VerifyVector(ends()) &&
           VerifyOffset(verifier, VT_XY) &&
           verifier.VerifyVector(xy()) &&
           VerifyOffset(verifier, VT_Z) &&
           verifier.VerifyVector(z()) &&
           VerifyOffset(verifier, VT_M) &&
           verifier.VerifyVector(m()) &&
           VerifyOffset(verifier, VT_T) &&
           verifier.VerifyVector(t()) &&
           VerifyOffset(verifier, VT_TM) &&
           verifier.VerifyVector(tm()) &&
           VerifyField<uint8_t>(verifier, VT_TYPE) &&
           VerifyOffset(verifier, VT_PARTS) &&
           verifier.VerifyVector(parts()) &&
           verifier.VerifyVectorOfTables(parts()) &&
           verifier.EndTable();
  }
};
// Incremental builder for Geometry tables; wraps a FlatBufferBuilder and
// records one field per add_* call.  Generated by flatc — do not edit.
struct GeometryBuilder {
  typedef Geometry Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_ends(flatbuffers::Offset<flatbuffers::Vector<uint32_t>> ends) {
    fbb_.AddOffset(Geometry::VT_ENDS, ends);
  }
  void add_xy(flatbuffers::Offset<flatbuffers::Vector<double>> xy) {
    fbb_.AddOffset(Geometry::VT_XY, xy);
  }
  void add_z(flatbuffers::Offset<flatbuffers::Vector<double>> z) {
    fbb_.AddOffset(Geometry::VT_Z, z);
  }
  void add_m(flatbuffers::Offset<flatbuffers::Vector<double>> m) {
    fbb_.AddOffset(Geometry::VT_M, m);
  }
  void add_t(flatbuffers::Offset<flatbuffers::Vector<double>> t) {
    fbb_.AddOffset(Geometry::VT_T, t);
  }
  void add_tm(flatbuffers::Offset<flatbuffers::Vector<uint64_t>> tm) {
    fbb_.AddOffset(Geometry::VT_TM, tm);
  }
  void add_type(FlatGeobuf::GeometryType type) {
    fbb_.AddElement<uint8_t>(Geometry::VT_TYPE, static_cast<uint8_t>(type), 0);
  }
  void add_parts(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Geometry>>> parts) {
    fbb_.AddOffset(Geometry::VT_PARTS, parts);
  }
  explicit GeometryBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Geometry> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Geometry>(end);
    return o;
  }
};
// Convenience one-shot constructor for a Geometry table from pre-built
// vector offsets.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Geometry> CreateGeometry(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<uint32_t>> ends = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> xy = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> z = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> m = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> t = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint64_t>> tm = 0,
    FlatGeobuf::GeometryType type = FlatGeobuf::GeometryType::Unknown,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Geometry>>> parts = 0) {
  GeometryBuilder builder_(_fbb);
  builder_.add_parts(parts);
  builder_.add_tm(tm);
  builder_.add_t(t);
  builder_.add_m(m);
  builder_.add_z(z);
  builder_.add_xy(xy);
  builder_.add_ends(ends);
  builder_.add_type(type);
  return builder_.Finish();
}

// Same as CreateGeometry but serializes the given std::vectors into the
// buffer first.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Geometry> CreateGeometryDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<uint32_t> *ends = nullptr,
    const std::vector<double> *xy = nullptr,
    const std::vector<double> *z = nullptr,
    const std::vector<double> *m = nullptr,
    const std::vector<double> *t = nullptr,
    const std::vector<uint64_t> *tm = nullptr,
    FlatGeobuf::GeometryType type = FlatGeobuf::GeometryType::Unknown,
    const std::vector<flatbuffers::Offset<FlatGeobuf::Geometry>> *parts = nullptr) {
  auto ends__ = ends ? _fbb.CreateVector<uint32_t>(*ends) : 0;
  auto xy__ = xy ? _fbb.CreateVector<double>(*xy) : 0;
  auto z__ = z ? _fbb.CreateVector<double>(*z) : 0;
  auto m__ = m ? _fbb.CreateVector<double>(*m) : 0;
  auto t__ = t ? _fbb.CreateVector<double>(*t) : 0;
  auto tm__ = tm ? _fbb.CreateVector<uint64_t>(*tm) : 0;
  auto parts__ = parts ? _fbb.CreateVector<flatbuffers::Offset<FlatGeobuf::Geometry>>(*parts) : 0;
  return FlatGeobuf::CreateGeometry(
      _fbb,
      ends__,
      xy__,
      z__,
      m__,
      t__,
      tm__,
      type,
      parts__);
}
// Read-only accessor table for a FlatGeobuf Feature record: its geometry,
// a packed `properties` byte array (decoded against the header's column
// schema), and optional per-feature column overrides.  Generated by
// flatc — do not edit by hand; regenerate instead.
struct Feature FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef FeatureBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_GEOMETRY = 4,
    VT_PROPERTIES = 6,
    VT_COLUMNS = 8
  };
  const FlatGeobuf::Geometry *geometry() const {
    return GetPointer<const FlatGeobuf::Geometry *>(VT_GEOMETRY);
  }
  const flatbuffers::Vector<uint8_t> *properties() const {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_PROPERTIES);
  }
  const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>> *columns() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>> *>(VT_COLUMNS);
  }
  // Bounds-checks every field so the raw buffer can be trusted before use.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_GEOMETRY) &&
           verifier.VerifyTable(geometry()) &&
           VerifyOffset(verifier, VT_PROPERTIES) &&
           verifier.VerifyVector(properties()) &&
           VerifyOffset(verifier, VT_COLUMNS) &&
           verifier.VerifyVector(columns()) &&
           verifier.VerifyVectorOfTables(columns()) &&
           verifier.EndTable();
  }
};
// Incremental builder for Feature tables; wraps a FlatBufferBuilder and
// records one field per add_* call.  Generated by flatc — do not edit.
struct FeatureBuilder {
  typedef Feature Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_geometry(flatbuffers::Offset<FlatGeobuf::Geometry> geometry) {
    fbb_.AddOffset(Feature::VT_GEOMETRY, geometry);
  }
  void add_properties(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> properties) {
    fbb_.AddOffset(Feature::VT_PROPERTIES, properties);
  }
  void add_columns(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>>> columns) {
    fbb_.AddOffset(Feature::VT_COLUMNS, columns);
  }
  explicit FeatureBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Feature> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Feature>(end);
    return o;
  }
};

// Convenience one-shot constructor for a Feature table from pre-built
// offsets.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Feature> CreateFeature(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<FlatGeobuf::Geometry> geometry = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> properties = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>>> columns = 0) {
  FeatureBuilder builder_(_fbb);
  builder_.add_columns(columns);
  builder_.add_properties(properties);
  builder_.add_geometry(geometry);
  return builder_.Finish();
}

// Same as CreateFeature but serializes the given std::vectors into the
// buffer first.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Feature> CreateFeatureDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<FlatGeobuf::Geometry> geometry = 0,
    const std::vector<uint8_t> *properties = nullptr,
    const std::vector<flatbuffers::Offset<FlatGeobuf::Column>> *columns = nullptr) {
  auto properties__ = properties ? _fbb.CreateVector<uint8_t>(*properties) : 0;
  auto columns__ = columns ? _fbb.CreateVector<flatbuffers::Offset<FlatGeobuf::Column>>(*columns) : 0;
  return FlatGeobuf::CreateFeature(
      _fbb,
      geometry,
      properties__,
      columns__);
}
// Root accessors/verifiers/finishers for Feature buffers, in plain and
// size-prefixed (leading uint32 length) flavors.  Generated by flatc —
// do not edit.
inline const FlatGeobuf::Feature *GetFeature(const void *buf) {
  return flatbuffers::GetRoot<FlatGeobuf::Feature>(buf);
}

inline const FlatGeobuf::Feature *GetSizePrefixedFeature(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<FlatGeobuf::Feature>(buf);
}

inline bool VerifyFeatureBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<FlatGeobuf::Feature>(nullptr);
}

inline bool VerifySizePrefixedFeatureBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<FlatGeobuf::Feature>(nullptr);
}

inline void FinishFeatureBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<FlatGeobuf::Feature> root) {
  fbb.Finish(root);
}

inline void FinishSizePrefixedFeatureBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<FlatGeobuf::Feature> root) {
  fbb.FinishSizePrefixed(root);
}
} // namespace FlatGeobuf
#endif // FLATBUFFERS_GENERATED_FEATURE_FLATGEOBUF_H_

View File

@ -0,0 +1,715 @@
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_HEADER_FLATGEOBUF_H_
#define FLATBUFFERS_GENERATED_HEADER_FLATGEOBUF_H_
#include "flatbuffers/flatbuffers.h"
namespace FlatGeobuf {
struct Column;
struct ColumnBuilder;
struct Crs;
struct CrsBuilder;
struct Header;
struct HeaderBuilder;
// FlatGeobuf geometry type codes (OGC Simple Features numbering) plus the
// standard flatc enum helpers.  Generated by flatc — do not edit.
enum class GeometryType : uint8_t {
  Unknown = 0,
  Point = 1,
  LineString = 2,
  Polygon = 3,
  MultiPoint = 4,
  MultiLineString = 5,
  MultiPolygon = 6,
  GeometryCollection = 7,
  CircularString = 8,
  CompoundCurve = 9,
  CurvePolygon = 10,
  MultiCurve = 11,
  MultiSurface = 12,
  Curve = 13,
  Surface = 14,
  PolyhedralSurface = 15,
  TIN = 16,
  Triangle = 17,
  MIN = Unknown,
  MAX = Triangle
};

// All 18 enumerators in declaration order.
inline const GeometryType (&EnumValuesGeometryType())[18] {
  static const GeometryType values[] = {
    GeometryType::Unknown,
    GeometryType::Point,
    GeometryType::LineString,
    GeometryType::Polygon,
    GeometryType::MultiPoint,
    GeometryType::MultiLineString,
    GeometryType::MultiPolygon,
    GeometryType::GeometryCollection,
    GeometryType::CircularString,
    GeometryType::CompoundCurve,
    GeometryType::CurvePolygon,
    GeometryType::MultiCurve,
    GeometryType::MultiSurface,
    GeometryType::Curve,
    GeometryType::Surface,
    GeometryType::PolyhedralSurface,
    GeometryType::TIN,
    GeometryType::Triangle
  };
  return values;
}

// Enumerator names indexed by value; the trailing nullptr is the flatc
// end sentinel.
inline const char * const *EnumNamesGeometryType() {
  static const char * const names[19] = {
    "Unknown",
    "Point",
    "LineString",
    "Polygon",
    "MultiPoint",
    "MultiLineString",
    "MultiPolygon",
    "GeometryCollection",
    "CircularString",
    "CompoundCurve",
    "CurvePolygon",
    "MultiCurve",
    "MultiSurface",
    "Curve",
    "Surface",
    "PolyhedralSurface",
    "TIN",
    "Triangle",
    nullptr
  };
  return names;
}

// Name of one enumerator; "" for out-of-range values.
inline const char *EnumNameGeometryType(GeometryType e) {
  if (flatbuffers::IsOutRange(e, GeometryType::Unknown, GeometryType::Triangle)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesGeometryType()[index];
}
// FlatGeobuf property column value types plus the standard flatc enum
// helpers.  Generated by flatc — do not edit.
enum class ColumnType : uint8_t {
  Byte = 0,
  UByte = 1,
  Bool = 2,
  Short = 3,
  UShort = 4,
  Int = 5,
  UInt = 6,
  Long = 7,
  ULong = 8,
  Float = 9,
  Double = 10,
  String = 11,
  Json = 12,
  DateTime = 13,
  Binary = 14,
  MIN = Byte,
  MAX = Binary
};

// All 15 enumerators in declaration order.
inline const ColumnType (&EnumValuesColumnType())[15] {
  static const ColumnType values[] = {
    ColumnType::Byte,
    ColumnType::UByte,
    ColumnType::Bool,
    ColumnType::Short,
    ColumnType::UShort,
    ColumnType::Int,
    ColumnType::UInt,
    ColumnType::Long,
    ColumnType::ULong,
    ColumnType::Float,
    ColumnType::Double,
    ColumnType::String,
    ColumnType::Json,
    ColumnType::DateTime,
    ColumnType::Binary
  };
  return values;
}

// Enumerator names indexed by value; the trailing nullptr is the flatc
// end sentinel.
inline const char * const *EnumNamesColumnType() {
  static const char * const names[16] = {
    "Byte",
    "UByte",
    "Bool",
    "Short",
    "UShort",
    "Int",
    "UInt",
    "Long",
    "ULong",
    "Float",
    "Double",
    "String",
    "Json",
    "DateTime",
    "Binary",
    nullptr
  };
  return names;
}

// Name of one enumerator; "" for out-of-range values.
inline const char *EnumNameColumnType(ColumnType e) {
  if (flatbuffers::IsOutRange(e, ColumnType::Byte, ColumnType::Binary)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesColumnType()[index];
}
// Read-only accessor table for one column of the property schema: its
// required name, value type, and optional metadata.  width/precision/
// scale default to -1 (unspecified); nullable defaults to true.
// Generated by flatc — do not edit by hand; regenerate instead.
struct Column FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ColumnBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NAME = 4,
    VT_TYPE = 6,
    VT_TITLE = 8,
    VT_DESCRIPTION = 10,
    VT_WIDTH = 12,
    VT_PRECISION = 14,
    VT_SCALE = 16,
    VT_NULLABLE = 18,
    VT_UNIQUE = 20,
    VT_PRIMARY_KEY = 22,
    VT_METADATA = 24
  };
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  FlatGeobuf::ColumnType type() const {
    return static_cast<FlatGeobuf::ColumnType>(GetField<uint8_t>(VT_TYPE, 0));
  }
  const flatbuffers::String *title() const {
    return GetPointer<const flatbuffers::String *>(VT_TITLE);
  }
  const flatbuffers::String *description() const {
    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
  }
  int32_t width() const {
    return GetField<int32_t>(VT_WIDTH, -1);
  }
  int32_t precision() const {
    return GetField<int32_t>(VT_PRECISION, -1);
  }
  int32_t scale() const {
    return GetField<int32_t>(VT_SCALE, -1);
  }
  bool nullable() const {
    return GetField<uint8_t>(VT_NULLABLE, 1) != 0;
  }
  bool unique() const {
    return GetField<uint8_t>(VT_UNIQUE, 0) != 0;
  }
  bool primary_key() const {
    return GetField<uint8_t>(VT_PRIMARY_KEY, 0) != 0;
  }
  const flatbuffers::String *metadata() const {
    return GetPointer<const flatbuffers::String *>(VT_METADATA);
  }
  // Bounds-checks every field; name is required.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffsetRequired(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyField<uint8_t>(verifier, VT_TYPE) &&
           VerifyOffset(verifier, VT_TITLE) &&
           verifier.VerifyString(title()) &&
           VerifyOffset(verifier, VT_DESCRIPTION) &&
           verifier.VerifyString(description()) &&
           VerifyField<int32_t>(verifier, VT_WIDTH) &&
           VerifyField<int32_t>(verifier, VT_PRECISION) &&
           VerifyField<int32_t>(verifier, VT_SCALE) &&
           VerifyField<uint8_t>(verifier, VT_NULLABLE) &&
           VerifyField<uint8_t>(verifier, VT_UNIQUE) &&
           VerifyField<uint8_t>(verifier, VT_PRIMARY_KEY) &&
           VerifyOffset(verifier, VT_METADATA) &&
           verifier.VerifyString(metadata()) &&
           verifier.EndTable();
  }
};
// Incremental builder for Column tables; wraps a FlatBufferBuilder and
// records one field per add_* call.  Generated by flatc — do not edit.
struct ColumnBuilder {
  typedef Column Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(Column::VT_NAME, name);
  }
  void add_type(FlatGeobuf::ColumnType type) {
    fbb_.AddElement<uint8_t>(Column::VT_TYPE, static_cast<uint8_t>(type), 0);
  }
  void add_title(flatbuffers::Offset<flatbuffers::String> title) {
    fbb_.AddOffset(Column::VT_TITLE, title);
  }
  void add_description(flatbuffers::Offset<flatbuffers::String> description) {
    fbb_.AddOffset(Column::VT_DESCRIPTION, description);
  }
  void add_width(int32_t width) {
    fbb_.AddElement<int32_t>(Column::VT_WIDTH, width, -1);
  }
  void add_precision(int32_t precision) {
    fbb_.AddElement<int32_t>(Column::VT_PRECISION, precision, -1);
  }
  void add_scale(int32_t scale) {
    fbb_.AddElement<int32_t>(Column::VT_SCALE, scale, -1);
  }
  void add_nullable(bool nullable) {
    fbb_.AddElement<uint8_t>(Column::VT_NULLABLE, static_cast<uint8_t>(nullable), 1);
  }
  void add_unique(bool unique) {
    fbb_.AddElement<uint8_t>(Column::VT_UNIQUE, static_cast<uint8_t>(unique), 0);
  }
  void add_primary_key(bool primary_key) {
    fbb_.AddElement<uint8_t>(Column::VT_PRIMARY_KEY, static_cast<uint8_t>(primary_key), 0);
  }
  void add_metadata(flatbuffers::Offset<flatbuffers::String> metadata) {
    fbb_.AddOffset(Column::VT_METADATA, metadata);
  }
  explicit ColumnBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table; enforces that the required `name` field was set.
  flatbuffers::Offset<Column> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Column>(end);
    fbb_.Required(o, Column::VT_NAME);
    return o;
  }
};

// Convenience one-shot constructor for a Column table from pre-built
// offsets.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Column> CreateColumn(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    FlatGeobuf::ColumnType type = FlatGeobuf::ColumnType::Byte,
    flatbuffers::Offset<flatbuffers::String> title = 0,
    flatbuffers::Offset<flatbuffers::String> description = 0,
    int32_t width = -1,
    int32_t precision = -1,
    int32_t scale = -1,
    bool nullable = true,
    bool unique = false,
    bool primary_key = false,
    flatbuffers::Offset<flatbuffers::String> metadata = 0) {
  ColumnBuilder builder_(_fbb);
  builder_.add_metadata(metadata);
  builder_.add_scale(scale);
  builder_.add_precision(precision);
  builder_.add_width(width);
  builder_.add_description(description);
  builder_.add_title(title);
  builder_.add_name(name);
  builder_.add_primary_key(primary_key);
  builder_.add_unique(unique);
  builder_.add_nullable(nullable);
  builder_.add_type(type);
  return builder_.Finish();
}

// Same as CreateColumn but serializes the given C strings into the buffer
// first.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Column> CreateColumnDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *name = nullptr,
    FlatGeobuf::ColumnType type = FlatGeobuf::ColumnType::Byte,
    const char *title = nullptr,
    const char *description = nullptr,
    int32_t width = -1,
    int32_t precision = -1,
    int32_t scale = -1,
    bool nullable = true,
    bool unique = false,
    bool primary_key = false,
    const char *metadata = nullptr) {
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto title__ = title ? _fbb.CreateString(title) : 0;
  auto description__ = description ? _fbb.CreateString(description) : 0;
  auto metadata__ = metadata ? _fbb.CreateString(metadata) : 0;
  return FlatGeobuf::CreateColumn(
      _fbb,
      name__,
      type,
      title__,
      description__,
      width,
      precision,
      scale,
      nullable,
      unique,
      primary_key,
      metadata__);
}
// Read-only accessor table describing the coordinate reference system of
// a FlatGeobuf file (authority org + code, e.g. EPSG:4326, plus optional
// WKT), with its builder and one-shot constructors.  Generated by flatc —
// do not edit by hand; regenerate instead.
struct Crs FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CrsBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ORG = 4,
    VT_CODE = 6,
    VT_NAME = 8,
    VT_DESCRIPTION = 10,
    VT_WKT = 12,
    VT_CODE_STRING = 14
  };
  const flatbuffers::String *org() const {
    return GetPointer<const flatbuffers::String *>(VT_ORG);
  }
  int32_t code() const {
    return GetField<int32_t>(VT_CODE, 0);
  }
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  const flatbuffers::String *description() const {
    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
  }
  const flatbuffers::String *wkt() const {
    return GetPointer<const flatbuffers::String *>(VT_WKT);
  }
  const flatbuffers::String *code_string() const {
    return GetPointer<const flatbuffers::String *>(VT_CODE_STRING);
  }
  // Bounds-checks every field so the raw buffer can be trusted before use.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ORG) &&
           verifier.VerifyString(org()) &&
           VerifyField<int32_t>(verifier, VT_CODE) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyOffset(verifier, VT_DESCRIPTION) &&
           verifier.VerifyString(description()) &&
           VerifyOffset(verifier, VT_WKT) &&
           verifier.VerifyString(wkt()) &&
           VerifyOffset(verifier, VT_CODE_STRING) &&
           verifier.VerifyString(code_string()) &&
           verifier.EndTable();
  }
};

// Incremental builder for Crs tables.  Generated by flatc — do not edit.
struct CrsBuilder {
  typedef Crs Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_org(flatbuffers::Offset<flatbuffers::String> org) {
    fbb_.AddOffset(Crs::VT_ORG, org);
  }
  void add_code(int32_t code) {
    fbb_.AddElement<int32_t>(Crs::VT_CODE, code, 0);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(Crs::VT_NAME, name);
  }
  void add_description(flatbuffers::Offset<flatbuffers::String> description) {
    fbb_.AddOffset(Crs::VT_DESCRIPTION, description);
  }
  void add_wkt(flatbuffers::Offset<flatbuffers::String> wkt) {
    fbb_.AddOffset(Crs::VT_WKT, wkt);
  }
  void add_code_string(flatbuffers::Offset<flatbuffers::String> code_string) {
    fbb_.AddOffset(Crs::VT_CODE_STRING, code_string);
  }
  explicit CrsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Finalizes the table and returns its offset within the buffer.
  flatbuffers::Offset<Crs> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Crs>(end);
    return o;
  }
};

// Convenience one-shot constructor for a Crs table from pre-built offsets.
inline flatbuffers::Offset<Crs> CreateCrs(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> org = 0,
    int32_t code = 0,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    flatbuffers::Offset<flatbuffers::String> description = 0,
    flatbuffers::Offset<flatbuffers::String> wkt = 0,
    flatbuffers::Offset<flatbuffers::String> code_string = 0) {
  CrsBuilder builder_(_fbb);
  builder_.add_code_string(code_string);
  builder_.add_wkt(wkt);
  builder_.add_description(description);
  builder_.add_name(name);
  builder_.add_code(code);
  builder_.add_org(org);
  return builder_.Finish();
}

// Same as CreateCrs but serializes the given C strings into the buffer
// first.  Generated by flatc — do not edit.
inline flatbuffers::Offset<Crs> CreateCrsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *org = nullptr,
    int32_t code = 0,
    const char *name = nullptr,
    const char *description = nullptr,
    const char *wkt = nullptr,
    const char *code_string = nullptr) {
  auto org__ = org ? _fbb.CreateString(org) : 0;
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto description__ = description ? _fbb.CreateString(description) : 0;
  auto wkt__ = wkt ? _fbb.CreateString(wkt) : 0;
  auto code_string__ = code_string ? _fbb.CreateString(code_string) : 0;
  return FlatGeobuf::CreateCrs(
      _fbb,
      org__,
      code,
      name__,
      description__,
      wkt__,
      code_string__);
}
// Read-only accessor table for the FlatGeobuf file header: dataset name,
// envelope, geometry type, extra dimensions, column schema, feature
// count, spatial-index node size (default 16; the reader in this repo
// treats 0 as "no index"), and CRS/metadata.  Generated by flatc — do
// not edit by hand; regenerate instead.
struct Header FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef HeaderBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NAME = 4,
    VT_ENVELOPE = 6,
    VT_GEOMETRY_TYPE = 8,
    VT_HAS_Z = 10,
    VT_HAS_M = 12,
    VT_HAS_T = 14,
    VT_HAS_TM = 16,
    VT_COLUMNS = 18,
    VT_FEATURES_COUNT = 20,
    VT_INDEX_NODE_SIZE = 22,
    VT_CRS = 24,
    VT_TITLE = 26,
    VT_DESCRIPTION = 28,
    VT_METADATA = 30
  };
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  const flatbuffers::Vector<double> *envelope() const {
    return GetPointer<const flatbuffers::Vector<double> *>(VT_ENVELOPE);
  }
  FlatGeobuf::GeometryType geometry_type() const {
    return static_cast<FlatGeobuf::GeometryType>(GetField<uint8_t>(VT_GEOMETRY_TYPE, 0));
  }
  bool has_z() const {
    return GetField<uint8_t>(VT_HAS_Z, 0) != 0;
  }
  bool has_m() const {
    return GetField<uint8_t>(VT_HAS_M, 0) != 0;
  }
  bool has_t() const {
    return GetField<uint8_t>(VT_HAS_T, 0) != 0;
  }
  bool has_tm() const {
    return GetField<uint8_t>(VT_HAS_TM, 0) != 0;
  }
  const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>> *columns() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>> *>(VT_COLUMNS);
  }
  uint64_t features_count() const {
    return GetField<uint64_t>(VT_FEATURES_COUNT, 0);
  }
  uint16_t index_node_size() const {
    return GetField<uint16_t>(VT_INDEX_NODE_SIZE, 16);
  }
  const FlatGeobuf::Crs *crs() const {
    return GetPointer<const FlatGeobuf::Crs *>(VT_CRS);
  }
  const flatbuffers::String *title() const {
    return GetPointer<const flatbuffers::String *>(VT_TITLE);
  }
  const flatbuffers::String *description() const {
    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
  }
  const flatbuffers::String *metadata() const {
    return GetPointer<const flatbuffers::String *>(VT_METADATA);
  }
  // Bounds-checks every field so the raw buffer can be trusted before use.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyOffset(verifier, VT_ENVELOPE) &&
           verifier.VerifyVector(envelope()) &&
           VerifyField<uint8_t>(verifier, VT_GEOMETRY_TYPE) &&
           VerifyField<uint8_t>(verifier, VT_HAS_Z) &&
           VerifyField<uint8_t>(verifier, VT_HAS_M) &&
           VerifyField<uint8_t>(verifier, VT_HAS_T) &&
           VerifyField<uint8_t>(verifier, VT_HAS_TM) &&
           VerifyOffset(verifier, VT_COLUMNS) &&
           verifier.VerifyVector(columns()) &&
           verifier.VerifyVectorOfTables(columns()) &&
           VerifyField<uint64_t>(verifier, VT_FEATURES_COUNT) &&
           VerifyField<uint16_t>(verifier, VT_INDEX_NODE_SIZE) &&
           VerifyOffset(verifier, VT_CRS) &&
           verifier.VerifyTable(crs()) &&
           VerifyOffset(verifier, VT_TITLE) &&
           verifier.VerifyString(title()) &&
           VerifyOffset(verifier, VT_DESCRIPTION) &&
           verifier.VerifyString(description()) &&
           VerifyOffset(verifier, VT_METADATA) &&
           verifier.VerifyString(metadata()) &&
           verifier.EndTable();
  }
};
// Builder for serializing a `Header` table into a FlatBufferBuilder.
// Fields may be added in any order between construction and Finish();
// a field added with its schema default value is elided from the buffer
// and falls back to that default when read. flatc-generated code.
struct HeaderBuilder {
  typedef Header Table;
  flatbuffers::FlatBufferBuilder &fbb_;  // destination buffer
  flatbuffers::uoffset_t start_;         // offset where this table started
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(Header::VT_NAME, name);
  }
  void add_envelope(flatbuffers::Offset<flatbuffers::Vector<double>> envelope) {
    fbb_.AddOffset(Header::VT_ENVELOPE, envelope);
  }
  void add_geometry_type(FlatGeobuf::GeometryType geometry_type) {
    fbb_.AddElement<uint8_t>(Header::VT_GEOMETRY_TYPE, static_cast<uint8_t>(geometry_type), 0);
  }
  void add_has_z(bool has_z) {
    fbb_.AddElement<uint8_t>(Header::VT_HAS_Z, static_cast<uint8_t>(has_z), 0);
  }
  void add_has_m(bool has_m) {
    fbb_.AddElement<uint8_t>(Header::VT_HAS_M, static_cast<uint8_t>(has_m), 0);
  }
  void add_has_t(bool has_t) {
    fbb_.AddElement<uint8_t>(Header::VT_HAS_T, static_cast<uint8_t>(has_t), 0);
  }
  void add_has_tm(bool has_tm) {
    fbb_.AddElement<uint8_t>(Header::VT_HAS_TM, static_cast<uint8_t>(has_tm), 0);
  }
  void add_columns(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>>> columns) {
    fbb_.AddOffset(Header::VT_COLUMNS, columns);
  }
  void add_features_count(uint64_t features_count) {
    fbb_.AddElement<uint64_t>(Header::VT_FEATURES_COUNT, features_count, 0);
  }
  // Note the non-zero schema default (16): adding 16 stores nothing.
  void add_index_node_size(uint16_t index_node_size) {
    fbb_.AddElement<uint16_t>(Header::VT_INDEX_NODE_SIZE, index_node_size, 16);
  }
  void add_crs(flatbuffers::Offset<FlatGeobuf::Crs> crs) {
    fbb_.AddOffset(Header::VT_CRS, crs);
  }
  void add_title(flatbuffers::Offset<flatbuffers::String> title) {
    fbb_.AddOffset(Header::VT_TITLE, title);
  }
  void add_description(flatbuffers::Offset<flatbuffers::String> description) {
    fbb_.AddOffset(Header::VT_DESCRIPTION, description);
  }
  void add_metadata(flatbuffers::Offset<flatbuffers::String> metadata) {
    fbb_.AddOffset(Header::VT_METADATA, metadata);
  }
  explicit HeaderBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Closes the table and returns its offset within the buffer.
  flatbuffers::Offset<Header> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Header>(end);
    return o;
  }
};
// Convenience helper that builds a complete `Header` table in one call.
// Fields are added in decreasing size order (8-byte scalar, 4-byte offsets,
// 2-byte, then 1-byte scalars) as emitted by flatc, so the table packs with
// minimal padding. Do not reorder — the add order determines buffer layout.
inline flatbuffers::Offset<Header> CreateHeader(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    flatbuffers::Offset<flatbuffers::Vector<double>> envelope = 0,
    FlatGeobuf::GeometryType geometry_type = FlatGeobuf::GeometryType::Unknown,
    bool has_z = false,
    bool has_m = false,
    bool has_t = false,
    bool has_tm = false,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>>> columns = 0,
    uint64_t features_count = 0,
    uint16_t index_node_size = 16,
    flatbuffers::Offset<FlatGeobuf::Crs> crs = 0,
    flatbuffers::Offset<flatbuffers::String> title = 0,
    flatbuffers::Offset<flatbuffers::String> description = 0,
    flatbuffers::Offset<flatbuffers::String> metadata = 0) {
  HeaderBuilder builder_(_fbb);
  builder_.add_features_count(features_count);
  builder_.add_metadata(metadata);
  builder_.add_description(description);
  builder_.add_title(title);
  builder_.add_crs(crs);
  builder_.add_columns(columns);
  builder_.add_envelope(envelope);
  builder_.add_name(name);
  builder_.add_index_node_size(index_node_size);
  builder_.add_has_tm(has_tm);
  builder_.add_has_t(has_t);
  builder_.add_has_m(has_m);
  builder_.add_has_z(has_z);
  builder_.add_geometry_type(geometry_type);
  return builder_.Finish();
}
// Convenience overload of CreateHeader() that accepts raw C strings and
// std::vectors. Each non-null argument is serialized into the buffer first
// (a null pointer becomes a zero offset, i.e. "field not present"), then
// the offsets are forwarded to CreateHeader().
inline flatbuffers::Offset<Header> CreateHeaderDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *name = nullptr,
    const std::vector<double> *envelope = nullptr,
    FlatGeobuf::GeometryType geometry_type = FlatGeobuf::GeometryType::Unknown,
    bool has_z = false,
    bool has_m = false,
    bool has_t = false,
    bool has_tm = false,
    const std::vector<flatbuffers::Offset<FlatGeobuf::Column>> *columns = nullptr,
    uint64_t features_count = 0,
    uint16_t index_node_size = 16,
    flatbuffers::Offset<FlatGeobuf::Crs> crs = 0,
    const char *title = nullptr,
    const char *description = nullptr,
    const char *metadata = nullptr) {
  // Nested objects must be written before the table that refers to them;
  // keep this creation order so the resulting buffer layout is unchanged.
  flatbuffers::Offset<flatbuffers::String> name_off = 0;
  if (name) {
    name_off = _fbb.CreateString(name);
  }
  flatbuffers::Offset<flatbuffers::Vector<double>> envelope_off = 0;
  if (envelope) {
    envelope_off = _fbb.CreateVector<double>(*envelope);
  }
  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<FlatGeobuf::Column>>> columns_off = 0;
  if (columns) {
    columns_off = _fbb.CreateVector<flatbuffers::Offset<FlatGeobuf::Column>>(*columns);
  }
  flatbuffers::Offset<flatbuffers::String> title_off = 0;
  if (title) {
    title_off = _fbb.CreateString(title);
  }
  flatbuffers::Offset<flatbuffers::String> description_off = 0;
  if (description) {
    description_off = _fbb.CreateString(description);
  }
  flatbuffers::Offset<flatbuffers::String> metadata_off = 0;
  if (metadata) {
    metadata_off = _fbb.CreateString(metadata);
  }
  return FlatGeobuf::CreateHeader(
      _fbb, name_off, envelope_off, geometry_type, has_z, has_m, has_t,
      has_tm, columns_off, features_count, index_node_size, crs, title_off,
      description_off, metadata_off);
}
// Interprets `buf` as a finished FlatBuffer and returns its root `Header`.
// Performs no validation; verify untrusted data with VerifyHeaderBuffer().
inline const FlatGeobuf::Header *GetHeader(const void *buf) {
  return flatbuffers::GetRoot<FlatGeobuf::Header>(buf);
}
// Like GetHeader(), but for buffers that begin with a 32-bit size prefix.
inline const FlatGeobuf::Header *GetSizePrefixedHeader(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<FlatGeobuf::Header>(buf);
}
// Runs the verifier over a buffer whose root is a `Header`; returns true
// if the buffer is structurally sound. The null argument means "no file
// identifier to check".
inline bool VerifyHeaderBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<FlatGeobuf::Header>(nullptr);
}
// Size-prefixed variant of VerifyHeaderBuffer().
inline bool VerifySizePrefixedHeaderBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<FlatGeobuf::Header>(nullptr);
}
// Finalizes `fbb` with `root` as the buffer's root `Header` table.
inline void FinishHeaderBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<FlatGeobuf::Header> root) {
  fbb.Finish(root);
}
// Finalizes `fbb` with `root` as the root, prepending a 32-bit size prefix.
inline void FinishSizePrefixedHeaderBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<FlatGeobuf::Header> root) {
  fbb.FinishSizePrefixed(root);
}
} // namespace FlatGeobuf
#endif // FLATBUFFERS_GENERATED_HEADER_FLATGEOBUF_H_

View File

@ -53,6 +53,7 @@
#include "main.hpp"
#include "geojson.hpp"
#include "geobuf.hpp"
#include "flatgeobuf.hpp"
#include "geocsv.hpp"
#include "geometry.hpp"
#include "serial.hpp"
@ -1353,6 +1354,76 @@ int read_input(std::vector<source> &sources, char *fname, int maxzoom, int minzo
}
size_t layer = a->second.id;
// flatgeobuf
// Recognize FlatGeobuf input by explicit format name or .fgb extension,
// memory-map the file, and hand it to the FlatGeobuf parser.
if (sources[source].format == "fgb" || (sources[source].file.size() > 4 && sources[source].file.substr(sources[source].file.size() - 4) == std::string(".fgb"))) {
	struct stat st;
	if (fstat(fd, &st) != 0) {
		perror("fstat");
		perror(sources[source].file.c_str());
		exit(EXIT_FAILURE);
	}

	// FlatGeobuf is a flat binary layout (header + optional index +
	// features), so it is read from a read-only map of the whole file
	// rather than from a stream.
	char *map = (char *) mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		fprintf(stderr, "%s: mmap: %s: %s\n", *av, reading.c_str(), strerror(errno));
		exit(EXIT_FAILURE);
	}

	// One serialization_state per CPU so features can be fanned out to
	// parallel workers, each with its own sequence/statistics slots.
	std::atomic<long long> layer_seq[CPUS];
	double dist_sums[CPUS];
	size_t dist_counts[CPUS];

	std::vector<struct serialization_state> sst;
	sst.resize(CPUS);

	for (size_t i = 0; i < CPUS; i++) {
		layer_seq[i] = overall_offset;
		dist_sums[i] = 0;
		dist_counts[i] = 0;

		sst[i].fname = reading.c_str();
		sst[i].line = 0;
		sst[i].layer_seq = &layer_seq[i];
		sst[i].progress_seq = &progress_seq;
		sst[i].readers = &readers;
		sst[i].segment = i;
		sst[i].initial_x = &initial_x[i];
		sst[i].initial_y = &initial_y[i];
		sst[i].initialized = &initialized[i];
		sst[i].dist_sum = &dist_sums[i];
		sst[i].dist_count = &dist_counts[i];
		sst[i].want_dist = guess_maxzoom;
		sst[i].maxzoom = maxzoom;
		sst[i].filters = prefilter != NULL || postfilter != NULL;
		sst[i].uses_gamma = uses_gamma;
		sst[i].layermap = &layermaps[i];
		sst[i].exclude = exclude;
		sst[i].include = include;
		sst[i].exclude_all = exclude_all;
		sst[i].basezoom = basezoom;
		sst[i].attribute_types = attribute_types;
	}

	// Fix: was sources[layer].layer. `layer` is an id into the layermap,
	// not an index into `sources`; the two only coincide when layers are
	// assigned one per source in order. The layer name wanted here is the
	// current source's.
	parse_flatgeobuf(&sst, map, st.st_size, layer, sources[source].layer);

	for (size_t i = 0; i < CPUS; i++) {
		dist_sum += dist_sums[i];
		dist_count += dist_counts[i];
	}

	if (munmap(map, st.st_size) != 0) {
		perror("munmap source file");
		exit(EXIT_FAILURE);
	}
	if (close(fd) != 0) {
		perror("close");
		exit(EXIT_FAILURE);
	}

	// NOTE(review): assumes the parser leaves the consolidated feature
	// sequence in layer_seq[0] — confirm against parse_flatgeobuf().
	overall_offset = layer_seq[0];
	checkdisk(&readers);
	continue;
}

// geobuf
if (sources[source].format == "geobuf" || (sources[source].file.size() > 7 && sources[source].file.substr(sources[source].file.size() - 7) == std::string(".geobuf"))) {
struct stat st;
if (fstat(fd, &st) != 0) {