mirror of
https://github.com/zerotier/ZeroTierOne.git
synced 2025-01-29 15:43:52 +00:00
RethinkDB native connector work, minor fixes.
This commit is contained in:
parent
a6203ed038
commit
4e88c80a22
4
.gitignore
vendored
4
.gitignore
vendored
@ -112,3 +112,7 @@ build/
|
||||
!default.perspectivev3
|
||||
*.xccheckout
|
||||
xcuserdata/
|
||||
ext/librethinkdbxx/build
|
||||
.vscode
|
||||
__pycache__
|
||||
*~
|
||||
|
308
controller/RethinkDB.cpp
Normal file
308
controller/RethinkDB.cpp
Normal file
@ -0,0 +1,308 @@
|
||||
#include "RethinkDB.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <algorithm>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "../ext/librethinkdbxx/build/include/rethinkdb.h"
|
||||
|
||||
namespace R = RethinkDB;
|
||||
using nlohmann::json;
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
RethinkDB::RethinkDB(const Address &myAddress,const char *host,const int port,const char *db,const char *auth) :
|
||||
_myAddress(myAddress),
|
||||
_host(host ? host : "127.0.0.1"),
|
||||
_db(db),
|
||||
_auth(auth ? auth : ""),
|
||||
_port((port > 0) ? port : 28015),
|
||||
_ready(2), // two tables need to be synchronized before we're ready
|
||||
_run(1)
|
||||
{
|
||||
_readyLock.lock();
|
||||
|
||||
{
|
||||
char tmp[32];
|
||||
_myAddress.toString(tmp);
|
||||
_myAddressStr = tmp;
|
||||
}
|
||||
|
||||
_membersDbWatcher = std::thread([this]() {
|
||||
while (_run == 1) {
|
||||
try {
|
||||
auto rdb = R::connect(this->_host,this->_port,this->_auth);
|
||||
if (rdb) {
|
||||
_membersDbWatcherConnection = (void *)rdb.get();
|
||||
auto cur = R::db(this->_db).table("Member").get_all(this->_myAddressStr,R::optargs("index","controllerId")).changes(R::optargs("squash",0.1,"include_initial",true,"include_types",true,"include_states",true)).run(*rdb);
|
||||
while (cur.has_next()) {
|
||||
if (_run != 1) break;
|
||||
json tmp(json::parse(cur.next().as_json()));
|
||||
if ((tmp["type"] == "state")&&(tmp["state"] == "ready")) {
|
||||
if (--this->_ready == 0)
|
||||
this->_readyLock.unlock();
|
||||
} else {
|
||||
try {
|
||||
this->_memberChanged(tmp["old_val"],tmp["new_val"]);
|
||||
} catch ( ... ) {} // ignore bad records
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: %s" ZT_EOL_S,e.what());
|
||||
} catch (R::Error &e) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: %s" ZT_EOL_S,e.message.c_str());
|
||||
} catch ( ... ) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: unknown exception" ZT_EOL_S);
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(250));
|
||||
}
|
||||
});
|
||||
|
||||
_networksDbWatcher = std::thread([this]() {
|
||||
while (_run == 1) {
|
||||
try {
|
||||
auto rdb = R::connect(this->_host,this->_port,this->_auth);
|
||||
if (rdb) {
|
||||
_membersDbWatcherConnection = (void *)rdb.get();
|
||||
auto cur = R::db(this->_db).table("Network").get_all(this->_myAddressStr,R::optargs("index","controllerId")).changes(R::optargs("squash",0.1,"include_initial",true,"include_types",true,"include_states",true)).run(*rdb);
|
||||
while (cur.has_next()) {
|
||||
if (_run != 1) break;
|
||||
json tmp(json::parse(cur.next().as_json()));
|
||||
if ((tmp["type"] == "state")&&(tmp["state"] == "ready")) {
|
||||
if (--this->_ready == 0)
|
||||
this->_readyLock.unlock();
|
||||
} else {
|
||||
try {
|
||||
this->_networkChanged(tmp["old_val"],tmp["new_val"]);
|
||||
} catch ( ... ) {} // ignore bad records
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: %s" ZT_EOL_S,e.what());
|
||||
} catch (R::Error &e) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: %s" ZT_EOL_S,e.message.c_str());
|
||||
} catch ( ... ) {
|
||||
fprintf(stderr,"ERROR: controller RethinkDB: unknown exception" ZT_EOL_S);
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(250));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
RethinkDB::~RethinkDB()
|
||||
{
|
||||
// FIXME: not totally safe but will generally work, and only happens on shutdown anyway
|
||||
_run = 0;
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(10));
|
||||
if (_membersDbWatcherConnection)
|
||||
((R::Connection *)_membersDbWatcherConnection)->close();
|
||||
if (_networksDbWatcherConnection)
|
||||
((R::Connection *)_networksDbWatcherConnection)->close();
|
||||
_membersDbWatcher.join();
|
||||
_networksDbWatcher.join();
|
||||
}
|
||||
|
||||
inline bool RethinkDB::get(const uint64_t networkId,nlohmann::json &network)
|
||||
{
|
||||
std::shared_ptr<_Network> nw;
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
auto nwi = _networks.find(networkId);
|
||||
if (nwi == _networks.end())
|
||||
return false;
|
||||
nw = nwi->second;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> l2(nw->lock);
|
||||
network = nw->config;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool RethinkDB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,NetworkSummaryInfo &info)
|
||||
{
|
||||
std::shared_ptr<_Network> nw;
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
auto nwi = _networks.find(networkId);
|
||||
if (nwi == _networks.end())
|
||||
return false;
|
||||
nw = nwi->second;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> l2(nw->lock);
|
||||
auto m = nw->members.find(memberId);
|
||||
if (m == nw->members.end())
|
||||
return false;
|
||||
network = nw->config;
|
||||
member = m->second;
|
||||
_fillSummaryInfo(nw,info);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool RethinkDB::get(const uint64_t networkId,nlohmann::json &network,std::vector<nlohmann::json> &members)
|
||||
{
|
||||
std::shared_ptr<_Network> nw;
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
auto nwi = _networks.find(networkId);
|
||||
if (nwi == _networks.end())
|
||||
return false;
|
||||
nw = nwi->second;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> l2(nw->lock);
|
||||
network = nw->config;
|
||||
for(auto m=nw->members.begin();m!=nw->members.end();++m)
|
||||
members.push_back(m->second);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool RethinkDB::summary(const uint64_t networkId,NetworkSummaryInfo &info)
|
||||
{
|
||||
std::shared_ptr<_Network> nw;
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
auto nwi = _networks.find(networkId);
|
||||
if (nwi == _networks.end())
|
||||
return false;
|
||||
nw = nwi->second;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> l2(nw->lock);
|
||||
_fillSummaryInfo(nw,info);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void RethinkDB::_memberChanged(nlohmann::json &old,nlohmann::json &member)
|
||||
{
|
||||
uint64_t memberId = 0;
|
||||
uint64_t networkId = 0;
|
||||
std::shared_ptr<_Network> nw;
|
||||
|
||||
if (old.is_object()) {
|
||||
json &config = old["config"];
|
||||
if (config.is_object()) {
|
||||
memberId = OSUtils::jsonIntHex(config["id"],0ULL);
|
||||
networkId = OSUtils::jsonIntHex(config["nwid"],0ULL);
|
||||
if ((memberId)&&(networkId)) {
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
auto nw2 = _networks.find(networkId);
|
||||
if (nw2 != _networks.end())
|
||||
nw = nw2->second;
|
||||
}
|
||||
if (nw) {
|
||||
std::lock_guard<std::mutex> l(nw->lock);
|
||||
if (OSUtils::jsonBool(config["activeBridge"],false))
|
||||
nw->activeBridgeMembers.erase(memberId);
|
||||
if (OSUtils::jsonBool(config["authorized"],false))
|
||||
nw->authorizedMembers.erase(memberId);
|
||||
json &ips = config["ipAssignments"];
|
||||
if (ips.is_array()) {
|
||||
for(unsigned long i=0;i<ips.size();++i) {
|
||||
json &ipj = ips[i];
|
||||
if (ipj.is_string()) {
|
||||
const std::string ips = ipj;
|
||||
InetAddress ipa(ips.c_str());
|
||||
ipa.setPort(0);
|
||||
nw->allocatedIps.erase(ipa);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (member.is_object()) {
|
||||
json &config = member["config"];
|
||||
if (config.is_object()) {
|
||||
if (!nw) {
|
||||
memberId = OSUtils::jsonIntHex(config["id"],0ULL);
|
||||
networkId = OSUtils::jsonIntHex(config["nwid"],0ULL);
|
||||
if ((!memberId)||(!networkId))
|
||||
return;
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
std::shared_ptr<_Network> &nw2 = _networks[networkId];
|
||||
if (!nw2)
|
||||
nw2.reset(new _Network);
|
||||
nw = nw2;
|
||||
}
|
||||
std::lock_guard<std::mutex> l(nw->lock);
|
||||
|
||||
nw->members[memberId] = config;
|
||||
|
||||
if (OSUtils::jsonBool(config["activeBridge"],false))
|
||||
nw->activeBridgeMembers.insert(memberId);
|
||||
const bool isAuth = OSUtils::jsonBool(config["authorized"],false);
|
||||
if (isAuth)
|
||||
nw->authorizedMembers.insert(memberId);
|
||||
json &ips = config["ipAssignments"];
|
||||
if (ips.is_array()) {
|
||||
for(unsigned long i=0;i<ips.size();++i) {
|
||||
json &ipj = ips[i];
|
||||
if (ipj.is_string()) {
|
||||
const std::string ips = ipj;
|
||||
InetAddress ipa(ips.c_str());
|
||||
ipa.setPort(0);
|
||||
nw->allocatedIps.insert(ipa);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!isAuth) {
|
||||
const int64_t ldt = (int64_t)OSUtils::jsonInt(config["lastDeauthorizedTime"],0ULL);
|
||||
if (ldt > nw->mostRecentDeauthTime)
|
||||
nw->mostRecentDeauthTime = ldt;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RethinkDB::_networkChanged(nlohmann::json &old,nlohmann::json &network)
|
||||
{
|
||||
if (network.is_object()) {
|
||||
json &config = network["config"];
|
||||
if (config.is_object()) {
|
||||
const std::string ids = config["id"];
|
||||
const uint64_t id = Utils::hexStrToU64(ids.c_str());
|
||||
if (id) {
|
||||
std::shared_ptr<_Network> nw;
|
||||
{
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
std::shared_ptr<_Network> &nw2 = _networks[id];
|
||||
if (!nw2)
|
||||
nw2.reset(new _Network);
|
||||
nw = nw2;
|
||||
}
|
||||
std::lock_guard<std::mutex> l2(nw->lock);
|
||||
nw->config = config;
|
||||
}
|
||||
}
|
||||
} else if (old.is_object()) {
|
||||
const std::string ids = old["id"];
|
||||
const uint64_t id = Utils::hexStrToU64(ids.c_str());
|
||||
if (id) {
|
||||
std::lock_guard<std::mutex> l(_networks_l);
|
||||
_networks.erase(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
||||
/*
|
||||
int main(int argc,char **argv)
|
||||
{
|
||||
ZeroTier::RethinkDB db(ZeroTier::Address(0x8056c2e21cULL),"10.6.6.188",28015,"ztc","");
|
||||
db.waitForReady();
|
||||
printf("ready.\n");
|
||||
pause();
|
||||
}
|
||||
*/
|
101
controller/RethinkDB.hpp
Normal file
101
controller/RethinkDB.hpp
Normal file
@ -0,0 +1,101 @@
|
||||
#ifndef ZT_CONTROLLER_RETHINKDB_HPP
|
||||
#define ZT_CONTROLLER_RETHINKDB_HPP
|
||||
|
||||
#include "../node/Constants.hpp"
|
||||
#include "../node/Address.hpp"
|
||||
#include "../node/InetAddress.hpp"
|
||||
#include "../osdep/OSUtils.hpp"
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
#include "../ext/json/json.hpp"
|
||||
|
||||
namespace ZeroTier
|
||||
{
|
||||
|
||||
class RethinkDB
|
||||
{
|
||||
public:
|
||||
struct NetworkSummaryInfo
|
||||
{
|
||||
NetworkSummaryInfo() : authorizedMemberCount(0),totalMemberCount(0),mostRecentDeauthTime(0) {}
|
||||
std::vector<Address> activeBridges;
|
||||
std::vector<InetAddress> allocatedIps;
|
||||
unsigned long authorizedMemberCount;
|
||||
unsigned long totalMemberCount;
|
||||
int64_t mostRecentDeauthTime;
|
||||
};
|
||||
|
||||
RethinkDB(const Address &myAddress,const char *host,const int port,const char *db,const char *auth);
|
||||
~RethinkDB();
|
||||
|
||||
inline bool ready() const { return (_ready <= 0); }
|
||||
|
||||
inline void waitForReady() const
|
||||
{
|
||||
while (_ready > 0) {
|
||||
_readyLock.lock();
|
||||
_readyLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
bool get(const uint64_t networkId,nlohmann::json &network);
|
||||
bool get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,NetworkSummaryInfo &info);
|
||||
bool get(const uint64_t networkId,nlohmann::json &network,std::vector<nlohmann::json> &members);
|
||||
bool summary(const uint64_t networkId,NetworkSummaryInfo &info);
|
||||
|
||||
private:
|
||||
struct _Network
|
||||
{
|
||||
_Network() : mostRecentDeauthTime(0) {}
|
||||
nlohmann::json config;
|
||||
std::unordered_map<uint64_t,nlohmann::json> members;
|
||||
std::unordered_set<uint64_t> activeBridgeMembers;
|
||||
std::unordered_set<uint64_t> authorizedMembers;
|
||||
std::unordered_set<InetAddress,InetAddress::Hasher> allocatedIps;
|
||||
int64_t mostRecentDeauthTime;
|
||||
std::mutex lock;
|
||||
};
|
||||
|
||||
void _memberChanged(nlohmann::json &old,nlohmann::json &member);
|
||||
void _networkChanged(nlohmann::json &old,nlohmann::json &network);
|
||||
|
||||
inline void _fillSummaryInfo(const std::shared_ptr<_Network> &nw,NetworkSummaryInfo &info)
|
||||
{
|
||||
for(auto ab=nw->activeBridgeMembers.begin();ab!=nw->activeBridgeMembers.end();++ab)
|
||||
info.activeBridges.push_back(Address(*ab));
|
||||
for(auto ip=nw->allocatedIps.begin();ip!=nw->allocatedIps.end();++ip)
|
||||
info.allocatedIps.push_back(*ip);
|
||||
info.authorizedMemberCount = (unsigned long)nw->authorizedMembers.size();
|
||||
info.totalMemberCount = (unsigned long)nw->members.size();
|
||||
info.mostRecentDeauthTime = nw->mostRecentDeauthTime;
|
||||
}
|
||||
|
||||
const Address _myAddress;
|
||||
std::string _myAddressStr;
|
||||
std::string _host;
|
||||
std::string _db;
|
||||
std::string _auth;
|
||||
const int _port;
|
||||
|
||||
void *_networksDbWatcherConnection;
|
||||
void *_membersDbWatcherConnection;
|
||||
std::thread _networksDbWatcher;
|
||||
std::thread _membersDbWatcher;
|
||||
|
||||
std::unordered_map< uint64_t,std::shared_ptr<_Network> > _networks;
|
||||
std::mutex _networks_l;
|
||||
|
||||
mutable std::mutex _readyLock; // locked until ready
|
||||
std::atomic<int> _ready;
|
||||
std::atomic<int> _run;
|
||||
};
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
||||
#endif
|
11
ext/librethinkdbxx/.travis.yml
Normal file
11
ext/librethinkdbxx/.travis.yml
Normal file
@ -0,0 +1,11 @@
|
||||
sudo: required
|
||||
dist: trusty
|
||||
|
||||
python:
|
||||
- "3.4.3"
|
||||
|
||||
addons:
|
||||
rethinkdb: "2.3"
|
||||
|
||||
script:
|
||||
- make test
|
16
ext/librethinkdbxx/COPYRIGHT
Normal file
16
ext/librethinkdbxx/COPYRIGHT
Normal file
@ -0,0 +1,16 @@
|
||||
RethinkDB Language Drivers
|
||||
|
||||
Copyright 2010-2012 RethinkDB
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this product except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
126
ext/librethinkdbxx/Makefile
Normal file
126
ext/librethinkdbxx/Makefile
Normal file
@ -0,0 +1,126 @@
|
||||
# Customisable build settings
|
||||
|
||||
CXX ?= clang++
|
||||
CXXFLAGS ?=
|
||||
INCLUDE_PYTHON_DOCS ?= no
|
||||
DEBUG ?= no
|
||||
PYTHON ?= python3
|
||||
|
||||
# Required build settings
|
||||
|
||||
ifneq (no,$(DEBUG))
|
||||
CXXFLAGS += -ggdb
|
||||
else
|
||||
CXXFLAGS += -O3 # -flto
|
||||
endif
|
||||
|
||||
CXXFLAGS += -std=c++11 -I'build/gen' -Wall -pthread -fPIC
|
||||
|
||||
prefix ?= /usr
|
||||
DESTDIR ?=
|
||||
|
||||
.DELETE_ON_ERROR:
|
||||
SHELL := /bin/bash
|
||||
|
||||
modules := connection datum json term cursor types utils
|
||||
headers := utils error exceptions types datum connection cursor term
|
||||
|
||||
o_files := $(patsubst %, build/obj/%.o, $(modules))
|
||||
d_files := $(patsubst %, build/dep/%.d, $(modules))
|
||||
|
||||
skip_tests := regression/1133 regression/767 regression/1005 # python-only
|
||||
skip_tests += arity # arity errors are compile-time
|
||||
skip_tests += geo # geo types not implemented yet
|
||||
skip_tests += limits # possibly broken tests: https://github.com/rethinkdb/rethinkdb/issues/5940
|
||||
|
||||
upstream_tests := \
|
||||
$(filter-out %.rb.%, \
|
||||
$(filter-out $(patsubst %,test/upstream/%%, $(skip_tests)), \
|
||||
$(filter test/upstream/$(test_filter)%, \
|
||||
$(shell find test/upstream -name '*.yaml' | egrep -v '.(rb|js).yaml$$'))))
|
||||
upstream_tests_cc := $(patsubst %.yaml, build/tests/%.cc, $(upstream_tests))
|
||||
upstream_tests_o := $(patsubst %.cc, %.o, $(upstream_tests_cc))
|
||||
|
||||
.PRECIOUS: $(upstream_tests_cc) $(upstream_tests_o)
|
||||
|
||||
default: build/librethinkdb++.a build/include/rethinkdb.h build/librethinkdb++.so
|
||||
|
||||
all: default build/test
|
||||
|
||||
build/librethinkdb++.a: $(o_files)
|
||||
ar rcs $@ $^
|
||||
|
||||
build/librethinkdb++.so: $(o_files)
|
||||
$(CXX) -o $@ $(CXXFLAGS) -shared $^
|
||||
|
||||
build/obj/%.o: src/%.cc build/gen/protocol_defs.h
|
||||
@mkdir -p $(dir $@)
|
||||
@mkdir -p $(dir build/dep/$*.d)
|
||||
$(CXX) -o $@ $(CXXFLAGS) -c $< -MP -MQ $@ -MD -MF build/dep/$*.d
|
||||
|
||||
build/gen/protocol_defs.h: reql/ql2.proto reql/gen.py | build/gen/.
|
||||
$(PYTHON) reql/gen.py $< > $@
|
||||
|
||||
clean:
|
||||
rm -rf build
|
||||
|
||||
ifneq (no,$(INCLUDE_PYTHON_DOCS))
|
||||
build/include/rethinkdb.h: build/rethinkdb.nodocs.h reql/add_docs.py reql/python_docs.txt | build/include/.
|
||||
$(PYTHON) reql/add_docs.py reql/python_docs.txt < $< > $@
|
||||
else
|
||||
build/include/rethinkdb.h: build/rethinkdb.nodocs.h | build/include/.
|
||||
cp $< $@
|
||||
endif
|
||||
|
||||
build/rethinkdb.nodocs.h: build/gen/protocol_defs.h $(patsubst %, src/%.h, $(headers))
|
||||
( echo "// Auto-generated file, built from $^"; \
|
||||
echo '#pragma once'; \
|
||||
cat $^ | \
|
||||
grep -v '^#pragma once' | \
|
||||
grep -v '^#include "'; \
|
||||
) > $@
|
||||
|
||||
build/tests/%.cc: %.yaml test/yaml_to_cxx.py
|
||||
@mkdir -p $(dir $@)
|
||||
$(PYTHON) test/yaml_to_cxx.py $< > $@
|
||||
|
||||
build/tests/upstream_tests.cc: $(upstream_tests) test/gen_index_cxx.py FORCE | build/tests/.
|
||||
@echo '$(PYTHON) test/gen_index_cxx.py $(wordlist 1,5,$(upstream_tests)) ... > $@'
|
||||
@$(PYTHON) test/gen_index_cxx.py $(upstream_tests) > $@
|
||||
|
||||
build/tests/%.o: build/tests/%.cc build/include/rethinkdb.h test/testlib.h | build/tests/.
|
||||
$(CXX) -o $@ $(CXXFLAGS) -isystem build/include -I test -c $< -Wno-unused-variable
|
||||
|
||||
build/tests/%.o: test/%.cc test/testlib.h build/include/rethinkdb.h | build/tests/.
|
||||
$(CXX) -o $@ $(CXXFLAGS) -isystem build/include -I test -c $<
|
||||
|
||||
build/test: build/tests/testlib.o build/tests/test.o build/tests/upstream_tests.o $(upstream_tests_o) build/librethinkdb++.a
|
||||
@echo $(CXX) -o $@ $(CXXFLAGS) $(wordlist 1,5,$^) ...
|
||||
@$(CXX) -o $@ $(CXXFLAGS) build/librethinkdb++.a $^
|
||||
|
||||
.PHONY: test
|
||||
test: build/test
|
||||
build/test
|
||||
|
||||
build/bench: build/tests/bench.o build/librethinkdb++.a
|
||||
@$(CXX) -o $@ $(CXXFLAGS) -isystem build/include build/librethinkdb++.a $^
|
||||
|
||||
.PHONY: bench
|
||||
bench: build/bench
|
||||
build/bench
|
||||
|
||||
.PHONY: install
|
||||
install: build/librethinkdb++.a build/include/rethinkdb.h build/librethinkdb++.so
|
||||
install -m755 -d $(DESTDIR)$(prefix)/lib
|
||||
install -m755 -d $(DESTDIR)$(prefix)/include
|
||||
install -m644 build/librethinkdb++.a $(DESTDIR)$(prefix)/lib/librethinkdb++.a
|
||||
install -m644 build/librethinkdb++.so $(DESTDIR)$(prefix)/lib/librethinkdb++.so
|
||||
install -m644 build/include/rethinkdb.h $(DESTDIR)$(prefix)/include/rethinkdb.h
|
||||
|
||||
%/.:
|
||||
mkdir -p $*
|
||||
|
||||
.PHONY: FORCE
|
||||
FORCE:
|
||||
|
||||
-include $(d_files)
|
72
ext/librethinkdbxx/README.md
Normal file
72
ext/librethinkdbxx/README.md
Normal file
@ -0,0 +1,72 @@
|
||||
# RethinkDB driver for C++
|
||||
|
||||
This driver is compatible with RethinkDB 2.0. It is based on the
|
||||
official RethinkDB Python driver.
|
||||
|
||||
* [RethinkDB server](http://rethinkdb.com/)
|
||||
* [RethinkDB API docs](http://rethinkdb.com/api/python/)
|
||||
|
||||
## Example
|
||||
|
||||
```
|
||||
#include <memory>
|
||||
#include <cstdio>
|
||||
#include <rethinkdb.h>
|
||||
|
||||
namespace R = RethinkDB;
|
||||
|
||||
int main() {
|
||||
std::unique_ptr<R::Connection> conn = R::connect("localhost", 28015);
|
||||
R::Cursor cursor = R::table("users").filter(R::row["age"] > 14).run(*conn);
|
||||
for (R::Datum& user : cursor) {
|
||||
printf("%s\n", user.as_json().c_str());
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Build
|
||||
|
||||
Requires a modern C++ compiler. to build and install, run:
|
||||
|
||||
```
|
||||
make
|
||||
make install
|
||||
```
|
||||
|
||||
Will build `include/rethinkdb.h`, `librethinkdb++.a` and
|
||||
`librethinkdb++.so` into the `build/` directory.
|
||||
|
||||
To include documentation from the Python driver in the header file,
|
||||
pass the following argument to make.
|
||||
|
||||
```
|
||||
make INCLUDE_PYTHON_DOCS=yes
|
||||
```
|
||||
|
||||
To build in debug mode:
|
||||
|
||||
```
|
||||
make DEBUG=yes
|
||||
```
|
||||
|
||||
To install to a specific location:
|
||||
|
||||
```
|
||||
make install prefix=/usr/local DESTDIR=
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Still in early stages of development.
|
||||
|
||||
## Tests
|
||||
|
||||
This driver is tested against the upstream ReQL tests from the
|
||||
RethinkDB repo, which are programmatically translated from Python to
|
||||
C++. As of 34dc13c, all tests pass:
|
||||
|
||||
```
|
||||
$ make test
|
||||
...
|
||||
SUCCESS: 2053 tests passed
|
||||
```
|
80
ext/librethinkdbxx/reql/add_docs.py
Normal file
80
ext/librethinkdbxx/reql/add_docs.py
Normal file
@ -0,0 +1,80 @@
|
||||
from sys import stdin, stderr, stdout, argv
|
||||
from re import match, sub
|
||||
|
||||
docs = {}
|
||||
|
||||
for line in open(argv[1]):
|
||||
res = match('^\t\(([^,]*), (.*)\),$', line)
|
||||
if res:
|
||||
fullname = res.group(1)
|
||||
docs[fullname.split('.')[-1]] = eval(res.group(2)).decode('utf-8')
|
||||
|
||||
translate_name = {
|
||||
'name': None,
|
||||
'delete_': 'delete',
|
||||
'union_': 'union',
|
||||
'operator[]': '__getitem__',
|
||||
'operator+': '__add__',
|
||||
'operator-': '__sub__',
|
||||
'operator*': '__mul__',
|
||||
'operator/': '__div__',
|
||||
'operator%': '__mod__',
|
||||
'operator&&': 'and_',
|
||||
'operator||': 'or_',
|
||||
'operator==': '__eq__',
|
||||
'operator!=': '__ne__',
|
||||
'operator>': '__gt__',
|
||||
'operator>=': '__ge__',
|
||||
'operator<': '__lt__',
|
||||
'operator<=': '__le__',
|
||||
'operator!': 'not_',
|
||||
'default_': 'default',
|
||||
'array': None,
|
||||
'desc': None,
|
||||
'asc': None,
|
||||
'maxval': None,
|
||||
'minval': None,
|
||||
'january': None,
|
||||
'february': None,
|
||||
'march': None,
|
||||
'april': None,
|
||||
'may': None,
|
||||
'june': None,
|
||||
'july': None,
|
||||
'august': None,
|
||||
'september': None,
|
||||
'october': None,
|
||||
'november': None,
|
||||
'december': None,
|
||||
'monday': None,
|
||||
'tuesday': None,
|
||||
'wednesday': None,
|
||||
'thursday': None,
|
||||
'friday': None,
|
||||
'saturday': None,
|
||||
'sunday': None,
|
||||
}
|
||||
|
||||
def print_docs(name, line):
|
||||
py_name = translate_name.get(name, name)
|
||||
if py_name in docs:
|
||||
indent = match("^( *)", line).group(1)
|
||||
stdout.write('\n')
|
||||
# TODO: convert the examples to C++
|
||||
for line in docs[py_name].split('\n'):
|
||||
stdout.write(indent + "// " + line + '\n')
|
||||
elif py_name:
|
||||
stderr.write('Warning: no docs for ' + py_name + ': ' + line)
|
||||
|
||||
stdout.write('// Contains documentation copied as-is from the Python driver')
|
||||
|
||||
for line in stdin:
|
||||
res = match("^ *CO?[0-9_]+\(([^,)]+)|extern Query (\w+)|^ *// *(\$)doc\((\w+)\) *$", line)
|
||||
if res:
|
||||
name = res.group(1) or res.group(2) or res.group(4)
|
||||
print_docs(name, line)
|
||||
if not res.group(3):
|
||||
stdout.write(line)
|
||||
else:
|
||||
stdout.write(line)
|
||||
|
33
ext/librethinkdbxx/reql/gen.py
Normal file
33
ext/librethinkdbxx/reql/gen.py
Normal file
@ -0,0 +1,33 @@
|
||||
from sys import argv
|
||||
from re import sub, finditer, VERBOSE
|
||||
|
||||
def gen(defs):
|
||||
indent = 0
|
||||
enum = False
|
||||
def p(s): print(" " * (indent * 4) + s)
|
||||
for item in finditer("""
|
||||
(?P<type> message|enum) \\s+ (?P<name> \\w+) \\s* \\{ |
|
||||
(?P<var> \\w+) \\s* = \\s* (?P<val> \\w+) \\s* ; |
|
||||
\\}
|
||||
""", defs, flags=VERBOSE):
|
||||
if item.group(0) == "}":
|
||||
indent = indent - 1
|
||||
p("};" if enum else "}")
|
||||
enum = False;
|
||||
elif item.group('type') == 'enum':
|
||||
p("enum class %s {" % item.group('name'))
|
||||
indent = indent + 1
|
||||
enum = True
|
||||
elif item.group('type') == 'message':
|
||||
p("namespace %s {" % item.group('name'))
|
||||
indent = indent + 1
|
||||
enum = False
|
||||
else:
|
||||
if enum:
|
||||
p("%s = %s," % (item.group('var'), item.group('val')))
|
||||
|
||||
print("// Auto-generated by reql/gen.py")
|
||||
print("#pragma once")
|
||||
print("namespace RethinkDB { namespace Protocol {")
|
||||
gen(sub("//.*", "", open(argv[1]).read()))
|
||||
print("} }")
|
189
ext/librethinkdbxx/reql/python_docs.txt
Normal file
189
ext/librethinkdbxx/reql/python_docs.txt
Normal file
File diff suppressed because one or more lines are too long
843
ext/librethinkdbxx/reql/ql2.proto
Normal file
843
ext/librethinkdbxx/reql/ql2.proto
Normal file
@ -0,0 +1,843 @@
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// THE HIGH-LEVEL VIEW //
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Process: When you first open a connection, send the magic number
|
||||
// for the version of the protobuf you're targeting (in the [Version]
|
||||
// enum). This should **NOT** be sent as a protobuf; just send the
|
||||
// little-endian 32-bit integer over the wire raw. This number should
|
||||
// only be sent once per connection.
|
||||
|
||||
// The magic number shall be followed by an authorization key. The
|
||||
// first 4 bytes are the length of the key to be sent as a little-endian
|
||||
// 32-bit integer, followed by the key string. Even if there is no key,
|
||||
// an empty string should be sent (length 0 and no data).
|
||||
|
||||
// Following the authorization key, the client shall send a magic number
|
||||
// for the communication protocol they want to use (in the [Protocol]
|
||||
// enum). This shall be a little-endian 32-bit integer.
|
||||
|
||||
// The server will then respond with a NULL-terminated string response.
|
||||
// "SUCCESS" indicates that the connection has been accepted. Any other
|
||||
// response indicates an error, and the response string should describe
|
||||
// the error.
|
||||
|
||||
// Next, for each query you want to send, construct a [Query] protobuf
|
||||
// and serialize it to a binary blob. Send the blob's size to the
|
||||
// server encoded as a little-endian 32-bit integer, followed by the
|
||||
// blob itself. You will recieve a [Response] protobuf back preceded
|
||||
// by its own size, once again encoded as a little-endian 32-bit
|
||||
// integer. You can see an example exchange below in **EXAMPLE**.
|
||||
|
||||
// A query consists of a [Term] to evaluate and a unique-per-connection
|
||||
// [token].
|
||||
|
||||
// Tokens are used for two things:
|
||||
// * Keeping track of which responses correspond to which queries.
|
||||
// * Batched queries. Some queries return lots of results, so we send back
|
||||
// batches of <1000, and you need to send a [CONTINUE] query with the same
|
||||
// token to get more results from the original query.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
message VersionDummy { // We need to wrap it like this for some
|
||||
// non-conforming protobuf libraries
|
||||
// This enum contains the magic numbers for your version. See **THE HIGH-LEVEL
|
||||
// VIEW** for what to do with it.
|
||||
enum Version {
|
||||
V0_1 = 0x3f61ba36;
|
||||
V0_2 = 0x723081e1; // Authorization key during handshake
|
||||
V0_3 = 0x5f75e83e; // Authorization key and protocol during handshake
|
||||
V0_4 = 0x400c2d20; // Queries execute in parallel
|
||||
V1_0 = 0x34c2bdc3; // Users and permissions
|
||||
}
|
||||
|
||||
// The protocol to use after the handshake, specified in V0_3
|
||||
enum Protocol {
|
||||
PROTOBUF = 0x271ffc41;
|
||||
JSON = 0x7e6970c7;
|
||||
}
|
||||
}
|
||||
|
||||
// You send one of:
|
||||
// * A [START] query with a [Term] to evaluate and a unique-per-connection token.
|
||||
// * A [CONTINUE] query with the same token as a [START] query that returned
|
||||
// [SUCCESS_PARTIAL] in its [Response].
|
||||
// * A [STOP] query with the same token as a [START] query that you want to stop.
|
||||
// * A [NOREPLY_WAIT] query with a unique per-connection token. The server answers
|
||||
// with a [WAIT_COMPLETE] [Response].
|
||||
// * A [SERVER_INFO] query. The server answers with a [SERVER_INFO] [Response].
|
||||
message Query {
|
||||
enum QueryType {
|
||||
START = 1; // Start a new query.
|
||||
CONTINUE = 2; // Continue a query that returned [SUCCESS_PARTIAL]
|
||||
// (see [Response]).
|
||||
STOP = 3; // Stop a query partway through executing.
|
||||
NOREPLY_WAIT = 4; // Wait for noreply operations to finish.
|
||||
SERVER_INFO = 5; // Get server information.
|
||||
}
|
||||
optional QueryType type = 1;
|
||||
// A [Term] is how we represent the operations we want a query to perform.
|
||||
optional Term query = 2; // only present when [type] = [START]
|
||||
optional int64 token = 3;
|
||||
// This flag is ignored on the server. `noreply` should be added
|
||||
// to `global_optargs` instead (the key "noreply" should map to
|
||||
// either true or false).
|
||||
optional bool OBSOLETE_noreply = 4 [default = false];
|
||||
|
||||
// If this is set to [true], then [Datum] values will sometimes be
|
||||
// of [DatumType] [R_JSON] (see below). This can provide enormous
|
||||
// speedups in languages with poor protobuf libraries.
|
||||
optional bool accepts_r_json = 5 [default = false];
|
||||
|
||||
message AssocPair {
|
||||
optional string key = 1;
|
||||
optional Term val = 2;
|
||||
}
|
||||
repeated AssocPair global_optargs = 6;
|
||||
}
|
||||
|
||||
// A backtrace frame (see `backtrace` in Response below)
|
||||
message Frame {
|
||||
enum FrameType {
|
||||
POS = 1; // Error occurred in a positional argument.
|
||||
OPT = 2; // Error occurred in an optional argument.
|
||||
}
|
||||
optional FrameType type = 1;
|
||||
optional int64 pos = 2; // The index of the positional argument.
|
||||
optional string opt = 3; // The name of the optional argument.
|
||||
}
|
||||
message Backtrace {
|
||||
repeated Frame frames = 1;
|
||||
}
|
||||
|
||||
// You get back a response with the same [token] as your query.
|
||||
message Response {
    enum ResponseType {
        // These response types indicate success.
        SUCCESS_ATOM     = 1; // Query returned a single RQL datatype.
        SUCCESS_SEQUENCE = 2; // Query returned a sequence of RQL datatypes.
        SUCCESS_PARTIAL  = 3; // Query returned a partial sequence of RQL
                              // datatypes.  If you send a [CONTINUE] query with
                              // the same token as this response, you will get
                              // more of the sequence.  Keep sending [CONTINUE]
                              // queries until you get back [SUCCESS_SEQUENCE].
        WAIT_COMPLETE    = 4; // A [NOREPLY_WAIT] query completed.
        SERVER_INFO      = 5; // The data for a [SERVER_INFO] request.  This is
                              // the same as `SUCCESS_ATOM` except that there will
                              // never be profiling data.

        // These response types indicate failure.
        CLIENT_ERROR  = 16; // Means the client is buggy.  An example is if the
                            // client sends a malformed protobuf, or tries to
                            // send [CONTINUE] for an unknown token.
        COMPILE_ERROR = 17; // Means the query failed during parsing or type
                            // checking.  For example, if you pass too many
                            // arguments to a function.
        RUNTIME_ERROR = 18; // Means the query failed at runtime.  An example is
                            // if you add together two values from a table, but
                            // they turn out at runtime to be booleans rather
                            // than numbers.
    }
    optional ResponseType type = 1;

    // If `ResponseType` is `RUNTIME_ERROR`, this may be filled in with more
    // information about the error.
    enum ErrorType {
        INTERNAL         = 1000000;
        RESOURCE_LIMIT   = 2000000;
        QUERY_LOGIC      = 3000000;
        NON_EXISTENCE    = 3100000;
        OP_FAILED        = 4100000;
        OP_INDETERMINATE = 4200000;
        USER             = 5000000;
        PERMISSION_ERROR = 6000000;
    }
    optional ErrorType error_type = 7;

    // ResponseNotes are used to provide information about the query
    // response that may be useful for people writing drivers or ORMs.
    // Currently all the notes we send indicate that a stream has certain
    // special properties.
    enum ResponseNote {
        // The stream is a changefeed stream (e.g. `r.table('test').changes()`).
        SEQUENCE_FEED = 1;
        // The stream is a point changefeed stream
        // (e.g. `r.table('test').get(0).changes()`).
        ATOM_FEED = 2;
        // The stream is an order_by_limit changefeed stream
        // (e.g. `r.table('test').order_by(index: 'id').limit(5).changes()`).
        ORDER_BY_LIMIT_FEED = 3;
        // The stream is a union of multiple changefeed types that can't be
        // collapsed to a single type
        // (e.g. `r.table('test').changes().union(r.table('test').get(0).changes())`).
        UNIONED_FEED = 4;
        // The stream is a changefeed stream and includes notes on what state
        // the changefeed stream is in (e.g. objects of the form `{state:
        // 'initializing'}`).
        INCLUDES_STATES = 5;
    }
    repeated ResponseNote notes = 6;

    optional int64 token = 2; // Indicates what [Query] this response corresponds to.

    // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM] or
    // [SERVER_INFO].  [response] contains many RQL data if [type] is
    // [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL].  [response] contains 1
    // error message (of type [R_STR]) in all other cases.
    repeated Datum response = 3;

    // If [type] is [CLIENT_ERROR], [TYPE_ERROR], or [RUNTIME_ERROR], then a
    // backtrace will be provided.  The backtrace says where in the query the
    // error occurred.  Ideally this information will be presented to the user as
    // a pretty-printed version of their query with the erroneous section
    // underlined.  A backtrace is a series of 0 or more [Frame]s, each of which
    // specifies either the index of a positional argument or the name of an
    // optional argument.  (Those words will make more sense if you look at the
    // [Term] message below.)
    optional Backtrace backtrace = 4; // Contains n [Frame]s when you get back an error.

    // If the [global_optargs] in the [Query] that this [Response] is a
    // response to contains a key "profile" which maps to a static value of
    // true then [profile] will contain a [Datum] which provides profiling
    // information about the execution of the query.  This field should be
    // returned to the user along with the result that would normally be
    // returned (a datum or a cursor).  In official drivers this is accomplished
    // by putting them inside of an object with "value" mapping to the return
    // value and "profile" mapping to the profile object.
    optional Datum profile = 5;
}
|
||||
|
||||
// A [Datum] is a chunk of data that can be serialized to disk or returned to
// the user in a Response.  Currently we only support JSON types, but we may
// support other types in the future (e.g., a date type or an integer type).
message Datum {
    enum DatumType {
        R_NULL   = 1;
        R_BOOL   = 2;
        R_NUM    = 3; // a double
        R_STR    = 4;
        R_ARRAY  = 5;
        R_OBJECT = 6;
        // This [DatumType] will only be used if [accepts_r_json] is
        // set to [true] in [Query].  [r_str] will be filled with a
        // JSON encoding of the [Datum].
        R_JSON   = 7; // uses r_str
    }
    optional DatumType type = 1;
    optional bool r_bool = 2;
    optional double r_num = 3;
    optional string r_str = 4;

    repeated Datum r_array = 5;
    message AssocPair {
        optional string key = 1;
        optional Datum val = 2;
    }
    repeated AssocPair r_object = 6;
}
|
||||
|
||||
// A [Term] is either a piece of data (see **Datum** above), or an operator and
// its operands.  If you have a [Datum], it's stored in the member [datum].  If
// you have an operator, its positional arguments are stored in [args] and its
// optional arguments are stored in [optargs].
//
// A note about type signatures:
// We use the following notation to denote types:
//   arg1_type, arg2_type, argrest_type... -> result_type
// So, for example, if we have a function `avg` that takes any number of
// arguments and averages them, we might write:
//   NUMBER... -> NUMBER
// Or if we had a function that took one number modulo another:
//   NUMBER, NUMBER -> NUMBER
// Or a function that takes a table and a primary key of any Datum type, then
// retrieves the entry with that primary key:
//   Table, DATUM -> OBJECT
// Some arguments must be provided as literal values (and not the results of sub
// terms).  These are marked with a `!`.
// Optional arguments are specified within curly braces as argname `:` value
// type (e.x `{noreply:BOOL}`)
// Many RQL operations are polymorphic.  For these, alternative type signatures
// are separated by `|`.
//
// The RQL type hierarchy is as follows:
//   Top
//     DATUM
//       NULL
//       BOOL
//       NUMBER
//       STRING
//       OBJECT
//         SingleSelection
//       ARRAY
//     Sequence
//       ARRAY
//       Stream
//         StreamSelection
//           Table
//     Database
//     Function
//     Ordering - used only by ORDER_BY
//     Pathspec -- an object, string, or array that specifies a path
//   Error
message Term {
    enum TermType {
        // A RQL datum, stored in `datum` below.
        DATUM = 1;

        MAKE_ARRAY = 2; // DATUM... -> ARRAY
        // Evaluate the terms in [optargs] and make an object
        MAKE_OBJ = 3; // {...} -> OBJECT

        // * Compound types

        // Takes an integer representing a variable and returns the value stored
        // in that variable.  It's the responsibility of the client to translate
        // from their local representation of a variable to a unique _non-negative_
        // integer for that variable.  (We do it this way instead of letting
        // clients provide variable names as strings to discourage
        // variable-capturing client libraries, and because it's more efficient
        // on the wire.)
        VAR = 10; // !NUMBER -> DATUM
        // Takes some javascript code and executes it.
        JAVASCRIPT = 11; // STRING {timeout: !NUMBER} -> DATUM |
                         // STRING {timeout: !NUMBER} -> Function(*)
        UUID = 169; // () -> DATUM

        // Takes an HTTP URL and gets it.  If the get succeeds and
        // returns valid JSON, it is converted into a DATUM
        HTTP = 153; // STRING {data: OBJECT | STRING,
                    //         timeout: !NUMBER,
                    //         method: STRING,
                    //         params: OBJECT,
                    //         header: OBJECT | ARRAY,
                    //         attempts: NUMBER,
                    //         redirects: NUMBER,
                    //         verify: BOOL,
                    //         page: FUNC | STRING,
                    //         page_limit: NUMBER,
                    //         auth: OBJECT,
                    //         result_format: STRING,
                    //         } -> STRING | STREAM

        // Takes a string and throws an error with that message.
        // Inside of a `default` block, you can omit the first
        // argument to rethrow whatever error you catch (this is most
        // useful as an argument to the `default` filter optarg).
        ERROR = 12; // STRING -> Error | -> Error
        // Takes nothing and returns a reference to the implicit variable.
        IMPLICIT_VAR = 13; // -> DATUM

        // * Data Operators
        // Returns a reference to a database.
        DB = 14; // STRING -> Database
        // Returns a reference to a table.
        TABLE = 15; // Database, STRING, {read_mode:STRING, identifier_format:STRING} -> Table
                    // STRING, {read_mode:STRING, identifier_format:STRING} -> Table
        // Gets a single element from a table by its primary or a secondary key.
        GET = 16; // Table, STRING -> SingleSelection | Table, NUMBER -> SingleSelection |
                  // Table, STRING -> NULL            | Table, NUMBER -> NULL |
        GET_ALL = 78; // Table, DATUM..., {index:!STRING} => ARRAY

        // Simple DATUM Ops
        EQ = 17;  // DATUM... -> BOOL
        NE = 18;  // DATUM... -> BOOL
        LT = 19;  // DATUM... -> BOOL
        LE = 20;  // DATUM... -> BOOL
        GT = 21;  // DATUM... -> BOOL
        GE = 22;  // DATUM... -> BOOL
        NOT = 23; // BOOL -> BOOL
        // ADD can either add two numbers or concatenate two arrays.
        ADD = 24; // NUMBER... -> NUMBER | STRING... -> STRING
        SUB = 25; // NUMBER... -> NUMBER
        MUL = 26; // NUMBER... -> NUMBER
        DIV = 27; // NUMBER... -> NUMBER
        MOD = 28; // NUMBER, NUMBER -> NUMBER

        FLOOR = 183; // NUMBER -> NUMBER
        CEIL = 184;  // NUMBER -> NUMBER
        ROUND = 185; // NUMBER -> NUMBER

        // DATUM Array Ops
        // Append a single element to the end of an array (like `snoc`).
        APPEND = 29; // ARRAY, DATUM -> ARRAY
        // Prepend a single element to the end of an array (like `cons`).
        PREPEND = 80; // ARRAY, DATUM -> ARRAY
        // Remove the elements of one array from another array.
        DIFFERENCE = 95; // ARRAY, ARRAY -> ARRAY

        // DATUM Set Ops
        // Set ops work on arrays. They don't use actual sets and thus have
        // performance characteristics you would expect from arrays rather than
        // from sets. All set operations have the post condition that they
        // array they return contains no duplicate values.
        SET_INSERT = 88;       // ARRAY, DATUM -> ARRAY
        SET_INTERSECTION = 89; // ARRAY, ARRAY -> ARRAY
        SET_UNION = 90;        // ARRAY, ARRAY -> ARRAY
        SET_DIFFERENCE = 91;   // ARRAY, ARRAY -> ARRAY

        SLICE = 30;      // Sequence, NUMBER, NUMBER -> Sequence
        SKIP = 70;       // Sequence, NUMBER -> Sequence
        LIMIT = 71;      // Sequence, NUMBER -> Sequence
        OFFSETS_OF = 87; // Sequence, DATUM -> Sequence | Sequence, Function(1) -> Sequence
        CONTAINS = 93;   // Sequence, (DATUM | Function(1))... -> BOOL

        // Stream/Object Ops
        // Get a particular field from an object, or map that over a
        // sequence.
        GET_FIELD = 31; // OBJECT, STRING -> DATUM
                        // | Sequence, STRING -> Sequence
        // Return an array containing the keys of the object.
        KEYS = 94; // OBJECT -> ARRAY
        // Return an array containing the values of the object.
        VALUES = 186; // OBJECT -> ARRAY
        // Creates an object
        OBJECT = 143; // STRING, DATUM, ... -> OBJECT
        // Check whether an object contains all the specified fields,
        // or filters a sequence so that all objects inside of it
        // contain all the specified fields.
        HAS_FIELDS = 32; // OBJECT, Pathspec... -> BOOL
        // x.with_fields(...) <=> x.has_fields(...).pluck(...)
        WITH_FIELDS = 96; // Sequence, Pathspec... -> Sequence
        // Get a subset of an object by selecting some attributes to preserve,
        // or map that over a sequence.  (Both pick and pluck, polymorphic.)
        PLUCK = 33; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT
        // Get a subset of an object by selecting some attributes to discard, or
        // map that over a sequence.  (Both unpick and without, polymorphic.)
        WITHOUT = 34; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT
        // Merge objects (right-preferential)
        MERGE = 35; // OBJECT... -> OBJECT | Sequence -> Sequence

        // Sequence Ops
        // Get all elements of a sequence between two values.
        // Half-open by default, but the openness of either side can be
        // changed by passing 'closed' or 'open' for `right_bound` or
        // `left_bound`.
        BETWEEN_DEPRECATED = 36; // Deprecated version of between, which allows `null` to specify unboundedness
        // With the newer version, clients should use `r.minval` and `r.maxval` for unboundedness
        BETWEEN = 182; // StreamSelection, DATUM, DATUM, {index:!STRING, right_bound:STRING, left_bound:STRING} -> StreamSelection
        REDUCE = 37; // Sequence, Function(2) -> DATUM
        MAP = 38; // Sequence, Function(1) -> Sequence
                  // The arity of the function should be
                  // Sequence..., Function(sizeof...(Sequence)) -> Sequence

        FOLD = 187; // Sequence, Datum, Function(2), {Function(3), Function(1)

        // Filter a sequence with either a function or a shortcut
        // object (see API docs for details).  The body of FILTER is
        // wrapped in an implicit `.default(false)`, and you can
        // change the default value by specifying the `default`
        // optarg.  If you make the default `r.error`, all errors
        // caught by `default` will be rethrown as if the `default`
        // did not exist.
        FILTER = 39; // Sequence, Function(1), {default:DATUM} -> Sequence |
                     // Sequence, OBJECT, {default:DATUM} -> Sequence
        // Map a function over a sequence and then concatenate the results together.
        CONCAT_MAP = 40; // Sequence, Function(1) -> Sequence
        // Order a sequence based on one or more attributes.
        ORDER_BY = 41; // Sequence, (!STRING | Ordering)..., {index: (!STRING | Ordering)} -> Sequence
        // Get all distinct elements of a sequence (like `uniq`).
        DISTINCT = 42; // Sequence -> Sequence
        // Count the number of elements in a sequence, or only the elements that match
        // a given filter.
        COUNT = 43; // Sequence -> NUMBER | Sequence, DATUM -> NUMBER | Sequence, Function(1) -> NUMBER
        IS_EMPTY = 86; // Sequence -> BOOL
        // Take the union of multiple sequences (preserves duplicate elements! (use distinct)).
        UNION = 44; // Sequence... -> Sequence
        // Get the Nth element of a sequence.
        NTH = 45; // Sequence, NUMBER -> DATUM
        // do NTH or GET_FIELD depending on target object
        BRACKET = 170; // Sequence | OBJECT, NUMBER | STRING -> DATUM
        // OBSOLETE_GROUPED_MAPREDUCE = 46;
        // OBSOLETE_GROUPBY = 47;

        INNER_JOIN = 48; // Sequence, Sequence, Function(2) -> Sequence
        OUTER_JOIN = 49; // Sequence, Sequence, Function(2) -> Sequence
        // An inner-join that does an equality comparison on two attributes.
        EQ_JOIN = 50; // Sequence, !STRING, Sequence, {index:!STRING} -> Sequence
        ZIP = 72; // Sequence -> Sequence
        RANGE = 173; // -> Sequence                 [0, +inf)
                     // NUMBER -> Sequence          [0, a)
                     // NUMBER, NUMBER -> Sequence  [a, b)

        // Array Ops
        // Insert an element in to an array at a given index.
        INSERT_AT = 82; // ARRAY, NUMBER, DATUM -> ARRAY
        // Remove an element at a given index from an array.
        DELETE_AT = 83; // ARRAY, NUMBER -> ARRAY |
                        // ARRAY, NUMBER, NUMBER -> ARRAY
        // Change the element at a given index of an array.
        CHANGE_AT = 84; // ARRAY, NUMBER, DATUM -> ARRAY
        // Splice one array in to another array.
        SPLICE_AT = 85; // ARRAY, NUMBER, ARRAY -> ARRAY

        // * Type Ops
        // Coerces a datum to a named type (e.g. "bool").
        // If you previously used `stream_to_array`, you should use this instead
        // with the type "array".
        COERCE_TO = 51; // Top, STRING -> Top
        // Returns the named type of a datum (e.g. TYPE_OF(true) = "BOOL")
        TYPE_OF = 52; // Top -> STRING

        // * Write Ops (the OBJECTs contain data about number of errors etc.)
        // Updates all the rows in a selection.  Calls its Function with the row
        // to be updated, and then merges the result of that call.
        UPDATE = 53; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT |
                     // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT |
                     // StreamSelection, OBJECT,      {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT |
                     // SingleSelection, OBJECT,      {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT
        // Deletes all the rows in a selection.
        DELETE = 54; // StreamSelection, {durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection -> OBJECT
        // Replaces all the rows in a selection.  Calls its Function with the row
        // to be replaced, and then discards it and stores the result of that
        // call.
        REPLACE = 55; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT
        // Inserts into a table.  If `conflict` is replace, overwrites
        // entries with the same primary key.  If `conflict` is
        // update, does an update on the entry.  If `conflict` is
        // error, or is omitted, conflicts will trigger an error.
        INSERT = 56; // Table, OBJECT, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT | Table, Sequence, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT

        // * Administrative OPs
        // Creates a database with a particular name.
        DB_CREATE = 57; // STRING -> OBJECT
        // Drops a database with a particular name.
        DB_DROP = 58; // STRING -> OBJECT
        // Lists all the databases by name.  (Takes no arguments)
        DB_LIST = 59; // -> ARRAY
        // Creates a table with a particular name in a particular
        // database.  (You may omit the first argument to use the
        // default database.)
        TABLE_CREATE = 60; // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT
                           // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT
                           // STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT
                           // STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT
        // Drops a table with a particular name from a particular
        // database.  (You may omit the first argument to use the
        // default database.)
        TABLE_DROP = 61; // Database, STRING -> OBJECT
                         // STRING -> OBJECT
        // Lists all the tables in a particular database.  (You may
        // omit the first argument to use the default database.)
        TABLE_LIST = 62; // Database -> ARRAY
                         //  -> ARRAY
        // Returns the row in the `rethinkdb.table_config` or `rethinkdb.db_config` table
        // that corresponds to the given database or table.
        CONFIG = 174; // Database -> SingleSelection
                      // Table -> SingleSelection
        // Returns the row in the `rethinkdb.table_status` table that corresponds to the
        // given table.
        STATUS = 175; // Table -> SingleSelection
        // Called on a table, waits for that table to be ready for read/write operations.
        // Called on a database, waits for all of the tables in the database to be ready.
        // Returns the corresponding row or rows from the `rethinkdb.table_status` table.
        WAIT = 177; // Table -> OBJECT
                    // Database -> OBJECT
        // Generates a new config for the given table, or all tables in the given database
        // The `shards` and `replicas` arguments are required. If `emergency_repair` is
        // specified, it will enter a completely different mode of repairing a table
        // which has lost half or more of its replicas.
        RECONFIGURE = 176; // Database|Table, {shards:NUMBER, replicas:NUMBER [,
                           //                  dry_run:BOOLEAN]
                           //                 } -> OBJECT
                           // Database|Table, {shards:NUMBER, replicas:OBJECT [,
                           //                  primary_replica_tag:STRING,
                           //                  nonvoting_replica_tags:ARRAY,
                           //                  dry_run:BOOLEAN]
                           //                 } -> OBJECT
                           // Table, {emergency_repair:STRING, dry_run:BOOLEAN} -> OBJECT
        // Balances the table's shards but leaves everything else the same. Can also be
        // applied to an entire database at once.
        REBALANCE = 179; // Table -> OBJECT
                         // Database -> OBJECT

        // Ensures that previously issued soft-durability writes are complete and
        // written to disk.
        SYNC = 138; // Table -> OBJECT

        // Set global, database, or table-specific permissions
        GRANT = 188; //          -> OBJECT
                     // Database -> OBJECT
                     // Table    -> OBJECT

        // * Secondary indexes OPs
        // Creates a new secondary index with a particular name and definition.
        INDEX_CREATE = 75; // Table, STRING, Function(1), {multi:BOOL} -> OBJECT
        // Drops a secondary index with a particular name from the specified table.
        INDEX_DROP = 76; // Table, STRING -> OBJECT
        // Lists all secondary indexes on a particular table.
        INDEX_LIST = 77; // Table -> ARRAY
        // Gets information about whether or not a set of indexes are ready to
        // be accessed. Returns a list of objects that look like this:
        // {index:STRING, ready:BOOL[, progress:NUMBER]}
        INDEX_STATUS = 139; // Table, STRING... -> ARRAY
        // Blocks until a set of indexes are ready to be accessed. Returns the
        // same values as INDEX_STATUS.
        INDEX_WAIT = 140; // Table, STRING... -> ARRAY
        // Renames the given index to a new name
        INDEX_RENAME = 156; // Table, STRING, STRING, {overwrite:BOOL} -> OBJECT

        // * Control Operators
        // Calls a function on data
        FUNCALL = 64; // Function(*), DATUM... -> DATUM
        // Executes its first argument, and returns its second argument if it
        // got [true] or its third argument if it got [false] (like an `if`
        // statement).
        BRANCH = 65; // BOOL, Top, Top -> Top
        // Returns true if any of its arguments returns true (short-circuits).
        OR = 66; // BOOL... -> BOOL
        // Returns true if all of its arguments return true (short-circuits).
        AND = 67; // BOOL... -> BOOL
        // Calls its Function with each entry in the sequence
        // and executes the array of terms that Function returns.
        FOR_EACH = 68; // Sequence, Function(1) -> OBJECT

        ////////////////////////////////////////////////////////////////////////////////
        ////////// Special Terms
        ////////////////////////////////////////////////////////////////////////////////

        // An anonymous function.  Takes an array of numbers representing
        // variables (see [VAR] above), and a [Term] to execute with those in
        // scope.  Returns a function that may be passed an array of arguments,
        // then executes the Term with those bound to the variable names.  The
        // user will never construct this directly.  We use it internally for
        // things like `map` which take a function.  The "arity" of a [Function] is
        // the number of arguments it takes.
        // For example, here's what `_X_.map{|x| x+2}` turns into:
        // Term {
        //   type = MAP;
        //   args = [_X_,
        //           Term {
        //             type = Function;
        //             args = [Term {
        //                       type = DATUM;
        //                       datum = Datum {
        //                         type = R_ARRAY;
        //                         r_array = [Datum { type = R_NUM; r_num = 1; }];
        //                       };
        //                     },
        //                     Term {
        //                       type = ADD;
        //                       args = [Term {
        //                                 type = VAR;
        //                                 args = [Term {
        //                                           type = DATUM;
        //                                           datum = Datum { type = R_NUM;
        //                                                           r_num = 1};
        //                                         }];
        //                               },
        //                               Term {
        //                                 type = DATUM;
        //                                 datum = Datum { type = R_NUM; r_num = 2; };
        //                               }];
        //                     }];
        //           }];
        // }
        FUNC = 69; // ARRAY, Top -> ARRAY -> Top

        // Indicates to ORDER_BY that this attribute is to be sorted in ascending order.
        ASC = 73; // !STRING -> Ordering
        // Indicates to ORDER_BY that this attribute is to be sorted in descending order.
        DESC = 74; // !STRING -> Ordering

        // Gets info about anything.  INFO is most commonly called on tables.
        INFO = 79; // Top -> OBJECT

        // `a.match(b)` returns a match object if the string `a`
        // matches the regular expression `b`.
        MATCH = 97; // STRING, STRING -> DATUM

        // Change the case of a string.
        UPCASE = 141;   // STRING -> STRING
        DOWNCASE = 142; // STRING -> STRING

        // Select a number of elements from sequence with uniform distribution.
        SAMPLE = 81; // Sequence, NUMBER -> Sequence

        // Evaluates its first argument.  If that argument returns
        // NULL or throws an error related to the absence of an
        // expected value (for instance, accessing a non-existent
        // field or adding NULL to an integer), DEFAULT will either
        // return its second argument or execute it if it's a
        // function.  If the second argument is a function, it will be
        // passed either the text of the error or NULL as its
        // argument.
        DEFAULT = 92; // Top, Top -> Top

        // Parses its first argument as a json string and returns it as a
        // datum.
        JSON = 98; // STRING -> DATUM
        // Returns the datum as a JSON string.
        // N.B.: we would really prefer this be named TO_JSON and that exists as
        // an alias in Python and JavaScript drivers; however it conflicts with the
        // standard `to_json` method defined by Ruby's standard json library.
        TO_JSON_STRING = 172; // DATUM -> STRING

        // Parses its first arguments as an ISO 8601 time and returns it as a
        // datum.
        ISO8601 = 99; // STRING -> PSEUDOTYPE(TIME)
        // Prints a time as an ISO 8601 time.
        TO_ISO8601 = 100; // PSEUDOTYPE(TIME) -> STRING

        // Returns a time given seconds since epoch in UTC.
        EPOCH_TIME = 101; // NUMBER -> PSEUDOTYPE(TIME)
        // Returns seconds since epoch in UTC given a time.
        TO_EPOCH_TIME = 102; // PSEUDOTYPE(TIME) -> NUMBER

        // The time the query was received by the server.
        NOW = 103; // -> PSEUDOTYPE(TIME)
        // Puts a time into an ISO 8601 timezone.
        IN_TIMEZONE = 104; // PSEUDOTYPE(TIME), STRING -> PSEUDOTYPE(TIME)
        // a.during(b, c) returns whether a is in the range [b, c)
        DURING = 105; // PSEUDOTYPE(TIME), PSEUDOTYPE(TIME), PSEUDOTYPE(TIME) -> BOOL
        // Retrieves the date portion of a time.
        DATE = 106; // PSEUDOTYPE(TIME) -> PSEUDOTYPE(TIME)
        // x.time_of_day == x.date - x
        TIME_OF_DAY = 126; // PSEUDOTYPE(TIME) -> NUMBER
        // Returns the timezone of a time.
        TIMEZONE = 127; // PSEUDOTYPE(TIME) -> STRING

        // These access the various components of a time.
        YEAR = 128;        // PSEUDOTYPE(TIME) -> NUMBER
        MONTH = 129;       // PSEUDOTYPE(TIME) -> NUMBER
        DAY = 130;         // PSEUDOTYPE(TIME) -> NUMBER
        DAY_OF_WEEK = 131; // PSEUDOTYPE(TIME) -> NUMBER
        DAY_OF_YEAR = 132; // PSEUDOTYPE(TIME) -> NUMBER
        HOURS = 133;       // PSEUDOTYPE(TIME) -> NUMBER
        MINUTES = 134;     // PSEUDOTYPE(TIME) -> NUMBER
        SECONDS = 135;     // PSEUDOTYPE(TIME) -> NUMBER

        // Construct a time from a date and optional timezone or a
        // date+time and optional timezone.
        TIME = 136; // NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) |
                    // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) |

        // Constants for ISO 8601 days of the week.
        MONDAY = 107;    // -> 1
        TUESDAY = 108;   // -> 2
        WEDNESDAY = 109; // -> 3
        THURSDAY = 110;  // -> 4
        FRIDAY = 111;    // -> 5
        SATURDAY = 112;  // -> 6
        SUNDAY = 113;    // -> 7

        // Constants for ISO 8601 months.
        JANUARY = 114;   // -> 1
        FEBRUARY = 115;  // -> 2
        MARCH = 116;     // -> 3
        APRIL = 117;     // -> 4
        MAY = 118;       // -> 5
        JUNE = 119;      // -> 6
        JULY = 120;      // -> 7
        AUGUST = 121;    // -> 8
        SEPTEMBER = 122; // -> 9
        OCTOBER = 123;   // -> 10
        NOVEMBER = 124;  // -> 11
        DECEMBER = 125;  // -> 12

        // Indicates to MERGE to replace, or remove in case of an empty literal, the
        // other object rather than merge it.
        LITERAL = 137; // -> Merging
                       // JSON -> Merging

        // SEQUENCE, STRING -> GROUPED_SEQUENCE | SEQUENCE, FUNCTION -> GROUPED_SEQUENCE
        GROUP = 144;
        SUM = 145;
        AVG = 146;
        MIN = 147;
        MAX = 148;

        // `str.split()` splits on whitespace
        // `str.split(" ")` splits on spaces only
        // `str.split(" ", 5)` splits on spaces with at most 5 results
        // `str.split(nil, 5)` splits on whitespace with at most 5 results
        SPLIT = 149; // STRING -> ARRAY | STRING, STRING -> ARRAY | STRING, STRING, NUMBER -> ARRAY | STRING, NULL, NUMBER -> ARRAY

        UNGROUP = 150; // GROUPED_DATA -> ARRAY

        // Takes a range of numbers and returns a random number within the range
        RANDOM = 151; // NUMBER, NUMBER {float:BOOL} -> DATUM

        CHANGES = 152; // TABLE -> STREAM
        ARGS = 154; // ARRAY -> SPECIAL (used to splice arguments)

        // BINARY is client-only at the moment, it is not supported on the server
        BINARY = 155; // STRING -> PSEUDOTYPE(BINARY)

        GEOJSON = 157;          // OBJECT -> PSEUDOTYPE(GEOMETRY)
        TO_GEOJSON = 158;       // PSEUDOTYPE(GEOMETRY) -> OBJECT
        POINT = 159;            // NUMBER, NUMBER -> PSEUDOTYPE(GEOMETRY)
        LINE = 160;             // (ARRAY | PSEUDOTYPE(GEOMETRY))... -> PSEUDOTYPE(GEOMETRY)
        POLYGON = 161;          // (ARRAY | PSEUDOTYPE(GEOMETRY))... -> PSEUDOTYPE(GEOMETRY)
        DISTANCE = 162;         // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) {geo_system:STRING, unit:STRING} -> NUMBER
        INTERSECTS = 163;       // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL
        INCLUDES = 164;         // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL
        CIRCLE = 165;           // PSEUDOTYPE(GEOMETRY), NUMBER {num_vertices:NUMBER, geo_system:STRING, unit:STRING, fill:BOOL} -> PSEUDOTYPE(GEOMETRY)
        GET_INTERSECTING = 166; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING} -> StreamSelection
        FILL = 167;             // PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY)
        GET_NEAREST = 168;      // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING, max_results:NUM, max_dist:NUM, geo_system:STRING, unit:STRING} -> ARRAY
        POLYGON_SUB = 171;      // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY)

        // Constants for specifying key ranges
        MINVAL = 180;
        MAXVAL = 181;
    }
    optional TermType type = 1;

    // This is only used when type is DATUM.
    optional Datum datum = 2;

    repeated Term args = 3; // Holds the positional arguments of the query.
    message AssocPair {
        optional string key = 1;
        optional Term val = 2;
    }
    repeated AssocPair optargs = 4; // Holds the optional arguments of the query.
    // (Note that the order of the optional arguments doesn't matter; think of a
    // Hash.)
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// EXAMPLE //
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// ```ruby
|
||||
// r.table('tbl', {:read_mode => 'outdated'}).insert([{:id => 0}, {:id => 1}])
|
||||
// ```
|
||||
// Would turn into:
|
||||
// Term {
|
||||
// type = INSERT;
|
||||
// args = [Term {
|
||||
// type = TABLE;
|
||||
// args = [Term {
|
||||
// type = DATUM;
|
||||
// datum = Datum { type = R_STR; r_str = "tbl"; };
|
||||
// }];
|
||||
// optargs = [["read_mode",
|
||||
// Term {
|
||||
// type = DATUM;
|
||||
//                              datum = Datum { type = R_STR; r_str = "outdated"; };
|
||||
// }]];
|
||||
// },
|
||||
// Term {
|
||||
// type = MAKE_ARRAY;
|
||||
// args = [Term {
|
||||
// type = DATUM;
|
||||
// datum = Datum { type = R_OBJECT; r_object = [["id", 0]]; };
|
||||
// },
|
||||
// Term {
|
||||
// type = DATUM;
|
||||
// datum = Datum { type = R_OBJECT; r_object = [["id", 1]]; };
|
||||
// }];
|
||||
// }]
|
||||
// }
|
||||
// And the server would reply:
|
||||
// Response {
|
||||
// type = SUCCESS_ATOM;
|
||||
// token = 1;
|
||||
// response = [Datum { type = R_OBJECT; r_object = [["inserted", 2]]; }];
|
||||
// }
|
||||
// Or, if there were an error:
|
||||
// Response {
|
||||
// type = RUNTIME_ERROR;
|
||||
// token = 1;
|
||||
// response = [Datum { type = R_STR; r_str = "The table `tbl` doesn't exist!"; }];
|
||||
// backtrace = [Frame { type = POS; pos = 0; }, Frame { type = POS; pos = 0; }];
|
||||
// }
|
431
ext/librethinkdbxx/src/connection.cc
Normal file
431
ext/librethinkdbxx/src/connection.cc
Normal file
@ -0,0 +1,431 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/select.h>
|
||||
|
||||
#include <netdb.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
#include <cinttypes>
|
||||
#include <memory>
|
||||
|
||||
#include "connection.h"
|
||||
#include "connection_p.h"
|
||||
#include "json_p.h"
|
||||
#include "exceptions.h"
|
||||
#include "term.h"
|
||||
#include "cursor_p.h"
|
||||
|
||||
#include "rapidjson-config.h"
|
||||
#include "rapidjson/rapidjson.h"
|
||||
#include "rapidjson/encodedstream.h"
|
||||
#include "rapidjson/document.h"
|
||||
|
||||
namespace RethinkDB {

using QueryType = Protocol::Query::QueryType;

// constants
const int debug_net = 0;  // >0 logs query/response traffic, >1 also logs raw bytes
const uint32_t version_magic =
    static_cast<uint32_t>(Protocol::VersionDummy::Version::V0_4);
const uint32_t json_magic =
    static_cast<uint32_t>(Protocol::VersionDummy::Protocol::JSON);

// Open a TCP connection to host:port and perform the V0_4 handshake using
// the JSON wire protocol. Throws Error when no resolved address connects or
// the server rejects the auth key.
std::unique_ptr<Connection> connect(std::string host, int port, std::string auth_key) {
    struct addrinfo hints;
    memset(&hints, 0, sizeof hints);
    hints.ai_family = AF_UNSPEC;       // accept IPv4 or IPv6
    hints.ai_socktype = SOCK_STREAM;

    char port_str[16];
    snprintf(port_str, 16, "%d", port);
    struct addrinfo *servinfo;
    int ret = getaddrinfo(host.c_str(), port_str, &hints, &servinfo);
    if (ret) throw Error("getaddrinfo: %s\n", gai_strerror(ret));

    // Try every resolved address until one accepts the connection.
    struct addrinfo *p;
    Error error;
    int sockfd;
    for (p = servinfo; p != NULL; p = p->ai_next) {
        sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
        if (sockfd == -1) {
            error = Error::from_errno("socket");
            continue;
        }

        // `::connect` — the global-scope qualifier makes it explicit that this
        // is the socket call, not a recursive call to RethinkDB::connect.
        if (::connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
            ::close(sockfd);
            error = Error::from_errno("connect");
            continue;
        }

        break;
    }

    if (p == NULL) {
        freeaddrinfo(servinfo);  // fix: was leaked on the all-addresses-failed path
        throw error;
    }

    freeaddrinfo(servinfo);

    std::unique_ptr<ConnectionPrivate> conn_private(new ConnectionPrivate(sockfd));
    WriteLock writer(conn_private.get());
    {
        // Handshake frame: [version magic][auth key length][auth key][protocol magic].
        // Built in a std::string instead of the original non-standard VLA.
        const uint32_t n = static_cast<uint32_t>(auth_key.size());
        std::string handshake;
        handshake.reserve(12 + auth_key.size());
        handshake.append(reinterpret_cast<const char *>(&version_magic), 4);
        handshake.append(reinterpret_cast<const char *>(&n), 4);
        handshake.append(auth_key);
        handshake.append(reinterpret_cast<const char *>(&json_magic), 4);
        writer.send(handshake);
    }

    ReadLock reader(conn_private.get());
    {
        // The server answers the handshake with a NUL-terminated status string.
        const size_t max_response_length = 1024;
        char buf[max_response_length + 1];
        size_t len = reader.recv_cstring(buf, max_response_length);
        if (len == max_response_length || strcmp(buf, "SUCCESS")) {
            buf[len] = 0;
            ::close(sockfd);
            throw Error("Server rejected connection with message: %s", buf);
        }
    }

    return std::unique_ptr<Connection>(new Connection(conn_private.release()));
}

Connection::Connection(ConnectionPrivate *dd) : d(dd) { }

Connection::~Connection() {
    // close() is intentionally not called here: it can throw, and a destructor
    // must not. Callers should close() explicitly if they care about errors.
    // close();
}

// Receive up to `size` bytes. If `wait` is not FOREVER, throws
// TimeoutException when no data becomes readable within `wait` seconds.
size_t ReadLock::recv_some(char* buf, size_t size, double wait) {
    if (wait != FOREVER) {
        while (true) {
            fd_set readfds;
            struct timeval tv;

            FD_ZERO(&readfds);
            FD_SET(conn->guarded_sockfd, &readfds);

            // Split the fractional timeout into whole seconds + microseconds.
            tv.tv_sec = (int)wait;
            tv.tv_usec = (int)((wait - (int)wait) / MICROSECOND);
            int rv = select(conn->guarded_sockfd + 1, &readfds, NULL, NULL, &tv);
            if (rv == -1) {
                throw Error::from_errno("select");
            } else if (rv == 0) {
                throw TimeoutException();
            }

            if (FD_ISSET(conn->guarded_sockfd, &readfds)) {
                break;
            }
        }
    }

    ssize_t numbytes = ::recv(conn->guarded_sockfd, buf, size, 0);
    if (numbytes == -1) throw Error::from_errno("recv");
    if (numbytes == 0 && size != 0) {
        // fix: ::recv returning 0 means the peer closed the connection;
        // without this, the exact-size recv() loop below would spin forever.
        throw Error("recv: connection closed");
    }
    if (debug_net > 1) {
        fprintf(stderr, "<< %s\n", write_datum(std::string(buf, numbytes)).c_str());
    }

    return numbytes;
}

// Receive exactly `size` bytes, looping over short reads.
void ReadLock::recv(char* buf, size_t size, double wait) {
    while (size) {
        size_t numbytes = recv_some(buf, size, wait);

        buf += numbytes;
        size -= numbytes;
    }
}

// Read a NUL-terminated string of at most `max_size` bytes into `buf`.
// Returns the number of bytes read, excluding the terminator.
size_t ReadLock::recv_cstring(char* buf, size_t max_size){
    size_t size = 0;
    for (; size < max_size; size++) {
        recv(buf, 1, FOREVER);
        if (*buf == 0) {
            break;
        }
        buf++;
    }
    return size;
}

// Send the whole buffer, looping over short writes.
void WriteLock::send(const char* buf, size_t size) {
    while (size) {
        ssize_t numbytes = ::write(conn->guarded_sockfd, buf, size);
        if (numbytes == -1) throw Error::from_errno("write");
        if (debug_net > 1) {
            fprintf(stderr, ">> %s\n", write_datum(std::string(buf, numbytes)).c_str());
        }

        buf += numbytes;
        size -= numbytes;
    }
}

void WriteLock::send(const std::string data) {
    send(data.data(), data.size());
}

// Receive exactly `size` bytes and return them as a string.
std::string ReadLock::recv(size_t size) {
    // fix: the original read into a stack char[] and returned it through the
    // std::string(const char*) constructor, which truncated at the first NUL
    // byte and read past the buffer when the payload contained none.
    std::string buf(size, '\0');
    if (size) recv(&buf[0], size, FOREVER);
    return buf;
}

void Connection::close() {
    CacheLock guard(d.get());
    for (auto& it : d->guarded_cache) {
        stop_query(it.first);
    }

    int ret = ::close(d->guarded_sockfd);
    if (ret == -1) {
        throw Error::from_errno("close");
    }
}

// Block until a response for `token_want` is available, either from the
// per-token cache or by taking over the connection's read loop.
Response ConnectionPrivate::wait_for_response(uint64_t token_want, double wait) {
    CacheLock guard(this);
    ConnectionPrivate::TokenCache& cache = guarded_cache[token_want];

    while (true) {
        if (!cache.responses.empty()) {
            Response response(std::move(cache.responses.front()));
            cache.responses.pop();
            if (cache.closed && cache.responses.empty()) {
                guarded_cache.erase(token_want);
            }

            return response;
        }

        if (cache.closed) {
            throw Error("Trying to read from a closed token");
        }

        if (guarded_loop_active) {
            // Another thread is reading the socket; wait for it to hand
            // our response over via the cache.
            cache.cond.wait(guard.inner_lock);
        } else {
            break;
        }
    }

    // Nobody is reading the socket: become the read loop ourselves.
    ReadLock reader(this);
    return reader.read_loop(token_want, std::move(guard), wait);
}

// Read responses off the socket until one with `token_want` arrives.
// Responses for other tokens are queued into their caches. At most one
// read loop may run per connection at a time.
Response ReadLock::read_loop(uint64_t token_want, CacheLock&& guard, double wait) {
    if (!guard.inner_lock) {
        guard.lock();
    }
    if (conn->guarded_loop_active) {
        throw Error("Cannot run more than one read loop on the same connection");
    }
    conn->guarded_loop_active = true;
    guard.unlock();

    try {
        while (true) {
            // Frame header: 8-byte token + 4-byte payload length.
            char buf[12];
            bzero(buf, sizeof(buf));
            recv(buf, 12, wait);
            uint64_t token_got;
            memcpy(&token_got, buf, 8);
            uint32_t length;
            memcpy(&length, buf + 8, 4);

            std::unique_ptr<char[]> bufmem(new char[length + 1]);
            char *buffer = bufmem.get();
            bzero(buffer, length + 1);
            recv(buffer, length, wait);
            buffer[length] = '\0';

            rapidjson::Document json;
            json.ParseInsitu(buffer);
            if (json.HasParseError()) {
                fprintf(stderr, "json parse error, code: %d, position: %d\n",
                    (int)json.GetParseError(), (int)json.GetErrorOffset());
            } else if (json.IsNull()) {
                fprintf(stderr, "null value, read: %s\n", buffer);
            }

            Datum datum = read_datum(json);
            if (debug_net > 0) {
                fprintf(stderr, "[%" PRIu64 "] << %s\n", token_got, write_datum(datum).c_str());
            }

            Response response(std::move(datum));
            // Cache the type before the Response may be moved into a queue below.
            const auto response_kind = response.type;

            if (token_got == token_want) {
                guard.lock();
                if (response_kind != Protocol::Response::ResponseType::SUCCESS_PARTIAL) {
                    auto it = conn->guarded_cache.find(token_got);
                    if (it != conn->guarded_cache.end()) {
                        it->second.closed = true;
                        it->second.cond.notify_all();
                        // fix: erase() was previously called unconditionally
                        // after the if — erasing end() is undefined behavior.
                        conn->guarded_cache.erase(it);
                    }
                }
                conn->guarded_loop_active = false;
                // Wake everyone so another waiter can take over the read loop.
                for (auto& it : conn->guarded_cache) {
                    it.second.cond.notify_all();
                }
                return response;
            } else {
                guard.lock();
                auto it = conn->guarded_cache.find(token_got);
                if (it == conn->guarded_cache.end()) {
                    // Unknown token: drop the response.
                } else {
                    if (!it->second.closed) {
                        it->second.responses.emplace(std::move(response));
                        if (response_kind != Protocol::Response::ResponseType::SUCCESS_PARTIAL) {
                            it->second.closed = true;
                        }
                    }
                    // fix: notify_all() was previously invoked through `it`
                    // even when the token was not found (dereferencing end()).
                    it->second.cond.notify_all();
                }
                guard.unlock();
            }
        }
    } catch (const TimeoutException &e) {
        if (!guard.inner_lock){
            guard.lock();
        }
        conn->guarded_loop_active = false;
        throw e;
    }
}

// Serialize and send a query frame.
// NOTE(review): `no_reply` is accepted but unused here; replies for noreply
// queries are simply never waited for by callers — confirm this is intended.
void ConnectionPrivate::run_query(Query query, bool no_reply) {
    (void)no_reply;
    WriteLock writer(this);
    writer.send(query.serialize());
}

// Send a START query for `term` and return a Cursor over its results.
// For noreply queries the cursor is immediately satisfied with Nil.
Cursor Connection::start_query(Term *term, OptArgs&& opts) {
    bool no_reply = false;
    auto it = opts.find("noreply");
    if (it != opts.end()) {
        no_reply = *(it->second.datum.get_boolean());
    }

    uint64_t token = d->new_token();
    {
        // Register the token before sending so the read loop has somewhere
        // to put an early response.
        CacheLock guard(d.get());
        d->guarded_cache[token];
    }

    d->run_query(Query{QueryType::START, token, term->datum, std::move(opts)});
    if (no_reply) {
        return Cursor(new CursorPrivate(token, this, Nil()));
    }

    Cursor cursor(new CursorPrivate(token, this));
    Response response = d->wait_for_response(token, FOREVER);
    cursor.d->add_response(std::move(response));
    return cursor;
}

// Send a STOP for a still-open token.
// NOTE(review): guarded_cache is read here without taking the cache lock;
// close() calls this while already holding CacheLock, so locking here would
// self-deadlock — confirm all other callers hold the lock too.
void Connection::stop_query(uint64_t token) {
    const auto& it = d->guarded_cache.find(token);
    if (it != d->guarded_cache.end() && !it->second.closed) {
        d->run_query(Query{QueryType::STOP, token}, true);
    }
}

// Ask the server for the next batch of a partial result.
void Connection::continue_query(uint64_t token) {
    d->run_query(Query{QueryType::CONTINUE, token}, true);
}

// Build an Error describing this (unexpected or error) response.
// fix: this previously `throw`ed the Error despite being declared to return
// one; returning is equivalent for the callers, which `throw as_error()`.
Error Response::as_error() {
    std::string repr;
    if (result.size() == 1) {
        std::string* string = result[0].get_string();
        if (string) {
            repr = *string;
        } else {
            repr = write_datum(result[0]);
        }
    } else {
        repr = write_datum(Datum(result));
    }
    std::string err;
    using RT = Protocol::Response::ResponseType;
    using ET = Protocol::Response::ErrorType;
    switch (type) {
    case RT::SUCCESS_SEQUENCE: err = "unexpected response: SUCCESS_SEQUENCE"; break;
    case RT::SUCCESS_PARTIAL: err = "unexpected response: SUCCESS_PARTIAL"; break;
    case RT::SUCCESS_ATOM: err = "unexpected response: SUCCESS_ATOM"; break;
    case RT::WAIT_COMPLETE: err = "unexpected response: WAIT_COMPLETE"; break;
    case RT::SERVER_INFO: err = "unexpected response: SERVER_INFO"; break;
    case RT::CLIENT_ERROR: err = "ReqlDriverError"; break;
    case RT::COMPILE_ERROR: err = "ReqlCompileError"; break;
    case RT::RUNTIME_ERROR:
        switch (error_type) {
        case ET::INTERNAL: err = "ReqlInternalError"; break;
        case ET::RESOURCE_LIMIT: err = "ReqlResourceLimitError"; break;
        case ET::QUERY_LOGIC: err = "ReqlQueryLogicError"; break;
        case ET::NON_EXISTENCE: err = "ReqlNonExistenceError"; break;
        case ET::OP_FAILED: err = "ReqlOpFailedError"; break;
        case ET::OP_INDETERMINATE: err = "ReqlOpIndeterminateError"; break;
        case ET::USER: err = "ReqlUserError"; break;
        case ET::PERMISSION_ERROR: err = "ReqlPermissionError"; break;
        default: err = "ReqlRuntimeError"; break;
        }
    }
    return Error("%s: %s", err.c_str(), repr.c_str());
}

// Map a raw numeric response type from the wire onto the enum.
Protocol::Response::ResponseType response_type(double t) {
    int n = static_cast<int>(t);
    using RT = Protocol::Response::ResponseType;
    switch (n) {
    case static_cast<int>(RT::SUCCESS_ATOM):
        return RT::SUCCESS_ATOM;
    case static_cast<int>(RT::SUCCESS_SEQUENCE):
        return RT::SUCCESS_SEQUENCE;
    case static_cast<int>(RT::SUCCESS_PARTIAL):
        return RT::SUCCESS_PARTIAL;
    case static_cast<int>(RT::WAIT_COMPLETE):
        return RT::WAIT_COMPLETE;
    case static_cast<int>(RT::CLIENT_ERROR):
        return RT::CLIENT_ERROR;
    case static_cast<int>(RT::COMPILE_ERROR):
        return RT::COMPILE_ERROR;
    case static_cast<int>(RT::RUNTIME_ERROR):
        return RT::RUNTIME_ERROR;
    default:
        throw Error("Unknown response type");
    }
}

// Map a raw numeric runtime error type from the wire onto the enum.
Protocol::Response::ErrorType runtime_error_type(double t) {
    int n = static_cast<int>(t);
    using ET = Protocol::Response::ErrorType;
    switch (n) {
    case static_cast<int>(ET::INTERNAL):
        return ET::INTERNAL;
    case static_cast<int>(ET::RESOURCE_LIMIT):
        return ET::RESOURCE_LIMIT;
    case static_cast<int>(ET::QUERY_LOGIC):
        return ET::QUERY_LOGIC;
    case static_cast<int>(ET::NON_EXISTENCE):
        return ET::NON_EXISTENCE;
    case static_cast<int>(ET::OP_FAILED):
        return ET::OP_FAILED;
    case static_cast<int>(ET::OP_INDETERMINATE):
        return ET::OP_INDETERMINATE;
    case static_cast<int>(ET::USER):
        return ET::USER;
    default:
        throw Error("Unknown error type");
    }
}

}
|
59
ext/librethinkdbxx/src/connection.h
Normal file
59
ext/librethinkdbxx/src/connection.h
Normal file
@ -0,0 +1,59 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <queue>
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
#include <condition_variable>
|
||||
|
||||
#include "protocol_defs.h"
|
||||
#include "datum.h"
|
||||
#include "error.h"
|
||||
|
||||
#define FOREVER (-1)
|
||||
#define SECOND 1
|
||||
#define MICROSECOND 0.000001
|
||||
|
||||
namespace RethinkDB {

class Term;
// Optional keyword arguments for a query, keyed by argument name.
using OptArgs = std::map<std::string, Term>;

// A connection to a RethinkDB server
// It contains:
//  * A socket
//  * Read and write locks
//  * A cache of responses that have not been read by the corresponding Cursor
// Not copyable or movable; obtain one via connect() and share via pointer.
class ConnectionPrivate;
class Connection {
public:
    Connection() = delete;
    Connection(const Connection&) noexcept = delete;
    Connection(Connection&&) noexcept = delete;
    Connection& operator=(Connection&&) noexcept = delete;
    Connection& operator=(const Connection&) noexcept = delete;
    ~Connection();

    // Stops outstanding queries and closes the socket. May throw; the
    // destructor does NOT call this (see the .cc), so call it explicitly
    // if you need the error.
    void close();

private:
    explicit Connection(ConnectionPrivate *dd);
    std::unique_ptr<ConnectionPrivate> d;

    // Query lifecycle, driven by Term/Cursor (friends below).
    // NOTE(review): Cursor is used here without a declaration visible in this
    // header — presumably one of the included headers declares it; confirm.
    Cursor start_query(Term *term, OptArgs&& args);
    void stop_query(uint64_t);
    void continue_query(uint64_t);

    friend class Cursor;
    friend class CursorPrivate;
    friend class Token;
    friend class Term;
    friend std::unique_ptr<Connection>
    connect(std::string host, int port, std::string auth_key);

};

// $doc(connect)
std::unique_ptr<Connection> connect(std::string host = "localhost", int port = 28015, std::string auth_key = "");

}
|
133
ext/librethinkdbxx/src/connection_p.h
Normal file
133
ext/librethinkdbxx/src/connection_p.h
Normal file
@ -0,0 +1,133 @@
|
||||
#ifndef CONNECTION_P_H
|
||||
#define CONNECTION_P_H
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
#include "connection.h"
|
||||
#include "term.h"
|
||||
#include "json_p.h"
|
||||
|
||||
namespace RethinkDB {

// Defined in connection.cc; >0 enables wire-level debug logging.
extern const int debug_net;

// A single client->server query: type tag, request token, the serialized
// term, and its optional arguments.
struct Query {
    Protocol::Query::QueryType type;
    uint64_t token;
    Datum term;
    OptArgs optArgs;

    // Wire format: [8-byte token][4-byte payload length][JSON array payload].
    std::string serialize() {
        Array query_arr{static_cast<double>(type)};
        if (term.is_valid()) query_arr.emplace_back(term);
        if (!optArgs.empty())
            query_arr.emplace_back(Term(std::move(optArgs)).datum);

        std::string query_str = write_datum(query_arr);
        if (debug_net > 0) {
            fprintf(stderr, "[%" PRIu64 "] >> %s\n", token, query_str.c_str());
        }

        char header[12];
        memcpy(header, &token, 8);
        uint32_t size = query_str.size();
        memcpy(header + 8, &size, 4);
        query_str.insert(0, header, 12);
        return query_str;
    }
};

// Used internally to convert a raw response type into an enum
Protocol::Response::ResponseType response_type(double t);
Protocol::Response::ErrorType runtime_error_type(double t);

// Contains a response from the server. Use the Cursor class to interact with these responses
class Response {
public:
    Response() = delete;
    // Splits the raw response datum into its "t" (type), optional "e"
    // (error type) and "r" (result array) fields.
    // NOTE(review): relies on left-to-right member initialization order and
    // on extract_field() removing the field from `datum` — confirm.
    explicit Response(Datum&& datum) :
        type(response_type(std::move(datum).extract_field("t").extract_number())),
        error_type(datum.get_field("e") ?
                   runtime_error_type(std::move(datum).extract_field("e").extract_number()) :
                   Protocol::Response::ErrorType(0)),
        result(std::move(datum).extract_field("r").extract_array()) { }
    Error as_error();
    Protocol::Response::ResponseType type;
    Protocol::Response::ErrorType error_type;
    Array result;
};

class Token;
// Shared connection state. Fields prefixed `guarded_` are presumably meant
// to be accessed only while holding one of the locks below — confirm.
class ConnectionPrivate {
public:
    ConnectionPrivate(int sockfd)
        : guarded_next_token(1), guarded_sockfd(sockfd), guarded_loop_active(false)
    { }

    // Serialize and send a query (see connection.cc).
    void run_query(Query query, bool no_reply = false);

    // Block until a response for the given token is available.
    Response wait_for_response(uint64_t, double);
    // Hand out the next unique query token.
    uint64_t new_token() {
        return guarded_next_token++;
    }

    std::mutex read_lock;    // serializes socket reads
    std::mutex write_lock;   // serializes socket writes
    std::mutex cache_lock;   // protects guarded_cache / guarded_loop_active

    // Per-token queue of responses not yet consumed by a Cursor.
    struct TokenCache {
        bool closed = false;             // no further responses will arrive
        std::condition_variable cond;    // signaled when a response arrives
        std::queue<Response> responses;
    };

    std::map<uint64_t, TokenCache> guarded_cache;
    uint64_t guarded_next_token;
    int guarded_sockfd;
    bool guarded_loop_active;  // true while some thread runs the read loop
};

// RAII-ish wrapper around cache_lock; exposes lock/unlock because the read
// loop needs to drop and reacquire it.
class CacheLock {
public:
    CacheLock(ConnectionPrivate* conn) : inner_lock(conn->cache_lock) { }

    void lock() {
        inner_lock.lock();
    }

    void unlock() {
        inner_lock.unlock();
    }

    std::unique_lock<std::mutex> inner_lock;
};

// Holds read_lock for its lifetime and provides the socket read primitives.
class ReadLock {
public:
    ReadLock(ConnectionPrivate* conn_) : lock(conn_->read_lock), conn(conn_) { }

    size_t recv_some(char*, size_t, double wait);
    void recv(char*, size_t, double wait);
    std::string recv(size_t);
    size_t recv_cstring(char*, size_t);

    Response read_loop(uint64_t, CacheLock&&, double);

    std::lock_guard<std::mutex> lock;
    ConnectionPrivate* conn;
};

// Holds write_lock for its lifetime and provides the socket write primitives.
class WriteLock {
public:
    WriteLock(ConnectionPrivate* conn_) : lock(conn_->write_lock), conn(conn_) { }

    void send(const char*, size_t);
    void send(std::string);

    std::lock_guard<std::mutex> lock;
    ConnectionPrivate* conn;
};

} // namespace RethinkDB
|
||||
|
||||
#endif // CONNECTION_P_H
|
221
ext/librethinkdbxx/src/cursor.cc
Normal file
221
ext/librethinkdbxx/src/cursor.cc
Normal file
@ -0,0 +1,221 @@
|
||||
#include "cursor.h"
|
||||
#include "cursor_p.h"
|
||||
#include "exceptions.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
// for type completion, in order to forward declare with unique_ptr
|
||||
Cursor::Cursor(Cursor&&) = default;
|
||||
Cursor& Cursor::operator=(Cursor&&) = default;
|
||||
|
||||
CursorPrivate::CursorPrivate(uint64_t token_, Connection *conn_)
|
||||
: single(false), no_more(false), index(0),
|
||||
token(token_), conn(conn_)
|
||||
{ }
|
||||
|
||||
CursorPrivate::CursorPrivate(uint64_t token_, Connection *conn_, Datum&& datum)
|
||||
: single(true), no_more(true), index(0), buffer(Array{std::move(datum)}),
|
||||
token(token_), conn(conn_)
|
||||
{ }
|
||||
|
||||
Cursor::Cursor(CursorPrivate *dd) : d(dd) {}
|
||||
|
||||
Cursor::~Cursor() {
|
||||
if (d && d->conn) {
|
||||
close();
|
||||
}
|
||||
}
|
||||
|
||||
Datum& Cursor::next(double wait) const {
|
||||
if (!has_next(wait)) {
|
||||
throw Error("next: No more data");
|
||||
}
|
||||
|
||||
return d->buffer[d->index++];
|
||||
}
|
||||
|
||||
Datum& Cursor::peek(double wait) const {
|
||||
if (!has_next(wait)) {
|
||||
throw Error("next: No more data");
|
||||
}
|
||||
|
||||
return d->buffer[d->index];
|
||||
}
|
||||
|
||||
void Cursor::each(std::function<void(Datum&&)> f, double wait) const {
|
||||
while (has_next(wait)) {
|
||||
f(std::move(d->buffer[d->index++]));
|
||||
}
|
||||
}
|
||||
|
||||
void CursorPrivate::convert_single() const {
|
||||
if (index != 0) {
|
||||
throw Error("Cursor: already consumed");
|
||||
}
|
||||
|
||||
if (buffer.size() != 1) {
|
||||
throw Error("Cursor: invalid response from server");
|
||||
}
|
||||
|
||||
if (!buffer[0].is_array()) {
|
||||
throw Error("Cursor: not an array");
|
||||
}
|
||||
|
||||
buffer.swap(buffer[0].extract_array());
|
||||
single = false;
|
||||
}
|
||||
|
||||
void CursorPrivate::clear_and_read_all() const {
|
||||
if (single) {
|
||||
convert_single();
|
||||
}
|
||||
if (index != 0) {
|
||||
buffer.erase(buffer.begin(), buffer.begin() + index);
|
||||
index = 0;
|
||||
}
|
||||
while (!no_more) {
|
||||
add_response(conn->d->wait_for_response(token, FOREVER));
|
||||
}
|
||||
}
|
||||
|
||||
Array&& Cursor::to_array() && {
|
||||
d->clear_and_read_all();
|
||||
return std::move(d->buffer);
|
||||
}
|
||||
|
||||
Array Cursor::to_array() const & {
|
||||
d->clear_and_read_all();
|
||||
return d->buffer;
|
||||
}
|
||||
|
||||
Datum Cursor::to_datum() const & {
|
||||
if (d->single) {
|
||||
if (d->index != 0) {
|
||||
throw Error("to_datum: already consumed");
|
||||
}
|
||||
return d->buffer[0];
|
||||
}
|
||||
|
||||
d->clear_and_read_all();
|
||||
return d->buffer;
|
||||
}
|
||||
|
||||
Datum Cursor::to_datum() && {
|
||||
Datum ret((Nil()));
|
||||
if (d->single) {
|
||||
if (d->index != 0) {
|
||||
throw Error("to_datum: already consumed");
|
||||
}
|
||||
ret = std::move(d->buffer[0]);
|
||||
} else {
|
||||
d->clear_and_read_all();
|
||||
ret = std::move(d->buffer);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void Cursor::close() const {
|
||||
d->conn->stop_query(d->token);
|
||||
d->no_more = true;
|
||||
}
|
||||
|
||||
bool Cursor::has_next(double wait) const {
|
||||
if (d->single) {
|
||||
d->convert_single();
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (d->index >= d->buffer.size()) {
|
||||
if (d->no_more) {
|
||||
return false;
|
||||
}
|
||||
d->add_response(d->conn->d->wait_for_response(d->token, wait));
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool Cursor::is_single() const {
|
||||
return d->single;
|
||||
}
|
||||
|
||||
void CursorPrivate::add_results(Array&& results) const {
|
||||
if (index >= buffer.size()) {
|
||||
buffer = std::move(results);
|
||||
index = 0;
|
||||
} else {
|
||||
for (auto& it : results) {
|
||||
buffer.emplace_back(std::move(it));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CursorPrivate::add_response(Response&& response) const {
|
||||
using RT = Protocol::Response::ResponseType;
|
||||
switch (response.type) {
|
||||
case RT::SUCCESS_SEQUENCE:
|
||||
add_results(std::move(response.result));
|
||||
no_more = true;
|
||||
break;
|
||||
case RT::SUCCESS_PARTIAL:
|
||||
conn->continue_query(token);
|
||||
add_results(std::move(response.result));
|
||||
break;
|
||||
case RT::SUCCESS_ATOM:
|
||||
add_results(std::move(response.result));
|
||||
single = true;
|
||||
no_more = true;
|
||||
break;
|
||||
case RT::SERVER_INFO:
|
||||
add_results(std::move(response.result));
|
||||
single = true;
|
||||
no_more = true;
|
||||
break;
|
||||
case RT::WAIT_COMPLETE:
|
||||
case RT::CLIENT_ERROR:
|
||||
case RT::COMPILE_ERROR:
|
||||
case RT::RUNTIME_ERROR:
|
||||
no_more = true;
|
||||
throw response.as_error();
|
||||
}
|
||||
}
|
||||
|
||||
Cursor::iterator Cursor::begin() {
|
||||
return iterator(this);
|
||||
}
|
||||
|
||||
Cursor::iterator Cursor::end() {
|
||||
return iterator(nullptr);
|
||||
}
|
||||
|
||||
Cursor::iterator::iterator(Cursor* cursor_) : cursor(cursor_) {}
|
||||
|
||||
Cursor::iterator& Cursor::iterator::operator++ () {
|
||||
if (cursor == nullptr) {
|
||||
throw Error("incrementing an exhausted Cursor iterator");
|
||||
}
|
||||
|
||||
cursor->next();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Datum& Cursor::iterator::operator* () {
|
||||
if (cursor == nullptr) {
|
||||
throw Error("reading from empty Cursor iterator");
|
||||
}
|
||||
|
||||
return cursor->peek();
|
||||
}
|
||||
|
||||
bool Cursor::iterator::operator!= (const Cursor::iterator& other) const {
|
||||
if (cursor == other.cursor) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return !((cursor == nullptr && !other.cursor->has_next()) ||
|
||||
(other.cursor == nullptr && !cursor->has_next()));
|
||||
}
|
||||
|
||||
}
|
76
ext/librethinkdbxx/src/cursor.h
Normal file
76
ext/librethinkdbxx/src/cursor.h
Normal file
@ -0,0 +1,76 @@
|
||||
#pragma once
|
||||
|
||||
#include "connection.h"
|
||||
|
||||
namespace RethinkDB {

// The response from the server, as returned by run.
// The response is either a single datum or a stream:
//  * If it is a stream, the cursor represents each element of the stream.
//    - Batches are fetched from the server as needed.
//  * If it is a single datum, is_single() returns true.
//    - If it is an array, the cursor represents each element of that array
//    - Otherwise, to_datum() returns the datum and iteration throws an exception.
// The cursor can only be iterated over once, it discards data that has already been read.
class CursorPrivate;
class Cursor {
public:
    Cursor() = delete;
    ~Cursor();

    Cursor(Cursor&&);            // movable
    Cursor& operator=(Cursor&&);
    Cursor(const Cursor&) = delete;  // not copyable
    Cursor& operator=(const Cursor&) = delete;

    // Returned by begin() and end(). Single-pass: advancing it consumes
    // elements from the underlying cursor.
    class iterator {
    public:
        iterator(Cursor*);
        iterator& operator++ ();
        Datum& operator* ();
        bool operator!= (const iterator&) const;

    private:
        Cursor *cursor;  // nullptr acts as the end sentinel
    };

    // Consume the next element
    Datum& next(double wait = FOREVER) const;

    // Peek at the next element
    Datum& peek(double wait = FOREVER) const;

    // Call f on every element of the Cursor
    void each(std::function<void(Datum&&)> f, double wait = FOREVER) const;

    // Consume and return all elements
    Array&& to_array() &&;

    // If is_single(), returns the single datum. Otherwise returns to_array().
    Datum to_datum() &&;
    Datum to_datum() const &;

    // Efficiently consume and return all elements
    Array to_array() const &;

    // Close the cursor
    void close() const;

    // Returns false if there are no more elements
    bool has_next(double wait = FOREVER) const;

    // Returns false if the cursor is a stream
    bool is_single() const;

    iterator begin();
    iterator end();

private:
    explicit Cursor(CursorPrivate *dd);
    std::unique_ptr<CursorPrivate> d;

    friend class Connection;
};

}
|
29
ext/librethinkdbxx/src/cursor_p.h
Normal file
29
ext/librethinkdbxx/src/cursor_p.h
Normal file
@ -0,0 +1,29 @@
|
||||
#ifndef CURSOR_P_H
|
||||
#define CURSOR_P_H
|
||||
|
||||
#include "connection_p.h"
|
||||
|
||||
namespace RethinkDB {

// Shared state behind Cursor. Most members are `mutable` because the public
// Cursor API is const-qualified while still consuming buffered data.
class CursorPrivate {
public:
    CursorPrivate(uint64_t token, Connection *conn);
    CursorPrivate(uint64_t token, Connection *conn, Datum&&);

    void add_response(Response&&) const;   // merge one server response into the buffer
    void add_results(Array&&) const;       // append (or replace) buffered results
    void clear_and_read_all() const;       // drain every remaining batch into the buffer
    void convert_single() const;           // unwrap a single-datum response into an array

    mutable bool single = false;   // response was a single datum, not a stream
    mutable bool no_more = false;  // server will send no further batches
    mutable size_t index = 0;      // next unread element in `buffer`
    mutable Array buffer;          // buffered, not-yet-consumed results

    uint64_t token;    // query token this cursor belongs to
    Connection *conn;  // non-owning; the connection the query runs on
};

} // namespace RethinkDB
|
||||
|
||||
#endif // CURSOR_P_H
|
449
ext/librethinkdbxx/src/datum.cc
Normal file
449
ext/librethinkdbxx/src/datum.cc
Normal file
@ -0,0 +1,449 @@
|
||||
#include <float.h>
|
||||
#include <cmath>
|
||||
|
||||
#include "datum.h"
|
||||
#include "json_p.h"
|
||||
#include "utils.h"
|
||||
#include "cursor.h"
|
||||
|
||||
#include "rapidjson-config.h"
|
||||
#include "rapidjson/prettywriter.h"
|
||||
#include "rapidjson/stringbuffer.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
using TT = Protocol::Term::TermType;
|
||||
|
||||
bool Datum::is_nil() const {
|
||||
return type == Type::NIL;
|
||||
}
|
||||
|
||||
bool Datum::is_boolean() const {
|
||||
return type == Type::BOOLEAN;
|
||||
}
|
||||
|
||||
bool Datum::is_number() const {
|
||||
return type == Type::NUMBER;
|
||||
}
|
||||
|
||||
bool Datum::is_string() const {
|
||||
return type == Type::STRING;
|
||||
}
|
||||
|
||||
bool Datum::is_object() const {
|
||||
return type == Type::OBJECT;
|
||||
}
|
||||
|
||||
bool Datum::is_array() const {
|
||||
return type == Type::ARRAY;
|
||||
}
|
||||
|
||||
bool Datum::is_binary() const {
|
||||
return type == Type::BINARY;
|
||||
}
|
||||
|
||||
bool Datum::is_time() const {
|
||||
return type == Type::TIME;
|
||||
}
|
||||
|
||||
bool* Datum::get_boolean() {
|
||||
if (type == Type::BOOLEAN) {
|
||||
return &value.boolean;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const bool* Datum::get_boolean() const {
|
||||
if (type == Type::BOOLEAN) {
|
||||
return &value.boolean;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
double* Datum::get_number() {
|
||||
if (type == Type::NUMBER) {
|
||||
return &value.number;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const double* Datum::get_number() const {
|
||||
if (type == Type::NUMBER) {
|
||||
return &value.number;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
std::string* Datum::get_string() {
|
||||
if (type == Type::STRING) {
|
||||
return &value.string;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const std::string* Datum::get_string() const {
|
||||
if (type == Type::STRING) {
|
||||
return &value.string;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
Datum* Datum::get_field(std::string key) {
|
||||
if (type != Type::OBJECT) {
|
||||
return NULL;
|
||||
}
|
||||
auto it = value.object.find(key);
|
||||
if (it == value.object.end()) {
|
||||
return NULL;
|
||||
}
|
||||
return &it->second;
|
||||
}
|
||||
|
||||
const Datum* Datum::get_field(std::string key) const {
|
||||
if (type != Type::OBJECT) {
|
||||
return NULL;
|
||||
}
|
||||
auto it = value.object.find(key);
|
||||
if (it == value.object.end()) {
|
||||
return NULL;
|
||||
}
|
||||
return &it->second;
|
||||
}
|
||||
|
||||
Datum* Datum::get_nth(size_t i) {
|
||||
if (type != Type::ARRAY) {
|
||||
return NULL;
|
||||
}
|
||||
if (i >= value.array.size()) {
|
||||
return NULL;
|
||||
}
|
||||
return &value.array[i];
|
||||
}
|
||||
|
||||
const Datum* Datum::get_nth(size_t i) const {
|
||||
if (type != Type::ARRAY) {
|
||||
return NULL;
|
||||
}
|
||||
if (i >= value.array.size()) {
|
||||
return NULL;
|
||||
}
|
||||
return &value.array[i];
|
||||
}
|
||||
|
||||
Object* Datum::get_object() {
|
||||
if (type == Type::OBJECT) {
|
||||
return &value.object;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const Object* Datum::get_object() const {
|
||||
if (type == Type::OBJECT) {
|
||||
return &value.object;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
Array* Datum::get_array() {
|
||||
if (type == Type::ARRAY) {
|
||||
return &value.array;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const Array* Datum::get_array() const {
|
||||
if (type == Type::ARRAY) {
|
||||
return &value.array;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
Binary* Datum::get_binary() {
|
||||
if (type == Type::BINARY) {
|
||||
return &value.binary;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const Binary* Datum::get_binary() const {
|
||||
if (type == Type::BINARY) {
|
||||
return &value.binary;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
Time* Datum::get_time() {
|
||||
if (type == Type::TIME) {
|
||||
return &value.time;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
const Time* Datum::get_time() const {
|
||||
if (type == Type::TIME) {
|
||||
return &value.time;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bool& Datum::extract_boolean() {
|
||||
if (type != Type::BOOLEAN) {
|
||||
throw Error("extract_bool: Not a boolean");
|
||||
}
|
||||
return value.boolean;
|
||||
}
|
||||
|
||||
double& Datum::extract_number() {
|
||||
if (type != Type::NUMBER) {
|
||||
throw Error("extract_number: Not a number: %s", write_datum(*this).c_str());
|
||||
}
|
||||
return value.number;
|
||||
}
|
||||
|
||||
std::string& Datum::extract_string() {
|
||||
if (type != Type::STRING) {
|
||||
throw Error("extract_string: Not a string");
|
||||
}
|
||||
return value.string;
|
||||
}
|
||||
|
||||
Object& Datum::extract_object() {
|
||||
if (type != Type::OBJECT) {
|
||||
throw Error("extract_object: Not an object");
|
||||
}
|
||||
return value.object;
|
||||
}
|
||||
|
||||
Datum& Datum::extract_field(std::string key) {
|
||||
if (type != Type::OBJECT) {
|
||||
throw Error("extract_field: Not an object");
|
||||
}
|
||||
auto it = value.object.find(key);
|
||||
if (it == value.object.end()) {
|
||||
throw Error("extract_field: No such key in object");
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
||||
Datum& Datum::extract_nth(size_t i) {
|
||||
if (type != Type::ARRAY) {
|
||||
throw Error("extract_nth: Not an array");
|
||||
}
|
||||
if (i >= value.array.size()) {
|
||||
throw Error("extract_nth: index too large");
|
||||
}
|
||||
return value.array[i];
|
||||
}
|
||||
|
||||
Array& Datum::extract_array() {
|
||||
if (type != Type::ARRAY) {
|
||||
throw Error("get_array: Not an array");
|
||||
}
|
||||
return value.array;
|
||||
}
|
||||
|
||||
Binary& Datum::extract_binary() {
|
||||
if (type != Type::BINARY) {
|
||||
throw Error("get_binary: Not a binary");
|
||||
}
|
||||
return value.binary;
|
||||
}
|
||||
|
||||
Time& Datum::extract_time() {
|
||||
if (type != Type::TIME) {
|
||||
throw Error("get_time: Not a time");
|
||||
}
|
||||
return value.time;
|
||||
}
|
||||
|
||||
// Three-way comparison: negative, zero or positive if this datum sorts
// before, equal to, or after `other`. Datums of different types order by
// their Type enum value first; only same-type datums compare by value.
int Datum::compare(const Datum& other) const {
#define COMPARE(a, b) do { \
    if (a < b) { return -1; } \
    if (a > b) { return 1; } } while(0)
#define COMPARE_OTHER(x) COMPARE(x, other.x)

    COMPARE_OTHER(type);
    int c;
    switch (type) {
    case Type::NIL: case Type::INVALID: break;
    case Type::BOOLEAN: COMPARE_OTHER(value.boolean); break;
    case Type::NUMBER: COMPARE_OTHER(value.number); break;
    case Type::STRING:
        c = value.string.compare(other.value.string);
        COMPARE(c, 0);
        break;
    case Type::BINARY:
        c = value.binary.data.compare(other.value.binary.data);
        COMPARE(c, 0);
        break;
    case Type::TIME:
        // Instant first, then offset: equal epoch times with different
        // offsets are still distinct datums.
        COMPARE(value.time.epoch_time, other.value.time.epoch_time);
        COMPARE(value.time.utc_offset, other.value.time.utc_offset);
        break;
    case Type::ARRAY:
        // Sizes are compared first, so the element-wise loop below only
        // runs when both arrays have the same length (indexing
        // other.value.array[i] is therefore in range).
        COMPARE_OTHER(value.array.size());
        for (size_t i = 0; i < value.array.size(); i++) {
            c = value.array[i].compare(other.value.array[i]);
            COMPARE(c, 0);
        }
        break;
    case Type::OBJECT:
        // Same trick: equal sizes guarantee `r` cannot run past
        // other.value.object.end() while `l` is still valid.
        COMPARE_OTHER(value.object.size());
        for (Object::const_iterator l = value.object.begin(),
                 r = other.value.object.begin();
             l != value.object.end();
             ++l, ++r) {
            COMPARE(l->first, r->first);
            c = l->second.compare(r->second);
            COMPARE(c, 0);
        }
        break;
    default:
        throw Error("cannot compare invalid datum");
    }
    return 0;
#undef COMPARE_OTHER
#undef COMPARE
}
|
||||
|
||||
// Deep equality, defined in terms of the three-way compare().
bool Datum::operator== (const Datum& other) const {
    return !compare(other);
}
|
||||
|
||||
// Decode a ReQL pseudo-type: when this datum is an object tagged with
// "$reql_type$", convert it to its native representation. Handles BINARY
// (base64-encoded "data" field) and TIME ("epoch_time" plus "timezone"
// offset). A datum that is not a tagged object, or whose tag fields are
// malformed, is returned unchanged.
// Fixes: stray double semicolon removed; strcmp on std::string contents
// replaced by std::string equality (no dependency on <cstring>).
Datum Datum::from_raw() const {
    do {
        const Datum* type_field = get_field("$reql_type$");
        if (!type_field) break;
        const std::string* type = type_field->get_string();
        if (!type) break;
        if (*type == "BINARY") {
            const Datum* data_field = get_field("data");
            if (!data_field) break;
            const std::string* encoded_data = data_field->get_string();
            if (!encoded_data) break;
            Binary binary("");
            if (base64_decode(*encoded_data, binary.data)) {
                return binary;
            }
        } else if (*type == "TIME") {
            const Datum* epoch_field = get_field("epoch_time");
            if (!epoch_field) break;
            const Datum* tz_field = get_field("timezone");
            if (!tz_field) break;
            const double* epoch_time = epoch_field->get_number();
            if (!epoch_time) break;
            const std::string* tz = tz_field->get_string();
            if (!tz) break;
            double offset;
            if (!Time::parse_utc_offset(*tz, &offset)) break;
            return Time(*epoch_time, offset);
        }
    } while (0);
    return *this;
}
|
||||
|
||||
// Encode pseudo-types for the wire: BINARY and TIME datums become plain
// JSON objects tagged with "$reql_type$"; every other type is returned
// unchanged.
Datum Datum::to_raw() const {
    switch (type) {
    case Type::BINARY:
        return Object{
            {"$reql_type$", "BINARY"},
            {"data", base64_encode(value.binary.data)}};
    case Type::TIME:
        return Object{
            {"$reql_type$", "TIME"},
            {"epoch_time", value.time.epoch_time},
            {"timezone", Time::utc_offset_string(value.time.utc_offset)}};
    default:
        return *this;
    }
}
|
||||
|
||||
// Cursors convert implicitly into datums via Cursor::to_datum().
Datum::Datum(Cursor&& cursor) : Datum(cursor.to_datum()) { }
Datum::Datum(const Cursor& cursor) : Datum(cursor.to_datum()) { }
|
||||
|
||||
// 2^DBL_MANT_DIG (= 2^53): bounds of the range in which doubles represent
// integers exactly.
static const double max_dbl_int = 0x1LL << DBL_MANT_DIG;
static const double min_dbl_int = max_dbl_int * -1;

// If `d` holds an exactly-representable integer, store it in *i_out and
// return true; otherwise return false and leave *i_out untouched.
// NaN fails the range test below (all comparisons with NaN are false),
// so it is rejected before any conversion.
bool number_as_integer(double d, int64_t *i_out) {
    static_assert(DBL_MANT_DIG == 53, "Doubles are wrong size.");

    if (!(min_dbl_int <= d && d <= max_dbl_int)) {
        return false;
    }
    const int64_t as_int = d;
    if (static_cast<double>(as_int) != d) {
        return false;
    }
    *i_out = as_int;
    return true;
}
|
||||
|
||||
template void Datum::write_json(
|
||||
rapidjson::Writer<rapidjson::StringBuffer> *writer) const;
|
||||
template void Datum::write_json(
|
||||
rapidjson::PrettyWriter<rapidjson::StringBuffer> *writer) const;
|
||||
|
||||
template <class json_writer_t>
|
||||
void Datum::write_json(json_writer_t *writer) const {
|
||||
switch (type) {
|
||||
case Type::NIL: writer->Null(); break;
|
||||
case Type::BOOLEAN: writer->Bool(value.boolean); break;
|
||||
case Type::NUMBER: {
|
||||
const double d = value.number;
|
||||
// Always print -0.0 as a double since integers cannot represent -0.
|
||||
// Otherwise check if the number is an integer and print it as such.
|
||||
int64_t i;
|
||||
if (!(d == 0.0 && std::signbit(d)) && number_as_integer(d, &i)) {
|
||||
writer->Int64(i);
|
||||
} else {
|
||||
writer->Double(d);
|
||||
}
|
||||
} break;
|
||||
case Type::STRING: writer->String(value.string.data(), value.string.size()); break;
|
||||
case Type::ARRAY: {
|
||||
writer->StartArray();
|
||||
for (auto it : value.array) {
|
||||
it.write_json(writer);
|
||||
}
|
||||
writer->EndArray();
|
||||
} break;
|
||||
case Type::OBJECT: {
|
||||
writer->StartObject();
|
||||
for (auto it : value.object) {
|
||||
writer->Key(it.first.data(), it.first.size());
|
||||
it.second.write_json(writer);
|
||||
}
|
||||
writer->EndObject();
|
||||
} break;
|
||||
|
||||
case Type::BINARY:
|
||||
case Type::TIME:
|
||||
to_raw().write_json(writer);
|
||||
break;
|
||||
default:
|
||||
throw Error("cannot write invalid datum");
|
||||
}
|
||||
}
|
||||
|
||||
std::string Datum::as_json() const {
|
||||
rapidjson::StringBuffer buffer;
|
||||
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
|
||||
write_json(&writer);
|
||||
return std::string(buffer.GetString(), buffer.GetSize());
|
||||
}
|
||||
|
||||
Datum Datum::from_json(const std::string& json) {
|
||||
return read_datum(json);
|
||||
}
|
||||
|
||||
} // namespace RethinkDB
|
287
ext/librethinkdbxx/src/datum.h
Normal file
287
ext/librethinkdbxx/src/datum.h
Normal file
@ -0,0 +1,287 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <functional>
|
||||
|
||||
#include "protocol_defs.h"
|
||||
#include "error.h"
|
||||
#include "types.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
class Cursor;
|
||||
|
||||
// The type of data stored in a RethinkDB database.
|
||||
// The following JSON types are represented in a Datum as
|
||||
// * null -> Nil
|
||||
// * boolean -> bool
|
||||
// * number -> double
|
||||
// * unicode strings -> std::string
|
||||
// * array -> Array (aka std::vector<Datum>
|
||||
// * object -> Object (aka std::map<std::string, Datum>>
|
||||
// Datums can also contain one of the following extra types
|
||||
// * binary strings -> Binary
|
||||
// * timestamps -> Time
|
||||
// * points. lines and polygons -> not implemented
|
||||
class Datum {
|
||||
public:
|
||||
Datum() : type(Type::INVALID), value() {}
|
||||
Datum(Nil) : type(Type::NIL), value() { }
|
||||
Datum(bool boolean_) : type(Type::BOOLEAN), value(boolean_) { }
|
||||
Datum(double number_) : type(Type::NUMBER), value(number_) { }
|
||||
Datum(const std::string& string_) : type(Type::STRING), value(string_) { }
|
||||
Datum(std::string&& string_) : type(Type::STRING), value(std::move(string_)) { }
|
||||
Datum(const Array& array_) : type(Type::ARRAY), value(array_) { }
|
||||
Datum(Array&& array_) : type(Type::ARRAY), value(std::move(array_)) { }
|
||||
Datum(const Binary& binary) : type(Type::BINARY), value(binary) { }
|
||||
Datum(Binary&& binary) : type(Type::BINARY), value(std::move(binary)) { }
|
||||
Datum(const Time time) : type(Type::TIME), value(time) { }
|
||||
Datum(const Object& object_) : type(Type::OBJECT), value(object_) { }
|
||||
Datum(Object&& object_) : type(Type::OBJECT), value(std::move(object_)) { }
|
||||
Datum(const Datum& other) : type(other.type), value(other.type, other.value) { }
|
||||
Datum(Datum&& other) : type(other.type), value(other.type, std::move(other.value)) { }
|
||||
|
||||
Datum& operator=(const Datum& other) {
|
||||
value.destroy(type);
|
||||
type = other.type;
|
||||
value.set(type, other.value);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Datum& operator=(Datum&& other) {
|
||||
value.destroy(type);
|
||||
type = other.type;
|
||||
value.set(type, std::move(other.value));
|
||||
return *this;
|
||||
}
|
||||
|
||||
Datum(unsigned short number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(signed short number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(unsigned int number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(signed int number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(unsigned long number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(signed long number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(unsigned long long number_) : Datum(static_cast<double>(number_)) { }
|
||||
Datum(signed long long number_) : Datum(static_cast<double>(number_)) { }
|
||||
|
||||
Datum(Protocol::Term::TermType type) : Datum(static_cast<double>(type)) { }
|
||||
Datum(const char* string) : Datum(static_cast<std::string>(string)) { }
|
||||
|
||||
// Cursors are implicitly converted into datums
|
||||
Datum(Cursor&&);
|
||||
Datum(const Cursor&);
|
||||
|
||||
template <class T>
|
||||
Datum(const std::map<std::string, T>& map) : type(Type::OBJECT), value(Object()) {
|
||||
for (const auto& it : map) {
|
||||
value.object.emplace(it.left, Datum(it.right));
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
Datum(std::map<std::string, T>&& map) : type(Type::OBJECT), value(Object()) {
|
||||
for (auto& it : map) {
|
||||
value.object.emplace(it.first, Datum(std::move(it.second)));
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
Datum(const std::vector<T>& vec) : type(Type::ARRAY), value(Array()) {
|
||||
for (const auto& it : vec) {
|
||||
value.array.emplace_back(it);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
Datum(std::vector<T>&& vec) : type(Type::ARRAY), value(Array()) {
|
||||
for (auto& it : vec) {
|
||||
value.array.emplace_back(std::move(it));
|
||||
}
|
||||
}
|
||||
|
||||
~Datum() {
|
||||
value.destroy(type);
|
||||
}
|
||||
|
||||
// Apply a visitor
|
||||
template <class R, class F, class ...A>
|
||||
R apply(F f, A&& ...args) const & {
|
||||
switch (type) {
|
||||
case Type::NIL: return f(Nil(), std::forward<A>(args)...); break;
|
||||
case Type::BOOLEAN: return f(value.boolean, std::forward<A>(args)...); break;
|
||||
case Type::NUMBER: return f(value.number, std::forward<A>(args)...); break;
|
||||
case Type::STRING: return f(value.string, std::forward<A>(args)...); break;
|
||||
case Type::OBJECT: return f(value.object, std::forward<A>(args)...); break;
|
||||
case Type::ARRAY: return f(value.array, std::forward<A>(args)...); break;
|
||||
case Type::BINARY: return f(value.binary, std::forward<A>(args)...); break;
|
||||
case Type::TIME: return f(value.time, std::forward<A>(args)...); break;
|
||||
default:
|
||||
throw Error("internal error: no such datum type %d", static_cast<int>(type));
|
||||
}
|
||||
}
|
||||
|
||||
template <class R, class F, class ...A>
|
||||
R apply(F f, A&& ...args) && {
|
||||
switch (type) {
|
||||
case Type::NIL: return f(Nil(), std::forward<A>(args)...); break;
|
||||
case Type::BOOLEAN: return f(std::move(value.boolean), std::forward<A>(args)...); break;
|
||||
case Type::NUMBER: return f(std::move(value.number), std::forward<A>(args)...); break;
|
||||
case Type::STRING: return f(std::move(value.string), std::forward<A>(args)...); break;
|
||||
case Type::OBJECT: return f(std::move(value.object), std::forward<A>(args)...); break;
|
||||
case Type::ARRAY: return f(std::move(value.array), std::forward<A>(args)...); break;
|
||||
case Type::BINARY: return f(std::move(value.binary), std::forward<A>(args)...); break;
|
||||
case Type::TIME: return f(std::move(value.time), std::forward<A>(args)...); break;
|
||||
default:
|
||||
throw Error("internal error: no such datum type %d", static_cast<int>(type));
|
||||
}
|
||||
}
|
||||
|
||||
bool is_nil() const;
|
||||
bool is_boolean() const;
|
||||
bool is_number() const;
|
||||
bool is_string() const;
|
||||
bool is_object() const;
|
||||
bool is_array() const;
|
||||
bool is_binary() const;
|
||||
bool is_time() const;
|
||||
|
||||
// get_* returns nullptr if the datum has a different type
|
||||
|
||||
bool* get_boolean();
|
||||
const bool* get_boolean() const;
|
||||
double* get_number();
|
||||
const double* get_number() const;
|
||||
std::string* get_string();
|
||||
const std::string* get_string() const;
|
||||
Object* get_object();
|
||||
const Object* get_object() const;
|
||||
Datum* get_field(std::string);
|
||||
const Datum* get_field(std::string) const;
|
||||
Array* get_array();
|
||||
const Array* get_array() const;
|
||||
Datum* get_nth(size_t);
|
||||
const Datum* get_nth(size_t) const;
|
||||
Binary* get_binary();
|
||||
const Binary* get_binary() const;
|
||||
Time* get_time();
|
||||
const Time* get_time() const;
|
||||
|
||||
// extract_* throws an exception if the types don't match
|
||||
|
||||
bool& extract_boolean();
|
||||
double& extract_number();
|
||||
std::string& extract_string();
|
||||
Object& extract_object();
|
||||
Datum& extract_field(std::string);
|
||||
Array& extract_array();
|
||||
Datum& extract_nth(size_t);
|
||||
Binary& extract_binary();
|
||||
Time& extract_time();
|
||||
|
||||
// negative, zero or positive if this datum is smaller, identical or larger than the other one, respectively
|
||||
// This is meant to match the results of RethinkDB's comparison operators
|
||||
int compare(const Datum&) const;
|
||||
|
||||
// Deep equality
|
||||
bool operator== (const Datum&) const;
|
||||
|
||||
// Recusively replace non-JSON types into objects that represent them
|
||||
Datum to_raw() const;
|
||||
|
||||
// Recursively replace objects with a $reql_type$ field into the datum they represent
|
||||
Datum from_raw() const;
|
||||
|
||||
template <class json_writer_t> void write_json(json_writer_t *writer) const;
|
||||
|
||||
std::string as_json() const;
|
||||
static Datum from_json(const std::string&);
|
||||
|
||||
bool is_valid() const { return type != Type::INVALID; }
|
||||
|
||||
private:
|
||||
enum class Type {
|
||||
INVALID, // default constructed
|
||||
ARRAY, BOOLEAN, NIL, NUMBER, OBJECT, BINARY, STRING, TIME
|
||||
// POINT, LINE, POLYGON
|
||||
};
|
||||
Type type;
|
||||
|
||||
union datum_value {
|
||||
bool boolean;
|
||||
double number;
|
||||
std::string string;
|
||||
Object object;
|
||||
Array array;
|
||||
Binary binary;
|
||||
Time time;
|
||||
|
||||
datum_value() { }
|
||||
datum_value(bool boolean_) : boolean(boolean_) { }
|
||||
datum_value(double number_) : number(number_) { }
|
||||
datum_value(const std::string& string_) : string(string_) { }
|
||||
datum_value(std::string&& string_) : string(std::move(string_)) { }
|
||||
datum_value(const Object& object_) : object(object_) { }
|
||||
datum_value(Object&& object_) : object(std::move(object_)) { }
|
||||
datum_value(const Array& array_) : array(array_) { }
|
||||
datum_value(Array&& array_) : array(std::move(array_)) { }
|
||||
datum_value(const Binary& binary_) : binary(binary_) { }
|
||||
datum_value(Binary&& binary_) : binary(std::move(binary_)) { }
|
||||
datum_value(Time time) : time(std::move(time)) { }
|
||||
|
||||
datum_value(Type type, const datum_value& other){
|
||||
set(type, other);
|
||||
}
|
||||
|
||||
datum_value(Type type, datum_value&& other){
|
||||
set(type, std::move(other));
|
||||
}
|
||||
|
||||
void set(Type type, datum_value&& other) {
|
||||
switch(type){
|
||||
case Type::NIL: case Type::INVALID: break;
|
||||
case Type::BOOLEAN: new (this) bool(other.boolean); break;
|
||||
case Type::NUMBER: new (this) double(other.number); break;
|
||||
case Type::STRING: new (this) std::string(std::move(other.string)); break;
|
||||
case Type::OBJECT: new (this) Object(std::move(other.object)); break;
|
||||
case Type::ARRAY: new (this) Array(std::move(other.array)); break;
|
||||
case Type::BINARY: new (this) Binary(std::move(other.binary)); break;
|
||||
case Type::TIME: new (this) Time(std::move(other.time)); break;
|
||||
}
|
||||
}
|
||||
|
||||
void set(Type type, const datum_value& other) {
|
||||
switch(type){
|
||||
case Type::NIL: case Type::INVALID: break;
|
||||
case Type::BOOLEAN: new (this) bool(other.boolean); break;
|
||||
case Type::NUMBER: new (this) double(other.number); break;
|
||||
case Type::STRING: new (this) std::string(other.string); break;
|
||||
case Type::OBJECT: new (this) Object(other.object); break;
|
||||
case Type::ARRAY: new (this) Array(other.array); break;
|
||||
case Type::BINARY: new (this) Binary(other.binary); break;
|
||||
case Type::TIME: new (this) Time(other.time); break;
|
||||
}
|
||||
}
|
||||
|
||||
void destroy(Type type) {
|
||||
switch(type){
|
||||
case Type::INVALID: break;
|
||||
case Type::NIL: break;
|
||||
case Type::BOOLEAN: break;
|
||||
case Type::NUMBER: break;
|
||||
case Type::STRING: { typedef std::string str; string.~str(); } break;
|
||||
case Type::OBJECT: object.~Object(); break;
|
||||
case Type::ARRAY: array.~Array(); break;
|
||||
case Type::BINARY: binary.~Binary(); break;
|
||||
case Type::TIME: time.~Time(); break;
|
||||
}
|
||||
}
|
||||
|
||||
~datum_value() { }
|
||||
};
|
||||
|
||||
datum_value value;
|
||||
};
|
||||
|
||||
}
|
46
ext/librethinkdbxx/src/error.h
Normal file
46
ext/librethinkdbxx/src/error.h
Normal file
@ -0,0 +1,46 @@
|
||||
#pragma once
|
||||
|
||||
#include <cerrno>
#include <cstdarg>
#include <cstdio>
#include <cstring>

#include <string>
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
// All errors thrown by the server have this type
|
||||
// All errors thrown by the server have this type.
struct Error {
    // printf-style construction, e.g. Error("%s: %d", what, code).
    template <class ...T>
    explicit Error(const char* format_, T... A) {
        format(format_, A...);
    }

    Error() = default;
    Error(Error&&) = default;
    Error(const Error&) = default;

    Error& operator= (Error&& other) {
        message = std::move(other.message);
        return *this;
    }
    // Fix: declaring the move-assignment operator above suppresses the
    // implicit copy assignment, which made `a = b;` with an lvalue fail to
    // compile. Restore it explicitly.
    Error& operator= (const Error&) = default;

    // Build an Error that appends strerror(errno) to `str`.
    static Error from_errno(const char* str){
        return Error("%s: %s", str, strerror(errno));
    }

    // The error message
    std::string message;

private:
    // Fix: this was a non-static const data member, making the buffer in
    // format() a variable-length array (a non-standard extension). A
    // compile-time constant yields a proper fixed-size array.
    static constexpr size_t max_message_size = 2048;

    // vsnprintf-based formatter; output longer than max_message_size - 1
    // characters is truncated.
    void format(const char* format_, ...) {
        va_list args;
        va_start(args, format_);
        char message_[max_message_size];
        vsnprintf(message_, sizeof(message_), format_, args);
        va_end(args);
        message = message_;
    }
};
|
||||
|
||||
}
|
13
ext/librethinkdbxx/src/exceptions.h
Normal file
13
ext/librethinkdbxx/src/exceptions.h
Normal file
@ -0,0 +1,13 @@
|
||||
#ifndef EXCEPTIONS_H
#define EXCEPTIONS_H

// Fix: std::exception is used below, but <exception> was never included,
// so this header only compiled when another include happened to pull it in.
#include <exception>

namespace RethinkDB {

// Thrown when a blocking operation exceeds its timeout.
class TimeoutException : public std::exception {
public:
    // `noexcept override` replaces the deprecated dynamic exception
    // specification `throw ()` (removed in C++20) and lets the compiler
    // verify the signature against std::exception::what.
    const char *what() const noexcept override { return "operation timed out"; }
};

}  // namespace RethinkDB

#endif  // EXCEPTIONS_H
|
62
ext/librethinkdbxx/src/json.cc
Normal file
62
ext/librethinkdbxx/src/json.cc
Normal file
@ -0,0 +1,62 @@
|
||||
#include "json_p.h"
|
||||
#include "error.h"
|
||||
#include "utils.h"
|
||||
|
||||
#include "rapidjson-config.h"
|
||||
#include "rapidjson/document.h"
|
||||
#include "rapidjson/stringbuffer.h"
|
||||
#include "rapidjson/writer.h"
|
||||
#include "rapidjson/prettywriter.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
// Parse JSON text into a Datum. Pseudo-type objects tagged with
// "$reql_type$" are decoded by the rapidjson::Value overload.
// Fix: parse failures were silently ignored (rapidjson leaves the document
// null on error, so malformed input decoded to Nil); they now raise an
// Error so callers can distinguish bad input from a JSON null.
Datum read_datum(const std::string& json) {
    rapidjson::Document document;
    document.Parse(json);
    if (document.HasParseError()) {
        throw Error("invalid JSON: parse error at offset %zu",
                    document.GetErrorOffset());
    }
    return read_datum(document);
}
|
||||
|
||||
// Convert a parsed rapidjson value tree into a Datum, recursively.
// Objects containing a "$reql_type$" key are passed through
// Datum::from_raw() to decode BINARY/TIME pseudo-types.
Datum read_datum(const rapidjson::Value &json) {
    switch(json.GetType()) {
        case rapidjson::kNullType: return Nil();
        case rapidjson::kFalseType: return false;
        case rapidjson::kTrueType: return true;
        case rapidjson::kNumberType: return json.GetDouble();
        case rapidjson::kStringType:
            // Use the explicit length: JSON strings may contain NULs.
            return std::string(json.GetString(), json.GetStringLength());

        case rapidjson::kObjectType: {
            Object result;
            for (rapidjson::Value::ConstMemberIterator it = json.MemberBegin();
                 it != json.MemberEnd(); ++it) {
                result.insert(std::make_pair(std::string(it->name.GetString(),
                                                         it->name.GetStringLength()),
                                             read_datum(it->value)));
            }

            // Tagged objects are pseudo-types; decode them to native datums.
            if (result.count("$reql_type$"))
                return Datum(std::move(result)).from_raw();
            // std::move here selects the Datum(Object&&) converting
            // constructor rather than a deep copy.
            return std::move(result);
        } break;
        case rapidjson::kArrayType: {
            Array result;
            result.reserve(json.Size());
            for (rapidjson::Value::ConstValueIterator it = json.Begin();
                 it != json.End(); ++it) {
                result.push_back(read_datum(*it));
            }
            return std::move(result);
        } break;
        default:
            throw Error("invalid rapidjson value");
    }
}
|
||||
|
||||
std::string write_datum(const Datum& datum) {
|
||||
rapidjson::StringBuffer buffer;
|
||||
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
|
||||
datum.write_json(&writer);
|
||||
return std::string(buffer.GetString(), buffer.GetSize());
|
||||
}
|
||||
|
||||
}
|
19
ext/librethinkdbxx/src/json_p.h
Normal file
19
ext/librethinkdbxx/src/json_p.h
Normal file
@ -0,0 +1,19 @@
|
||||
#pragma once

#include "datum.h"

// Minimal forward declarations of rapidjson's Value type so this header
// does not pull the full rapidjson headers into every includer. The
// typedef must stay in sync with rapidjson/document.h.
namespace rapidjson {
    class CrtAllocator;
    template<typename> struct UTF8;
    template <typename, typename> class GenericValue;
    template <typename> class MemoryPoolAllocator;
    typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;
}

namespace RethinkDB {

// Parse JSON text into a Datum.
Datum read_datum(const std::string&);
// Convert an already-parsed rapidjson tree into a Datum.
Datum read_datum(const rapidjson::Value &json);
// Serialize a Datum to compact JSON text.
std::string write_datum(const Datum&);

}
|
8
ext/librethinkdbxx/src/rapidjson-config.h
Normal file
8
ext/librethinkdbxx/src/rapidjson-config.h
Normal file
@ -0,0 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#define RAPIDJSON_HAS_STDSTRING 1
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
|
||||
#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
|
||||
#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseFullPrecisionFlag
|
263
ext/librethinkdbxx/src/rapidjson/allocators.h
Normal file
263
ext/librethinkdbxx/src/rapidjson/allocators.h
Normal file
@ -0,0 +1,263 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ALLOCATORS_H_
|
||||
#define RAPIDJSON_ALLOCATORS_H_
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Allocator
|
||||
|
||||
/*! \class rapidjson::Allocator
|
||||
\brief Concept for allocating, resizing and freeing memory block.
|
||||
|
||||
Note that Malloc() and Realloc() are non-static but Free() is static.
|
||||
|
||||
So if an allocator need to support Free(), it needs to put its pointer in
|
||||
the header of memory block.
|
||||
|
||||
\code
|
||||
concept Allocator {
|
||||
static const bool kNeedFree; //!< Whether this allocator needs to call Free().
|
||||
|
||||
// Allocate a memory block.
|
||||
// \param size of the memory block in bytes.
|
||||
// \returns pointer to the memory block.
|
||||
void* Malloc(size_t size);
|
||||
|
||||
// Resize a memory block.
|
||||
// \param originalPtr The pointer to current memory block. Null pointer is permitted.
|
||||
// \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
|
||||
// \param newSize the new size in bytes.
|
||||
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);
|
||||
|
||||
// Free a memory block.
|
||||
// \param pointer to the memory block. Null pointer is permitted.
|
||||
static void Free(void *ptr);
|
||||
};
|
||||
\endcode
|
||||
*/
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// CrtAllocator
|
||||
|
||||
//! C-runtime library allocator.
|
||||
/*! This class is just wrapper for standard C library memory routines.
|
||||
\note implements Allocator concept
|
||||
*/
|
||||
// Vendored rapidjson code: kept byte-identical to upstream apart from
// comments, to ease future syncs.
class CrtAllocator {
public:
    static const bool kNeedFree = true;
    void* Malloc(size_t size) {
        if (size) //  behavior of malloc(0) is implementation defined.
            return std::malloc(size);
        else
            return NULL; // standardize to returning NULL.
    }
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        (void)originalSize;
        // realloc(p, 0) is implementation defined; standardize to free+NULL.
        if (newSize == 0) {
            std::free(originalPtr);
            return NULL;
        }
        return std::realloc(originalPtr, newSize);
    }
    static void Free(void *ptr) { std::free(ptr); }
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// MemoryPoolAllocator
|
||||
|
||||
//! Default memory allocator used by the parser and DOM.
|
||||
/*! This allocator allocate memory blocks from pre-allocated memory chunks.
|
||||
|
||||
It does not free memory blocks. And Realloc() only allocate new memory.
|
||||
|
||||
The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.
|
||||
|
||||
User may also supply a buffer as the first chunk.
|
||||
|
||||
If the user-buffer is full then additional chunks are allocated by BaseAllocator.
|
||||
|
||||
The user-buffer is not deallocated by this allocator.
|
||||
|
||||
\tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
|
||||
\note implements Allocator concept
|
||||
*/
|
||||
template <typename BaseAllocator = CrtAllocator>
|
||||
class MemoryPoolAllocator {
|
||||
public:
|
||||
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
|
||||
|
||||
//! Constructor with chunkSize.
|
||||
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
|
||||
\param baseAllocator The allocator for allocating memory chunks.
|
||||
*/
|
||||
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
||||
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
|
||||
{
|
||||
}
|
||||
|
||||
//! Constructor with user-supplied buffer.
|
||||
/*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size.
|
||||
|
||||
The user buffer will not be deallocated when this allocator is destructed.
|
||||
|
||||
\param buffer User supplied buffer.
|
||||
\param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader).
|
||||
\param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
|
||||
\param baseAllocator The allocator for allocating memory chunks.
|
||||
*/
|
||||
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
|
||||
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
|
||||
{
|
||||
RAPIDJSON_ASSERT(buffer != 0);
|
||||
RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
|
||||
chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
|
||||
chunkHead_->capacity = size - sizeof(ChunkHeader);
|
||||
chunkHead_->size = 0;
|
||||
chunkHead_->next = 0;
|
||||
}
|
||||
|
||||
//! Destructor.
|
||||
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
|
||||
*/
|
||||
~MemoryPoolAllocator() {
|
||||
Clear();
|
||||
RAPIDJSON_DELETE(ownBaseAllocator_);
|
||||
}
|
||||
|
||||
//! Deallocates all memory chunks, excluding the user-supplied buffer.
|
||||
void Clear() {
|
||||
while (chunkHead_ && chunkHead_ != userBuffer_) {
|
||||
ChunkHeader* next = chunkHead_->next;
|
||||
baseAllocator_->Free(chunkHead_);
|
||||
chunkHead_ = next;
|
||||
}
|
||||
if (chunkHead_ && chunkHead_ == userBuffer_)
|
||||
chunkHead_->size = 0; // Clear user buffer
|
||||
}
|
||||
|
||||
//! Computes the total capacity of allocated memory chunks.
|
||||
/*! \return total capacity in bytes.
|
||||
*/
|
||||
size_t Capacity() const {
|
||||
size_t capacity = 0;
|
||||
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
|
||||
capacity += c->capacity;
|
||||
return capacity;
|
||||
}
|
||||
|
||||
//! Computes the memory blocks allocated.
|
||||
/*! \return total used bytes.
|
||||
*/
|
||||
size_t Size() const {
|
||||
size_t size = 0;
|
||||
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
|
||||
size += c->size;
|
||||
return size;
|
||||
}
|
||||
|
||||
//! Allocates a memory block. (concept Allocator)
|
||||
void* Malloc(size_t size) {
|
||||
if (!size)
|
||||
return NULL;
|
||||
|
||||
size = RAPIDJSON_ALIGN(size);
|
||||
if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
|
||||
AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size);
|
||||
|
||||
void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
|
||||
chunkHead_->size += size;
|
||||
return buffer;
|
||||
}
|
||||
|
||||
//! Resizes a memory block (concept Allocator)
|
||||
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
|
||||
if (originalPtr == 0)
|
||||
return Malloc(newSize);
|
||||
|
||||
if (newSize == 0)
|
||||
return NULL;
|
||||
|
||||
originalSize = RAPIDJSON_ALIGN(originalSize);
|
||||
newSize = RAPIDJSON_ALIGN(newSize);
|
||||
|
||||
// Do not shrink if new size is smaller than original
|
||||
if (originalSize >= newSize)
|
||||
return originalPtr;
|
||||
|
||||
// Simply expand it if it is the last allocation and there is sufficient space
|
||||
if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
|
||||
size_t increment = static_cast<size_t>(newSize - originalSize);
|
||||
if (chunkHead_->size + increment <= chunkHead_->capacity) {
|
||||
chunkHead_->size += increment;
|
||||
return originalPtr;
|
||||
}
|
||||
}
|
||||
|
||||
// Realloc process: allocate and copy memory, do not free original buffer.
|
||||
void* newBuffer = Malloc(newSize);
|
||||
RAPIDJSON_ASSERT(newBuffer != 0); // Do not handle out-of-memory explicitly.
|
||||
if (originalSize)
|
||||
std::memcpy(newBuffer, originalPtr, originalSize);
|
||||
return newBuffer;
|
||||
}
|
||||
|
||||
//! Frees a memory block (concept Allocator)
|
||||
static void Free(void *ptr) { (void)ptr; } // Do nothing
|
||||
|
||||
private:
|
||||
//! Copy constructor is not permitted.
|
||||
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
|
||||
//! Copy assignment operator is not permitted.
|
||||
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;
|
||||
|
||||
//! Creates a new chunk.
|
||||
/*! \param capacity Capacity of the chunk in bytes.
|
||||
*/
|
||||
void AddChunk(size_t capacity) {
|
||||
if (!baseAllocator_)
|
||||
ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator());
|
||||
ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity));
|
||||
chunk->capacity = capacity;
|
||||
chunk->size = 0;
|
||||
chunk->next = chunkHead_;
|
||||
chunkHead_ = chunk;
|
||||
}
|
||||
|
||||
static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity.
|
||||
|
||||
//! Chunk header for perpending to each chunk.
|
||||
/*! Chunks are stored as a singly linked list.
|
||||
*/
|
||||
struct ChunkHeader {
|
||||
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
|
||||
size_t size; //!< Current size of allocated memory in bytes.
|
||||
ChunkHeader *next; //!< Next chunk in the linked list.
|
||||
};
|
||||
|
||||
ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
|
||||
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
|
||||
void *userBuffer_; //!< User supplied buffer.
|
||||
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
|
||||
BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object.
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_ENCODINGS_H_
|
2575
ext/librethinkdbxx/src/rapidjson/document.h
Normal file
2575
ext/librethinkdbxx/src/rapidjson/document.h
Normal file
File diff suppressed because it is too large
Load Diff
299
ext/librethinkdbxx/src/rapidjson/encodedstream.h
Normal file
299
ext/librethinkdbxx/src/rapidjson/encodedstream.h
Normal file
@ -0,0 +1,299 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ENCODEDSTREAM_H_
|
||||
#define RAPIDJSON_ENCODEDSTREAM_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "memorystream.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Input byte stream wrapper with a statically bound encoding.
|
||||
/*!
|
||||
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
|
||||
\tparam InputByteStream Type of input byte stream. For example, FileReadStream.
|
||||
*/
|
||||
template <typename Encoding, typename InputByteStream>
|
||||
class EncodedInputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
EncodedInputStream(InputByteStream& is) : is_(is) {
|
||||
current_ = Encoding::TakeBOM(is_);
|
||||
}
|
||||
|
||||
Ch Peek() const { return current_; }
|
||||
Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; }
|
||||
size_t Tell() const { return is_.Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
EncodedInputStream(const EncodedInputStream&);
|
||||
EncodedInputStream& operator=(const EncodedInputStream&);
|
||||
|
||||
InputByteStream& is_;
|
||||
Ch current_;
|
||||
};
|
||||
|
||||
//! Specialized for UTF8 MemoryStream.
|
||||
template <>
|
||||
class EncodedInputStream<UTF8<>, MemoryStream> {
|
||||
public:
|
||||
typedef UTF8<>::Ch Ch;
|
||||
|
||||
EncodedInputStream(MemoryStream& is) : is_(is) {
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xEFu) is_.Take();
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xBBu) is_.Take();
|
||||
if (static_cast<unsigned char>(is_.Peek()) == 0xBFu) is_.Take();
|
||||
}
|
||||
Ch Peek() const { return is_.Peek(); }
|
||||
Ch Take() { return is_.Take(); }
|
||||
size_t Tell() const { return is_.Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) {}
|
||||
void Flush() {}
|
||||
Ch* PutBegin() { return 0; }
|
||||
size_t PutEnd(Ch*) { return 0; }
|
||||
|
||||
MemoryStream& is_;
|
||||
|
||||
private:
|
||||
EncodedInputStream(const EncodedInputStream&);
|
||||
EncodedInputStream& operator=(const EncodedInputStream&);
|
||||
};
|
||||
|
||||
//! Output byte stream wrapper with statically bound encoding.
|
||||
/*!
|
||||
\tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE.
|
||||
\tparam OutputByteStream Type of input byte stream. For example, FileWriteStream.
|
||||
*/
|
||||
template <typename Encoding, typename OutputByteStream>
|
||||
class EncodedOutputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) {
|
||||
if (putBOM)
|
||||
Encoding::PutBOM(os_);
|
||||
}
|
||||
|
||||
void Put(Ch c) { Encoding::Put(os_, c); }
|
||||
void Flush() { os_.Flush(); }
|
||||
|
||||
// Not implemented
|
||||
Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
|
||||
Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
|
||||
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
EncodedOutputStream(const EncodedOutputStream&);
|
||||
EncodedOutputStream& operator=(const EncodedOutputStream&);
|
||||
|
||||
OutputByteStream& os_;
|
||||
};
|
||||
|
||||
#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x
|
||||
|
||||
//! Input stream wrapper with dynamically bound encoding and automatic encoding detection.
|
||||
/*!
|
||||
\tparam CharType Type of character for reading.
|
||||
\tparam InputByteStream type of input byte stream to be wrapped.
|
||||
*/
|
||||
template <typename CharType, typename InputByteStream>
|
||||
class AutoUTFInputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef CharType Ch;
|
||||
|
||||
//! Constructor.
|
||||
/*!
|
||||
\param is input stream to be wrapped.
|
||||
\param type UTF encoding type if it is not detected from the stream.
|
||||
*/
|
||||
AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) {
|
||||
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
|
||||
DetectType();
|
||||
static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) };
|
||||
takeFunc_ = f[type_];
|
||||
current_ = takeFunc_(*is_);
|
||||
}
|
||||
|
||||
UTFType GetType() const { return type_; }
|
||||
bool HasBOM() const { return hasBOM_; }
|
||||
|
||||
Ch Peek() const { return current_; }
|
||||
Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; }
|
||||
size_t Tell() const { return is_->Tell(); }
|
||||
|
||||
// Not implemented
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
AutoUTFInputStream(const AutoUTFInputStream&);
|
||||
AutoUTFInputStream& operator=(const AutoUTFInputStream&);
|
||||
|
||||
// Detect encoding type with BOM or RFC 4627
|
||||
void DetectType() {
|
||||
// BOM (Byte Order Mark):
|
||||
// 00 00 FE FF UTF-32BE
|
||||
// FF FE 00 00 UTF-32LE
|
||||
// FE FF UTF-16BE
|
||||
// FF FE UTF-16LE
|
||||
// EF BB BF UTF-8
|
||||
|
||||
const unsigned char* c = reinterpret_cast<const unsigned char *>(is_->Peek4());
|
||||
if (!c)
|
||||
return;
|
||||
|
||||
unsigned bom = static_cast<unsigned>(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24));
|
||||
hasBOM_ = false;
|
||||
if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
|
||||
else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); }
|
||||
else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); }
|
||||
|
||||
// RFC 4627: Section 3
|
||||
// "Since the first two characters of a JSON text will always be ASCII
|
||||
// characters [RFC0020], it is possible to determine whether an octet
|
||||
// stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
|
||||
// at the pattern of nulls in the first four octets."
|
||||
// 00 00 00 xx UTF-32BE
|
||||
// 00 xx 00 xx UTF-16BE
|
||||
// xx 00 00 00 UTF-32LE
|
||||
// xx 00 xx 00 UTF-16LE
|
||||
// xx xx xx xx UTF-8
|
||||
|
||||
if (!hasBOM_) {
|
||||
unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0);
|
||||
switch (pattern) {
|
||||
case 0x08: type_ = kUTF32BE; break;
|
||||
case 0x0A: type_ = kUTF16BE; break;
|
||||
case 0x01: type_ = kUTF32LE; break;
|
||||
case 0x05: type_ = kUTF16LE; break;
|
||||
case 0x0F: type_ = kUTF8; break;
|
||||
default: break; // Use type defined by user.
|
||||
}
|
||||
}
|
||||
|
||||
// Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
|
||||
if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
|
||||
if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
|
||||
}
|
||||
|
||||
typedef Ch (*TakeFunc)(InputByteStream& is);
|
||||
InputByteStream* is_;
|
||||
UTFType type_;
|
||||
Ch current_;
|
||||
TakeFunc takeFunc_;
|
||||
bool hasBOM_;
|
||||
};
|
||||
|
||||
//! Output stream wrapper with dynamically bound encoding and automatic encoding detection.
|
||||
/*!
|
||||
\tparam CharType Type of character for writing.
|
||||
\tparam OutputByteStream type of output byte stream to be wrapped.
|
||||
*/
|
||||
template <typename CharType, typename OutputByteStream>
|
||||
class AutoUTFOutputStream {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
public:
|
||||
typedef CharType Ch;
|
||||
|
||||
//! Constructor.
|
||||
/*!
|
||||
\param os output stream to be wrapped.
|
||||
\param type UTF encoding type.
|
||||
\param putBOM Whether to write BOM at the beginning of the stream.
|
||||
*/
|
||||
AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) {
|
||||
RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE);
|
||||
|
||||
// Runtime check whether the size of character type is sufficient. It only perform checks with assertion.
|
||||
if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2);
|
||||
if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4);
|
||||
|
||||
static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) };
|
||||
putFunc_ = f[type_];
|
||||
|
||||
if (putBOM)
|
||||
PutBOM();
|
||||
}
|
||||
|
||||
UTFType GetType() const { return type_; }
|
||||
|
||||
void Put(Ch c) { putFunc_(*os_, c); }
|
||||
void Flush() { os_->Flush(); }
|
||||
|
||||
// Not implemented
|
||||
Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;}
|
||||
Ch Take() { RAPIDJSON_ASSERT(false); return 0;}
|
||||
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
AutoUTFOutputStream(const AutoUTFOutputStream&);
|
||||
AutoUTFOutputStream& operator=(const AutoUTFOutputStream&);
|
||||
|
||||
void PutBOM() {
|
||||
typedef void (*PutBOMFunc)(OutputByteStream&);
|
||||
static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) };
|
||||
f[type_](*os_);
|
||||
}
|
||||
|
||||
typedef void (*PutFunc)(OutputByteStream&, Ch);
|
||||
|
||||
OutputByteStream* os_;
|
||||
UTFType type_;
|
||||
PutFunc putFunc_;
|
||||
};
|
||||
|
||||
#undef RAPIDJSON_ENCODINGS_FUNC
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_FILESTREAM_H_
|
716
ext/librethinkdbxx/src/rapidjson/encodings.h
Normal file
716
ext/librethinkdbxx/src/rapidjson/encodings.h
Normal file
@ -0,0 +1,716 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ENCODINGS_H_
|
||||
#define RAPIDJSON_ENCODINGS_H_
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data
|
||||
RAPIDJSON_DIAG_OFF(4702) // unreachable code
|
||||
#elif defined(__GNUC__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
RAPIDJSON_DIAG_OFF(overflow)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Encoding
|
||||
|
||||
/*! \class rapidjson::Encoding
|
||||
\brief Concept for encoding of Unicode characters.
|
||||
|
||||
\code
|
||||
concept Encoding {
|
||||
typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition.
|
||||
|
||||
enum { supportUnicode = 1 }; // or 0 if not supporting unicode
|
||||
|
||||
//! \brief Encode a Unicode codepoint to an output stream.
|
||||
//! \param os Output stream.
|
||||
//! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively.
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint);
|
||||
|
||||
//! \brief Decode a Unicode codepoint from an input stream.
|
||||
//! \param is Input stream.
|
||||
//! \param codepoint Output of the unicode codepoint.
|
||||
//! \return true if a valid codepoint can be decoded from the stream.
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint);
|
||||
|
||||
//! \brief Validate one Unicode codepoint from an encoded stream.
|
||||
//! \param is Input stream to obtain codepoint.
|
||||
//! \param os Output for copying one codepoint.
|
||||
//! \return true if it is valid.
|
||||
//! \note This function just validating and copying the codepoint without actually decode it.
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os);
|
||||
|
||||
// The following functions are deal with byte streams.
|
||||
|
||||
//! Take a character from input byte stream, skip BOM if exist.
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is);
|
||||
|
||||
//! Take a character from input byte stream.
|
||||
template <typename InputByteStream>
|
||||
static Ch Take(InputByteStream& is);
|
||||
|
||||
//! Put BOM to output byte stream.
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os);
|
||||
|
||||
//! Put a character to output byte stream.
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, Ch c);
|
||||
};
|
||||
\endcode
|
||||
*/
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF8
|
||||
|
||||
//! UTF-8 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-8
|
||||
http://tools.ietf.org/html/rfc3629
|
||||
\tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char.
|
||||
\note implements Encoding concept
|
||||
*/
|
||||
template<typename CharType = char>
|
||||
struct UTF8 {
|
||||
typedef CharType Ch;
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
if (codepoint <= 0x7F)
|
||||
os.Put(static_cast<Ch>(codepoint & 0xFF));
|
||||
else if (codepoint <= 0x7FF) {
|
||||
os.Put(static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
|
||||
}
|
||||
else if (codepoint <= 0xFFFF) {
|
||||
os.Put(static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
os.Put(static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
os.Put(static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
}
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
if (codepoint <= 0x7F)
|
||||
PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
|
||||
else if (codepoint <= 0x7FF) {
|
||||
PutUnsafe(os, static_cast<Ch>(0xC0 | ((codepoint >> 6) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint & 0x3F))));
|
||||
}
|
||||
else if (codepoint <= 0xFFFF) {
|
||||
PutUnsafe(os, static_cast<Ch>(0xE0 | ((codepoint >> 12) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
PutUnsafe(os, static_cast<Ch>(0xF0 | ((codepoint >> 18) & 0xFF)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 12) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | ((codepoint >> 6) & 0x3F)));
|
||||
PutUnsafe(os, static_cast<Ch>(0x80 | (codepoint & 0x3F)));
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast<unsigned char>(c) & 0x3Fu)
|
||||
#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
|
||||
#define TAIL() COPY(); TRANS(0x70)
|
||||
typename InputStream::Ch c = is.Take();
|
||||
if (!(c & 0x80)) {
|
||||
*codepoint = static_cast<unsigned char>(c);
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned char type = GetRange(static_cast<unsigned char>(c));
|
||||
if (type >= 32) {
|
||||
*codepoint = 0;
|
||||
} else {
|
||||
*codepoint = (0xFF >> type) & static_cast<unsigned char>(c);
|
||||
}
|
||||
bool result = true;
|
||||
switch (type) {
|
||||
case 2: TAIL(); return result;
|
||||
case 3: TAIL(); TAIL(); return result;
|
||||
case 4: COPY(); TRANS(0x50); TAIL(); return result;
|
||||
case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
|
||||
case 6: TAIL(); TAIL(); TAIL(); return result;
|
||||
case 10: COPY(); TRANS(0x20); TAIL(); return result;
|
||||
case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
|
||||
default: return false;
|
||||
}
|
||||
#undef COPY
|
||||
#undef TRANS
|
||||
#undef TAIL
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
#define COPY() os.Put(c = is.Take())
|
||||
#define TRANS(mask) result &= ((GetRange(static_cast<unsigned char>(c)) & mask) != 0)
|
||||
#define TAIL() COPY(); TRANS(0x70)
|
||||
Ch c;
|
||||
COPY();
|
||||
if (!(c & 0x80))
|
||||
return true;
|
||||
|
||||
bool result = true;
|
||||
switch (GetRange(static_cast<unsigned char>(c))) {
|
||||
case 2: TAIL(); return result;
|
||||
case 3: TAIL(); TAIL(); return result;
|
||||
case 4: COPY(); TRANS(0x50); TAIL(); return result;
|
||||
case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result;
|
||||
case 6: TAIL(); TAIL(); TAIL(); return result;
|
||||
case 10: COPY(); TRANS(0x20); TAIL(); return result;
|
||||
case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result;
|
||||
default: return false;
|
||||
}
|
||||
#undef COPY
|
||||
#undef TRANS
|
||||
#undef TAIL
|
||||
}
|
||||
|
||||
static unsigned char GetRange(unsigned char c) {
|
||||
// Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
|
||||
// With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types.
|
||||
static const unsigned char type[] = {
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
|
||||
0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
|
||||
0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
|
||||
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
|
||||
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
|
||||
8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
|
||||
10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
|
||||
};
|
||||
return type[c];
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
typename InputByteStream::Ch c = Take(is);
|
||||
if (static_cast<unsigned char>(c) != 0xEFu) return c;
|
||||
c = is.Take();
|
||||
if (static_cast<unsigned char>(c) != 0xBBu) return c;
|
||||
c = is.Take();
|
||||
if (static_cast<unsigned char>(c) != 0xBFu) return c;
|
||||
c = is.Take();
|
||||
return c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static Ch Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
return static_cast<Ch>(is.Take());
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xEFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xBBu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xBFu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, Ch c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c));
|
||||
}
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF16
|
||||
|
||||
//! UTF-16 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-16
|
||||
http://tools.ietf.org/html/rfc2781
|
||||
\tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead.
|
||||
\note implements Encoding concept
|
||||
|
||||
\note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
|
||||
For streaming, use UTF16LE and UTF16BE, which handle endianness.
|
||||
*/
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16 {
|
||||
typedef CharType Ch;
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2);
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
if (codepoint <= 0xFFFF) {
|
||||
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
|
||||
os.Put(static_cast<typename OutputStream::Ch>(codepoint));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
unsigned v = codepoint - 0x10000;
|
||||
os.Put(static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
|
||||
os.Put((v & 0x3FF) | 0xDC00);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
if (codepoint <= 0xFFFF) {
|
||||
RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair
|
||||
PutUnsafe(os, static_cast<typename OutputStream::Ch>(codepoint));
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
unsigned v = codepoint - 0x10000;
|
||||
PutUnsafe(os, static_cast<typename OutputStream::Ch>((v >> 10) | 0xD800));
|
||||
PutUnsafe(os, (v & 0x3FF) | 0xDC00);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
|
||||
typename InputStream::Ch c = is.Take();
|
||||
if (c < 0xD800 || c > 0xDFFF) {
|
||||
*codepoint = static_cast<unsigned>(c);
|
||||
return true;
|
||||
}
|
||||
else if (c <= 0xDBFF) {
|
||||
*codepoint = (static_cast<unsigned>(c) & 0x3FF) << 10;
|
||||
c = is.Take();
|
||||
*codepoint |= (static_cast<unsigned>(c) & 0x3FF);
|
||||
*codepoint += 0x10000;
|
||||
return c >= 0xDC00 && c <= 0xDFFF;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2);
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2);
|
||||
typename InputStream::Ch c;
|
||||
os.Put(static_cast<typename OutputStream::Ch>(c = is.Take()));
|
||||
if (c < 0xD800 || c > 0xDFFF)
|
||||
return true;
|
||||
else if (c <= 0xDBFF) {
|
||||
os.Put(c = is.Take());
|
||||
return c >= 0xDC00 && c <= 0xDFFF;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-16 little endian encoding.
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16LE : UTF16<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<uint8_t>(is.Take());
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-16 big endian encoding.
|
||||
template<typename CharType = wchar_t>
|
||||
struct UTF16BE : UTF16<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint16_t>(c) == 0xFEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
c |= static_cast<uint8_t>(is.Take());
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((static_cast<unsigned>(c) >> 8) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(static_cast<unsigned>(c) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// UTF32
|
||||
|
||||
//! UTF-32 encoding.
|
||||
/*! http://en.wikipedia.org/wiki/UTF-32
|
||||
\tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead.
|
||||
\note implements Encoding concept
|
||||
|
||||
\note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness.
|
||||
For streaming, use UTF32LE and UTF32BE, which handle endianness.
|
||||
*/
|
||||
template<typename CharType = unsigned>
|
||||
struct UTF32 {
|
||||
typedef CharType Ch;
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4);
|
||||
|
||||
enum { supportUnicode = 1 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
os.Put(codepoint);
|
||||
}
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4);
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x10FFFF);
|
||||
PutUnsafe(os, codepoint);
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
|
||||
Ch c = is.Take();
|
||||
*codepoint = c;
|
||||
return c <= 0x10FFFF;
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4);
|
||||
Ch c;
|
||||
os.Put(c = is.Take());
|
||||
return c <= 0x10FFFF;
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-32 little endian enocoding.
|
||||
template<typename CharType = unsigned>
|
||||
struct UTF32LE : UTF32<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<uint8_t>(is.Take());
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
//! UTF-32 big endian encoding.
|
||||
template<typename CharType = unsigned>
|
||||
struct UTF32BE : UTF32<CharType> {
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
CharType c = Take(is);
|
||||
return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
|
||||
c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take()));
|
||||
return static_cast<CharType>(c);
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, CharType c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
|
||||
}
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// ASCII
|
||||
|
||||
//! ASCII encoding.
|
||||
/*! http://en.wikipedia.org/wiki/ASCII
|
||||
\tparam CharType Code unit for storing 7-bit ASCII data. Default is char.
|
||||
\note implements Encoding concept
|
||||
*/
|
||||
template<typename CharType = char>
|
||||
struct ASCII {
|
||||
typedef CharType Ch;
|
||||
|
||||
enum { supportUnicode = 0 };
|
||||
|
||||
template<typename OutputStream>
|
||||
static void Encode(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x7F);
|
||||
os.Put(static_cast<Ch>(codepoint & 0xFF));
|
||||
}
|
||||
|
||||
template<typename OutputStream>
|
||||
static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
|
||||
RAPIDJSON_ASSERT(codepoint <= 0x7F);
|
||||
PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
static bool Decode(InputStream& is, unsigned* codepoint) {
|
||||
uint8_t c = static_cast<uint8_t>(is.Take());
|
||||
*codepoint = c;
|
||||
return c <= 0X7F;
|
||||
}
|
||||
|
||||
template <typename InputStream, typename OutputStream>
|
||||
static bool Validate(InputStream& is, OutputStream& os) {
|
||||
uint8_t c = static_cast<uint8_t>(is.Take());
|
||||
os.Put(static_cast<typename OutputStream::Ch>(c));
|
||||
return c <= 0x7F;
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static CharType TakeBOM(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
uint8_t c = static_cast<uint8_t>(Take(is));
|
||||
return static_cast<Ch>(c);
|
||||
}
|
||||
|
||||
template <typename InputByteStream>
|
||||
static Ch Take(InputByteStream& is) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
|
||||
return static_cast<Ch>(is.Take());
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void PutBOM(OutputByteStream& os) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
(void)os;
|
||||
}
|
||||
|
||||
template <typename OutputByteStream>
|
||||
static void Put(OutputByteStream& os, Ch c) {
|
||||
RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
|
||||
os.Put(static_cast<typename OutputByteStream::Ch>(c));
|
||||
}
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// AutoUTF
|
||||
|
||||
//! Runtime-specified UTF encoding type of a stream.
/*! The numeric values index the per-encoding dispatch tables built inside
    AutoUTF (via RAPIDJSON_ENCODINGS_FUNC), so the order and values here
    must stay in sync with that table.
*/
enum UTFType {
    kUTF8 = 0,      //!< UTF-8.
    kUTF16LE = 1,   //!< UTF-16 little endian.
    kUTF16BE = 2,   //!< UTF-16 big endian.
    kUTF32LE = 3,   //!< UTF-32 little endian.
    kUTF32BE = 4    //!< UTF-32 big endian.
};
|
||||
|
||||
//! Dynamically select encoding according to stream's runtime-specified UTF encoding type.
/*! \note This class can be used with AutoUTFInputStream and AutoUTFOutputStream, which provides GetType().
*/
template<typename CharType>
struct AutoUTF {
    typedef CharType Ch;

    enum { supportUnicode = 1 };

// Expands to a comma-separated list of member \c x for each concrete UTF
// encoding, in the order of the UTFType enum. Used below to build the
// function-pointer dispatch tables indexed by GetType().
#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8<Ch>::x, UTF16LE<Ch>::x, UTF16BE<Ch>::x, UTF32LE<Ch>::x, UTF32BE<Ch>::x

    //! Dispatch Encode() to the encoding reported by os.GetType().
    template<typename OutputStream>
    RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) {
        typedef void (*EncodeFunc)(OutputStream&, unsigned);
        static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) };
        (*f[os.GetType()])(os, codepoint);
    }

    //! Dispatch EncodeUnsafe() to the encoding reported by os.GetType().
    template<typename OutputStream>
    RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
        typedef void (*EncodeFunc)(OutputStream&, unsigned);
        static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) };
        (*f[os.GetType()])(os, codepoint);
    }

    //! Dispatch Decode() to the encoding reported by is.GetType().
    template <typename InputStream>
    RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) {
        typedef bool (*DecodeFunc)(InputStream&, unsigned*);
        static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) };
        return (*f[is.GetType()])(is, codepoint);
    }

    //! Dispatch Validate() to the encoding reported by is.GetType().
    template <typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        typedef bool (*ValidateFunc)(InputStream&, OutputStream&);
        static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) };
        return (*f[is.GetType()])(is, os);
    }

#undef RAPIDJSON_ENCODINGS_FUNC
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Transcoder
|
||||
|
||||
//! Encoding conversion.
|
||||
template<typename SourceEncoding, typename TargetEncoding>
|
||||
struct Transcoder {
|
||||
//! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream.
|
||||
template<typename InputStream, typename OutputStream>
|
||||
RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
|
||||
unsigned codepoint;
|
||||
if (!SourceEncoding::Decode(is, &codepoint))
|
||||
return false;
|
||||
TargetEncoding::Encode(os, codepoint);
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename InputStream, typename OutputStream>
|
||||
RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
|
||||
unsigned codepoint;
|
||||
if (!SourceEncoding::Decode(is, &codepoint))
|
||||
return false;
|
||||
TargetEncoding::EncodeUnsafe(os, codepoint);
|
||||
return true;
|
||||
}
|
||||
|
||||
//! Validate one Unicode codepoint from an encoded stream.
|
||||
template<typename InputStream, typename OutputStream>
|
||||
RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
|
||||
return Transcode(is, os); // Since source/target encoding is different, must transcode.
|
||||
}
|
||||
};
|
||||
|
||||
// Forward declaration of the unchecked single-code-unit write used below.
template<typename Stream>
inline void PutUnsafe(Stream& stream, typename Stream::Ch c);

//! Specialization of Transcoder with same source and target encoding.
/*! With identical encodings no conversion is needed, so code units are
    copied through verbatim instead of being decoded and re-encoded.
*/
template<typename Encoding>
struct Transcoder<Encoding, Encoding> {
    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) {
        os.Put(is.Take());  // Just copy one code unit. This semantic is different from primary template class.
        return true;
    }

    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) {
        PutUnsafe(os, is.Take());   // Just copy one code unit. This semantic is different from primary template class.
        return true;
    }

    template<typename InputStream, typename OutputStream>
    RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) {
        return Encoding::Validate(is, os); // source/target encoding are the same
    }
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#if defined(__GNUC__) || defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_ENCODINGS_H_
|
74
ext/librethinkdbxx/src/rapidjson/error/en.h
Normal file
74
ext/librethinkdbxx/src/rapidjson/error/en.h
Normal file
@ -0,0 +1,74 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ERROR_EN_H_
|
||||
#define RAPIDJSON_ERROR_EN_H_
|
||||
|
||||
#include "error.h"
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(switch-enum)
|
||||
RAPIDJSON_DIAG_OFF(covered-switch-default)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Maps error code of parsing into error message.
/*!
    \ingroup RAPIDJSON_ERRORS
    \param parseErrorCode Error code obtained in parsing.
    \return the error message.
    \note User can make a copy of this function for localization.
        Using switch-case is safer for future modification of error codes.
*/
inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) {
    switch (parseErrorCode) {
        case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error.");

        case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty.");
        case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");

        case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value.");

        case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
        case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
        case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");

        case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");

        case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
        case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid.");
        case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string.");
        case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string.");
        case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string.");

        case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double.");
        case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number.");
        case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number.");

        case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error.");
        case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error.");

        // Any code without an explicit case above maps to a generic message.
        default: return RAPIDJSON_ERROR_STRING("Unknown error.");
    }
}
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_ERROR_EN_H_
|
155
ext/librethinkdbxx/src/rapidjson/error/error.h
Normal file
155
ext/librethinkdbxx/src/rapidjson/error/error.h
Normal file
@ -0,0 +1,155 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ERROR_ERROR_H_
|
||||
#define RAPIDJSON_ERROR_ERROR_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
/*! \file error.h */
|
||||
|
||||
/*! \defgroup RAPIDJSON_ERRORS RapidJSON error handling */
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ERROR_CHARTYPE
|
||||
|
||||
//! Character type of error messages.
|
||||
/*! \ingroup RAPIDJSON_ERRORS
|
||||
The default character type is \c char.
|
||||
On Windows, user can define this macro as \c TCHAR for supporting both
|
||||
unicode/non-unicode settings.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ERROR_CHARTYPE
|
||||
#define RAPIDJSON_ERROR_CHARTYPE char
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ERROR_STRING
|
||||
|
||||
//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[].
|
||||
/*! \ingroup RAPIDJSON_ERRORS
|
||||
By default this conversion macro does nothing.
|
||||
On Windows, user can define this macro as \c _T(x) for supporting both
|
||||
unicode/non-unicode settings.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ERROR_STRING
|
||||
#define RAPIDJSON_ERROR_STRING(x) x
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// ParseErrorCode
|
||||
|
||||
//! Error code of parsing.
/*! \ingroup RAPIDJSON_ERRORS
    \see GenericReader::Parse, GenericReader::GetParseErrorCode
*/
enum ParseErrorCode {
    kParseErrorNone = 0,                        //!< No error.

    kParseErrorDocumentEmpty,                   //!< The document is empty.
    kParseErrorDocumentRootNotSingular,         //!< The document root must not be followed by other values.

    kParseErrorValueInvalid,                    //!< Invalid value.

    kParseErrorObjectMissName,                  //!< Missing a name for object member.
    kParseErrorObjectMissColon,                 //!< Missing a colon after a name of object member.
    kParseErrorObjectMissCommaOrCurlyBracket,   //!< Missing a comma or '}' after an object member.

    kParseErrorArrayMissCommaOrSquareBracket,   //!< Missing a comma or ']' after an array element.

    kParseErrorStringUnicodeEscapeInvalidHex,   //!< Incorrect hex digit after \\u escape in string.
    kParseErrorStringUnicodeSurrogateInvalid,   //!< The surrogate pair in string is invalid.
    kParseErrorStringEscapeInvalid,             //!< Invalid escape character in string.
    kParseErrorStringMissQuotationMark,         //!< Missing a closing quotation mark in string.
    kParseErrorStringInvalidEncoding,           //!< Invalid encoding in string.

    kParseErrorNumberTooBig,                    //!< Number too big to be stored in double.
    kParseErrorNumberMissFraction,              //!< Miss fraction part in number.
    kParseErrorNumberMissExponent,              //!< Miss exponent in number.

    kParseErrorTermination,                     //!< Parsing was terminated.
    kParseErrorUnspecificSyntaxError            //!< Unspecific syntax error.
};
|
||||
|
||||
//! Result of parsing (wraps ParseErrorCode)
|
||||
/*!
|
||||
\ingroup RAPIDJSON_ERRORS
|
||||
\code
|
||||
Document doc;
|
||||
ParseResult ok = doc.Parse("[42]");
|
||||
if (!ok) {
|
||||
fprintf(stderr, "JSON parse error: %s (%u)",
|
||||
GetParseError_En(ok.Code()), ok.Offset());
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
\endcode
|
||||
\see GenericReader::Parse, GenericDocument::Parse
|
||||
*/
|
||||
struct ParseResult {
|
||||
public:
|
||||
//! Default constructor, no error.
|
||||
ParseResult() : code_(kParseErrorNone), offset_(0) {}
|
||||
//! Constructor to set an error.
|
||||
ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {}
|
||||
|
||||
//! Get the error code.
|
||||
ParseErrorCode Code() const { return code_; }
|
||||
//! Get the error offset, if \ref IsError(), 0 otherwise.
|
||||
size_t Offset() const { return offset_; }
|
||||
|
||||
//! Conversion to \c bool, returns \c true, iff !\ref IsError().
|
||||
operator bool() const { return !IsError(); }
|
||||
//! Whether the result is an error.
|
||||
bool IsError() const { return code_ != kParseErrorNone; }
|
||||
|
||||
bool operator==(const ParseResult& that) const { return code_ == that.code_; }
|
||||
bool operator==(ParseErrorCode code) const { return code_ == code; }
|
||||
friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; }
|
||||
|
||||
//! Reset error code.
|
||||
void Clear() { Set(kParseErrorNone); }
|
||||
//! Update error code and offset.
|
||||
void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; }
|
||||
|
||||
private:
|
||||
ParseErrorCode code_;
|
||||
size_t offset_;
|
||||
};
|
||||
|
||||
//! Function pointer type of GetParseError().
|
||||
/*! \ingroup RAPIDJSON_ERRORS
|
||||
|
||||
This is the prototype for \c GetParseError_X(), where \c X is a locale.
|
||||
User can dynamically change locale in runtime, e.g.:
|
||||
\code
|
||||
GetParseErrorFunc GetParseError = GetParseError_En; // or whatever
|
||||
const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode());
|
||||
\endcode
|
||||
*/
|
||||
typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode);
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_ERROR_ERROR_H_
|
99
ext/librethinkdbxx/src/rapidjson/filereadstream.h
Normal file
99
ext/librethinkdbxx/src/rapidjson/filereadstream.h
Normal file
@ -0,0 +1,99 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_FILEREADSTREAM_H_
|
||||
#define RAPIDJSON_FILEREADSTREAM_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include <cstdio>
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
RAPIDJSON_DIAG_OFF(unreachable-code)
|
||||
RAPIDJSON_DIAG_OFF(missing-noreturn)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! File byte stream for input using fread().
/*!
    Buffers reads from a C \c FILE* into a caller-supplied buffer and exposes
    the byte-oriented Stream interface (Peek/Take/Tell). Write operations are
    deliberately unimplemented.
    \note implements Stream concept
*/
class FileReadStream {
public:
    typedef char Ch;    //!< Character type (byte).

    //! Constructor.
    /*!
        \param fp File pointer opened for read.
        \param buffer user-supplied buffer.
        \param bufferSize size of buffer in bytes. Must >=4 bytes.
    */
    FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
        RAPIDJSON_ASSERT(fp_ != 0);
        RAPIDJSON_ASSERT(bufferSize >= 4);  // Peek4() needs up to 4 readable bytes
        Read();  // prime the buffer so Peek() is valid immediately
    }

    Ch Peek() const { return *current_; }               // current byte, not consumed
    Ch Take() { Ch c = *current_; Read(); return c; }   // consume and return current byte
    size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }  // bytes consumed so far

    // Not implemented
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    // For encoding detection only.
    // Returns the next 4 bytes without consuming them, or 0 when fewer than
    // 4 buffered bytes remain.
    const Ch* Peek4() const {
        return (current_ + 4 <= bufferLast_) ? current_ : 0;
    }

private:
    //! Advance the cursor one byte, refilling the buffer from the file when exhausted.
    void Read() {
        if (current_ < bufferLast_)
            ++current_;
        else if (!eof_) {
            count_ += readCount_;
            readCount_ = fread(buffer_, 1, bufferSize_, fp_);
            bufferLast_ = buffer_ + readCount_ - 1;
            current_ = buffer_;

            // A short read means end of file: append a '\0' sentinel so the
            // stream yields a terminator after the last real byte.
            if (readCount_ < bufferSize_) {
                buffer_[readCount_] = '\0';
                ++bufferLast_;
                eof_ = true;
            }
        }
    }

    std::FILE* fp_;         //!< Underlying C file handle (not owned).
    Ch *buffer_;            //!< Caller-supplied buffer (not owned).
    size_t bufferSize_;     //!< Capacity of buffer_ in bytes.
    Ch *bufferLast_;        //!< Last valid byte currently in the buffer.
    Ch *current_;           //!< Read cursor within the buffer.
    size_t readCount_;      //!< Bytes returned by the most recent fread().
    size_t count_;  //!< Number of characters read
    bool eof_;              //!< True once fread() returned a short count.
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_FILESTREAM_H_
|
104
ext/librethinkdbxx/src/rapidjson/filewritestream.h
Normal file
104
ext/librethinkdbxx/src/rapidjson/filewritestream.h
Normal file
@ -0,0 +1,104 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_FILEWRITESTREAM_H_
|
||||
#define RAPIDJSON_FILEWRITESTREAM_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include <cstdio>
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(unreachable-code)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Wrapper of C file stream for output using fwrite().
/*!
    \note implements Stream concept
*/
class FileWriteStream {
public:
    typedef char Ch;    //!< Character type. Only support char.

    //! Constructor.
    /*! \param fp File pointer opened for writing (must be non-null; asserted).
        \param buffer User-supplied byte buffer used to batch bytes before fwrite().
        \param bufferSize Size of \c buffer in bytes.
        \note The stream does not own \c fp or \c buffer; the caller must keep
              both alive for the lifetime of the stream and close the file itself.
    */
    FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) {
        RAPIDJSON_ASSERT(fp_ != 0);
    }

    //! Write one character, flushing the buffer to the file first if it is full.
    void Put(char c) {
        if (current_ >= bufferEnd_)
            Flush();

        *current_++ = c;
    }

    //! Write \c n copies of \c c, filling buffer-sized chunks with memset().
    void PutN(char c, size_t n) {
        size_t avail = static_cast<size_t>(bufferEnd_ - current_);
        // Fill and flush whole buffer chunks while the request exceeds free space.
        while (n > avail) {
            std::memset(current_, c, avail);
            current_ += avail;
            Flush();
            n -= avail;
            avail = static_cast<size_t>(bufferEnd_ - current_);
        }

        if (n > 0) {
            std::memset(current_, c, n);
            current_ += n;
        }
    }

    //! Write any buffered bytes to the file and reset the buffer cursor.
    void Flush() {
        if (current_ != buffer_) {
            size_t result = fwrite(buffer_, 1, static_cast<size_t>(current_ - buffer_), fp_);
            if (result < static_cast<size_t>(current_ - buffer_)) {
                // failure deliberately ignored at this time
                // added to avoid warn_unused_result build errors
            }
            current_ = buffer_;
        }
    }

    // Not implemented — this stream is write-only; the following exist only to
    // satisfy the Stream concept and assert if ever called.
    char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
    char Take() { RAPIDJSON_ASSERT(false); return 0; }
    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
    char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }

private:
    // Prohibit copy constructor & assignment operator.
    FileWriteStream(const FileWriteStream&);
    FileWriteStream& operator=(const FileWriteStream&);

    std::FILE* fp_;     // destination file (not owned)
    char *buffer_;      // start of the user-supplied write buffer
    char *bufferEnd_;   // one past the last byte of the buffer
    char *current_;     // next free byte within the buffer
};
|
||||
|
||||
//! Implement specialized version of PutN() with memset() for better performance.
// Forwards to FileWriteStream::PutN(), which fills buffer-sized chunks with
// memset() instead of writing one character at a time through the generic
// PutN() (declared in stream.h — presumably the primary template; confirm there).
template<>
inline void PutN(FileWriteStream& stream, char c, size_t n) {
    stream.PutN(c, n);
}
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_FILEWRITESTREAM_H_
|
151
ext/librethinkdbxx/src/rapidjson/fwd.h
Normal file
151
ext/librethinkdbxx/src/rapidjson/fwd.h
Normal file
@ -0,0 +1,151 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_FWD_H_
|
||||
#define RAPIDJSON_FWD_H_
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
// encodings.h
|
||||
|
||||
template<typename CharType> struct UTF8;
|
||||
template<typename CharType> struct UTF16;
|
||||
template<typename CharType> struct UTF16BE;
|
||||
template<typename CharType> struct UTF16LE;
|
||||
template<typename CharType> struct UTF32;
|
||||
template<typename CharType> struct UTF32BE;
|
||||
template<typename CharType> struct UTF32LE;
|
||||
template<typename CharType> struct ASCII;
|
||||
template<typename CharType> struct AutoUTF;
|
||||
|
||||
template<typename SourceEncoding, typename TargetEncoding>
|
||||
struct Transcoder;
|
||||
|
||||
// allocators.h
|
||||
|
||||
class CrtAllocator;
|
||||
|
||||
template <typename BaseAllocator>
|
||||
class MemoryPoolAllocator;
|
||||
|
||||
// stream.h
|
||||
|
||||
template <typename Encoding>
|
||||
struct GenericStringStream;
|
||||
|
||||
typedef GenericStringStream<UTF8<char> > StringStream;
|
||||
|
||||
template <typename Encoding>
|
||||
struct GenericInsituStringStream;
|
||||
|
||||
typedef GenericInsituStringStream<UTF8<char> > InsituStringStream;
|
||||
|
||||
// stringbuffer.h
|
||||
|
||||
template <typename Encoding, typename Allocator>
|
||||
class GenericStringBuffer;
|
||||
|
||||
typedef GenericStringBuffer<UTF8<char>, CrtAllocator> StringBuffer;
|
||||
|
||||
// filereadstream.h
|
||||
|
||||
class FileReadStream;
|
||||
|
||||
// filewritestream.h
|
||||
|
||||
class FileWriteStream;
|
||||
|
||||
// memorybuffer.h
|
||||
|
||||
template <typename Allocator>
|
||||
struct GenericMemoryBuffer;
|
||||
|
||||
typedef GenericMemoryBuffer<CrtAllocator> MemoryBuffer;
|
||||
|
||||
// memorystream.h
|
||||
|
||||
struct MemoryStream;
|
||||
|
||||
// reader.h
|
||||
|
||||
template<typename Encoding, typename Derived>
|
||||
struct BaseReaderHandler;
|
||||
|
||||
template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator>
|
||||
class GenericReader;
|
||||
|
||||
typedef GenericReader<UTF8<char>, UTF8<char>, CrtAllocator> Reader;
|
||||
|
||||
// writer.h
|
||||
|
||||
template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
|
||||
class Writer;
|
||||
|
||||
// prettywriter.h
|
||||
|
||||
template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
|
||||
class PrettyWriter;
|
||||
|
||||
// document.h
|
||||
|
||||
template <typename Encoding, typename Allocator>
|
||||
struct GenericMember;
|
||||
|
||||
template <bool Const, typename Encoding, typename Allocator>
|
||||
class GenericMemberIterator;
|
||||
|
||||
template<typename CharType>
|
||||
struct GenericStringRef;
|
||||
|
||||
template <typename Encoding, typename Allocator>
|
||||
class GenericValue;
|
||||
|
||||
typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;
|
||||
|
||||
template <typename Encoding, typename Allocator, typename StackAllocator>
|
||||
class GenericDocument;
|
||||
|
||||
typedef GenericDocument<UTF8<char>, MemoryPoolAllocator<CrtAllocator>, CrtAllocator> Document;
|
||||
|
||||
// pointer.h
|
||||
|
||||
template <typename ValueType, typename Allocator>
|
||||
class GenericPointer;
|
||||
|
||||
typedef GenericPointer<Value, CrtAllocator> Pointer;
|
||||
|
||||
// schema.h
|
||||
|
||||
template <typename SchemaDocumentType>
|
||||
class IGenericRemoteSchemaDocumentProvider;
|
||||
|
||||
template <typename ValueT, typename Allocator>
|
||||
class GenericSchemaDocument;
|
||||
|
||||
typedef GenericSchemaDocument<Value, CrtAllocator> SchemaDocument;
|
||||
typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;
|
||||
|
||||
template <
|
||||
typename SchemaDocumentType,
|
||||
typename OutputHandler,
|
||||
typename StateAllocator>
|
||||
class GenericSchemaValidator;
|
||||
|
||||
typedef GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<char>, void>, CrtAllocator> SchemaValidator;
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_FWD_H_
|
290
ext/librethinkdbxx/src/rapidjson/internal/biginteger.h
Normal file
290
ext/librethinkdbxx/src/rapidjson/internal/biginteger.h
Normal file
@ -0,0 +1,290 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_BIGINTEGER_H_
|
||||
#define RAPIDJSON_BIGINTEGER_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#if defined(_MSC_VER) && defined(_M_AMD64)
|
||||
#include <intrin.h> // for _umul128
|
||||
#pragma intrinsic(_umul128)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
class BigInteger {
|
||||
public:
|
||||
typedef uint64_t Type;
|
||||
|
||||
BigInteger(const BigInteger& rhs) : count_(rhs.count_) {
|
||||
std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
|
||||
}
|
||||
|
||||
explicit BigInteger(uint64_t u) : count_(1) {
|
||||
digits_[0] = u;
|
||||
}
|
||||
|
||||
BigInteger(const char* decimals, size_t length) : count_(1) {
|
||||
RAPIDJSON_ASSERT(length > 0);
|
||||
digits_[0] = 0;
|
||||
size_t i = 0;
|
||||
const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19
|
||||
while (length >= kMaxDigitPerIteration) {
|
||||
AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration);
|
||||
length -= kMaxDigitPerIteration;
|
||||
i += kMaxDigitPerIteration;
|
||||
}
|
||||
|
||||
if (length > 0)
|
||||
AppendDecimal64(decimals + i, decimals + i + length);
|
||||
}
|
||||
|
||||
BigInteger& operator=(const BigInteger &rhs)
|
||||
{
|
||||
if (this != &rhs) {
|
||||
count_ = rhs.count_;
|
||||
std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
BigInteger& operator=(uint64_t u) {
|
||||
digits_[0] = u;
|
||||
count_ = 1;
|
||||
return *this;
|
||||
}
|
||||
|
||||
BigInteger& operator+=(uint64_t u) {
|
||||
Type backup = digits_[0];
|
||||
digits_[0] += u;
|
||||
for (size_t i = 0; i < count_ - 1; i++) {
|
||||
if (digits_[i] >= backup)
|
||||
return *this; // no carry
|
||||
backup = digits_[i + 1];
|
||||
digits_[i + 1] += 1;
|
||||
}
|
||||
|
||||
// Last carry
|
||||
if (digits_[count_ - 1] < backup)
|
||||
PushBack(1);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
BigInteger& operator*=(uint64_t u) {
|
||||
if (u == 0) return *this = 0;
|
||||
if (u == 1) return *this;
|
||||
if (*this == 1) return *this = u;
|
||||
|
||||
uint64_t k = 0;
|
||||
for (size_t i = 0; i < count_; i++) {
|
||||
uint64_t hi;
|
||||
digits_[i] = MulAdd64(digits_[i], u, k, &hi);
|
||||
k = hi;
|
||||
}
|
||||
|
||||
if (k > 0)
|
||||
PushBack(k);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
BigInteger& operator*=(uint32_t u) {
|
||||
if (u == 0) return *this = 0;
|
||||
if (u == 1) return *this;
|
||||
if (*this == 1) return *this = u;
|
||||
|
||||
uint64_t k = 0;
|
||||
for (size_t i = 0; i < count_; i++) {
|
||||
const uint64_t c = digits_[i] >> 32;
|
||||
const uint64_t d = digits_[i] & 0xFFFFFFFF;
|
||||
const uint64_t uc = u * c;
|
||||
const uint64_t ud = u * d;
|
||||
const uint64_t p0 = ud + k;
|
||||
const uint64_t p1 = uc + (p0 >> 32);
|
||||
digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
|
||||
k = p1 >> 32;
|
||||
}
|
||||
|
||||
if (k > 0)
|
||||
PushBack(k);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
BigInteger& operator<<=(size_t shift) {
|
||||
if (IsZero() || shift == 0) return *this;
|
||||
|
||||
size_t offset = shift / kTypeBit;
|
||||
size_t interShift = shift % kTypeBit;
|
||||
RAPIDJSON_ASSERT(count_ + offset <= kCapacity);
|
||||
|
||||
if (interShift == 0) {
|
||||
std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type));
|
||||
count_ += offset;
|
||||
}
|
||||
else {
|
||||
digits_[count_] = 0;
|
||||
for (size_t i = count_; i > 0; i--)
|
||||
digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift));
|
||||
digits_[offset] = digits_[0] << interShift;
|
||||
count_ += offset;
|
||||
if (digits_[count_])
|
||||
count_++;
|
||||
}
|
||||
|
||||
std::memset(digits_, 0, offset * sizeof(Type));
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator==(const BigInteger& rhs) const {
|
||||
return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0;
|
||||
}
|
||||
|
||||
bool operator==(const Type rhs) const {
|
||||
return count_ == 1 && digits_[0] == rhs;
|
||||
}
|
||||
|
||||
BigInteger& MultiplyPow5(unsigned exp) {
|
||||
static const uint32_t kPow5[12] = {
|
||||
5,
|
||||
5 * 5,
|
||||
5 * 5 * 5,
|
||||
5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
|
||||
5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5
|
||||
};
|
||||
if (exp == 0) return *this;
|
||||
for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27
|
||||
for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13
|
||||
if (exp > 0) *this *= kPow5[exp - 1];
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Compute absolute difference of this and rhs.
|
||||
// Assume this != rhs
|
||||
bool Difference(const BigInteger& rhs, BigInteger* out) const {
|
||||
int cmp = Compare(rhs);
|
||||
RAPIDJSON_ASSERT(cmp != 0);
|
||||
const BigInteger *a, *b; // Makes a > b
|
||||
bool ret;
|
||||
if (cmp < 0) { a = &rhs; b = this; ret = true; }
|
||||
else { a = this; b = &rhs; ret = false; }
|
||||
|
||||
Type borrow = 0;
|
||||
for (size_t i = 0; i < a->count_; i++) {
|
||||
Type d = a->digits_[i] - borrow;
|
||||
if (i < b->count_)
|
||||
d -= b->digits_[i];
|
||||
borrow = (d > a->digits_[i]) ? 1 : 0;
|
||||
out->digits_[i] = d;
|
||||
if (d != 0)
|
||||
out->count_ = i + 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int Compare(const BigInteger& rhs) const {
|
||||
if (count_ != rhs.count_)
|
||||
return count_ < rhs.count_ ? -1 : 1;
|
||||
|
||||
for (size_t i = count_; i-- > 0;)
|
||||
if (digits_[i] != rhs.digits_[i])
|
||||
return digits_[i] < rhs.digits_[i] ? -1 : 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t GetCount() const { return count_; }
|
||||
Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; }
|
||||
bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
|
||||
|
||||
private:
|
||||
void AppendDecimal64(const char* begin, const char* end) {
|
||||
uint64_t u = ParseUint64(begin, end);
|
||||
if (IsZero())
|
||||
*this = u;
|
||||
else {
|
||||
unsigned exp = static_cast<unsigned>(end - begin);
|
||||
(MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u
|
||||
}
|
||||
}
|
||||
|
||||
void PushBack(Type digit) {
|
||||
RAPIDJSON_ASSERT(count_ < kCapacity);
|
||||
digits_[count_++] = digit;
|
||||
}
|
||||
|
||||
static uint64_t ParseUint64(const char* begin, const char* end) {
|
||||
uint64_t r = 0;
|
||||
for (const char* p = begin; p != end; ++p) {
|
||||
RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
|
||||
r = r * 10u + static_cast<unsigned>(*p - '0');
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
// Assume a * b + k < 2^128
|
||||
static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) {
|
||||
#if defined(_MSC_VER) && defined(_M_AMD64)
|
||||
uint64_t low = _umul128(a, b, outHigh) + k;
|
||||
if (low < k)
|
||||
(*outHigh)++;
|
||||
return low;
|
||||
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
|
||||
__extension__ typedef unsigned __int128 uint128;
|
||||
uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
|
||||
p += k;
|
||||
*outHigh = static_cast<uint64_t>(p >> 64);
|
||||
return static_cast<uint64_t>(p);
|
||||
#else
|
||||
const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32;
|
||||
uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
|
||||
x1 += (x0 >> 32); // can't give carry
|
||||
x1 += x2;
|
||||
if (x1 < x2)
|
||||
x3 += (static_cast<uint64_t>(1) << 32);
|
||||
uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
|
||||
uint64_t hi = x3 + (x1 >> 32);
|
||||
|
||||
lo += k;
|
||||
if (lo < k)
|
||||
hi++;
|
||||
*outHigh = hi;
|
||||
return lo;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000
|
||||
static const size_t kCapacity = kBitCount / sizeof(Type);
|
||||
static const size_t kTypeBit = sizeof(Type) * 8;
|
||||
|
||||
Type digits_[kCapacity];
|
||||
size_t count_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_BIGINTEGER_H_
|
258
ext/librethinkdbxx/src/rapidjson/internal/diyfp.h
Normal file
258
ext/librethinkdbxx/src/rapidjson/internal/diyfp.h
Normal file
@ -0,0 +1,258 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
|
||||
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
|
||||
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
|
||||
|
||||
#ifndef RAPIDJSON_DIYFP_H_
|
||||
#define RAPIDJSON_DIYFP_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#if defined(_MSC_VER) && defined(_M_AMD64)
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(_BitScanReverse64)
|
||||
#pragma intrinsic(_umul128)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
//! "Do-It-Yourself Floating Point": a 64-bit significand `f` and a binary exponent `e`.
/*! Core type of the Grisu2 algorithm (Loitsch 2010). Value represented is f * 2^e.
    Normalization places the top bit of f at bit 63.
*/
struct DiyFp {
    DiyFp() : f(), e() {}

    DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {}

    //! Decompose an IEEE-754 double into significand/exponent form.
    /*! Normal numbers get the hidden bit added back; denormals (biased exponent 0)
        use the raw significand with the minimum exponent + 1.
    */
    explicit DiyFp(double d) {
        // Type-pun the double's bit pattern through a union.
        union {
            double d;
            uint64_t u64;
        } u = { d };

        int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize);
        uint64_t significand = (u.u64 & kDpSignificandMask);
        if (biased_e != 0) {
            f = significand + kDpHiddenBit;
            e = biased_e - kDpExponentBias;
        }
        else {
            f = significand;
            e = kDpMinExponent + 1;
        }
    }

    //! Subtract significands; exponents are assumed equal (not checked here).
    DiyFp operator-(const DiyFp& rhs) const {
        return DiyFp(f - rhs.f, e);
    }

    //! Multiply: full 64x64->128 product of significands, keeping the rounded high half.
    DiyFp operator*(const DiyFp& rhs) const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        uint64_t h;
        uint64_t l = _umul128(f, rhs.f, &h);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
        __extension__ typedef unsigned __int128 uint128;
        uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
        uint64_t h = static_cast<uint64_t>(p >> 64);
        uint64_t l = static_cast<uint64_t>(p);
        if (l & (uint64_t(1) << 63)) // rounding
            h++;
        return DiyFp(h, e + rhs.e + 64);
#else
        // Portable 32x32 schoolbook multiplication of the two 64-bit significands.
        const uint64_t M32 = 0xFFFFFFFF;
        const uint64_t a = f >> 32;
        const uint64_t b = f & M32;
        const uint64_t c = rhs.f >> 32;
        const uint64_t d = rhs.f & M32;
        const uint64_t ac = a * c;
        const uint64_t bc = b * c;
        const uint64_t ad = a * d;
        const uint64_t bd = b * d;
        uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
        tmp += 1U << 31;  /// mult_round
        return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
#endif
    }

    //! Shift f left until its top bit (bit 63) is set, adjusting e accordingly.
    DiyFp Normalize() const {
#if defined(_MSC_VER) && defined(_M_AMD64)
        unsigned long index;
        _BitScanReverse64(&index, f);
        return DiyFp(f << (63 - index), e - (63 - index));
#elif defined(__GNUC__) && __GNUC__ >= 4
        int s = __builtin_clzll(f);
        return DiyFp(f << s, e - s);
#else
        DiyFp res = *this;
        while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
            res.f <<= 1;
            res.e--;
        }
        return res;
#endif
    }

    //! Normalize a boundary value (which may need shifting past the hidden bit).
    DiyFp NormalizeBoundary() const {
        DiyFp res = *this;
        while (!(res.f & (kDpHiddenBit << 1))) {
            res.f <<= 1;
            res.e--;
        }
        res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
        res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
        return res;
    }

    //! Compute the normalized lower/upper neighbors midpoints (m- and m+ of Grisu).
    /*! The power-of-two case (f == hidden bit) has an asymmetric lower boundary. */
    void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
        DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
        DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
        mi.f <<= mi.e - pl.e;  // align minus to plus's exponent
        mi.e = pl.e;
        *plus = pl;
        *minus = mi;
    }

    //! Reassemble an IEEE-754 double from f and e (denormals map to biased exponent 0).
    double ToDouble() const {
        union {
            double d;
            uint64_t u64;
        }u;
        const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
            static_cast<uint64_t>(e + kDpExponentBias);
        u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
        return u.d;
    }

    static const int kDiySignificandSize = 64;                 // bits in f
    static const int kDpSignificandSize = 52;                  // double's stored significand bits
    static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
    static const int kDpMaxExponent = 0x7FF - kDpExponentBias;
    static const int kDpMinExponent = -kDpExponentBias;
    static const int kDpDenormalExponent = -kDpExponentBias + 1;
    static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
    static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
    static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);

    uint64_t f;  // significand
    int e;       // binary exponent: value = f * 2^e
};
|
||||
|
||||
//! Return the cached normalized power of ten 10^(-348 + index*8) as a DiyFp.
/*! Tables cover 10^-348 .. 10^340 in steps of 8 decimal exponents;
    kCachedPowers_F holds the 64-bit significands, kCachedPowers_E the binary exponents.
*/
inline DiyFp GetCachedPowerByIndex(size_t index) {
    // 10^-348, 10^-340, ..., 10^340
    static const uint64_t kCachedPowers_F[] = {
        RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76),
        RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea),
        RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df),
        RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f),
        RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c),
        RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5),
        RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d),
        RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637),
        RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7),
        RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5),
        RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b),
        RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996),
        RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
        RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8),
        RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053),
        RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd),
        RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94),
        RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b),
        RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac),
        RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3),
        RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb),
        RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c),
        RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000),
        RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984),
        RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70),
        RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245),
        RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8),
        RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a),
        RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea),
        RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85),
        RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2),
        RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3),
        RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25),
        RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece),
        RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5),
        RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a),
        RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
        RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a),
        RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129),
        RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429),
        RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d),
        RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841),
        RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9),
        RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b)
    };
    static const int16_t kCachedPowers_E[] = {
        -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
        -954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
        -688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
        -422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
        -157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
        109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
        375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
        641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
        907, 933, 960, 986, 1013, 1039, 1066
    };
    return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
}
|
||||
|
||||
//! Pick a cached power of ten so that multiplying shifts the value into Grisu's target exponent range.
/*! \param e Binary exponent of the value being converted.
    \param K Out: the (negated) decimal exponent of the chosen cached power.
    \return The cached power as a DiyFp.
*/
inline DiyFp GetCachedPower(int e, int* K) {

    //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
    // 0.30102999... is log10(2); the +347 offset keeps dk positive so a
    // truncate-then-bump emulates ceil() without <cmath>.
    double dk = (-61 - e) * 0.30102999566398114 + 347;  // dk must be positive, so can do ceiling in positive
    int k = static_cast<int>(dk);
    if (dk - k > 0.0)
        k++;

    // Table entries step by 8 decimal exponents; map k to the nearest index above.
    unsigned index = static_cast<unsigned>((k >> 3) + 1);
    *K = -(-348 + static_cast<int>(index << 3));    // decimal exponent no need lookup table

    return GetCachedPowerByIndex(index);
}
|
||||
|
||||
//! Return the cached power of ten closest at-or-below 10^exp (table granularity is 8).
/*! \param exp Requested decimal exponent (expected within the table range -348..340).
    \param outExp Out: the decimal exponent actually returned, a multiple of 8 offset from -348.
*/
inline DiyFp GetCachedPower10(int exp, int *outExp) {
    unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u;
    *outExp = -348 + static_cast<int>(index) * 8;
    return GetCachedPowerByIndex(index);
}
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_DIYFP_H_
|
245
ext/librethinkdbxx/src/rapidjson/internal/dtoa.h
Normal file
245
ext/librethinkdbxx/src/rapidjson/internal/dtoa.h
Normal file
@ -0,0 +1,245 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
|
||||
// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
|
||||
// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
|
||||
|
||||
#ifndef RAPIDJSON_DTOA_
|
||||
#define RAPIDJSON_DTOA_
|
||||
|
||||
#include "itoa.h" // GetDigitsLut()
|
||||
#include "diyfp.h"
|
||||
#include "ieee754.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
|
||||
#endif
|
||||
|
||||
//! Grisu2 weed-out step: nudge the last generated digit down toward the
//! closest representation of the exact value (w), while staying inside the
//! allowed rounding interval of width `delta`.
/*! \param buffer    Digits generated so far; buffer[len-1] may be decremented.
    \param len       Number of digits currently in \c buffer.
    \param delta     Width of the admissible interval (scaled integer).
    \param rest      Distance from the current candidate to the interval's upper end.
    \param ten_kappa Value of one unit in the last generated digit.
    \param wp_w      Distance from the exact value w to the interval's upper end.
*/
inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
    for (;;) {
        // Stop once we are at/past the target, or decrementing would leave the interval.
        if (rest >= wp_w)
            break;
        if (delta - rest < ten_kappa)
            break;
        // Decrement only if the step keeps us below the target, or at least
        // lands strictly closer to it than where we are now.
        const bool stays_below = rest + ten_kappa < wp_w;
        const bool moves_closer = wp_w - rest > rest + ten_kappa - wp_w;
        if (!stays_below && !moves_closer)
            break;
        --buffer[len - 1];
        rest += ten_kappa;
    }
}
|
||||
|
||||
//! Count the decimal digits of a 32-bit value, saturating at 9.
/*! Values >= 10^8 all report 9 because DigitGen() never needs the 10-digit case.
    A branchy threshold scan measured faster than __builtin_clz-based approaches here.
*/
inline unsigned CountDecimalDigit32(uint32_t n) {
    static const uint32_t kThreshold[] = {
        10u, 100u, 1000u, 10000u, 100000u, 1000000u, 10000000u, 100000000u
    };
    unsigned digits = 1;
    while (digits < 9 && n >= kThreshold[digits - 1])
        ++digits;
    return digits;
}
|
||||
|
||||
// Digit-generation loop of Grisu2. W is the scaled value, Mp the scaled upper
// boundary (w+), delta the width of the boundary interval (Mp - Mm). Emits the
// shortest digit string that still identifies the original double into buffer,
// sets *len to the digit count and adjusts *K (decimal exponent) by the number
// of digits consumed past the decimal scale.
inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
    static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
    const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);    // the value 1.0 in the current fixed-point scale (Mp.e < 0)
    const DiyFp wp_w = Mp - W;                      // distance from W to the upper boundary, used for rounding
    uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);  // integral part of Mp (fits in 32 bits)
    uint64_t p2 = Mp.f & (one.f - 1);                     // fractional part of Mp
    unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9]
    *len = 0;

    // Emit digits of the integral part, most significant first, stopping as
    // soon as the remainder falls within delta (the string already uniquely
    // identifies the double).
    while (kappa > 0) {
        uint32_t d = 0;
        switch (kappa) {
            case 9: d = p1 / 100000000; p1 %= 100000000; break;
            case 8: d = p1 / 10000000; p1 %= 10000000; break;
            case 7: d = p1 / 1000000; p1 %= 1000000; break;
            case 6: d = p1 / 100000; p1 %= 100000; break;
            case 5: d = p1 / 10000; p1 %= 10000; break;
            case 4: d = p1 / 1000; p1 %= 1000; break;
            case 3: d = p1 / 100; p1 %= 100; break;
            case 2: d = p1 / 10; p1 %= 10; break;
            case 1: d = p1; p1 = 0; break;
            default:;
        }
        if (d || *len)  // suppress leading zeros
            buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d));
        kappa--;
        // Remaining value (integral remainder recombined with the fraction).
        uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
        if (tmp <= delta) {
            *K += kappa;
            GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
            return;
        }
    }

    // kappa = 0
    // Integral part exhausted: generate fractional digits one at a time by
    // scaling the fraction (and delta) by 10 each round.
    for (;;) {
        p2 *= 10;
        delta *= 10;
        char d = static_cast<char>(p2 >> -one.e);
        if (d || *len)  // suppress leading zeros
            buffer[(*len)++] = static_cast<char>('0' + d);
        p2 &= one.f - 1;
        kappa--;  // goes negative: digits after the decimal scale
        if (p2 < delta) {
            *K += kappa;
            // Guard the table index: for very small values -kappa can exceed
            // the kPow10 range, in which case the rounding weight is dropped.
            int index = -static_cast<int>(kappa);
            GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast<int>(kappa)] : 0));
            return;
        }
    }
}
|
||||
|
||||
// Core of the Grisu2 algorithm: for a positive, finite, non-zero double,
// computes the decimal digit string (buffer, *length) and decimal exponent
// (*K) such that digits * 10^K round-trips back to value.
inline void Grisu2(double value, char* buffer, int* length, int* K) {
    const DiyFp v(value);
    DiyFp w_m, w_p;
    v.NormalizedBoundaries(&w_m, &w_p);  // boundaries halfway to the neighbouring doubles

    const DiyFp c_mk = GetCachedPower(w_p.e, K);  // cached power of ten that brings the exponent near zero
    const DiyFp W = v.Normalize() * c_mk;
    DiyFp Wp = w_p * c_mk;
    DiyFp Wm = w_m * c_mk;
    // Shrink the interval by one ulp on each side to absorb the error of the
    // imprecise cached-power multiplication.
    Wm.f++;
    Wp.f--;
    DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
}
|
||||
|
||||
inline char* WriteExponent(int K, char* buffer) {
|
||||
if (K < 0) {
|
||||
*buffer++ = '-';
|
||||
K = -K;
|
||||
}
|
||||
|
||||
if (K >= 100) {
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(K / 100));
|
||||
K %= 100;
|
||||
const char* d = GetDigitsLut() + K * 2;
|
||||
*buffer++ = d[0];
|
||||
*buffer++ = d[1];
|
||||
}
|
||||
else if (K >= 10) {
|
||||
const char* d = GetDigitsLut() + K * 2;
|
||||
*buffer++ = d[0];
|
||||
*buffer++ = d[1];
|
||||
}
|
||||
else
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(K));
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
// Turns the raw digit string produced by Grisu2 into its final textual form:
// inserts the decimal point, pads zeros, or switches to exponent notation,
// truncating to at most maxDecimalPlaces fractional digits. buffer holds
// `length` digits representing digits * 10^k. Returns a pointer one past the
// last character written.
inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) {
    const int kk = length + k;  // 10^(kk-1) <= v < 10^kk

    if (0 <= k && kk <= 21) {
        // 1234e7 -> 12340000000
        // Whole number: pad zeros up to the decimal point, then emit ".0".
        for (int i = length; i < kk; i++)
            buffer[i] = '0';
        buffer[kk] = '.';
        buffer[kk + 1] = '0';
        return &buffer[kk + 2];
    }
    else if (0 < kk && kk <= 21) {
        // 1234e-2 -> 12.34
        // Decimal point falls inside the digits: shift the tail right by one.
        std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk));
        buffer[kk] = '.';
        if (0 > k + maxDecimalPlaces) {
            // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1
            // Remove extra trailing zeros (at least one) after truncation.
            for (int i = kk + maxDecimalPlaces; i > kk + 1; i--)
                if (buffer[i] != '0')
                    return &buffer[i + 1];
            return &buffer[kk + 2]; // Reserve one zero
        }
        else
            return &buffer[length + 1];
    }
    else if (-6 < kk && kk <= 0) {
        // 1234e-6 -> 0.001234
        // Leading "0." plus (-kk) padding zeros before the digits.
        const int offset = 2 - kk;
        std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length));
        buffer[0] = '0';
        buffer[1] = '.';
        for (int i = 2; i < offset; i++)
            buffer[i] = '0';
        if (length - kk > maxDecimalPlaces) {
            // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1
            // Remove extra trailing zeros (at least one) after truncation.
            for (int i = maxDecimalPlaces + 1; i > 2; i--)
                if (buffer[i] != '0')
                    return &buffer[i + 1];
            return &buffer[3]; // Reserve one zero
        }
        else
            return &buffer[length + offset];
    }
    else if (kk < -maxDecimalPlaces) {
        // Truncate to zero
        buffer[0] = '0';
        buffer[1] = '.';
        buffer[2] = '0';
        return &buffer[3];
    }
    else if (length == 1) {
        // 1e30
        // Single digit with large magnitude: exponent notation, no point.
        buffer[1] = 'e';
        return WriteExponent(kk - 1, &buffer[2]);
    }
    else {
        // 1234e30 -> 1.234e33
        // Normalized exponent notation: point after the first digit.
        std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
        buffer[1] = '.';
        buffer[length + 1] = 'e';
        return WriteExponent(kk - 1, &buffer[0 + length + 2]);
    }
}
|
||||
|
||||
inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) {
|
||||
RAPIDJSON_ASSERT(maxDecimalPlaces >= 1);
|
||||
Double d(value);
|
||||
if (d.IsZero()) {
|
||||
if (d.Sign())
|
||||
*buffer++ = '-'; // -0.0, Issue #289
|
||||
buffer[0] = '0';
|
||||
buffer[1] = '.';
|
||||
buffer[2] = '0';
|
||||
return &buffer[3];
|
||||
}
|
||||
else {
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
value = -value;
|
||||
}
|
||||
int length, K;
|
||||
Grisu2(value, buffer, &length, &K);
|
||||
return Prettify(buffer, length, K, maxDecimalPlaces);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_DTOA_
|
78
ext/librethinkdbxx/src/rapidjson/internal/ieee754.h
Normal file
78
ext/librethinkdbxx/src/rapidjson/internal/ieee754.h
Normal file
@ -0,0 +1,78 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_IEEE754_
|
||||
#define RAPIDJSON_IEEE754_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Bit-level view of an IEEE-754 double-precision value.
/*! Wraps a double/uint64_t union so the sign, exponent and significand
    fields can be inspected and manipulated directly. */
class Double {
public:
    Double() {}  // intentionally uninitialized, mirroring a raw double
    Double(double d) : d_(d) {}
    Double(uint64_t u) : u_(u) {}

    double Value() const { return d_; }
    uint64_t Uint64Value() const { return u_; }

    // Next representable double above this one; only valid for non-negative values.
    double NextPositiveDouble() const {
        RAPIDJSON_ASSERT(!Sign());
        return Double(u_ + 1).Value();
    }

    bool Sign() const { return (u_ & kSignMask) != 0; }
    uint64_t Significand() const { return u_ & kSignificandMask; }
    // Unbiased binary exponent.
    int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }

    bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
    bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
    bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; }
    // Normal (or zero) as opposed to subnormal.
    bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
    bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }

    // Significand with the implicit leading 1 restored for normal numbers.
    uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
    int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; }
    // Maps the bit pattern to an unsigned key that orders doubles monotonically
    // (useful for ulp-distance comparisons across the sign boundary).
    uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; }

    // Number of significand bits actually representable at binary exponent
    // `order`: 53 for normals, fewer for subnormals, 0 below the subnormal range.
    static unsigned EffectiveSignificandSize(int order) {
        if (order >= -1021)
            return 53;
        else if (order <= -1074)
            return 0;
        else
            return static_cast<unsigned>(order) + 1074;
    }

private:
    static const int kSignificandSize = 52;
    static const int kExponentBias = 0x3FF;
    static const int kDenormalExponent = 1 - kExponentBias;
    static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
    static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
    static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
    static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);

    // NOTE(review): reading the inactive union member is the classic C
    // type-punning idiom; implementation-defined in C++ but supported by the
    // compilers this library targets.
    union {
        double d_;
        uint64_t u_;
    };
};
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_IEEE754_
|
304
ext/librethinkdbxx/src/rapidjson/internal/itoa.h
Normal file
304
ext/librethinkdbxx/src/rapidjson/internal/itoa.h
Normal file
@ -0,0 +1,304 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ITOA_
|
||||
#define RAPIDJSON_ITOA_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
// Returns a 200-byte table holding the two-character decimal representations
// of 00..99: entry i occupies bytes [i*2, i*2+1]. Lets the itoa routines emit
// two digits per lookup instead of one per division.
inline const char* GetDigitsLut() {
    static const char cDigitsLut[200] = {
        '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
        '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
        '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
        '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
        '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
        '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
        '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
        '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
        '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
        '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
    };
    return cDigitsLut;
}
|
||||
|
||||
inline char* u32toa(uint32_t value, char* buffer) {
|
||||
const char* cDigitsLut = GetDigitsLut();
|
||||
|
||||
if (value < 10000) {
|
||||
const uint32_t d1 = (value / 100) << 1;
|
||||
const uint32_t d2 = (value % 100) << 1;
|
||||
|
||||
if (value >= 1000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= 100)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= 10)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
}
|
||||
else if (value < 100000000) {
|
||||
// value = bbbbcccc
|
||||
const uint32_t b = value / 10000;
|
||||
const uint32_t c = value % 10000;
|
||||
|
||||
const uint32_t d1 = (b / 100) << 1;
|
||||
const uint32_t d2 = (b % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c / 100) << 1;
|
||||
const uint32_t d4 = (c % 100) << 1;
|
||||
|
||||
if (value >= 10000000)
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
if (value >= 1000000)
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
if (value >= 100000)
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
}
|
||||
else {
|
||||
// value = aabbbbcccc in decimal
|
||||
|
||||
const uint32_t a = value / 100000000; // 1 to 42
|
||||
value %= 100000000;
|
||||
|
||||
if (a >= 10) {
|
||||
const unsigned i = a << 1;
|
||||
*buffer++ = cDigitsLut[i];
|
||||
*buffer++ = cDigitsLut[i + 1];
|
||||
}
|
||||
else
|
||||
*buffer++ = static_cast<char>('0' + static_cast<char>(a));
|
||||
|
||||
const uint32_t b = value / 10000; // 0 to 9999
|
||||
const uint32_t c = value % 10000; // 0 to 9999
|
||||
|
||||
const uint32_t d1 = (b / 100) << 1;
|
||||
const uint32_t d2 = (b % 100) << 1;
|
||||
|
||||
const uint32_t d3 = (c / 100) << 1;
|
||||
const uint32_t d4 = (c % 100) << 1;
|
||||
|
||||
*buffer++ = cDigitsLut[d1];
|
||||
*buffer++ = cDigitsLut[d1 + 1];
|
||||
*buffer++ = cDigitsLut[d2];
|
||||
*buffer++ = cDigitsLut[d2 + 1];
|
||||
*buffer++ = cDigitsLut[d3];
|
||||
*buffer++ = cDigitsLut[d3 + 1];
|
||||
*buffer++ = cDigitsLut[d4];
|
||||
*buffer++ = cDigitsLut[d4 + 1];
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
inline char* i32toa(int32_t value, char* buffer) {
|
||||
uint32_t u = static_cast<uint32_t>(value);
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
u = ~u + 1;
|
||||
}
|
||||
|
||||
return u32toa(u, buffer);
|
||||
}
|
||||
|
||||
// Writes the decimal representation of a 64-bit unsigned integer using the
// 00-99 lookup table; no terminating null. Returns a pointer one past the
// last digit. The value is split into 8-digit groups so all inner arithmetic
// stays in 32 bits.
inline char* u64toa(uint64_t value, char* buffer) {
    const char* cDigitsLut = GetDigitsLut();
    const uint64_t kTen8 = 100000000;
    const uint64_t kTen9 = kTen8 * 10;
    const uint64_t kTen10 = kTen8 * 100;
    const uint64_t kTen11 = kTen8 * 1000;
    const uint64_t kTen12 = kTen8 * 10000;
    const uint64_t kTen13 = kTen8 * 100000;
    const uint64_t kTen14 = kTen8 * 1000000;
    const uint64_t kTen15 = kTen8 * 10000000;
    const uint64_t kTen16 = kTen8 * kTen8;

    if (value < kTen8) {
        // 1-8 digits: fits in 32 bits; same scheme as u32toa.
        uint32_t v = static_cast<uint32_t>(value);
        if (v < 10000) {
            const uint32_t d1 = (v / 100) << 1;
            const uint32_t d2 = (v % 100) << 1;

            if (v >= 1000)
                *buffer++ = cDigitsLut[d1];
            if (v >= 100)
                *buffer++ = cDigitsLut[d1 + 1];
            if (v >= 10)
                *buffer++ = cDigitsLut[d2];
            *buffer++ = cDigitsLut[d2 + 1];
        }
        else {
            // value = bbbbcccc
            const uint32_t b = v / 10000;
            const uint32_t c = v % 10000;

            const uint32_t d1 = (b / 100) << 1;
            const uint32_t d2 = (b % 100) << 1;

            const uint32_t d3 = (c / 100) << 1;
            const uint32_t d4 = (c % 100) << 1;

            // Leading-zero suppression on the top group only.
            if (value >= 10000000)
                *buffer++ = cDigitsLut[d1];
            if (value >= 1000000)
                *buffer++ = cDigitsLut[d1 + 1];
            if (value >= 100000)
                *buffer++ = cDigitsLut[d2];
            *buffer++ = cDigitsLut[d2 + 1];

            *buffer++ = cDigitsLut[d3];
            *buffer++ = cDigitsLut[d3 + 1];
            *buffer++ = cDigitsLut[d4];
            *buffer++ = cDigitsLut[d4 + 1];
        }
    }
    else if (value < kTen16) {
        // 9-16 digits: split into two 8-digit halves (v0 high, v1 low).
        const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
        const uint32_t v1 = static_cast<uint32_t>(value % kTen8);

        const uint32_t b0 = v0 / 10000;
        const uint32_t c0 = v0 % 10000;

        const uint32_t d1 = (b0 / 100) << 1;
        const uint32_t d2 = (b0 % 100) << 1;

        const uint32_t d3 = (c0 / 100) << 1;
        const uint32_t d4 = (c0 % 100) << 1;

        const uint32_t b1 = v1 / 10000;
        const uint32_t c1 = v1 % 10000;

        const uint32_t d5 = (b1 / 100) << 1;
        const uint32_t d6 = (b1 % 100) << 1;

        const uint32_t d7 = (c1 / 100) << 1;
        const uint32_t d8 = (c1 % 100) << 1;

        // High half with leading-zero suppression; low half always 8 digits.
        if (value >= kTen15)
            *buffer++ = cDigitsLut[d1];
        if (value >= kTen14)
            *buffer++ = cDigitsLut[d1 + 1];
        if (value >= kTen13)
            *buffer++ = cDigitsLut[d2];
        if (value >= kTen12)
            *buffer++ = cDigitsLut[d2 + 1];
        if (value >= kTen11)
            *buffer++ = cDigitsLut[d3];
        if (value >= kTen10)
            *buffer++ = cDigitsLut[d3 + 1];
        if (value >= kTen9)
            *buffer++ = cDigitsLut[d4];
        if (value >= kTen8)
            *buffer++ = cDigitsLut[d4 + 1];

        *buffer++ = cDigitsLut[d5];
        *buffer++ = cDigitsLut[d5 + 1];
        *buffer++ = cDigitsLut[d6];
        *buffer++ = cDigitsLut[d6 + 1];
        *buffer++ = cDigitsLut[d7];
        *buffer++ = cDigitsLut[d7 + 1];
        *buffer++ = cDigitsLut[d8];
        *buffer++ = cDigitsLut[d8 + 1];
    }
    else {
        // 17-20 digits: top 1-4 digits (a), then two full 8-digit halves.
        const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844
        value %= kTen16;

        if (a < 10)
            *buffer++ = static_cast<char>('0' + static_cast<char>(a));
        else if (a < 100) {
            const uint32_t i = a << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
        }
        else if (a < 1000) {
            *buffer++ = static_cast<char>('0' + static_cast<char>(a / 100));

            const uint32_t i = (a % 100) << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
        }
        else {
            const uint32_t i = (a / 100) << 1;
            const uint32_t j = (a % 100) << 1;
            *buffer++ = cDigitsLut[i];
            *buffer++ = cDigitsLut[i + 1];
            *buffer++ = cDigitsLut[j];
            *buffer++ = cDigitsLut[j + 1];
        }

        const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
        const uint32_t v1 = static_cast<uint32_t>(value % kTen8);

        const uint32_t b0 = v0 / 10000;
        const uint32_t c0 = v0 % 10000;

        const uint32_t d1 = (b0 / 100) << 1;
        const uint32_t d2 = (b0 % 100) << 1;

        const uint32_t d3 = (c0 / 100) << 1;
        const uint32_t d4 = (c0 % 100) << 1;

        const uint32_t b1 = v1 / 10000;
        const uint32_t c1 = v1 % 10000;

        const uint32_t d5 = (b1 / 100) << 1;
        const uint32_t d6 = (b1 % 100) << 1;

        const uint32_t d7 = (c1 / 100) << 1;
        const uint32_t d8 = (c1 % 100) << 1;

        // Remaining 16 digits are always fully emitted (zero-padded).
        *buffer++ = cDigitsLut[d1];
        *buffer++ = cDigitsLut[d1 + 1];
        *buffer++ = cDigitsLut[d2];
        *buffer++ = cDigitsLut[d2 + 1];
        *buffer++ = cDigitsLut[d3];
        *buffer++ = cDigitsLut[d3 + 1];
        *buffer++ = cDigitsLut[d4];
        *buffer++ = cDigitsLut[d4 + 1];
        *buffer++ = cDigitsLut[d5];
        *buffer++ = cDigitsLut[d5 + 1];
        *buffer++ = cDigitsLut[d6];
        *buffer++ = cDigitsLut[d6 + 1];
        *buffer++ = cDigitsLut[d7];
        *buffer++ = cDigitsLut[d7 + 1];
        *buffer++ = cDigitsLut[d8];
        *buffer++ = cDigitsLut[d8 + 1];
    }

    return buffer;
}
|
||||
|
||||
inline char* i64toa(int64_t value, char* buffer) {
|
||||
uint64_t u = static_cast<uint64_t>(value);
|
||||
if (value < 0) {
|
||||
*buffer++ = '-';
|
||||
u = ~u + 1;
|
||||
}
|
||||
|
||||
return u64toa(u, buffer);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_ITOA_
|
181
ext/librethinkdbxx/src/rapidjson/internal/meta.h
Normal file
181
ext/librethinkdbxx/src/rapidjson/internal/meta.h
Normal file
@ -0,0 +1,181 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_META_H_
|
||||
#define RAPIDJSON_INTERNAL_META_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
#if defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(6334)
|
||||
#endif
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
#include <type_traits>
|
||||
#endif
|
||||
|
||||
//@cond RAPIDJSON_INTERNAL
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
template <typename T> struct Void { typedef void Type; };

///////////////////////////////////////////////////////////////////////////////
// BoolType, TrueType, FalseType
//
// Compile-time boolean constant (pre-C++11 stand-in for std::integral_constant<bool, V>).
template <bool Cond> struct BoolType {
    static const bool Value = Cond;
    typedef BoolType Type;
};
typedef BoolType<true> TrueType;
typedef BoolType<false> FalseType;


///////////////////////////////////////////////////////////////////////////////
// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
//

// Compile-time type selection (pre-C++11 stand-in for std::conditional).
template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {};
template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};

// Compile-time logical AND / OR of two boolean conditions.
template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
template <> struct AndExprCond<true, true> : TrueType {};
template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
template <> struct OrExprCond<false, false> : FalseType {};

template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {};
template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {};
template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};


///////////////////////////////////////////////////////////////////////////////
// AddConst, MaybeAddConst, RemoveConst
template <typename T> struct AddConst { typedef const T Type; };
template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
template <typename T> struct RemoveConst { typedef T Type; };
template <typename T> struct RemoveConst<const T> { typedef T Type; };


///////////////////////////////////////////////////////////////////////////////
// IsSame, IsConst, IsMoreConst, IsPointer
//
template <typename T, typename U> struct IsSame : FalseType {};
template <typename T> struct IsSame<T, T> : TrueType {};

template <typename T> struct IsConst : FalseType {};
template <typename T> struct IsConst<const T> : TrueType {};

// True when CT and T are the same type and CT is at least as const-qualified.
template <typename CT, typename T>
struct IsMoreConst
    : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
              BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};

template <typename T> struct IsPointer : FalseType {};
template <typename T> struct IsPointer<T*> : TrueType {};

///////////////////////////////////////////////////////////////////////////////
// IsBaseOf
//
#if RAPIDJSON_HAS_CXX11_TYPETRAITS

template <typename B, typename D> struct IsBaseOf
    : BoolType< ::std::is_base_of<B,D>::value> {};

#else // simplified version adopted from Boost

template<typename B, typename D> struct IsBaseOfImpl {
    RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);
    RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);

    typedef char (&Yes)[1];
    typedef char (&No) [2];

    template <typename T>
    static Yes Check(const D*, T);
    static No Check(const B*, int);

    // Host converts to const D* (non-const overload preferred) or const B*;
    // overload resolution picks the Yes overload exactly when D derives from B.
    struct Host {
        operator const B*() const;
        operator const D*();
    };

    enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
};

template <typename B, typename D> struct IsBaseOf
    : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};

#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS


//////////////////////////////////////////////////////////////////////////
// EnableIf / DisableIf
//
template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; };
template <typename T> struct EnableIfCond<false, T> { /* empty */ };

template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
template <typename T> struct DisableIfCond<true, T> { /* empty */ };

template <typename Condition, typename T = void>
struct EnableIf : EnableIfCond<Condition::Value, T> {};

template <typename Condition, typename T = void>
struct DisableIf : DisableIfCond<Condition::Value, T> {};

// SFINAE helpers
// RemoveSfinaeTag strips the SfinaeTag&(*)(...) wrapper applied by
// RAPIDJSON_REMOVEFPTR_, which lets the macros below accept parenthesized
// type expressions (including ones containing commas).
struct SfinaeTag {};
template <typename T> struct RemoveSfinaeTag;
template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };

#define RAPIDJSON_REMOVEFPTR_(type) \
    typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
        < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type

#define RAPIDJSON_ENABLEIF(cond) \
    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL

#define RAPIDJSON_DISABLEIF(cond) \
    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL

#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
        <RAPIDJSON_REMOVEFPTR_(cond), \
         RAPIDJSON_REMOVEFPTR_(returntype)>::Type

#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
        <RAPIDJSON_REMOVEFPTR_(cond), \
         RAPIDJSON_REMOVEFPTR_(returntype)>::Type
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
//@endcond
|
||||
|
||||
#if defined(__GNUC__) || defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_META_H_
|
55
ext/librethinkdbxx/src/rapidjson/internal/pow10.h
Normal file
55
ext/librethinkdbxx/src/rapidjson/internal/pow10.h
Normal file
@ -0,0 +1,55 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_POW10_
|
||||
#define RAPIDJSON_POW10_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Computes integer powers of 10 in double (10.0^n).
|
||||
/*! This function uses lookup table for fast and accurate results.
|
||||
\param n non-negative exponent. Must <= 308.
|
||||
\return 10.0^n
|
||||
*/
|
||||
inline double Pow10(int n) {
    // Precomputed table: a lookup is both faster and exactly reproducible,
    // unlike repeated multiplication or std::pow.
    static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
        1e+0,
        1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
        1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
        1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
        1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
        1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
        1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
        1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
        1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
        1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
        1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
        1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
        1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
        1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
        1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
        1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
        1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
    };
    RAPIDJSON_ASSERT(n >= 0 && n <= 308); // caller must keep n within the table bounds
    return e[n];
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_POW10_
|
701
ext/librethinkdbxx/src/rapidjson/internal/regex.h
Normal file
701
ext/librethinkdbxx/src/rapidjson/internal/regex.h
Normal file
@ -0,0 +1,701 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_REGEX_H_
|
||||
#define RAPIDJSON_INTERNAL_REGEX_H_
|
||||
|
||||
#include "../allocators.h"
|
||||
#include "../stream.h"
|
||||
#include "stack.h"
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
RAPIDJSON_DIAG_OFF(switch-enum)
|
||||
RAPIDJSON_DIAG_OFF(implicit-fallthrough)
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
|
||||
#endif
|
||||
|
||||
#ifndef RAPIDJSON_REGEX_VERBOSE
|
||||
#define RAPIDJSON_REGEX_VERBOSE 0
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// GenericRegex
|
||||
|
||||
static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1
|
||||
static const SizeType kRegexInvalidRange = ~SizeType(0);
|
||||
|
||||
//! Regular expression engine with subset of ECMAscript grammar.
|
||||
/*!
|
||||
Supported regular expression syntax:
|
||||
- \c ab Concatenation
|
||||
- \c a|b Alternation
|
||||
- \c a? Zero or one
|
||||
- \c a* Zero or more
|
||||
- \c a+ One or more
|
||||
- \c a{3} Exactly 3 times
|
||||
- \c a{3,} At least 3 times
|
||||
- \c a{3,5} 3 to 5 times
|
||||
- \c (ab) Grouping
|
||||
- \c ^a At the beginning
|
||||
- \c a$ At the end
|
||||
- \c . Any character
|
||||
- \c [abc] Character classes
|
||||
- \c [a-c] Character class range
|
||||
- \c [a-z0-9_] Character class combination
|
||||
- \c [^abc] Negated character classes
|
||||
- \c [^a-c] Negated character class range
|
||||
- \c [\b] Backspace (U+0008)
|
||||
- \c \\| \\\\ ... Escape characters
|
||||
- \c \\f Form feed (U+000C)
|
||||
- \c \\n Line feed (U+000A)
|
||||
- \c \\r Carriage return (U+000D)
|
||||
- \c \\t Tab (U+0009)
|
||||
- \c \\v Vertical tab (U+000B)
|
||||
|
||||
\note This is a Thompson NFA engine, implemented with reference to
|
||||
Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
|
||||
https://swtch.com/~rsc/regexp/regexp1.html
|
||||
*/
|
||||
template <typename Encoding, typename Allocator = CrtAllocator>
|
||||
class GenericRegex {
|
||||
public:
|
||||
typedef typename Encoding::Ch Ch;
|
||||
|
||||
GenericRegex(const Ch* source, Allocator* allocator = 0) :
|
||||
states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
|
||||
stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_()
|
||||
{
|
||||
GenericStringStream<Encoding> ss(source);
|
||||
DecodedStream<GenericStringStream<Encoding> > ds(ss);
|
||||
Parse(ds);
|
||||
}
|
||||
|
||||
~GenericRegex() {
|
||||
Allocator::Free(stateSet_);
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
return root_ != kRegexInvalidState;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool Match(InputStream& is) const {
|
||||
return SearchWithAnchoring(is, true, true);
|
||||
}
|
||||
|
||||
bool Match(const Ch* s) const {
|
||||
GenericStringStream<Encoding> is(s);
|
||||
return Match(is);
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool Search(InputStream& is) const {
|
||||
return SearchWithAnchoring(is, anchorBegin_, anchorEnd_);
|
||||
}
|
||||
|
||||
bool Search(const Ch* s) const {
|
||||
GenericStringStream<Encoding> is(s);
|
||||
return Search(is);
|
||||
}
|
||||
|
||||
private:
|
||||
enum Operator {
|
||||
kZeroOrOne,
|
||||
kZeroOrMore,
|
||||
kOneOrMore,
|
||||
kConcatenation,
|
||||
kAlternation,
|
||||
kLeftParenthesis
|
||||
};
|
||||
|
||||
static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.'
|
||||
static const unsigned kRangeCharacterClass = 0xFFFFFFFE;
|
||||
static const unsigned kRangeNegationFlag = 0x80000000;
|
||||
|
||||
struct Range {
|
||||
unsigned start; //
|
||||
unsigned end;
|
||||
SizeType next;
|
||||
};
|
||||
|
||||
struct State {
|
||||
SizeType out; //!< Equals to kInvalid for matching state
|
||||
SizeType out1; //!< Equals to non-kInvalid for split
|
||||
SizeType rangeStart;
|
||||
unsigned codepoint;
|
||||
};
|
||||
|
||||
struct Frag {
|
||||
Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {}
|
||||
SizeType start;
|
||||
SizeType out; //!< link-list of all output states
|
||||
SizeType minIndex;
|
||||
};
|
||||
|
||||
template <typename SourceStream>
|
||||
class DecodedStream {
|
||||
public:
|
||||
DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); }
|
||||
unsigned Peek() { return codepoint_; }
|
||||
unsigned Take() {
|
||||
unsigned c = codepoint_;
|
||||
if (c) // No further decoding when '\0'
|
||||
Decode();
|
||||
return c;
|
||||
}
|
||||
|
||||
private:
|
||||
void Decode() {
|
||||
if (!Encoding::Decode(ss_, &codepoint_))
|
||||
codepoint_ = 0;
|
||||
}
|
||||
|
||||
SourceStream& ss_;
|
||||
unsigned codepoint_;
|
||||
};
|
||||
|
||||
State& GetState(SizeType index) {
|
||||
RAPIDJSON_ASSERT(index < stateCount_);
|
||||
return states_.template Bottom<State>()[index];
|
||||
}
|
||||
|
||||
const State& GetState(SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index < stateCount_);
|
||||
return states_.template Bottom<State>()[index];
|
||||
}
|
||||
|
||||
Range& GetRange(SizeType index) {
|
||||
RAPIDJSON_ASSERT(index < rangeCount_);
|
||||
return ranges_.template Bottom<Range>()[index];
|
||||
}
|
||||
|
||||
const Range& GetRange(SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index < rangeCount_);
|
||||
return ranges_.template Bottom<Range>()[index];
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
void Parse(DecodedStream<InputStream>& ds) {
|
||||
Allocator allocator;
|
||||
Stack<Allocator> operandStack(&allocator, 256); // Frag
|
||||
Stack<Allocator> operatorStack(&allocator, 256); // Operator
|
||||
Stack<Allocator> atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis)
|
||||
|
||||
*atomCountStack.template Push<unsigned>() = 0;
|
||||
|
||||
unsigned codepoint;
|
||||
while (ds.Peek() != 0) {
|
||||
switch (codepoint = ds.Take()) {
|
||||
case '^':
|
||||
anchorBegin_ = true;
|
||||
break;
|
||||
|
||||
case '$':
|
||||
anchorEnd_ = true;
|
||||
break;
|
||||
|
||||
case '|':
|
||||
while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation)
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
*operatorStack.template Push<Operator>() = kAlternation;
|
||||
*atomCountStack.template Top<unsigned>() = 0;
|
||||
break;
|
||||
|
||||
case '(':
|
||||
*operatorStack.template Push<Operator>() = kLeftParenthesis;
|
||||
*atomCountStack.template Push<unsigned>() = 0;
|
||||
break;
|
||||
|
||||
case ')':
|
||||
while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis)
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
if (operatorStack.Empty())
|
||||
return;
|
||||
operatorStack.template Pop<Operator>(1);
|
||||
atomCountStack.template Pop<unsigned>(1);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '?':
|
||||
if (!Eval(operandStack, kZeroOrOne))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '*':
|
||||
if (!Eval(operandStack, kZeroOrMore))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '+':
|
||||
if (!Eval(operandStack, kOneOrMore))
|
||||
return;
|
||||
break;
|
||||
|
||||
case '{':
|
||||
{
|
||||
unsigned n, m;
|
||||
if (!ParseUnsigned(ds, &n))
|
||||
return;
|
||||
|
||||
if (ds.Peek() == ',') {
|
||||
ds.Take();
|
||||
if (ds.Peek() == '}')
|
||||
m = kInfinityQuantifier;
|
||||
else if (!ParseUnsigned(ds, &m) || m < n)
|
||||
return;
|
||||
}
|
||||
else
|
||||
m = n;
|
||||
|
||||
if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}')
|
||||
return;
|
||||
ds.Take();
|
||||
}
|
||||
break;
|
||||
|
||||
case '.':
|
||||
PushOperand(operandStack, kAnyCharacterClass);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '[':
|
||||
{
|
||||
SizeType range;
|
||||
if (!ParseRange(ds, &range))
|
||||
return;
|
||||
SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass);
|
||||
GetState(s).rangeStart = range;
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, s);
|
||||
}
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
break;
|
||||
|
||||
case '\\': // Escape character
|
||||
if (!CharacterEscape(ds, &codepoint))
|
||||
return; // Unsupported escape character
|
||||
// fall through to default
|
||||
|
||||
default: // Pattern character
|
||||
PushOperand(operandStack, codepoint);
|
||||
ImplicitConcatenation(atomCountStack, operatorStack);
|
||||
}
|
||||
}
|
||||
|
||||
while (!operatorStack.Empty())
|
||||
if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
|
||||
return;
|
||||
|
||||
// Link the operand to matching state.
|
||||
if (operandStack.GetSize() == sizeof(Frag)) {
|
||||
Frag* e = operandStack.template Pop<Frag>(1);
|
||||
Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0));
|
||||
root_ = e->start;
|
||||
|
||||
#if RAPIDJSON_REGEX_VERBOSE
|
||||
printf("root: %d\n", root_);
|
||||
for (SizeType i = 0; i < stateCount_ ; i++) {
|
||||
State& s = GetState(i);
|
||||
printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint);
|
||||
}
|
||||
printf("\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
// Preallocate buffer for SearchWithAnchoring()
|
||||
RAPIDJSON_ASSERT(stateSet_ == 0);
|
||||
if (stateCount_ > 0) {
|
||||
stateSet_ = static_cast<unsigned*>(states_.GetAllocator().Malloc(GetStateSetSize()));
|
||||
state0_.template Reserve<SizeType>(stateCount_);
|
||||
state1_.template Reserve<SizeType>(stateCount_);
|
||||
}
|
||||
}
|
||||
|
||||
SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) {
|
||||
State* s = states_.template Push<State>();
|
||||
s->out = out;
|
||||
s->out1 = out1;
|
||||
s->codepoint = codepoint;
|
||||
s->rangeStart = kRegexInvalidRange;
|
||||
return stateCount_++;
|
||||
}
|
||||
|
||||
void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) {
|
||||
SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint);
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, s);
|
||||
}
|
||||
|
||||
void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) {
|
||||
if (*atomCountStack.template Top<unsigned>())
|
||||
*operatorStack.template Push<Operator>() = kConcatenation;
|
||||
(*atomCountStack.template Top<unsigned>())++;
|
||||
}
|
||||
|
||||
SizeType Append(SizeType l1, SizeType l2) {
|
||||
SizeType old = l1;
|
||||
while (GetState(l1).out != kRegexInvalidState)
|
||||
l1 = GetState(l1).out;
|
||||
GetState(l1).out = l2;
|
||||
return old;
|
||||
}
|
||||
|
||||
void Patch(SizeType l, SizeType s) {
|
||||
for (SizeType next; l != kRegexInvalidState; l = next) {
|
||||
next = GetState(l).out;
|
||||
GetState(l).out = s;
|
||||
}
|
||||
}
|
||||
|
||||
bool Eval(Stack<Allocator>& operandStack, Operator op) {
|
||||
switch (op) {
|
||||
case kConcatenation:
|
||||
RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2);
|
||||
{
|
||||
Frag e2 = *operandStack.template Pop<Frag>(1);
|
||||
Frag e1 = *operandStack.template Pop<Frag>(1);
|
||||
Patch(e1.out, e2.start);
|
||||
*operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex));
|
||||
}
|
||||
return true;
|
||||
|
||||
case kAlternation:
|
||||
if (operandStack.GetSize() >= sizeof(Frag) * 2) {
|
||||
Frag e2 = *operandStack.template Pop<Frag>(1);
|
||||
Frag e1 = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(e1.start, e2.start, 0);
|
||||
*operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
case kZeroOrOne:
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
*operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
case kZeroOrMore:
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
Patch(e.out, s);
|
||||
*operandStack.template Push<Frag>() = Frag(s, s, e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
default:
|
||||
RAPIDJSON_ASSERT(op == kOneOrMore);
|
||||
if (operandStack.GetSize() >= sizeof(Frag)) {
|
||||
Frag e = *operandStack.template Pop<Frag>(1);
|
||||
SizeType s = NewState(kRegexInvalidState, e.start, 0);
|
||||
Patch(e.out, s);
|
||||
*operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) {
|
||||
RAPIDJSON_ASSERT(n <= m);
|
||||
RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag));
|
||||
|
||||
if (n == 0) {
|
||||
if (m == 0) // a{0} not support
|
||||
return false;
|
||||
else if (m == kInfinityQuantifier)
|
||||
Eval(operandStack, kZeroOrMore); // a{0,} -> a*
|
||||
else {
|
||||
Eval(operandStack, kZeroOrOne); // a{0,5} -> a?
|
||||
for (unsigned i = 0; i < m - 1; i++)
|
||||
CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a?
|
||||
for (unsigned i = 0; i < m - 1; i++)
|
||||
Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a?
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a
|
||||
CloneTopOperand(operandStack);
|
||||
|
||||
if (m == kInfinityQuantifier)
|
||||
Eval(operandStack, kOneOrMore); // a{3,} -> a a a+
|
||||
else if (m > n) {
|
||||
CloneTopOperand(operandStack); // a{3,5} -> a a a a
|
||||
Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a?
|
||||
for (unsigned i = n; i < m - 1; i++)
|
||||
CloneTopOperand(operandStack); // a{3,5} -> a a a a? a?
|
||||
for (unsigned i = n; i < m; i++)
|
||||
Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a?
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < n - 1; i++)
|
||||
Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a?
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; }
|
||||
|
||||
void CloneTopOperand(Stack<Allocator>& operandStack) {
|
||||
const Frag src = *operandStack.template Top<Frag>(); // Copy constructor to prevent invalidation
|
||||
SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_)
|
||||
State* s = states_.template Push<State>(count);
|
||||
memcpy(s, &GetState(src.minIndex), count * sizeof(State));
|
||||
for (SizeType j = 0; j < count; j++) {
|
||||
if (s[j].out != kRegexInvalidState)
|
||||
s[j].out += count;
|
||||
if (s[j].out1 != kRegexInvalidState)
|
||||
s[j].out1 += count;
|
||||
}
|
||||
*operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count);
|
||||
stateCount_ += count;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool ParseUnsigned(DecodedStream<InputStream>& ds, unsigned* u) {
|
||||
unsigned r = 0;
|
||||
if (ds.Peek() < '0' || ds.Peek() > '9')
|
||||
return false;
|
||||
while (ds.Peek() >= '0' && ds.Peek() <= '9') {
|
||||
if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295
|
||||
return false; // overflow
|
||||
r = r * 10 + (ds.Take() - '0');
|
||||
}
|
||||
*u = r;
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool ParseRange(DecodedStream<InputStream>& ds, SizeType* range) {
|
||||
bool isBegin = true;
|
||||
bool negate = false;
|
||||
int step = 0;
|
||||
SizeType start = kRegexInvalidRange;
|
||||
SizeType current = kRegexInvalidRange;
|
||||
unsigned codepoint;
|
||||
while ((codepoint = ds.Take()) != 0) {
|
||||
if (isBegin) {
|
||||
isBegin = false;
|
||||
if (codepoint == '^') {
|
||||
negate = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
switch (codepoint) {
|
||||
case ']':
|
||||
if (start == kRegexInvalidRange)
|
||||
return false; // Error: nothing inside []
|
||||
if (step == 2) { // Add trailing '-'
|
||||
SizeType r = NewRange('-');
|
||||
RAPIDJSON_ASSERT(current != kRegexInvalidRange);
|
||||
GetRange(current).next = r;
|
||||
}
|
||||
if (negate)
|
||||
GetRange(start).start |= kRangeNegationFlag;
|
||||
*range = start;
|
||||
return true;
|
||||
|
||||
case '\\':
|
||||
if (ds.Peek() == 'b') {
|
||||
ds.Take();
|
||||
codepoint = 0x0008; // Escape backspace character
|
||||
}
|
||||
else if (!CharacterEscape(ds, &codepoint))
|
||||
return false;
|
||||
// fall through to default
|
||||
|
||||
default:
|
||||
switch (step) {
|
||||
case 1:
|
||||
if (codepoint == '-') {
|
||||
step++;
|
||||
break;
|
||||
}
|
||||
// fall through to step 0 for other characters
|
||||
|
||||
case 0:
|
||||
{
|
||||
SizeType r = NewRange(codepoint);
|
||||
if (current != kRegexInvalidRange)
|
||||
GetRange(current).next = r;
|
||||
if (start == kRegexInvalidRange)
|
||||
start = r;
|
||||
current = r;
|
||||
}
|
||||
step = 1;
|
||||
break;
|
||||
|
||||
default:
|
||||
RAPIDJSON_ASSERT(step == 2);
|
||||
GetRange(current).end = codepoint;
|
||||
step = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
SizeType NewRange(unsigned codepoint) {
|
||||
Range* r = ranges_.template Push<Range>();
|
||||
r->start = r->end = codepoint;
|
||||
r->next = kRegexInvalidRange;
|
||||
return rangeCount_++;
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool CharacterEscape(DecodedStream<InputStream>& ds, unsigned* escapedCodepoint) {
|
||||
unsigned codepoint;
|
||||
switch (codepoint = ds.Take()) {
|
||||
case '^':
|
||||
case '$':
|
||||
case '|':
|
||||
case '(':
|
||||
case ')':
|
||||
case '?':
|
||||
case '*':
|
||||
case '+':
|
||||
case '.':
|
||||
case '[':
|
||||
case ']':
|
||||
case '{':
|
||||
case '}':
|
||||
case '\\':
|
||||
*escapedCodepoint = codepoint; return true;
|
||||
case 'f': *escapedCodepoint = 0x000C; return true;
|
||||
case 'n': *escapedCodepoint = 0x000A; return true;
|
||||
case 'r': *escapedCodepoint = 0x000D; return true;
|
||||
case 't': *escapedCodepoint = 0x0009; return true;
|
||||
case 'v': *escapedCodepoint = 0x000B; return true;
|
||||
default:
|
||||
return false; // Unsupported escape character
|
||||
}
|
||||
}
|
||||
|
||||
template <typename InputStream>
|
||||
bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const {
|
||||
RAPIDJSON_ASSERT(IsValid());
|
||||
DecodedStream<InputStream> ds(is);
|
||||
|
||||
state0_.Clear();
|
||||
Stack<Allocator> *current = &state0_, *next = &state1_;
|
||||
const size_t stateSetSize = GetStateSetSize();
|
||||
std::memset(stateSet_, 0, stateSetSize);
|
||||
|
||||
bool matched = AddState(*current, root_);
|
||||
unsigned codepoint;
|
||||
while (!current->Empty() && (codepoint = ds.Take()) != 0) {
|
||||
std::memset(stateSet_, 0, stateSetSize);
|
||||
next->Clear();
|
||||
matched = false;
|
||||
for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
|
||||
const State& sr = GetState(*s);
|
||||
if (sr.codepoint == codepoint ||
|
||||
sr.codepoint == kAnyCharacterClass ||
|
||||
(sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
|
||||
{
|
||||
matched = AddState(*next, sr.out) || matched;
|
||||
if (!anchorEnd && matched)
|
||||
return true;
|
||||
}
|
||||
if (!anchorBegin)
|
||||
AddState(*next, root_);
|
||||
}
|
||||
internal::Swap(current, next);
|
||||
}
|
||||
|
||||
return matched;
|
||||
}
|
||||
|
||||
size_t GetStateSetSize() const {
|
||||
return (stateCount_ + 31) / 32 * 4;
|
||||
}
|
||||
|
||||
// Return whether the added states is a match state
|
||||
bool AddState(Stack<Allocator>& l, SizeType index) const {
|
||||
RAPIDJSON_ASSERT(index != kRegexInvalidState);
|
||||
|
||||
const State& s = GetState(index);
|
||||
if (s.out1 != kRegexInvalidState) { // Split
|
||||
bool matched = AddState(l, s.out);
|
||||
return AddState(l, s.out1) || matched;
|
||||
}
|
||||
else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) {
|
||||
stateSet_[index >> 5] |= (1 << (index & 31));
|
||||
*l.template PushUnsafe<SizeType>() = index;
|
||||
}
|
||||
return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation.
|
||||
}
|
||||
|
||||
bool MatchRange(SizeType rangeIndex, unsigned codepoint) const {
|
||||
bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0;
|
||||
while (rangeIndex != kRegexInvalidRange) {
|
||||
const Range& r = GetRange(rangeIndex);
|
||||
if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end)
|
||||
return yes;
|
||||
rangeIndex = r.next;
|
||||
}
|
||||
return !yes;
|
||||
}
|
||||
|
||||
Stack<Allocator> states_;
|
||||
Stack<Allocator> ranges_;
|
||||
SizeType root_;
|
||||
SizeType stateCount_;
|
||||
SizeType rangeCount_;
|
||||
|
||||
static const unsigned kInfinityQuantifier = ~0u;
|
||||
|
||||
// For SearchWithAnchoring()
|
||||
uint32_t* stateSet_; // allocated by states_.GetAllocator()
|
||||
mutable Stack<Allocator> state0_;
|
||||
mutable Stack<Allocator> state1_;
|
||||
bool anchorBegin_;
|
||||
bool anchorEnd_;
|
||||
};
|
||||
|
||||
typedef GenericRegex<UTF8<> > Regex;
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_REGEX_H_
|
230
ext/librethinkdbxx/src/rapidjson/internal/stack.h
Normal file
230
ext/librethinkdbxx/src/rapidjson/internal/stack.h
Normal file
@ -0,0 +1,230 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_STACK_H_
|
||||
#define RAPIDJSON_INTERNAL_STACK_H_
|
||||
|
||||
#include "../allocators.h"
|
||||
#include "swap.h"
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(c++98-compat)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Stack
|
||||
|
||||
//! A type-unsafe stack for storing different types of data.
|
||||
/*! \tparam Allocator Allocator for allocating stack memory.
|
||||
*/
|
||||
template <typename Allocator>
|
||||
class Stack {
|
||||
public:
|
||||
// Optimization note: Do not allocate memory for stack_ in constructor.
|
||||
// Do it lazily when first Push() -> Expand() -> Resize().
|
||||
Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) {
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
Stack(Stack&& rhs)
|
||||
: allocator_(rhs.allocator_),
|
||||
ownAllocator_(rhs.ownAllocator_),
|
||||
stack_(rhs.stack_),
|
||||
stackTop_(rhs.stackTop_),
|
||||
stackEnd_(rhs.stackEnd_),
|
||||
initialCapacity_(rhs.initialCapacity_)
|
||||
{
|
||||
rhs.allocator_ = 0;
|
||||
rhs.ownAllocator_ = 0;
|
||||
rhs.stack_ = 0;
|
||||
rhs.stackTop_ = 0;
|
||||
rhs.stackEnd_ = 0;
|
||||
rhs.initialCapacity_ = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
~Stack() {
|
||||
Destroy();
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
Stack& operator=(Stack&& rhs) {
|
||||
if (&rhs != this)
|
||||
{
|
||||
Destroy();
|
||||
|
||||
allocator_ = rhs.allocator_;
|
||||
ownAllocator_ = rhs.ownAllocator_;
|
||||
stack_ = rhs.stack_;
|
||||
stackTop_ = rhs.stackTop_;
|
||||
stackEnd_ = rhs.stackEnd_;
|
||||
initialCapacity_ = rhs.initialCapacity_;
|
||||
|
||||
rhs.allocator_ = 0;
|
||||
rhs.ownAllocator_ = 0;
|
||||
rhs.stack_ = 0;
|
||||
rhs.stackTop_ = 0;
|
||||
rhs.stackEnd_ = 0;
|
||||
rhs.initialCapacity_ = 0;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT {
|
||||
internal::Swap(allocator_, rhs.allocator_);
|
||||
internal::Swap(ownAllocator_, rhs.ownAllocator_);
|
||||
internal::Swap(stack_, rhs.stack_);
|
||||
internal::Swap(stackTop_, rhs.stackTop_);
|
||||
internal::Swap(stackEnd_, rhs.stackEnd_);
|
||||
internal::Swap(initialCapacity_, rhs.initialCapacity_);
|
||||
}
|
||||
|
||||
void Clear() { stackTop_ = stack_; }
|
||||
|
||||
void ShrinkToFit() {
|
||||
if (Empty()) {
|
||||
// If the stack is empty, completely deallocate the memory.
|
||||
Allocator::Free(stack_);
|
||||
stack_ = 0;
|
||||
stackTop_ = 0;
|
||||
stackEnd_ = 0;
|
||||
}
|
||||
else
|
||||
Resize(GetSize());
|
||||
}
|
||||
|
||||
// Optimization note: try to minimize the size of this function for force inline.
|
||||
// Expansion is run very infrequently, so it is moved to another (probably non-inline) function.
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) {
|
||||
// Expand the stack if needed
|
||||
if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_))
|
||||
Expand<T>(count);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) {
|
||||
Reserve<T>(count);
|
||||
return PushUnsafe<T>(count);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) {
|
||||
RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_);
|
||||
T* ret = reinterpret_cast<T*>(stackTop_);
|
||||
stackTop_ += sizeof(T) * count;
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* Pop(size_t count) {
|
||||
RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T));
|
||||
stackTop_ -= count * sizeof(T);
|
||||
return reinterpret_cast<T*>(stackTop_);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* Top() {
|
||||
RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
|
||||
return reinterpret_cast<T*>(stackTop_ - sizeof(T));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
const T* Top() const {
|
||||
RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
|
||||
return reinterpret_cast<T*>(stackTop_ - sizeof(T));
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
T* End() { return reinterpret_cast<T*>(stackTop_); }
|
||||
|
||||
template<typename T>
|
||||
const T* End() const { return reinterpret_cast<T*>(stackTop_); }
|
||||
|
||||
template<typename T>
|
||||
T* Bottom() { return reinterpret_cast<T*>(stack_); }
|
||||
|
||||
template<typename T>
|
||||
const T* Bottom() const { return reinterpret_cast<T*>(stack_); }
|
||||
|
||||
bool HasAllocator() const {
|
||||
return allocator_ != 0;
|
||||
}
|
||||
|
||||
Allocator& GetAllocator() {
|
||||
RAPIDJSON_ASSERT(allocator_);
|
||||
return *allocator_;
|
||||
}
|
||||
|
||||
bool Empty() const { return stackTop_ == stack_; }
|
||||
size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); }
|
||||
size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); }
|
||||
|
||||
private:
|
||||
template<typename T>
|
||||
void Expand(size_t count) {
|
||||
// Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity.
|
||||
size_t newCapacity;
|
||||
if (stack_ == 0) {
|
||||
if (!allocator_)
|
||||
ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator());
|
||||
newCapacity = initialCapacity_;
|
||||
} else {
|
||||
newCapacity = GetCapacity();
|
||||
newCapacity += (newCapacity + 1) / 2;
|
||||
}
|
||||
size_t newSize = GetSize() + sizeof(T) * count;
|
||||
if (newCapacity < newSize)
|
||||
newCapacity = newSize;
|
||||
|
||||
Resize(newCapacity);
|
||||
}
|
||||
|
||||
void Resize(size_t newCapacity) {
|
||||
const size_t size = GetSize(); // Backup the current size
|
||||
stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity));
|
||||
stackTop_ = stack_ + size;
|
||||
stackEnd_ = stack_ + newCapacity;
|
||||
}
|
||||
|
||||
void Destroy() {
|
||||
Allocator::Free(stack_);
|
||||
RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack
|
||||
}
|
||||
|
||||
// Prohibit copy constructor & assignment operator.
|
||||
Stack(const Stack&);
|
||||
Stack& operator=(const Stack&);
|
||||
|
||||
Allocator* allocator_;
|
||||
Allocator* ownAllocator_;
|
||||
char *stack_;
|
||||
char *stackTop_;
|
||||
char *stackEnd_;
|
||||
size_t initialCapacity_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_STACK_H_
|
55
ext/librethinkdbxx/src/rapidjson/internal/strfunc.h
Normal file
55
ext/librethinkdbxx/src/rapidjson/internal/strfunc.h
Normal file
@ -0,0 +1,55 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
|
||||
#define RAPIDJSON_INTERNAL_STRFUNC_H_
|
||||
|
||||
#include "../stream.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Custom strlen() which works on different character types.
|
||||
/*! \tparam Ch Character type (e.g. char, wchar_t, short)
|
||||
\param s Null-terminated input string.
|
||||
\return Number of characters in the string.
|
||||
\note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
|
||||
*/
|
||||
template <typename Ch>
|
||||
inline SizeType StrLen(const Ch* s) {
|
||||
const Ch* p = s;
|
||||
while (*p) ++p;
|
||||
return SizeType(p - s);
|
||||
}
|
||||
|
||||
//! Returns number of code points in a encoded string.
|
||||
template<typename Encoding>
|
||||
bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
|
||||
GenericStringStream<Encoding> is(s);
|
||||
const typename Encoding::Ch* end = s + length;
|
||||
SizeType count = 0;
|
||||
while (is.src_ < end) {
|
||||
unsigned codepoint;
|
||||
if (!Encoding::Decode(is, &codepoint))
|
||||
return false;
|
||||
count++;
|
||||
}
|
||||
*outCount = count;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_STRFUNC_H_
|
269
ext/librethinkdbxx/src/rapidjson/internal/strtod.h
Normal file
269
ext/librethinkdbxx/src/rapidjson/internal/strtod.h
Normal file
@ -0,0 +1,269 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_STRTOD_
|
||||
#define RAPIDJSON_STRTOD_
|
||||
|
||||
#include "ieee754.h"
|
||||
#include "biginteger.h"
|
||||
#include "diyfp.h"
|
||||
#include "pow10.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
inline double FastPath(double significand, int exp) {
|
||||
if (exp < -308)
|
||||
return 0.0;
|
||||
else if (exp >= 0)
|
||||
return significand * internal::Pow10(exp);
|
||||
else
|
||||
return significand / internal::Pow10(-exp);
|
||||
}
|
||||
|
||||
inline double StrtodNormalPrecision(double d, int p) {
|
||||
if (p < -308) {
|
||||
// Prevent expSum < -308, making Pow10(p) = 0
|
||||
d = FastPath(d, -308);
|
||||
d = FastPath(d, p + 308);
|
||||
}
|
||||
else
|
||||
d = FastPath(d, p);
|
||||
return d;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T Min3(T a, T b, T c) {
|
||||
T m = a;
|
||||
if (m > b) m = b;
|
||||
if (m > c) m = c;
|
||||
return m;
|
||||
}
|
||||
|
||||
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
|
||||
const Double db(b);
|
||||
const uint64_t bInt = db.IntegerSignificand();
|
||||
const int bExp = db.IntegerExponent();
|
||||
const int hExp = bExp - 1;
|
||||
|
||||
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
|
||||
|
||||
// Adjust for decimal exponent
|
||||
if (dExp >= 0) {
|
||||
dS_Exp2 += dExp;
|
||||
dS_Exp5 += dExp;
|
||||
}
|
||||
else {
|
||||
bS_Exp2 -= dExp;
|
||||
bS_Exp5 -= dExp;
|
||||
hS_Exp2 -= dExp;
|
||||
hS_Exp5 -= dExp;
|
||||
}
|
||||
|
||||
// Adjust for binary exponent
|
||||
if (bExp >= 0)
|
||||
bS_Exp2 += bExp;
|
||||
else {
|
||||
dS_Exp2 -= bExp;
|
||||
hS_Exp2 -= bExp;
|
||||
}
|
||||
|
||||
// Adjust for half ulp exponent
|
||||
if (hExp >= 0)
|
||||
hS_Exp2 += hExp;
|
||||
else {
|
||||
dS_Exp2 -= hExp;
|
||||
bS_Exp2 -= hExp;
|
||||
}
|
||||
|
||||
// Remove common power of two factor from all three scaled values
|
||||
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
|
||||
dS_Exp2 -= common_Exp2;
|
||||
bS_Exp2 -= common_Exp2;
|
||||
hS_Exp2 -= common_Exp2;
|
||||
|
||||
BigInteger dS = d;
|
||||
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
|
||||
|
||||
BigInteger bS(bInt);
|
||||
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
|
||||
|
||||
BigInteger hS(1);
|
||||
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
|
||||
|
||||
BigInteger delta(0);
|
||||
dS.Difference(bS, &delta);
|
||||
|
||||
return delta.Compare(hS);
|
||||
}
|
||||
|
||||
inline bool StrtodFast(double d, int p, double* result) {
|
||||
// Use fast path for string-to-double conversion if possible
|
||||
// see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
|
||||
if (p > 22 && p < 22 + 16) {
|
||||
// Fast Path Cases In Disguise
|
||||
d *= internal::Pow10(p - 22);
|
||||
p = 22;
|
||||
}
|
||||
|
||||
if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
|
||||
*result = FastPath(d, p);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
// Compute an approximation and see if it is within 1/2 ULP
|
||||
inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) {
|
||||
uint64_t significand = 0;
|
||||
size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
|
||||
for (; i < length; i++) {
|
||||
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
|
||||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
|
||||
break;
|
||||
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
|
||||
}
|
||||
|
||||
if (i < length && decimals[i] >= '5') // Rounding
|
||||
significand++;
|
||||
|
||||
size_t remaining = length - i;
|
||||
const unsigned kUlpShift = 3;
|
||||
const unsigned kUlp = 1 << kUlpShift;
|
||||
int64_t error = (remaining == 0) ? 0 : kUlp / 2;
|
||||
|
||||
DiyFp v(significand, 0);
|
||||
v = v.Normalize();
|
||||
error <<= -v.e;
|
||||
|
||||
const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp;
|
||||
|
||||
int actualExp;
|
||||
DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
|
||||
if (actualExp != dExp) {
|
||||
static const DiyFp kPow10[] = {
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6
|
||||
DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7
|
||||
};
|
||||
int adjustment = dExp - actualExp - 1;
|
||||
RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7);
|
||||
v = v * kPow10[adjustment];
|
||||
if (length + static_cast<unsigned>(adjustment)> 19u) // has more digits than decimal digits in 64-bit
|
||||
error += kUlp / 2;
|
||||
}
|
||||
|
||||
v = v * cachedPower;
|
||||
|
||||
error += kUlp + (error == 0 ? 0 : 1);
|
||||
|
||||
const int oldExp = v.e;
|
||||
v = v.Normalize();
|
||||
error <<= oldExp - v.e;
|
||||
|
||||
const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
|
||||
unsigned precisionSize = 64 - effectiveSignificandSize;
|
||||
if (precisionSize + kUlpShift >= 64) {
|
||||
unsigned scaleExp = (precisionSize + kUlpShift) - 63;
|
||||
v.f >>= scaleExp;
|
||||
v.e += scaleExp;
|
||||
error = (error >> scaleExp) + 1 + static_cast<int>(kUlp);
|
||||
precisionSize -= scaleExp;
|
||||
}
|
||||
|
||||
DiyFp rounded(v.f >> precisionSize, v.e + static_cast<int>(precisionSize));
|
||||
const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
|
||||
const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
|
||||
if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
|
||||
rounded.f++;
|
||||
if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
|
||||
rounded.f >>= 1;
|
||||
rounded.e++;
|
||||
}
|
||||
}
|
||||
|
||||
*result = rounded.ToDouble();
|
||||
|
||||
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
|
||||
}
|
||||
|
||||
inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) {
|
||||
const BigInteger dInt(decimals, length);
|
||||
const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp;
|
||||
Double a(approx);
|
||||
int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
|
||||
if (cmp < 0)
|
||||
return a.Value(); // within half ULP
|
||||
else if (cmp == 0) {
|
||||
// Round towards even
|
||||
if (a.Significand() & 1)
|
||||
return a.NextPositiveDouble();
|
||||
else
|
||||
return a.Value();
|
||||
}
|
||||
else // adjustment
|
||||
return a.NextPositiveDouble();
|
||||
}
|
||||
|
||||
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
|
||||
RAPIDJSON_ASSERT(d >= 0.0);
|
||||
RAPIDJSON_ASSERT(length >= 1);
|
||||
|
||||
double result;
|
||||
if (StrtodFast(d, p, &result))
|
||||
return result;
|
||||
|
||||
// Trim leading zeros
|
||||
while (*decimals == '0' && length > 1) {
|
||||
length--;
|
||||
decimals++;
|
||||
decimalPosition--;
|
||||
}
|
||||
|
||||
// Trim trailing zeros
|
||||
while (decimals[length - 1] == '0' && length > 1) {
|
||||
length--;
|
||||
decimalPosition--;
|
||||
exp++;
|
||||
}
|
||||
|
||||
// Trim right-most digits
|
||||
const int kMaxDecimalDigit = 780;
|
||||
if (static_cast<int>(length) > kMaxDecimalDigit) {
|
||||
int delta = (static_cast<int>(length) - kMaxDecimalDigit);
|
||||
exp += delta;
|
||||
decimalPosition -= static_cast<unsigned>(delta);
|
||||
length = kMaxDecimalDigit;
|
||||
}
|
||||
|
||||
// If too small, underflow to zero
|
||||
if (int(length) + exp < -324)
|
||||
return 0.0;
|
||||
|
||||
if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result))
|
||||
return result;
|
||||
|
||||
// Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
|
||||
return StrtodBigInteger(result, decimals, length, decimalPosition, exp);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_STRTOD_
|
46
ext/librethinkdbxx/src/rapidjson/internal/swap.h
Normal file
46
ext/librethinkdbxx/src/rapidjson/internal/swap.h
Normal file
@ -0,0 +1,46 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_INTERNAL_SWAP_H_
|
||||
#define RAPIDJSON_INTERNAL_SWAP_H_
|
||||
|
||||
#include "../rapidjson.h"
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(c++98-compat)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
namespace internal {
|
||||
|
||||
//! Custom swap() to avoid dependency on C++ <algorithm> header
|
||||
/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
|
||||
\note This has the same semantics as std::swap().
|
||||
*/
|
||||
template <typename T>
|
||||
inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
|
||||
T tmp = a;
|
||||
a = b;
|
||||
b = tmp;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_INTERNAL_SWAP_H_
|
115
ext/librethinkdbxx/src/rapidjson/istreamwrapper.h
Normal file
115
ext/librethinkdbxx/src/rapidjson/istreamwrapper.h
Normal file
@ -0,0 +1,115 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_ISTREAMWRAPPER_H_
|
||||
#define RAPIDJSON_ISTREAMWRAPPER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include <iosfwd>
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept.
|
||||
/*!
|
||||
The classes can be wrapped including but not limited to:
|
||||
|
||||
- \c std::istringstream
|
||||
- \c std::stringstream
|
||||
- \c std::wistringstream
|
||||
- \c std::wstringstream
|
||||
- \c std::ifstream
|
||||
- \c std::fstream
|
||||
- \c std::wifstream
|
||||
- \c std::wfstream
|
||||
|
||||
\tparam StreamType Class derived from \c std::basic_istream.
|
||||
*/
|
||||
|
||||
template <typename StreamType>
|
||||
class BasicIStreamWrapper {
|
||||
public:
|
||||
typedef typename StreamType::char_type Ch;
|
||||
BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {}
|
||||
|
||||
Ch Peek() const {
|
||||
typename StreamType::int_type c = stream_.peek();
|
||||
return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast<Ch>(c) : '\0';
|
||||
}
|
||||
|
||||
Ch Take() {
|
||||
typename StreamType::int_type c = stream_.get();
|
||||
if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) {
|
||||
count_++;
|
||||
return static_cast<Ch>(c);
|
||||
}
|
||||
else
|
||||
return '\0';
|
||||
}
|
||||
|
||||
// tellg() may return -1 when failed. So we count by ourself.
|
||||
size_t Tell() const { return count_; }
|
||||
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
// For encoding detection only.
|
||||
const Ch* Peek4() const {
|
||||
RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream.
|
||||
int i;
|
||||
bool hasError = false;
|
||||
for (i = 0; i < 4; ++i) {
|
||||
typename StreamType::int_type c = stream_.get();
|
||||
if (c == StreamType::traits_type::eof()) {
|
||||
hasError = true;
|
||||
stream_.clear();
|
||||
break;
|
||||
}
|
||||
peekBuffer_[i] = static_cast<Ch>(c);
|
||||
}
|
||||
for (--i; i >= 0; --i)
|
||||
stream_.putback(peekBuffer_[i]);
|
||||
return !hasError ? peekBuffer_ : 0;
|
||||
}
|
||||
|
||||
private:
|
||||
BasicIStreamWrapper(const BasicIStreamWrapper&);
|
||||
BasicIStreamWrapper& operator=(const BasicIStreamWrapper&);
|
||||
|
||||
StreamType& stream_;
|
||||
size_t count_; //!< Number of characters read. Note:
|
||||
mutable Ch peekBuffer_[4];
|
||||
};
|
||||
|
||||
typedef BasicIStreamWrapper<std::istream> IStreamWrapper;
|
||||
typedef BasicIStreamWrapper<std::wistream> WIStreamWrapper;
|
||||
|
||||
#if defined(__clang__) || defined(_MSC_VER)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_ISTREAMWRAPPER_H_
|
70
ext/librethinkdbxx/src/rapidjson/memorybuffer.h
Normal file
70
ext/librethinkdbxx/src/rapidjson/memorybuffer.h
Normal file
@ -0,0 +1,70 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_MEMORYBUFFER_H_
|
||||
#define RAPIDJSON_MEMORYBUFFER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "internal/stack.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Represents an in-memory output byte stream.
|
||||
/*!
|
||||
This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
|
||||
|
||||
It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
|
||||
|
||||
Differences between MemoryBuffer and StringBuffer:
|
||||
1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
|
||||
2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
|
||||
|
||||
\tparam Allocator type for allocating memory buffer.
|
||||
\note implements Stream concept
|
||||
*/
|
||||
template <typename Allocator = CrtAllocator>
|
||||
struct GenericMemoryBuffer {
|
||||
typedef char Ch; // byte
|
||||
|
||||
GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
|
||||
|
||||
void Put(Ch c) { *stack_.template Push<Ch>() = c; }
|
||||
void Flush() {}
|
||||
|
||||
void Clear() { stack_.Clear(); }
|
||||
void ShrinkToFit() { stack_.ShrinkToFit(); }
|
||||
Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
|
||||
void Pop(size_t count) { stack_.template Pop<Ch>(count); }
|
||||
|
||||
const Ch* GetBuffer() const {
|
||||
return stack_.template Bottom<Ch>();
|
||||
}
|
||||
|
||||
size_t GetSize() const { return stack_.GetSize(); }
|
||||
|
||||
static const size_t kDefaultCapacity = 256;
|
||||
mutable internal::Stack<Allocator> stack_;
|
||||
};
|
||||
|
||||
typedef GenericMemoryBuffer<> MemoryBuffer;
|
||||
|
||||
//! Implement specialized version of PutN() with memset() for better performance.
|
||||
template<>
|
||||
inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
|
||||
std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
|
||||
}
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_MEMORYBUFFER_H_
|
71
ext/librethinkdbxx/src/rapidjson/memorystream.h
Normal file
71
ext/librethinkdbxx/src/rapidjson/memorystream.h
Normal file
@ -0,0 +1,71 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_MEMORYSTREAM_H_
|
||||
#define RAPIDJSON_MEMORYSTREAM_H_
|
||||
|
||||
#include "stream.h"
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(unreachable-code)
|
||||
RAPIDJSON_DIAG_OFF(missing-noreturn)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Represents an in-memory input byte stream.
|
||||
/*!
|
||||
This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.
|
||||
|
||||
It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.
|
||||
|
||||
Differences between MemoryStream and StringStream:
|
||||
1. StringStream has encoding but MemoryStream is a byte stream.
|
||||
2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source.
|
||||
3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
|
||||
\note implements Stream concept
|
||||
*/
|
||||
struct MemoryStream {
|
||||
typedef char Ch; // byte
|
||||
|
||||
MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}
|
||||
|
||||
Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; }
|
||||
Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; }
|
||||
size_t Tell() const { return static_cast<size_t>(src_ - begin_); }
|
||||
|
||||
Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
void Put(Ch) { RAPIDJSON_ASSERT(false); }
|
||||
void Flush() { RAPIDJSON_ASSERT(false); }
|
||||
size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
// For encoding detection only.
|
||||
const Ch* Peek4() const {
|
||||
return Tell() + 4 <= size_ ? src_ : 0;
|
||||
}
|
||||
|
||||
const Ch* src_; //!< Current read position.
|
||||
const Ch* begin_; //!< Original head of the string.
|
||||
const Ch* end_; //!< End of stream.
|
||||
size_t size_; //!< Size of the stream.
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_MEMORYBUFFER_H_
|
316
ext/librethinkdbxx/src/rapidjson/msinttypes/inttypes.h
Normal file
316
ext/librethinkdbxx/src/rapidjson/msinttypes/inttypes.h
Normal file
@ -0,0 +1,316 @@
|
||||
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
|
||||
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
|
||||
//
|
||||
// Copyright (c) 2006-2013 Alexander Chemeris
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the product nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The above software in this distribution may have been modified by
|
||||
// THL A29 Limited ("Tencent Modifications").
|
||||
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
|
||||
|
||||
#ifndef _MSC_VER // [
|
||||
#error "Use this header only with Microsoft Visual C++ compilers!"
|
||||
#endif // _MSC_VER ]
|
||||
|
||||
#ifndef _MSC_INTTYPES_H_ // [
|
||||
#define _MSC_INTTYPES_H_
|
||||
|
||||
#if _MSC_VER > 1000
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#include "stdint.h"
|
||||
|
||||
// miloyip: VC supports inttypes.h since VC2013
|
||||
#if _MSC_VER >= 1800
|
||||
#include <inttypes.h>
|
||||
#else
|
||||
|
||||
// 7.8 Format conversion of integer types
|
||||
|
||||
typedef struct {
|
||||
intmax_t quot;
|
||||
intmax_t rem;
|
||||
} imaxdiv_t;
|
||||
|
||||
// 7.8.1 Macros for format specifiers
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
|
||||
|
||||
// The fprintf macros for signed integers are:
|
||||
#define PRId8 "d"
|
||||
#define PRIi8 "i"
|
||||
#define PRIdLEAST8 "d"
|
||||
#define PRIiLEAST8 "i"
|
||||
#define PRIdFAST8 "d"
|
||||
#define PRIiFAST8 "i"
|
||||
|
||||
#define PRId16 "hd"
|
||||
#define PRIi16 "hi"
|
||||
#define PRIdLEAST16 "hd"
|
||||
#define PRIiLEAST16 "hi"
|
||||
#define PRIdFAST16 "hd"
|
||||
#define PRIiFAST16 "hi"
|
||||
|
||||
#define PRId32 "I32d"
|
||||
#define PRIi32 "I32i"
|
||||
#define PRIdLEAST32 "I32d"
|
||||
#define PRIiLEAST32 "I32i"
|
||||
#define PRIdFAST32 "I32d"
|
||||
#define PRIiFAST32 "I32i"
|
||||
|
||||
#define PRId64 "I64d"
|
||||
#define PRIi64 "I64i"
|
||||
#define PRIdLEAST64 "I64d"
|
||||
#define PRIiLEAST64 "I64i"
|
||||
#define PRIdFAST64 "I64d"
|
||||
#define PRIiFAST64 "I64i"
|
||||
|
||||
#define PRIdMAX "I64d"
|
||||
#define PRIiMAX "I64i"
|
||||
|
||||
#define PRIdPTR "Id"
|
||||
#define PRIiPTR "Ii"
|
||||
|
||||
// The fprintf macros for unsigned integers are:
|
||||
#define PRIo8 "o"
|
||||
#define PRIu8 "u"
|
||||
#define PRIx8 "x"
|
||||
#define PRIX8 "X"
|
||||
#define PRIoLEAST8 "o"
|
||||
#define PRIuLEAST8 "u"
|
||||
#define PRIxLEAST8 "x"
|
||||
#define PRIXLEAST8 "X"
|
||||
#define PRIoFAST8 "o"
|
||||
#define PRIuFAST8 "u"
|
||||
#define PRIxFAST8 "x"
|
||||
#define PRIXFAST8 "X"
|
||||
|
||||
#define PRIo16 "ho"
|
||||
#define PRIu16 "hu"
|
||||
#define PRIx16 "hx"
|
||||
#define PRIX16 "hX"
|
||||
#define PRIoLEAST16 "ho"
|
||||
#define PRIuLEAST16 "hu"
|
||||
#define PRIxLEAST16 "hx"
|
||||
#define PRIXLEAST16 "hX"
|
||||
#define PRIoFAST16 "ho"
|
||||
#define PRIuFAST16 "hu"
|
||||
#define PRIxFAST16 "hx"
|
||||
#define PRIXFAST16 "hX"
|
||||
|
||||
#define PRIo32 "I32o"
|
||||
#define PRIu32 "I32u"
|
||||
#define PRIx32 "I32x"
|
||||
#define PRIX32 "I32X"
|
||||
#define PRIoLEAST32 "I32o"
|
||||
#define PRIuLEAST32 "I32u"
|
||||
#define PRIxLEAST32 "I32x"
|
||||
#define PRIXLEAST32 "I32X"
|
||||
#define PRIoFAST32 "I32o"
|
||||
#define PRIuFAST32 "I32u"
|
||||
#define PRIxFAST32 "I32x"
|
||||
#define PRIXFAST32 "I32X"
|
||||
|
||||
#define PRIo64 "I64o"
|
||||
#define PRIu64 "I64u"
|
||||
#define PRIx64 "I64x"
|
||||
#define PRIX64 "I64X"
|
||||
#define PRIoLEAST64 "I64o"
|
||||
#define PRIuLEAST64 "I64u"
|
||||
#define PRIxLEAST64 "I64x"
|
||||
#define PRIXLEAST64 "I64X"
|
||||
#define PRIoFAST64 "I64o"
|
||||
#define PRIuFAST64 "I64u"
|
||||
#define PRIxFAST64 "I64x"
|
||||
#define PRIXFAST64 "I64X"
|
||||
|
||||
#define PRIoMAX "I64o"
|
||||
#define PRIuMAX "I64u"
|
||||
#define PRIxMAX "I64x"
|
||||
#define PRIXMAX "I64X"
|
||||
|
||||
#define PRIoPTR "Io"
|
||||
#define PRIuPTR "Iu"
|
||||
#define PRIxPTR "Ix"
|
||||
#define PRIXPTR "IX"
|
||||
|
||||
// The fscanf macros for signed integers are:
|
||||
#define SCNd8 "d"
|
||||
#define SCNi8 "i"
|
||||
#define SCNdLEAST8 "d"
|
||||
#define SCNiLEAST8 "i"
|
||||
#define SCNdFAST8 "d"
|
||||
#define SCNiFAST8 "i"
|
||||
|
||||
#define SCNd16 "hd"
|
||||
#define SCNi16 "hi"
|
||||
#define SCNdLEAST16 "hd"
|
||||
#define SCNiLEAST16 "hi"
|
||||
#define SCNdFAST16 "hd"
|
||||
#define SCNiFAST16 "hi"
|
||||
|
||||
#define SCNd32 "ld"
|
||||
#define SCNi32 "li"
|
||||
#define SCNdLEAST32 "ld"
|
||||
#define SCNiLEAST32 "li"
|
||||
#define SCNdFAST32 "ld"
|
||||
#define SCNiFAST32 "li"
|
||||
|
||||
#define SCNd64 "I64d"
|
||||
#define SCNi64 "I64i"
|
||||
#define SCNdLEAST64 "I64d"
|
||||
#define SCNiLEAST64 "I64i"
|
||||
#define SCNdFAST64 "I64d"
|
||||
#define SCNiFAST64 "I64i"
|
||||
|
||||
#define SCNdMAX "I64d"
|
||||
#define SCNiMAX "I64i"
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define SCNdPTR "I64d"
|
||||
# define SCNiPTR "I64i"
|
||||
#else // _WIN64 ][
|
||||
# define SCNdPTR "ld"
|
||||
# define SCNiPTR "li"
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// The fscanf macros for unsigned integers are:
|
||||
#define SCNo8 "o"
|
||||
#define SCNu8 "u"
|
||||
#define SCNx8 "x"
|
||||
#define SCNX8 "X"
|
||||
#define SCNoLEAST8 "o"
|
||||
#define SCNuLEAST8 "u"
|
||||
#define SCNxLEAST8 "x"
|
||||
#define SCNXLEAST8 "X"
|
||||
#define SCNoFAST8 "o"
|
||||
#define SCNuFAST8 "u"
|
||||
#define SCNxFAST8 "x"
|
||||
#define SCNXFAST8 "X"
|
||||
|
||||
#define SCNo16 "ho"
|
||||
#define SCNu16 "hu"
|
||||
#define SCNx16 "hx"
|
||||
#define SCNX16 "hX"
|
||||
#define SCNoLEAST16 "ho"
|
||||
#define SCNuLEAST16 "hu"
|
||||
#define SCNxLEAST16 "hx"
|
||||
#define SCNXLEAST16 "hX"
|
||||
#define SCNoFAST16 "ho"
|
||||
#define SCNuFAST16 "hu"
|
||||
#define SCNxFAST16 "hx"
|
||||
#define SCNXFAST16 "hX"
|
||||
|
||||
#define SCNo32 "lo"
|
||||
#define SCNu32 "lu"
|
||||
#define SCNx32 "lx"
|
||||
#define SCNX32 "lX"
|
||||
#define SCNoLEAST32 "lo"
|
||||
#define SCNuLEAST32 "lu"
|
||||
#define SCNxLEAST32 "lx"
|
||||
#define SCNXLEAST32 "lX"
|
||||
#define SCNoFAST32 "lo"
|
||||
#define SCNuFAST32 "lu"
|
||||
#define SCNxFAST32 "lx"
|
||||
#define SCNXFAST32 "lX"
|
||||
|
||||
#define SCNo64 "I64o"
|
||||
#define SCNu64 "I64u"
|
||||
#define SCNx64 "I64x"
|
||||
#define SCNX64 "I64X"
|
||||
#define SCNoLEAST64 "I64o"
|
||||
#define SCNuLEAST64 "I64u"
|
||||
#define SCNxLEAST64 "I64x"
|
||||
#define SCNXLEAST64 "I64X"
|
||||
#define SCNoFAST64 "I64o"
|
||||
#define SCNuFAST64 "I64u"
|
||||
#define SCNxFAST64 "I64x"
|
||||
#define SCNXFAST64 "I64X"
|
||||
|
||||
#define SCNoMAX "I64o"
|
||||
#define SCNuMAX "I64u"
|
||||
#define SCNxMAX "I64x"
|
||||
#define SCNXMAX "I64X"
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define SCNoPTR "I64o"
|
||||
# define SCNuPTR "I64u"
|
||||
# define SCNxPTR "I64x"
|
||||
# define SCNXPTR "I64X"
|
||||
#else // _WIN64 ][
|
||||
# define SCNoPTR "lo"
|
||||
# define SCNuPTR "lu"
|
||||
# define SCNxPTR "lx"
|
||||
# define SCNXPTR "lX"
|
||||
#endif // _WIN64 ]
|
||||
|
||||
#endif // __STDC_FORMAT_MACROS ]
|
||||
|
||||
// 7.8.2 Functions for greatest-width integer types
|
||||
|
||||
// 7.8.2.1 The imaxabs function
|
||||
#define imaxabs _abs64
|
||||
|
||||
// 7.8.2.2 The imaxdiv function
|
||||
|
||||
// This is modified version of div() function from Microsoft's div.c found
|
||||
// in %MSVC.NET%\crt\src\div.c
|
||||
#ifdef STATIC_IMAXDIV // [
|
||||
static
|
||||
#else // STATIC_IMAXDIV ][
|
||||
_inline
|
||||
#endif // STATIC_IMAXDIV ]
|
||||
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
|
||||
{
|
||||
imaxdiv_t result;
|
||||
|
||||
result.quot = numer / denom;
|
||||
result.rem = numer % denom;
|
||||
|
||||
if (numer < 0 && result.rem > 0) {
|
||||
// did division wrong; must fix up
|
||||
++result.quot;
|
||||
result.rem -= denom;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// 7.8.2.3 The strtoimax and strtoumax functions
|
||||
#define strtoimax _strtoi64
|
||||
#define strtoumax _strtoui64
|
||||
|
||||
// 7.8.2.4 The wcstoimax and wcstoumax functions
|
||||
#define wcstoimax _wcstoi64
|
||||
#define wcstoumax _wcstoui64
|
||||
|
||||
#endif // _MSC_VER >= 1800
|
||||
|
||||
#endif // _MSC_INTTYPES_H_ ]
|
300
ext/librethinkdbxx/src/rapidjson/msinttypes/stdint.h
Normal file
300
ext/librethinkdbxx/src/rapidjson/msinttypes/stdint.h
Normal file
@ -0,0 +1,300 @@
|
||||
// ISO C9x compliant stdint.h for Microsoft Visual Studio
|
||||
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
|
||||
//
|
||||
// Copyright (c) 2006-2013 Alexander Chemeris
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
//
|
||||
// 3. Neither the name of the product nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// The above software in this distribution may have been modified by
|
||||
// THL A29 Limited ("Tencent Modifications").
|
||||
// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited.
|
||||
|
||||
#ifndef _MSC_VER // [
|
||||
#error "Use this header only with Microsoft Visual C++ compilers!"
|
||||
#endif // _MSC_VER ]
|
||||
|
||||
#ifndef _MSC_STDINT_H_ // [
|
||||
#define _MSC_STDINT_H_
|
||||
|
||||
#if _MSC_VER > 1000
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010.
|
||||
#if _MSC_VER >= 1600 // [
|
||||
#include <stdint.h>
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
|
||||
|
||||
#undef INT8_C
|
||||
#undef INT16_C
|
||||
#undef INT32_C
|
||||
#undef INT64_C
|
||||
#undef UINT8_C
|
||||
#undef UINT16_C
|
||||
#undef UINT32_C
|
||||
#undef UINT64_C
|
||||
|
||||
// 7.18.4.1 Macros for minimum-width integer constants
|
||||
|
||||
#define INT8_C(val) val##i8
|
||||
#define INT16_C(val) val##i16
|
||||
#define INT32_C(val) val##i32
|
||||
#define INT64_C(val) val##i64
|
||||
|
||||
#define UINT8_C(val) val##ui8
|
||||
#define UINT16_C(val) val##ui16
|
||||
#define UINT32_C(val) val##ui32
|
||||
#define UINT64_C(val) val##ui64
|
||||
|
||||
// 7.18.4.2 Macros for greatest-width integer constants
|
||||
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
|
||||
// Check out Issue 9 for the details.
|
||||
#ifndef INTMAX_C // [
|
||||
# define INTMAX_C INT64_C
|
||||
#endif // INTMAX_C ]
|
||||
#ifndef UINTMAX_C // [
|
||||
# define UINTMAX_C UINT64_C
|
||||
#endif // UINTMAX_C ]
|
||||
|
||||
#endif // __STDC_CONSTANT_MACROS ]
|
||||
|
||||
#else // ] _MSC_VER >= 1700 [
|
||||
|
||||
#include <limits.h>
|
||||
|
||||
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
|
||||
// compiling for ARM we have to wrap <wchar.h> include with 'extern "C++" {}'
|
||||
// or compiler would give many errors like this:
|
||||
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
|
||||
#if defined(__cplusplus) && !defined(_M_ARM)
|
||||
extern "C" {
|
||||
#endif
|
||||
# include <wchar.h>
|
||||
#if defined(__cplusplus) && !defined(_M_ARM)
|
||||
}
|
||||
#endif
|
||||
|
||||
// Define _W64 macros to mark types changing their size, like intptr_t.
|
||||
#ifndef _W64
|
||||
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
|
||||
# define _W64 __w64
|
||||
# else
|
||||
# define _W64
|
||||
# endif
|
||||
#endif
|
||||
|
||||
|
||||
// 7.18.1 Integer types
|
||||
|
||||
// 7.18.1.1 Exact-width integer types
|
||||
|
||||
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
|
||||
// realize that, e.g. char has the same size as __int8
|
||||
// so we give up on __intX for them.
|
||||
#if (_MSC_VER < 1300)
|
||||
typedef signed char int8_t;
|
||||
typedef signed short int16_t;
|
||||
typedef signed int int32_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef unsigned short uint16_t;
|
||||
typedef unsigned int uint32_t;
|
||||
#else
|
||||
typedef signed __int8 int8_t;
|
||||
typedef signed __int16 int16_t;
|
||||
typedef signed __int32 int32_t;
|
||||
typedef unsigned __int8 uint8_t;
|
||||
typedef unsigned __int16 uint16_t;
|
||||
typedef unsigned __int32 uint32_t;
|
||||
#endif
|
||||
typedef signed __int64 int64_t;
|
||||
typedef unsigned __int64 uint64_t;
|
||||
|
||||
|
||||
// 7.18.1.2 Minimum-width integer types
|
||||
typedef int8_t int_least8_t;
|
||||
typedef int16_t int_least16_t;
|
||||
typedef int32_t int_least32_t;
|
||||
typedef int64_t int_least64_t;
|
||||
typedef uint8_t uint_least8_t;
|
||||
typedef uint16_t uint_least16_t;
|
||||
typedef uint32_t uint_least32_t;
|
||||
typedef uint64_t uint_least64_t;
|
||||
|
||||
// 7.18.1.3 Fastest minimum-width integer types
|
||||
typedef int8_t int_fast8_t;
|
||||
typedef int16_t int_fast16_t;
|
||||
typedef int32_t int_fast32_t;
|
||||
typedef int64_t int_fast64_t;
|
||||
typedef uint8_t uint_fast8_t;
|
||||
typedef uint16_t uint_fast16_t;
|
||||
typedef uint32_t uint_fast32_t;
|
||||
typedef uint64_t uint_fast64_t;
|
||||
|
||||
// 7.18.1.4 Integer types capable of holding object pointers
|
||||
#ifdef _WIN64 // [
|
||||
typedef signed __int64 intptr_t;
|
||||
typedef unsigned __int64 uintptr_t;
|
||||
#else // _WIN64 ][
|
||||
typedef _W64 signed int intptr_t;
|
||||
typedef _W64 unsigned int uintptr_t;
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// 7.18.1.5 Greatest-width integer types
|
||||
typedef int64_t intmax_t;
|
||||
typedef uint64_t uintmax_t;
|
||||
|
||||
|
||||
// 7.18.2 Limits of specified-width integer types
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
|
||||
|
||||
// 7.18.2.1 Limits of exact-width integer types
|
||||
#define INT8_MIN ((int8_t)_I8_MIN)
|
||||
#define INT8_MAX _I8_MAX
|
||||
#define INT16_MIN ((int16_t)_I16_MIN)
|
||||
#define INT16_MAX _I16_MAX
|
||||
#define INT32_MIN ((int32_t)_I32_MIN)
|
||||
#define INT32_MAX _I32_MAX
|
||||
#define INT64_MIN ((int64_t)_I64_MIN)
|
||||
#define INT64_MAX _I64_MAX
|
||||
#define UINT8_MAX _UI8_MAX
|
||||
#define UINT16_MAX _UI16_MAX
|
||||
#define UINT32_MAX _UI32_MAX
|
||||
#define UINT64_MAX _UI64_MAX
|
||||
|
||||
// 7.18.2.2 Limits of minimum-width integer types
|
||||
#define INT_LEAST8_MIN INT8_MIN
|
||||
#define INT_LEAST8_MAX INT8_MAX
|
||||
#define INT_LEAST16_MIN INT16_MIN
|
||||
#define INT_LEAST16_MAX INT16_MAX
|
||||
#define INT_LEAST32_MIN INT32_MIN
|
||||
#define INT_LEAST32_MAX INT32_MAX
|
||||
#define INT_LEAST64_MIN INT64_MIN
|
||||
#define INT_LEAST64_MAX INT64_MAX
|
||||
#define UINT_LEAST8_MAX UINT8_MAX
|
||||
#define UINT_LEAST16_MAX UINT16_MAX
|
||||
#define UINT_LEAST32_MAX UINT32_MAX
|
||||
#define UINT_LEAST64_MAX UINT64_MAX
|
||||
|
||||
// 7.18.2.3 Limits of fastest minimum-width integer types
|
||||
#define INT_FAST8_MIN INT8_MIN
|
||||
#define INT_FAST8_MAX INT8_MAX
|
||||
#define INT_FAST16_MIN INT16_MIN
|
||||
#define INT_FAST16_MAX INT16_MAX
|
||||
#define INT_FAST32_MIN INT32_MIN
|
||||
#define INT_FAST32_MAX INT32_MAX
|
||||
#define INT_FAST64_MIN INT64_MIN
|
||||
#define INT_FAST64_MAX INT64_MAX
|
||||
#define UINT_FAST8_MAX UINT8_MAX
|
||||
#define UINT_FAST16_MAX UINT16_MAX
|
||||
#define UINT_FAST32_MAX UINT32_MAX
|
||||
#define UINT_FAST64_MAX UINT64_MAX
|
||||
|
||||
// 7.18.2.4 Limits of integer types capable of holding object pointers
|
||||
#ifdef _WIN64 // [
|
||||
# define INTPTR_MIN INT64_MIN
|
||||
# define INTPTR_MAX INT64_MAX
|
||||
# define UINTPTR_MAX UINT64_MAX
|
||||
#else // _WIN64 ][
|
||||
# define INTPTR_MIN INT32_MIN
|
||||
# define INTPTR_MAX INT32_MAX
|
||||
# define UINTPTR_MAX UINT32_MAX
|
||||
#endif // _WIN64 ]
|
||||
|
||||
// 7.18.2.5 Limits of greatest-width integer types
|
||||
#define INTMAX_MIN INT64_MIN
|
||||
#define INTMAX_MAX INT64_MAX
|
||||
#define UINTMAX_MAX UINT64_MAX
|
||||
|
||||
// 7.18.3 Limits of other integer types
|
||||
|
||||
#ifdef _WIN64 // [
|
||||
# define PTRDIFF_MIN _I64_MIN
|
||||
# define PTRDIFF_MAX _I64_MAX
|
||||
#else // _WIN64 ][
|
||||
# define PTRDIFF_MIN _I32_MIN
|
||||
# define PTRDIFF_MAX _I32_MAX
|
||||
#endif // _WIN64 ]
|
||||
|
||||
#define SIG_ATOMIC_MIN INT_MIN
|
||||
#define SIG_ATOMIC_MAX INT_MAX
|
||||
|
||||
#ifndef SIZE_MAX // [
|
||||
# ifdef _WIN64 // [
|
||||
# define SIZE_MAX _UI64_MAX
|
||||
# else // _WIN64 ][
|
||||
# define SIZE_MAX _UI32_MAX
|
||||
# endif // _WIN64 ]
|
||||
#endif // SIZE_MAX ]
|
||||
|
||||
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
|
||||
#ifndef WCHAR_MIN // [
|
||||
# define WCHAR_MIN 0
|
||||
#endif // WCHAR_MIN ]
|
||||
#ifndef WCHAR_MAX // [
|
||||
# define WCHAR_MAX _UI16_MAX
|
||||
#endif // WCHAR_MAX ]
|
||||
|
||||
#define WINT_MIN 0
|
||||
#define WINT_MAX _UI16_MAX
|
||||
|
||||
#endif // __STDC_LIMIT_MACROS ]
|
||||
|
||||
|
||||
// 7.18.4 Limits of other integer types
|
||||
|
||||
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
|
||||
|
||||
// 7.18.4.1 Macros for minimum-width integer constants
|
||||
|
||||
#define INT8_C(val) val##i8
|
||||
#define INT16_C(val) val##i16
|
||||
#define INT32_C(val) val##i32
|
||||
#define INT64_C(val) val##i64
|
||||
|
||||
#define UINT8_C(val) val##ui8
|
||||
#define UINT16_C(val) val##ui16
|
||||
#define UINT32_C(val) val##ui32
|
||||
#define UINT64_C(val) val##ui64
|
||||
|
||||
// 7.18.4.2 Macros for greatest-width integer constants
|
||||
// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
|
||||
// Check out Issue 9 for the details.
|
||||
#ifndef INTMAX_C // [
|
||||
# define INTMAX_C INT64_C
|
||||
#endif // INTMAX_C ]
|
||||
#ifndef UINTMAX_C // [
|
||||
# define UINTMAX_C UINT64_C
|
||||
#endif // UINTMAX_C ]
|
||||
|
||||
#endif // __STDC_CONSTANT_MACROS ]
|
||||
|
||||
#endif // _MSC_VER >= 1600 ]
|
||||
|
||||
#endif // _MSC_STDINT_H_ ]
|
81
ext/librethinkdbxx/src/rapidjson/ostreamwrapper.h
Normal file
81
ext/librethinkdbxx/src/rapidjson/ostreamwrapper.h
Normal file
@ -0,0 +1,81 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_OSTREAMWRAPPER_H_
|
||||
#define RAPIDJSON_OSTREAMWRAPPER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include <iosfwd>
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept.
|
||||
/*!
|
||||
The classes can be wrapped including but not limited to:
|
||||
|
||||
- \c std::ostringstream
|
||||
- \c std::stringstream
|
||||
- \c std::wpstringstream
|
||||
- \c std::wstringstream
|
||||
- \c std::ifstream
|
||||
- \c std::fstream
|
||||
- \c std::wofstream
|
||||
- \c std::wfstream
|
||||
|
||||
\tparam StreamType Class derived from \c std::basic_ostream.
|
||||
*/
|
||||
|
||||
template <typename StreamType>
|
||||
class BasicOStreamWrapper {
|
||||
public:
|
||||
typedef typename StreamType::char_type Ch;
|
||||
BasicOStreamWrapper(StreamType& stream) : stream_(stream) {}
|
||||
|
||||
void Put(Ch c) {
|
||||
stream_.put(c);
|
||||
}
|
||||
|
||||
void Flush() {
|
||||
stream_.flush();
|
||||
}
|
||||
|
||||
// Not implemented
|
||||
char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
char Take() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
|
||||
char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
|
||||
size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
|
||||
|
||||
private:
|
||||
BasicOStreamWrapper(const BasicOStreamWrapper&);
|
||||
BasicOStreamWrapper& operator=(const BasicOStreamWrapper&);
|
||||
|
||||
StreamType& stream_;
|
||||
};
|
||||
|
||||
typedef BasicOStreamWrapper<std::ostream> OStreamWrapper;
|
||||
typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper;
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_OSTREAMWRAPPER_H_
|
1358
ext/librethinkdbxx/src/rapidjson/pointer.h
Normal file
1358
ext/librethinkdbxx/src/rapidjson/pointer.h
Normal file
File diff suppressed because it is too large
Load Diff
249
ext/librethinkdbxx/src/rapidjson/prettywriter.h
Normal file
249
ext/librethinkdbxx/src/rapidjson/prettywriter.h
Normal file
@ -0,0 +1,249 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_PRETTYWRITER_H_
|
||||
#define RAPIDJSON_PRETTYWRITER_H_
|
||||
|
||||
#include "writer.h"
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(effc++)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Combination of PrettyWriter format flags.
|
||||
/*! \see PrettyWriter::SetFormatOptions
|
||||
*/
|
||||
enum PrettyFormatOptions {
|
||||
kFormatDefault = 0, //!< Default pretty formatting.
|
||||
kFormatSingleLineArray = 1 //!< Format arrays on a single line.
|
||||
};
|
||||
|
||||
//! Writer with indentation and spacing.
|
||||
/*!
|
||||
\tparam OutputStream Type of ouptut os.
|
||||
\tparam SourceEncoding Encoding of source string.
|
||||
\tparam TargetEncoding Encoding of output stream.
|
||||
\tparam StackAllocator Type of allocator for allocating memory of stack.
|
||||
*/
|
||||
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
|
||||
class PrettyWriter : public Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator, writeFlags> {
|
||||
public:
|
||||
typedef Writer<OutputStream, SourceEncoding, TargetEncoding, StackAllocator> Base;
|
||||
typedef typename Base::Ch Ch;
|
||||
|
||||
//! Constructor
|
||||
/*! \param os Output stream.
|
||||
\param allocator User supplied allocator. If it is null, it will create a private one.
|
||||
\param levelDepth Initial capacity of stack.
|
||||
*/
|
||||
explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
|
||||
Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {}
|
||||
|
||||
|
||||
explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) :
|
||||
Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {}
|
||||
|
||||
//! Set custom indentation.
|
||||
/*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r').
|
||||
\param indentCharCount Number of indent characters for each indentation level.
|
||||
\note The default indentation is 4 spaces.
|
||||
*/
|
||||
PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) {
|
||||
RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r');
|
||||
indentChar_ = indentChar;
|
||||
indentCharCount_ = indentCharCount;
|
||||
return *this;
|
||||
}
|
||||
|
||||
//! Set pretty writer formatting options.
|
||||
/*! \param options Formatting options.
|
||||
*/
|
||||
PrettyWriter& SetFormatOptions(PrettyFormatOptions options) {
|
||||
formatOptions_ = options;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/*! @name Implementation of Handler
|
||||
\see Handler
|
||||
*/
|
||||
//@{
|
||||
|
||||
bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); }
|
||||
bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); }
|
||||
bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); }
|
||||
bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); }
|
||||
bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); }
|
||||
bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); }
|
||||
bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); }
|
||||
|
||||
bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
|
||||
(void)copy;
|
||||
PrettyPrefix(kNumberType);
|
||||
return Base::WriteString(str, length);
|
||||
}
|
||||
|
||||
bool String(const Ch* str, SizeType length, bool copy = false) {
|
||||
(void)copy;
|
||||
PrettyPrefix(kStringType);
|
||||
return Base::WriteString(str, length);
|
||||
}
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
bool String(const std::basic_string<Ch>& str) {
|
||||
return String(str.data(), SizeType(str.size()));
|
||||
}
|
||||
#endif
|
||||
|
||||
bool StartObject() {
|
||||
PrettyPrefix(kObjectType);
|
||||
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(false);
|
||||
return Base::WriteStartObject();
|
||||
}
|
||||
|
||||
bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }
|
||||
|
||||
bool EndObject(SizeType memberCount = 0) {
|
||||
(void)memberCount;
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
|
||||
RAPIDJSON_ASSERT(!Base::level_stack_.template Top<typename Base::Level>()->inArray);
|
||||
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
|
||||
|
||||
if (!empty) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
bool ret = Base::WriteEndObject();
|
||||
(void)ret;
|
||||
RAPIDJSON_ASSERT(ret == true);
|
||||
if (Base::level_stack_.Empty()) // end of json text
|
||||
Base::os_->Flush();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StartArray() {
|
||||
PrettyPrefix(kArrayType);
|
||||
new (Base::level_stack_.template Push<typename Base::Level>()) typename Base::Level(true);
|
||||
return Base::WriteStartArray();
|
||||
}
|
||||
|
||||
bool EndArray(SizeType memberCount = 0) {
|
||||
(void)memberCount;
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level));
|
||||
RAPIDJSON_ASSERT(Base::level_stack_.template Top<typename Base::Level>()->inArray);
|
||||
bool empty = Base::level_stack_.template Pop<typename Base::Level>(1)->valueCount == 0;
|
||||
|
||||
if (!empty && !(formatOptions_ & kFormatSingleLineArray)) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
bool ret = Base::WriteEndArray();
|
||||
(void)ret;
|
||||
RAPIDJSON_ASSERT(ret == true);
|
||||
if (Base::level_stack_.Empty()) // end of json text
|
||||
Base::os_->Flush();
|
||||
return true;
|
||||
}
|
||||
|
||||
//@}
|
||||
|
||||
/*! @name Convenience extensions */
|
||||
//@{
|
||||
|
||||
//! Simpler but slower overload.
|
||||
bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
|
||||
bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }
|
||||
|
||||
//@}
|
||||
|
||||
//! Write a raw JSON value.
|
||||
/*!
|
||||
For user to write a stringified JSON as a value.
|
||||
|
||||
\param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
|
||||
\param length Length of the json.
|
||||
\param type Type of the root of json.
|
||||
\note When using PrettyWriter::RawValue(), the result json may not be indented correctly.
|
||||
*/
|
||||
bool RawValue(const Ch* json, size_t length, Type type) { PrettyPrefix(type); return Base::WriteRawValue(json, length); }
|
||||
|
||||
protected:
|
||||
void PrettyPrefix(Type type) {
|
||||
(void)type;
|
||||
if (Base::level_stack_.GetSize() != 0) { // this value is not at root
|
||||
typename Base::Level* level = Base::level_stack_.template Top<typename Base::Level>();
|
||||
|
||||
if (level->inArray) {
|
||||
if (level->valueCount > 0) {
|
||||
Base::os_->Put(','); // add comma if it is not the first element in array
|
||||
if (formatOptions_ & kFormatSingleLineArray)
|
||||
Base::os_->Put(' ');
|
||||
}
|
||||
|
||||
if (!(formatOptions_ & kFormatSingleLineArray)) {
|
||||
Base::os_->Put('\n');
|
||||
WriteIndent();
|
||||
}
|
||||
}
|
||||
else { // in object
|
||||
if (level->valueCount > 0) {
|
||||
if (level->valueCount % 2 == 0) {
|
||||
Base::os_->Put(',');
|
||||
Base::os_->Put('\n');
|
||||
}
|
||||
else {
|
||||
Base::os_->Put(':');
|
||||
Base::os_->Put(' ');
|
||||
}
|
||||
}
|
||||
else
|
||||
Base::os_->Put('\n');
|
||||
|
||||
if (level->valueCount % 2 == 0)
|
||||
WriteIndent();
|
||||
}
|
||||
if (!level->inArray && level->valueCount % 2 == 0)
|
||||
RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name
|
||||
level->valueCount++;
|
||||
}
|
||||
else {
|
||||
RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root.
|
||||
Base::hasRoot_ = true;
|
||||
}
|
||||
}
|
||||
|
||||
void WriteIndent() {
|
||||
size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_;
|
||||
PutN(*Base::os_, static_cast<typename TargetEncoding::Ch>(indentChar_), count);
|
||||
}
|
||||
|
||||
Ch indentChar_;
|
||||
unsigned indentCharCount_;
|
||||
PrettyFormatOptions formatOptions_;
|
||||
|
||||
private:
|
||||
// Prohibit copy constructor & assignment operator.
|
||||
PrettyWriter(const PrettyWriter&);
|
||||
PrettyWriter& operator=(const PrettyWriter&);
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef __GNUC__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_RAPIDJSON_H_
|
615
ext/librethinkdbxx/src/rapidjson/rapidjson.h
Normal file
615
ext/librethinkdbxx/src/rapidjson/rapidjson.h
Normal file
@ -0,0 +1,615 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_RAPIDJSON_H_
|
||||
#define RAPIDJSON_RAPIDJSON_H_
|
||||
|
||||
/*!\file rapidjson.h
|
||||
\brief common definitions and configuration
|
||||
|
||||
\see RAPIDJSON_CONFIG
|
||||
*/
|
||||
|
||||
/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration
|
||||
\brief Configuration macros for library features
|
||||
|
||||
Some RapidJSON features are configurable to adapt the library to a wide
|
||||
variety of platforms, environments and usage scenarios. Most of the
|
||||
features can be configured in terms of overriden or predefined
|
||||
preprocessor macros at compile-time.
|
||||
|
||||
Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs.
|
||||
|
||||
\note These macros should be given on the compiler command-line
|
||||
(where applicable) to avoid inconsistent values when compiling
|
||||
different translation units of a single application.
|
||||
*/
|
||||
|
||||
#include <cstdlib> // malloc(), realloc(), free(), size_t
|
||||
#include <cstring> // memset(), memcpy(), memmove(), memcmp()
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_VERSION_STRING
|
||||
//
|
||||
// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt.
|
||||
//
|
||||
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
// token stringification
|
||||
#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x)
|
||||
#define RAPIDJSON_DO_STRINGIFY(x) #x
|
||||
//!@endcond
|
||||
|
||||
/*! \def RAPIDJSON_MAJOR_VERSION
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Major version of RapidJSON in integer.
|
||||
*/
|
||||
/*! \def RAPIDJSON_MINOR_VERSION
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Minor version of RapidJSON in integer.
|
||||
*/
|
||||
/*! \def RAPIDJSON_PATCH_VERSION
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Patch version of RapidJSON in integer.
|
||||
*/
|
||||
/*! \def RAPIDJSON_VERSION_STRING
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Version of RapidJSON in "<major>.<minor>.<patch>" string format.
|
||||
*/
|
||||
#define RAPIDJSON_MAJOR_VERSION 1
|
||||
#define RAPIDJSON_MINOR_VERSION 0
|
||||
#define RAPIDJSON_PATCH_VERSION 2
|
||||
#define RAPIDJSON_VERSION_STRING \
|
||||
RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_NAMESPACE_(BEGIN|END)
|
||||
/*! \def RAPIDJSON_NAMESPACE
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief provide custom rapidjson namespace
|
||||
|
||||
In order to avoid symbol clashes and/or "One Definition Rule" errors
|
||||
between multiple inclusions of (different versions of) RapidJSON in
|
||||
a single binary, users can customize the name of the main RapidJSON
|
||||
namespace.
|
||||
|
||||
In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE
|
||||
to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple
|
||||
levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref
|
||||
RAPIDJSON_NAMESPACE_END need to be defined as well:
|
||||
|
||||
\code
|
||||
// in some .cpp file
|
||||
#define RAPIDJSON_NAMESPACE my::rapidjson
|
||||
#define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson {
|
||||
#define RAPIDJSON_NAMESPACE_END } }
|
||||
#include "rapidjson/..."
|
||||
\endcode
|
||||
|
||||
\see rapidjson
|
||||
*/
|
||||
/*! \def RAPIDJSON_NAMESPACE_BEGIN
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief provide custom rapidjson namespace (opening expression)
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
/*! \def RAPIDJSON_NAMESPACE_END
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief provide custom rapidjson namespace (closing expression)
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
#ifndef RAPIDJSON_NAMESPACE
|
||||
#define RAPIDJSON_NAMESPACE rapidjson
|
||||
#endif
|
||||
#ifndef RAPIDJSON_NAMESPACE_BEGIN
|
||||
#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE {
|
||||
#endif
|
||||
#ifndef RAPIDJSON_NAMESPACE_END
|
||||
#define RAPIDJSON_NAMESPACE_END }
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_HAS_STDSTRING
|
||||
|
||||
#ifndef RAPIDJSON_HAS_STDSTRING
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation
|
||||
#else
|
||||
#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default
|
||||
#endif
|
||||
/*! \def RAPIDJSON_HAS_STDSTRING
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Enable RapidJSON support for \c std::string
|
||||
|
||||
By defining this preprocessor symbol to \c 1, several convenience functions for using
|
||||
\ref rapidjson::GenericValue with \c std::string are enabled, especially
|
||||
for construction and comparison.
|
||||
|
||||
\hideinitializer
|
||||
*/
|
||||
#endif // !defined(RAPIDJSON_HAS_STDSTRING)
|
||||
|
||||
#if RAPIDJSON_HAS_STDSTRING
|
||||
#include <string>
|
||||
#endif // RAPIDJSON_HAS_STDSTRING
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_NO_INT64DEFINE
|
||||
|
||||
/*! \def RAPIDJSON_NO_INT64DEFINE
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Use external 64-bit integer types.
|
||||
|
||||
RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types
|
||||
to be available at global scope.
|
||||
|
||||
If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to
|
||||
prevent RapidJSON from defining its own types.
|
||||
*/
|
||||
#ifndef RAPIDJSON_NO_INT64DEFINE
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013
|
||||
#include "msinttypes/stdint.h"
|
||||
#include "msinttypes/inttypes.h"
|
||||
#else
|
||||
// Other compilers should have this.
|
||||
#include <stdint.h>
|
||||
#include <inttypes.h>
|
||||
#endif
|
||||
//!@endcond
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_NO_INT64DEFINE
|
||||
#endif
|
||||
#endif // RAPIDJSON_NO_INT64TYPEDEF
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_FORCEINLINE
|
||||
|
||||
#ifndef RAPIDJSON_FORCEINLINE
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#if defined(_MSC_VER) && defined(NDEBUG)
|
||||
#define RAPIDJSON_FORCEINLINE __forceinline
|
||||
#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG)
|
||||
#define RAPIDJSON_FORCEINLINE __attribute__((always_inline))
|
||||
#else
|
||||
#define RAPIDJSON_FORCEINLINE
|
||||
#endif
|
||||
//!@endcond
|
||||
#endif // RAPIDJSON_FORCEINLINE
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ENDIAN
|
||||
#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine
|
||||
#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine
|
||||
|
||||
//! Endianness of the machine.
|
||||
/*!
|
||||
\def RAPIDJSON_ENDIAN
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
|
||||
GCC 4.6 provided macro for detecting endianness of the target machine. But other
|
||||
compilers may not have this. User can define RAPIDJSON_ENDIAN to either
|
||||
\ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN.
|
||||
|
||||
Default detection implemented with reference to
|
||||
\li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html
|
||||
\li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp
|
||||
*/
|
||||
#ifndef RAPIDJSON_ENDIAN
|
||||
// Detect with GCC 4.6's macro
|
||||
# ifdef __BYTE_ORDER__
|
||||
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif // __BYTE_ORDER__
|
||||
// Detect with GLIBC's endian.h
|
||||
# elif defined(__GLIBC__)
|
||||
# include <endian.h>
|
||||
# if (__BYTE_ORDER == __LITTLE_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif (__BYTE_ORDER == __BIG_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif // __GLIBC__
|
||||
// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro
|
||||
# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
// Detect with architecture macros
|
||||
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
|
||||
# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(_MSC_VER) && defined(_M_ARM)
|
||||
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
|
||||
# elif defined(RAPIDJSON_DOXYGEN_RUNNING)
|
||||
# define RAPIDJSON_ENDIAN
|
||||
# else
|
||||
# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN.
|
||||
# endif
|
||||
#endif // RAPIDJSON_ENDIAN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_64BIT
|
||||
|
||||
//! Whether using 64-bit architecture
|
||||
#ifndef RAPIDJSON_64BIT
|
||||
#if defined(__LP64__) || defined(_WIN64) || defined(__EMSCRIPTEN__)
|
||||
#define RAPIDJSON_64BIT 1
|
||||
#else
|
||||
#define RAPIDJSON_64BIT 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_64BIT
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ALIGN
|
||||
|
||||
//! Data alignment of the machine.
|
||||
/*! \ingroup RAPIDJSON_CONFIG
|
||||
\param x pointer to align
|
||||
|
||||
Some machines require strict data alignment. Currently the default uses 4 bytes
|
||||
alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms.
|
||||
User can customize by defining the RAPIDJSON_ALIGN function macro.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ALIGN
|
||||
#if RAPIDJSON_64BIT == 1
|
||||
#define RAPIDJSON_ALIGN(x) (((x) + static_cast<uint64_t>(7u)) & ~static_cast<uint64_t>(7u))
|
||||
#else
|
||||
#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_UINT64_C2
|
||||
|
||||
//! Construct a 64-bit literal by a pair of 32-bit integer.
|
||||
/*!
|
||||
64-bit literal with or without ULL suffix is prone to compiler warnings.
|
||||
UINT64_C() is C macro which cause compilation problems.
|
||||
Use this macro to define 64-bit constants by a pair of 32-bit integer.
|
||||
*/
|
||||
#ifndef RAPIDJSON_UINT64_C2
|
||||
#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast<uint64_t>(high32) << 32) | static_cast<uint64_t>(low32))
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
|
||||
//! Use only lower 48-bit address for some pointers.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
|
||||
This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address.
|
||||
The higher 16-bit can be used for storing other data.
|
||||
\c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture.
|
||||
*/
|
||||
#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
|
||||
#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1
|
||||
#else
|
||||
#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION
|
||||
|
||||
#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1
|
||||
#if RAPIDJSON_64BIT != 1
|
||||
#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1
|
||||
#endif
|
||||
#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast<type *>((reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast<uintptr_t>(reinterpret_cast<const void*>(x))))
|
||||
#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast<type *>(reinterpret_cast<uintptr_t>(p) & static_cast<uintptr_t>(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF))))
|
||||
#else
|
||||
#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x))
|
||||
#define RAPIDJSON_GETPOINTER(type, p) (p)
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD
|
||||
|
||||
/*! \def RAPIDJSON_SIMD
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief Enable SSE2/SSE4.2 optimization.
|
||||
|
||||
RapidJSON supports optimized implementations for some parsing operations
|
||||
based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible
|
||||
processors.
|
||||
|
||||
To enable these optimizations, two different symbols can be defined;
|
||||
\code
|
||||
// Enable SSE2 optimization.
|
||||
#define RAPIDJSON_SSE2
|
||||
|
||||
// Enable SSE4.2 optimization.
|
||||
#define RAPIDJSON_SSE42
|
||||
\endcode
|
||||
|
||||
\c RAPIDJSON_SSE42 takes precedence, if both are defined.
|
||||
|
||||
If any of these symbols is defined, RapidJSON defines the macro
|
||||
\c RAPIDJSON_SIMD to indicate the availability of the optimized code.
|
||||
*/
|
||||
#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \
|
||||
|| defined(RAPIDJSON_DOXYGEN_RUNNING)
|
||||
#define RAPIDJSON_SIMD
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
|
||||
#ifndef RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
/*! \def RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief User-provided \c SizeType definition.
|
||||
|
||||
In order to avoid using 32-bit size types for indexing strings and arrays,
|
||||
define this preprocessor symbol and provide the type rapidjson::SizeType
|
||||
before including RapidJSON:
|
||||
\code
|
||||
#define RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
namespace rapidjson { typedef ::std::size_t SizeType; }
|
||||
#include "rapidjson/..."
|
||||
\endcode
|
||||
|
||||
\see rapidjson::SizeType
|
||||
*/
|
||||
#ifdef RAPIDJSON_DOXYGEN_RUNNING
|
||||
#define RAPIDJSON_NO_SIZETYPEDEFINE
|
||||
#endif
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
//! Size type (for string lengths, array sizes, etc.)
|
||||
/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms,
|
||||
instead of using \c size_t. Users may override the SizeType by defining
|
||||
\ref RAPIDJSON_NO_SIZETYPEDEFINE.
|
||||
*/
|
||||
typedef unsigned SizeType;
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
#endif
|
||||
|
||||
// always import std::size_t to rapidjson namespace
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
using std::size_t;
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_ASSERT
|
||||
|
||||
//! Assertion.
|
||||
/*! \ingroup RAPIDJSON_CONFIG
|
||||
By default, rapidjson uses C \c assert() for internal assertions.
|
||||
User can override it by defining RAPIDJSON_ASSERT(x) macro.
|
||||
|
||||
\note Parsing errors are handled and can be customized by the
|
||||
\ref RAPIDJSON_ERRORS APIs.
|
||||
*/
|
||||
#ifndef RAPIDJSON_ASSERT
|
||||
#include <cassert>
|
||||
#define RAPIDJSON_ASSERT(x) assert(x)
|
||||
#endif // RAPIDJSON_ASSERT
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_STATIC_ASSERT
|
||||
|
||||
// Adopt from boost
|
||||
#ifndef RAPIDJSON_STATIC_ASSERT
|
||||
#ifndef __clang__
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
#endif
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
template <bool x> struct STATIC_ASSERTION_FAILURE;
|
||||
template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
|
||||
template<int x> struct StaticAssertTest {};
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y)
|
||||
#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y)
|
||||
#define RAPIDJSON_DO_JOIN2(X, Y) X##Y
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
|
||||
#else
|
||||
#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
|
||||
#endif
|
||||
#ifndef __clang__
|
||||
//!@endcond
|
||||
#endif
|
||||
|
||||
/*! \def RAPIDJSON_STATIC_ASSERT
|
||||
\brief (Internal) macro to check for conditions at compile-time
|
||||
\param x compile-time condition
|
||||
\hideinitializer
|
||||
*/
|
||||
#define RAPIDJSON_STATIC_ASSERT(x) \
|
||||
typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \
|
||||
sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \
|
||||
RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY
|
||||
|
||||
//! Compiler branching hint for expression with high probability to be true.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\param x Boolean expression likely to be true.
|
||||
*/
|
||||
#ifndef RAPIDJSON_LIKELY
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1)
|
||||
#else
|
||||
#define RAPIDJSON_LIKELY(x) (x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
//! Compiler branching hint for expression with low probability to be true.
|
||||
/*!
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\param x Boolean expression unlikely to be true.
|
||||
*/
|
||||
#ifndef RAPIDJSON_UNLIKELY
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
#define RAPIDJSON_UNLIKELY(x) (x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Helpers
|
||||
|
||||
//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
|
||||
|
||||
#define RAPIDJSON_MULTILINEMACRO_BEGIN do {
|
||||
#define RAPIDJSON_MULTILINEMACRO_END \
|
||||
} while((void)0, 0)
|
||||
|
||||
// adopted from Boost
|
||||
#define RAPIDJSON_VERSION_CODE(x,y,z) \
|
||||
(((x)*100000) + ((y)*100) + (z))
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#define RAPIDJSON_GNUC \
|
||||
RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__)
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0))
|
||||
|
||||
#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x))
|
||||
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x)
|
||||
#define RAPIDJSON_DIAG_OFF(x) \
|
||||
RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x)))
|
||||
|
||||
// push/pop support in Clang and GCC>=4.6
|
||||
#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0))
|
||||
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
|
||||
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
|
||||
#else // GCC >= 4.2, < 4.6
|
||||
#define RAPIDJSON_DIAG_PUSH /* ignored */
|
||||
#define RAPIDJSON_DIAG_POP /* ignored */
|
||||
#endif
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
// pragma (MSVC specific)
|
||||
#define RAPIDJSON_PRAGMA(x) __pragma(x)
|
||||
#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x))
|
||||
|
||||
#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x)
|
||||
#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push)
|
||||
#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop)
|
||||
|
||||
#else
|
||||
|
||||
#define RAPIDJSON_DIAG_OFF(x) /* ignored */
|
||||
#define RAPIDJSON_DIAG_PUSH /* ignored */
|
||||
#define RAPIDJSON_DIAG_POP /* ignored */
|
||||
|
||||
#endif // RAPIDJSON_DIAG_*
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// C++11 features
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
#if defined(__clang__)
|
||||
#if __has_feature(cxx_rvalue_references) && \
|
||||
(defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306)
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
|
||||
#endif
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
|
||||
(defined(_MSC_VER) && _MSC_VER >= 1600)
|
||||
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
#if defined(__clang__)
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept)
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__))
|
||||
// (defined(_MSC_VER) && _MSC_VER >= ????) // not yet supported
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0
|
||||
#endif
|
||||
#endif
|
||||
#if RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
#define RAPIDJSON_NOEXCEPT noexcept
|
||||
#else
|
||||
#define RAPIDJSON_NOEXCEPT /* noexcept */
|
||||
#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
|
||||
|
||||
// no automatic detection, yet
|
||||
#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
|
||||
#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
|
||||
#endif
|
||||
|
||||
#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR
|
||||
#if defined(__clang__)
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for)
|
||||
#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
|
||||
(defined(_MSC_VER) && _MSC_VER >= 1700)
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
|
||||
#else
|
||||
#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0
|
||||
#endif
|
||||
#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR
|
||||
|
||||
//!@endcond
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// new/delete
|
||||
|
||||
#ifndef RAPIDJSON_NEW
|
||||
///! customization point for global \c new
|
||||
#define RAPIDJSON_NEW(x) new x
|
||||
#endif
|
||||
#ifndef RAPIDJSON_DELETE
|
||||
///! customization point for global \c delete
|
||||
#define RAPIDJSON_DELETE(x) delete x
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Type
|
||||
|
||||
/*! \namespace rapidjson
|
||||
\brief main RapidJSON namespace
|
||||
\see RAPIDJSON_NAMESPACE
|
||||
*/
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Type of JSON value
|
||||
enum Type {
|
||||
kNullType = 0, //!< null
|
||||
kFalseType = 1, //!< false
|
||||
kTrueType = 2, //!< true
|
||||
kObjectType = 3, //!< object
|
||||
kArrayType = 4, //!< array
|
||||
kStringType = 5, //!< string
|
||||
kNumberType = 6 //!< number
|
||||
};
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_RAPIDJSON_H_
|
1879
ext/librethinkdbxx/src/rapidjson/reader.h
Normal file
1879
ext/librethinkdbxx/src/rapidjson/reader.h
Normal file
File diff suppressed because it is too large
Load Diff
2006
ext/librethinkdbxx/src/rapidjson/schema.h
Normal file
2006
ext/librethinkdbxx/src/rapidjson/schema.h
Normal file
File diff suppressed because it is too large
Load Diff
179
ext/librethinkdbxx/src/rapidjson/stream.h
Normal file
179
ext/librethinkdbxx/src/rapidjson/stream.h
Normal file
@ -0,0 +1,179 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#include "rapidjson.h"
|
||||
|
||||
#ifndef RAPIDJSON_STREAM_H_
|
||||
#define RAPIDJSON_STREAM_H_
|
||||
|
||||
#include "encodings.h"
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Stream
|
||||
|
||||
/*! \class rapidjson::Stream
|
||||
\brief Concept for reading and writing characters.
|
||||
|
||||
For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd().
|
||||
|
||||
For write-only stream, only need to implement Put() and Flush().
|
||||
|
||||
\code
|
||||
concept Stream {
|
||||
typename Ch; //!< Character type of the stream.
|
||||
|
||||
//! Read the current character from stream without moving the read cursor.
|
||||
Ch Peek() const;
|
||||
|
||||
//! Read the current character from stream and moving the read cursor to next character.
|
||||
Ch Take();
|
||||
|
||||
//! Get the current read cursor.
|
||||
//! \return Number of characters read from start.
|
||||
size_t Tell();
|
||||
|
||||
//! Begin writing operation at the current read pointer.
|
||||
//! \return The begin writer pointer.
|
||||
Ch* PutBegin();
|
||||
|
||||
//! Write a character.
|
||||
void Put(Ch c);
|
||||
|
||||
//! Flush the buffer.
|
||||
void Flush();
|
||||
|
||||
//! End the writing operation.
|
||||
//! \param begin The begin write pointer returned by PutBegin().
|
||||
//! \return Number of characters written.
|
||||
size_t PutEnd(Ch* begin);
|
||||
}
|
||||
\endcode
|
||||
*/
|
||||
|
||||
//! Provides additional information for stream.
/*!
    By using traits pattern, this type provides a default configuration for stream.
    For custom stream, this type can be specialized for other configuration.
    See TEST(Reader, CustomStringStream) in readertest.cpp for example.
*/
template<typename Stream>
struct StreamTraits {
    //! Whether to make local copy of stream for optimization during parsing.
    /*!
        By default, for safety, streams do not use local copy optimization.
        Stream that can be copied fast should specialize this, like
        StreamTraits<StringStream> (cheap pointer-only streams set this to 1).
    */
    enum { copyOptimization = 0 };
};
|
||||
|
||||
//! Reserve n characters for writing to a stream.
/*! The generic version is a deliberate no-op; streams that support
    pre-allocation (e.g. GenericStringBuffer) provide an overload that
    actually reserves capacity. */
template<typename Stream>
inline void PutReserve(Stream& os, size_t expected) {
    (void)os;
    (void)expected;
}

//! Write character to a stream, presuming buffer is reserved.
/*! The generic version simply forwards to Stream::Put(); buffered
    streams may overload this to skip per-character capacity checks. */
template<typename Stream>
inline void PutUnsafe(Stream& os, typename Stream::Ch ch) {
    os.Put(ch);
}

//! Put N copies of a character to a stream.
/*! Reserves space once up front, then writes \p repeat copies of \p ch. */
template<typename Stream, typename Ch>
inline void PutN(Stream& os, Ch ch, size_t repeat) {
    PutReserve(os, repeat);
    while (repeat--)
        PutUnsafe(os, ch);
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// StringStream
|
||||
|
||||
//! Read-only string stream.
/*! Wraps a character buffer as a read-only Stream over a pair of raw
    pointers (current position and original head).
    \tparam Encoding Encoding of the source string (supplies the Ch character type).
    \note implements Stream concept
    \note Does not own the buffer, and performs no bounds checking of its
          own — the caller must keep the string alive and properly
          terminated for the duration of parsing.
*/
template <typename Encoding>
struct GenericStringStream {
    typedef typename Encoding::Ch Ch;

    GenericStringStream(const Ch *src) : src_(src), head_(src) {}

    //! Peek at the current character without advancing the cursor.
    Ch Peek() const { return *src_; }
    //! Read the current character and advance the cursor.
    Ch Take() { return *src_++; }
    //! Number of characters consumed so far.
    size_t Tell() const { return static_cast<size_t>(src_ - head_); }

    // Write operations are unsupported on a read-only stream; calling
    // any of them is a programming error (asserts in debug builds).
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    const Ch* src_; //!< Current read position.
    const Ch* head_; //!< Original head of the string.
};
|
||||
|
||||
//! Enable the local-copy parsing optimization for GenericStringStream.
/*! The stream holds only two pointers, so copying it into a local during
    parsing (see StreamTraits::copyOptimization) is cheap. */
template <typename Encoding>
struct StreamTraits<GenericStringStream<Encoding> > {
    enum { copyOptimization = 1 };
};

//! String stream with UTF8 encoding.
typedef GenericStringStream<UTF8<> > StringStream;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// InsituStringStream
|
||||
|
||||
//! A read-write string stream.
/*! This string stream is particularly designed for in-situ parsing:
    decoded output is written back into the same buffer being read
    (PutBegin() aligns the write cursor with the read cursor), which is
    safe as long as the write cursor never overtakes the read cursor.
    \tparam Encoding Encoding of the buffer (supplies the Ch character type).
    \note implements Stream concept
*/
template <typename Encoding>
struct GenericInsituStringStream {
    typedef typename Encoding::Ch Ch;

    GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {}

    // Read
    Ch Peek() { return *src_; }
    Ch Take() { return *src_++; }
    size_t Tell() { return static_cast<size_t>(src_ - head_); }

    // Write
    //! Write a character at the write cursor; PutBegin() must have been called first.
    void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; }

    //! Begin a write operation at the current read position.
    Ch* PutBegin() { return dst_ = src_; }
    //! \return Number of characters written since \p begin.
    size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); }
    void Flush() {}

    //! Advance the write cursor by \p count, returning the prior position.
    Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; }
    //! Move the write cursor back by \p count characters.
    void Pop(size_t count) { dst_ -= count; }

    Ch* src_;  //!< Current read position.
    Ch* dst_;  //!< Current write position (null until PutBegin()).
    Ch* head_; //!< Original start of the buffer.
};
|
||||
|
||||
//! Enable the local-copy parsing optimization for GenericInsituStringStream.
/*! The stream is three raw pointers, so copying it into a local during
    parsing (see StreamTraits::copyOptimization) is cheap. */
template <typename Encoding>
struct StreamTraits<GenericInsituStringStream<Encoding> > {
    enum { copyOptimization = 1 };
};

//! Insitu string stream with UTF8 encoding.
typedef GenericInsituStringStream<UTF8<> > InsituStringStream;
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#endif // RAPIDJSON_STREAM_H_
|
117
ext/librethinkdbxx/src/rapidjson/stringbuffer.h
Normal file
117
ext/librethinkdbxx/src/rapidjson/stringbuffer.h
Normal file
@ -0,0 +1,117 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_STRINGBUFFER_H_
|
||||
#define RAPIDJSON_STRINGBUFFER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "internal/stack.h"
|
||||
|
||||
#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
|
||||
#include <utility> // std::move
|
||||
#endif
|
||||
|
||||
#include "internal/stack.h"
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(c++98-compat)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
//! Represents an in-memory output stream.
/*!
    \tparam Encoding Encoding of the stream.
    \tparam Allocator type for allocating memory buffer.
    \note implements Stream concept (write side only: Put()/Flush()).
*/
template <typename Encoding, typename Allocator = CrtAllocator>
class GenericStringBuffer {
public:
    typedef typename Encoding::Ch Ch;

    GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}

#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
    GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
    GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
        if (&rhs != this)
            stack_ = std::move(rhs.stack_);
        return *this;
    }
#endif

    //! Append one character (grows the underlying stack as needed).
    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
    //! Append one character without a capacity check; call Reserve() first.
    void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; }
    //! No-op: all data already lives in memory.
    void Flush() {}

    //! Discard the contents (delegates to the underlying stack).
    void Clear() { stack_.Clear(); }
    //! Release unused capacity while keeping the stored characters.
    void ShrinkToFit() {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.ShrinkToFit();
        stack_.template Pop<Ch>(1);
    }

    //! Reserve space for count additional characters.
    void Reserve(size_t count) { stack_.template Reserve<Ch>(count); }
    //! Allocate count characters and return a pointer to write into them.
    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
    //! Like Push(), but without a capacity check; call Reserve() first.
    Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); }
    //! Remove the last count characters.
    void Pop(size_t count) { stack_.template Pop<Ch>(count); }

    //! Get a null-terminated view of the buffer contents.
    /*! NOTE(review): the returned pointer presumably becomes invalid after
        any further mutation of the buffer — verify against internal::Stack. */
    const Ch* GetString() const {
        // Push and pop a null terminator. This is safe.
        *stack_.template Push<Ch>() = '\0';
        stack_.template Pop<Ch>(1);

        return stack_.template Bottom<Ch>();
    }

    //! Size of the stored data (delegates to the underlying stack).
    size_t GetSize() const { return stack_.GetSize(); }

    static const size_t kDefaultCapacity = 256;
    // mutable so that GetString() can append/remove the terminator on a
    // logically-const buffer.
    mutable internal::Stack<Allocator> stack_;

private:
    // Prohibit copy constructor & assignment operator.
    GenericStringBuffer(const GenericStringBuffer&);
    GenericStringBuffer& operator=(const GenericStringBuffer&);
};
|
||||
|
||||
//! String buffer with UTF8 encoding
typedef GenericStringBuffer<UTF8<> > StringBuffer;

//! PutReserve() overload: forwards to GenericStringBuffer::Reserve().
template<typename Encoding, typename Allocator>
inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) {
    stream.Reserve(count);
}

//! PutUnsafe() overload: writes without a capacity check (Reserve() must
//! have been called beforehand).
template<typename Encoding, typename Allocator>
inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) {
    stream.PutUnsafe(c);
}

//! Implement specialized version of PutN() with memset() for better performance.
template<>
inline void PutN(GenericStringBuffer<UTF8<> >& stream, char c, size_t n) {
    std::memset(stream.stack_.Push<char>(n), c, n * sizeof(c));
}
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#if defined(__clang__)
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_STRINGBUFFER_H_
|
609
ext/librethinkdbxx/src/rapidjson/writer.h
Normal file
609
ext/librethinkdbxx/src/rapidjson/writer.h
Normal file
@ -0,0 +1,609 @@
|
||||
// Tencent is pleased to support the open source community by making RapidJSON available.
|
||||
//
|
||||
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
|
||||
//
|
||||
// Licensed under the MIT License (the "License"); you may not use this file except
|
||||
// in compliance with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://opensource.org/licenses/MIT
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
#ifndef RAPIDJSON_WRITER_H_
|
||||
#define RAPIDJSON_WRITER_H_
|
||||
|
||||
#include "stream.h"
|
||||
#include "internal/stack.h"
|
||||
#include "internal/strfunc.h"
|
||||
#include "internal/dtoa.h"
|
||||
#include "internal/itoa.h"
|
||||
#include "stringbuffer.h"
|
||||
#include <new> // placement new
|
||||
|
||||
#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(_BitScanForward)
|
||||
#endif
|
||||
#ifdef RAPIDJSON_SSE42
|
||||
#include <nmmintrin.h>
|
||||
#elif defined(RAPIDJSON_SSE2)
|
||||
#include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_PUSH
|
||||
RAPIDJSON_DIAG_OFF(padded)
|
||||
RAPIDJSON_DIAG_OFF(unreachable-code)
|
||||
#endif
|
||||
|
||||
RAPIDJSON_NAMESPACE_BEGIN
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// WriteFlag
|
||||
|
||||
/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
\ingroup RAPIDJSON_CONFIG
|
||||
\brief User-defined kWriteDefaultFlags definition.
|
||||
|
||||
User can define this as any \c WriteFlag combinations.
|
||||
*/
|
||||
#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags
|
||||
#endif
|
||||
|
||||
//! Combination of writeFlags
|
||||
enum WriteFlag {
|
||||
kWriteNoFlags = 0, //!< No flags are set.
|
||||
kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings.
|
||||
kWriteNanAndInfFlag = 2, //!< Allow writing of Inf, -Inf and NaN.
|
||||
kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS
|
||||
};
|
||||
|
||||
//! JSON writer
|
||||
/*! Writer implements the concept Handler.
|
||||
It generates JSON text by events to an output os.
|
||||
|
||||
User may programmatically calls the functions of a writer to generate JSON text.
|
||||
|
||||
On the other side, a writer can also be passed to objects that generates events,
|
||||
|
||||
for example Reader::Parse() and Document::Accept().
|
||||
|
||||
\tparam OutputStream Type of output stream.
|
||||
\tparam SourceEncoding Encoding of source string.
|
||||
\tparam TargetEncoding Encoding of output stream.
|
||||
\tparam StackAllocator Type of allocator for allocating memory of stack.
|
||||
\note implements Handler concept
|
||||
*/
|
||||
template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
class Writer {
public:
    typedef typename SourceEncoding::Ch Ch;

    //! Default: do not truncate decimal places (324 covers the full range of double).
    static const int kDefaultMaxDecimalPlaces = 324;

    //! Constructor
    /*! \param os Output stream.
        \param stackAllocator User supplied allocator. If it is null, it will create a private one.
        \param levelDepth Initial capacity of stack.
    */
    explicit
    Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) :
        os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}

    //! Constructor without an output stream; call Reset() before use.
    explicit
    Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) :
        os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}

    //! Reset the writer with a new stream.
    /*!
        This function reset the writer with a new stream and default settings,
        in order to make a Writer object reusable for output multiple JSONs.

        \param os New output stream.
        \code
        Writer<OutputStream> writer(os1);
        writer.StartObject();
        // ...
        writer.EndObject();

        writer.Reset(os2);
        writer.StartObject();
        // ...
        writer.EndObject();
        \endcode
    */
    void Reset(OutputStream& os) {
        os_ = &os;
        hasRoot_ = false;
        level_stack_.Clear();
    }

    //! Checks whether the output is a complete JSON.
    /*!
        A complete JSON has a complete root object or array.
    */
    bool IsComplete() const {
        return hasRoot_ && level_stack_.Empty();
    }

    int GetMaxDecimalPlaces() const {
        return maxDecimalPlaces_;
    }

    //! Sets the maximum number of decimal places for double output.
    /*!
        This setting truncates the output with specified number of decimal places.

        For example,

        \code
        writer.SetMaxDecimalPlaces(3);
        writer.StartArray();
        writer.Double(0.12345);                 // "0.123"
        writer.Double(0.0001);                  // "0.0"
        writer.Double(1.234567890123456e30);    // "1.234567890123456e30" (do not truncate significand for positive exponent)
        writer.Double(1.23e-4);                 // "0.0" (do truncate significand for negative exponent)
        writer.EndArray();
        \endcode

        The default setting does not truncate any decimal places. You can restore to this setting by calling
        \code
        writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces);
        \endcode
    */
    void SetMaxDecimalPlaces(int maxDecimalPlaces) {
        maxDecimalPlaces_ = maxDecimalPlaces;
    }

    /*!@name Implementation of Handler
        \see Handler
    */
    //@{

    bool Null()                 { Prefix(kNullType);   return WriteNull(); }
    bool Bool(bool b)           { Prefix(b ? kTrueType : kFalseType); return WriteBool(b); }
    bool Int(int i)             { Prefix(kNumberType); return WriteInt(i); }
    bool Uint(unsigned u)       { Prefix(kNumberType); return WriteUint(u); }
    bool Int64(int64_t i64)     { Prefix(kNumberType); return WriteInt64(i64); }
    bool Uint64(uint64_t u64)   { Prefix(kNumberType); return WriteUint64(u64); }

    //! Writes the given \c double value to the stream
    /*!
        \param d The value to be written.
        \return Whether it is succeed.
    */
    bool Double(double d)       { Prefix(kNumberType); return WriteDouble(d); }

    //! Writes an already-stringified number verbatim (still escaped/validated as a string body).
    bool RawNumber(const Ch* str, SizeType length, bool copy = false) {
        (void)copy;
        Prefix(kNumberType);
        return WriteString(str, length);
    }

    bool String(const Ch* str, SizeType length, bool copy = false) {
        (void)copy;
        Prefix(kStringType);
        return WriteString(str, length);
    }

#if RAPIDJSON_HAS_STDSTRING
    bool String(const std::basic_string<Ch>& str) {
        return String(str.data(), SizeType(str.size()));
    }
#endif

    bool StartObject() {
        Prefix(kObjectType);
        // Placement-new a Level record onto the internal stack to track nesting.
        new (level_stack_.template Push<Level>()) Level(false);
        return WriteStartObject();
    }

    bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); }

    bool EndObject(SizeType memberCount = 0) {
        (void)memberCount;
        RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
        RAPIDJSON_ASSERT(!level_stack_.template Top<Level>()->inArray);
        level_stack_.template Pop<Level>(1);
        bool ret = WriteEndObject();
        if (RAPIDJSON_UNLIKELY(level_stack_.Empty()))   // end of json text
            os_->Flush();
        return ret;
    }

    bool StartArray() {
        Prefix(kArrayType);
        new (level_stack_.template Push<Level>()) Level(true);
        return WriteStartArray();
    }

    bool EndArray(SizeType elementCount = 0) {
        (void)elementCount;
        RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level));
        RAPIDJSON_ASSERT(level_stack_.template Top<Level>()->inArray);
        level_stack_.template Pop<Level>(1);
        bool ret = WriteEndArray();
        if (RAPIDJSON_UNLIKELY(level_stack_.Empty()))   // end of json text
            os_->Flush();
        return ret;
    }
    //@}

    /*! @name Convenience extensions */
    //@{

    //! Simpler but slower overload.
    bool String(const Ch* str) { return String(str, internal::StrLen(str)); }
    bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); }

    //@}

    //! Write a raw JSON value.
    /*!
        For user to write a stringified JSON as a value.

        \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range.
        \param length Length of the json.
        \param type Type of the root of json.
    */
    bool RawValue(const Ch* json, size_t length, Type type) { Prefix(type); return WriteRawValue(json, length); }

protected:
    //! Information for each nested level
    struct Level {
        Level(bool inArray_) : valueCount(0), inArray(inArray_) {}
        size_t valueCount;  //!< number of values in this level
        bool inArray;       //!< true if in array, otherwise in object
    };

    static const size_t kDefaultLevelDepth = 32;

    bool WriteNull()  {
        PutReserve(*os_, 4);
        PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true;
    }

    bool WriteBool(bool b)  {
        if (b) {
            PutReserve(*os_, 4);
            PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e');
        }
        else {
            PutReserve(*os_, 5);
            PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e');
        }
        return true;
    }

    bool WriteInt(int i) {
        char buffer[11];    // "-2147483648" = 11 chars, no terminator needed
        const char* end = internal::i32toa(i, buffer);
        PutReserve(*os_, static_cast<size_t>(end - buffer));
        for (const char* p = buffer; p != end; ++p)
            PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
        return true;
    }

    bool WriteUint(unsigned u) {
        char buffer[10];
        const char* end = internal::u32toa(u, buffer);
        PutReserve(*os_, static_cast<size_t>(end - buffer));
        for (const char* p = buffer; p != end; ++p)
            PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
        return true;
    }

    bool WriteInt64(int64_t i64) {
        char buffer[21];
        const char* end = internal::i64toa(i64, buffer);
        PutReserve(*os_, static_cast<size_t>(end - buffer));
        for (const char* p = buffer; p != end; ++p)
            PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
        return true;
    }

    bool WriteUint64(uint64_t u64) {
        char buffer[20];
        char* end = internal::u64toa(u64, buffer);
        PutReserve(*os_, static_cast<size_t>(end - buffer));
        for (char* p = buffer; p != end; ++p)
            PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
        return true;
    }

    //! Writes a double; NaN/Inf are only emitted when kWriteNanAndInfFlag is set,
    //! otherwise the write fails (returns false).
    bool WriteDouble(double d) {
        if (internal::Double(d).IsNanOrInf()) {
            if (!(writeFlags & kWriteNanAndInfFlag))
                return false;
            if (internal::Double(d).IsNan()) {
                PutReserve(*os_, 3);
                PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
                return true;
            }
            if (internal::Double(d).Sign()) {
                PutReserve(*os_, 9);
                PutUnsafe(*os_, '-');
            }
            else
                PutReserve(*os_, 8);
            PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
            PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
            return true;
        }

        char buffer[25];
        char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
        PutReserve(*os_, static_cast<size_t>(end - buffer));
        for (char* p = buffer; p != end; ++p)
            PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(*p));
        return true;
    }

    bool WriteString(const Ch* str, SizeType length)  {
        static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
        // Per-byte escape action: 0 = no escape, 'u' = \u00XX, else the char after the backslash.
        static const char escape[256] = {
#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
            //0    1    2    3    4    5    6    7    8    9    A    B    C    D    E    F
            'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00
            'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10
              0,   0, '"',   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, // 20
            Z16, Z16,                                                                       // 30~4F
              0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,'\\',   0,   0,   0, // 50
            Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16                                // 60~FF
#undef Z16
        };

        if (TargetEncoding::supportUnicode)
            PutReserve(*os_, 2 + length * 6);   // "\uxxxx..."
        else
            PutReserve(*os_, 2 + length * 12);  // "\uxxxx\uyyyy..."

        PutUnsafe(*os_, '\"');
        GenericStringStream<SourceEncoding> is(str);
        while (ScanWriteUnescapedString(is, length)) {
            const Ch c = is.Peek();
            if (!TargetEncoding::supportUnicode && static_cast<unsigned>(c) >= 0x80) {
                // Unicode escaping
                unsigned codepoint;
                if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint)))
                    return false;
                PutUnsafe(*os_, '\\');
                PutUnsafe(*os_, 'u');
                if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) {
                    PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]);
                    PutUnsafe(*os_, hexDigits[(codepoint >>  8) & 15]);
                    PutUnsafe(*os_, hexDigits[(codepoint >>  4) & 15]);
                    PutUnsafe(*os_, hexDigits[(codepoint      ) & 15]);
                }
                else {
                    RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF);
                    // Surrogate pair
                    unsigned s = codepoint - 0x010000;
                    unsigned lead = (s >> 10) + 0xD800;
                    unsigned trail = (s & 0x3FF) + 0xDC00;
                    PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]);
                    PutUnsafe(*os_, hexDigits[(lead >>  8) & 15]);
                    PutUnsafe(*os_, hexDigits[(lead >>  4) & 15]);
                    PutUnsafe(*os_, hexDigits[(lead      ) & 15]);
                    PutUnsafe(*os_, '\\');
                    PutUnsafe(*os_, 'u');
                    PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]);
                    PutUnsafe(*os_, hexDigits[(trail >>  8) & 15]);
                    PutUnsafe(*os_, hexDigits[(trail >>  4) & 15]);
                    PutUnsafe(*os_, hexDigits[(trail      ) & 15]);
                }
            }
            else if ((sizeof(Ch) == 1 || static_cast<unsigned>(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast<unsigned char>(c)]))  {
                is.Take();
                PutUnsafe(*os_, '\\');
                PutUnsafe(*os_, static_cast<typename TargetEncoding::Ch>(escape[static_cast<unsigned char>(c)]));
                if (escape[static_cast<unsigned char>(c)] == 'u') {
                    PutUnsafe(*os_, '0');
                    PutUnsafe(*os_, '0');
                    PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) >> 4]);
                    PutUnsafe(*os_, hexDigits[static_cast<unsigned char>(c) & 0xF]);
                }
            }
            else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ?
                Transcoder<SourceEncoding, TargetEncoding>::Validate(is, *os_) :
                Transcoder<SourceEncoding, TargetEncoding>::TranscodeUnsafe(is, *os_))))
                return false;
        }
        PutUnsafe(*os_, '\"');
        return true;
    }

    //! Advance through the unescaped portion of the string; returns false at end of input.
    //! Specialized with SIMD for Writer<StringBuffer> below.
    bool ScanWriteUnescapedString(GenericStringStream<SourceEncoding>& is, size_t length) {
        return RAPIDJSON_LIKELY(is.Tell() < length);
    }

    bool WriteStartObject() { os_->Put('{'); return true; }
    bool WriteEndObject()   { os_->Put('}'); return true; }
    bool WriteStartArray()  { os_->Put('['); return true; }
    bool WriteEndArray()    { os_->Put(']'); return true; }

    bool WriteRawValue(const Ch* json, size_t length) {
        PutReserve(*os_, length);
        for (size_t i = 0; i < length; i++) {
            RAPIDJSON_ASSERT(json[i] != '\0');
            PutUnsafe(*os_, json[i]);
        }
        return true;
    }

    //! Emit the separator (',' or ':') required before the next value and
    //! validate structural correctness (keys must be strings, single root).
    void Prefix(Type type) {
        (void)type;
        if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root
            Level* level = level_stack_.template Top<Level>();
            if (level->valueCount > 0) {
                if (level->inArray)
                    os_->Put(','); // add comma if it is not the first element in array
                else  // in object
                    os_->Put((level->valueCount % 2 == 0) ? ',' : ':');
            }
            if (!level->inArray && level->valueCount % 2 == 0)
                RAPIDJSON_ASSERT(type == kStringType);  // if it's in object, then even number should be a name
            level->valueCount++;
        }
        else {
            RAPIDJSON_ASSERT(!hasRoot_);    // Should only has one and only one root.
            hasRoot_ = true;
        }
    }

    OutputStream* os_;
    internal::Stack<StackAllocator> level_stack_;
    int maxDecimalPlaces_;
    bool hasRoot_;

private:
    // Prohibit copy constructor & assignment operator.
    Writer(const Writer&);
    Writer& operator=(const Writer&);
};
|
||||
|
||||
// Full specialization for StringStream to prevent memory copying
|
||||
|
||||
// Full specializations for StringBuffer to prevent memory copying:
// the integer is formatted directly into the buffer's own storage
// (Push the worst-case size, then Pop the unused tail).
template<>
inline bool Writer<StringBuffer>::WriteInt(int i) {
    char *buffer = os_->Push(11);
    const char* end = internal::i32toa(i, buffer);
    os_->Pop(static_cast<size_t>(11 - (end - buffer)));
    return true;
}

template<>
inline bool Writer<StringBuffer>::WriteUint(unsigned u) {
    char *buffer = os_->Push(10);
    const char* end = internal::u32toa(u, buffer);
    os_->Pop(static_cast<size_t>(10 - (end - buffer)));
    return true;
}

template<>
inline bool Writer<StringBuffer>::WriteInt64(int64_t i64) {
    char *buffer = os_->Push(21);
    const char* end = internal::i64toa(i64, buffer);
    os_->Pop(static_cast<size_t>(21 - (end - buffer)));
    return true;
}

template<>
inline bool Writer<StringBuffer>::WriteUint64(uint64_t u) {
    char *buffer = os_->Push(20);
    const char* end = internal::u64toa(u, buffer);
    os_->Pop(static_cast<size_t>(20 - (end - buffer)));
    return true;
}
|
||||
|
||||
// StringBuffer specialization: format the double directly into the buffer
// storage to avoid a per-character copy loop.
template<>
inline bool Writer<StringBuffer>::WriteDouble(double d) {
    if (internal::Double(d).IsNanOrInf()) {
        // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag).
        if (!(kWriteDefaultFlags & kWriteNanAndInfFlag))
            return false;
        if (internal::Double(d).IsNan()) {
            PutReserve(*os_, 3);
            PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N');
            return true;
        }
        if (internal::Double(d).Sign()) {
            PutReserve(*os_, 9);
            PutUnsafe(*os_, '-');
        }
        else
            PutReserve(*os_, 8);
        PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f');
        PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y');
        return true;
    }

    char *buffer = os_->Push(25);
    char* end = internal::dtoa(d, buffer, maxDecimalPlaces_);
    os_->Pop(static_cast<size_t>(25 - (end - buffer)));
    return true;
}
|
||||
|
||||
#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
|
||||
template<>
|
||||
inline bool Writer<StringBuffer>::ScanWriteUnescapedString(StringStream& is, size_t length) {
|
||||
if (length < 16)
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
|
||||
if (!RAPIDJSON_LIKELY(is.Tell() < length))
|
||||
return false;
|
||||
|
||||
const char* p = is.src_;
|
||||
const char* end = is.head_ + length;
|
||||
const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
|
||||
const char* endAligned = reinterpret_cast<const char*>(reinterpret_cast<size_t>(end) & static_cast<size_t>(~15));
|
||||
if (nextAligned > end)
|
||||
return true;
|
||||
|
||||
while (p != nextAligned)
|
||||
if (*p < 0x20 || *p == '\"' || *p == '\\') {
|
||||
is.src_ = p;
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
}
|
||||
else
|
||||
os_->PutUnsafe(*p++);
|
||||
|
||||
// The rest of string using SIMD
|
||||
static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' };
|
||||
static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' };
|
||||
static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 };
|
||||
const __m128i dq = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&dquote[0]));
|
||||
const __m128i bs = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&bslash[0]));
|
||||
const __m128i sp = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&space[0]));
|
||||
|
||||
for (; p != endAligned; p += 16) {
|
||||
const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
|
||||
const __m128i t1 = _mm_cmpeq_epi8(s, dq);
|
||||
const __m128i t2 = _mm_cmpeq_epi8(s, bs);
|
||||
const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19
|
||||
const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3);
|
||||
unsigned short r = static_cast<unsigned short>(_mm_movemask_epi8(x));
|
||||
if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped
|
||||
SizeType len;
|
||||
#ifdef _MSC_VER // Find the index of first escaped
|
||||
unsigned long offset;
|
||||
_BitScanForward(&offset, r);
|
||||
len = offset;
|
||||
#else
|
||||
len = static_cast<SizeType>(__builtin_ffs(r) - 1);
|
||||
#endif
|
||||
char* q = reinterpret_cast<char*>(os_->PushUnsafe(len));
|
||||
for (size_t i = 0; i < len; i++)
|
||||
q[i] = p[i];
|
||||
|
||||
p += len;
|
||||
break;
|
||||
}
|
||||
_mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s);
|
||||
}
|
||||
|
||||
is.src_ = p;
|
||||
return RAPIDJSON_LIKELY(is.Tell() < length);
|
||||
}
|
||||
#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42)
|
||||
|
||||
RAPIDJSON_NAMESPACE_END
|
||||
|
||||
#ifdef _MSC_VER
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#ifdef __clang__
|
||||
RAPIDJSON_DIAG_POP
|
||||
#endif
|
||||
|
||||
#endif // RAPIDJSON_WRITER_H_
|
285
ext/librethinkdbxx/src/term.cc
Normal file
285
ext/librethinkdbxx/src/term.cc
Normal file
@ -0,0 +1,285 @@
|
||||
#include <cstdlib>
|
||||
#include <set>
|
||||
|
||||
#include "term.h"
|
||||
#include "json_p.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
using TT = Protocol::Term::TermType;
|
||||
|
||||
struct {
|
||||
Datum operator() (const Array& array) {
|
||||
Array copy;
|
||||
copy.reserve(array.size());
|
||||
for (const auto& it : array) {
|
||||
copy.emplace_back(it.apply<Datum>(*this));
|
||||
}
|
||||
return Datum(Array{TT::MAKE_ARRAY, std::move(copy)});
|
||||
}
|
||||
Datum operator() (const Object& object) {
|
||||
Object copy;
|
||||
for (const auto& it : object) {
|
||||
copy.emplace(it.first, it.second.apply<Datum>(*this));
|
||||
}
|
||||
return std::move(copy);
|
||||
}
|
||||
template<class T>
|
||||
Datum operator() (T&& atomic) {
|
||||
return Datum(std::forward<T>(atomic));
|
||||
}
|
||||
} datum_to_term;
|
||||
|
||||
// Build a Term from a literal Datum, applying the MAKE_ARRAY wrapping above.
Term::Term(Datum&& datum_) : datum(datum_.apply<Datum>(datum_to_term)) { }
Term::Term(const Datum& datum_) : datum(datum_.apply<Datum>(datum_to_term)) { }
|
||||
|
||||
Term::Term(Term&& orig, OptArgs&& new_optargs) : datum(Nil()) {
|
||||
Datum* cur = orig.datum.get_nth(2);
|
||||
Object optargs;
|
||||
free_vars = std::move(orig.free_vars);
|
||||
if (cur) {
|
||||
optargs = std::move(cur->extract_object());
|
||||
}
|
||||
for (auto& it : new_optargs) {
|
||||
optargs.emplace(std::move(it.first), alpha_rename(std::move(it.second)));
|
||||
}
|
||||
datum = Array{ std::move(orig.datum.extract_nth(0)), std::move(orig.datum.extract_nth(1)), std::move(optargs) };
|
||||
}
|
||||
|
||||
// Convenience factory for the ReQL null value.
Term nil() {
    return Term(Nil());
}
|
||||
|
||||
// Send this term to the server over `conn` and return a Cursor for the results.
// Throws if the term still has unbound (free) variables, since such a query
// cannot be evaluated server-side.
Cursor Term::run(Connection& conn, OptArgs&& opts) {
    if (!free_vars.empty()) {
        throw Error("run: term has free variables");
    }

    return conn.start_query(this, std::move(opts));
}
|
||||
|
||||
// Visitor that rewrites VAR terms according to `subst` (old id -> new id),
// used to alpha-rename variables when merging terms that would otherwise
// capture each other's bindings. The bool flag distinguishes an argument
// list (args == true) from a [type, args, optargs] command array.
struct {
    Datum operator() (Object&& object, const std::map<int, int>& subst, bool) {
        Object ret;
        for (auto& it : object) {
            ret.emplace(std::move(it.first), std::move(it.second).apply<Datum>(*this, subst, false));
        }
        return ret;
    }
    Datum operator() (Array&& array, const std::map<int, int>& subst, bool args) {
        if (!args) {
            // Command array: array[0] is the term type.
            double cmd = array[0].extract_number();
            if (cmd == static_cast<int>(TT::VAR)) {
                // VAR's single argument is the variable id; substitute it if mapped.
                double var = array[1].extract_nth(0).extract_number();
                auto it = subst.find(static_cast<int>(var));
                if (it != subst.end()) {
                    return Array{ TT::VAR, { it->second }};
                }
            }
            if (array.size() == 2) {
                return Array{ std::move(array[0]), std::move(array[1]).apply<Datum>(*this, subst, true) };
            } else {
                // Third element is the optargs object; recurse with args == false.
                return Array{
                    std::move(array[0]),
                    std::move(array[1]).apply<Datum>(*this, subst, true),
                    std::move(array[2]).apply<Datum>(*this, subst, false) };
            }
        } else {
            Array ret;
            for (auto& it : array) {
                ret.emplace_back(std::move(it).apply<Datum>(*this, subst, false));
            }
            return ret;
        }
    }
    template <class T>
    Datum operator() (T&& a, const std::map<int, int>&, bool) {
        return std::move(a);
    }
} alpha_renamer;
|
||||
|
||||
// Pick a fresh variable id that does not collide with any id already in `vars`.
// Collisions are vanishingly rare (ids span 2^30), so retrying is cheap.
static int new_var_id(const std::map<int, int*>& vars) {
    while (true) {
        int id = gen_var_id();
        if (vars.find(id) == vars.end()) {
            return id;
        }
    }
}
|
||||
|
||||
// Merge `term`'s free variables into this term's free-variable map, renaming
// any variable whose id is already bound to a *different* binding here, and
// return `term`'s datum with those renames applied.
Datum Term::alpha_rename(Term&& term) {
    if (free_vars.empty()) {
        // Nothing to collide with: adopt the incoming variables wholesale.
        free_vars = std::move(term.free_vars);
        return std::move(term.datum);
    }

    std::map<int, int> subst;
    for (auto it = term.free_vars.begin(); it != term.free_vars.end(); ++it) {
        auto var = free_vars.find(it->first);
        if (var == free_vars.end()) {
            free_vars.emplace(it->first, it->second);
        } else if (var->second != it->second) {
            // Same id, different binding: give the incoming variable a new id.
            int id = new_var_id(free_vars);
            subst.emplace(it->first, id);
            free_vars.emplace(id, it->second);
        }
    }
    if (subst.empty()) {
        return std::move(term.datum);
    } else {
        return term.datum.apply<Datum>(alpha_renamer, subst, false);
    }
}
|
||||
|
||||
// Produce a pseudo-random ReQL variable id in the range [0, 2^30).
int gen_var_id() {
    // random() yields a non-negative long, so masking the low 30 bits is
    // equivalent to reducing modulo 2^30.
    const long mask = (1L << 30) - 1L;
    return static_cast<int>(::random() & mask);
}
|
||||
|
||||
// Zero-argument top-level ReQL commands, expanded from the C0_IMPL/CO0_IMPL
// macros declared in term.h (CO0_IMPL variants also accept optional named args).
C0_IMPL(db_list, DB_LIST)
C0_IMPL(table_list, TABLE_LIST)
C0_IMPL(random, RANDOM)
C0_IMPL(now, NOW)
C0_IMPL(range, RANGE)
C0_IMPL(error, ERROR)
C0_IMPL(uuid, UUID)
C0_IMPL(literal, LITERAL)
CO0_IMPL(wait, WAIT)
C0_IMPL(rebalance, REBALANCE)
// NOTE(review): `random` is expanded via both C0_IMPL (above) and CO0_IMPL —
// confirm the two macros generate distinct, non-conflicting overloads.
CO0_IMPL(random, RANDOM)
|
||||
|
||||
// Global singleton terms: r.row (the implicit variable) and the open-ended
// range bounds r.minval / r.maxval.
Term row(TT::IMPLICIT_VAR, {});
Term minval(TT::MINVAL, {});
Term maxval(TT::MAXVAL, {});
|
||||
|
||||
// Construct a BINARY pseudo-type term from a copy of the given byte string.
Term binary(const std::string& data) {
    return expr(Binary(data));
}
|
||||
|
||||
// Construct a BINARY pseudo-type term, taking ownership of the caller's
// buffer. The previous version passed the named rvalue `data` as an lvalue,
// which copied the bytes and defeated the purpose of this overload;
// std::move forwards it to Binary's move constructor.
Term binary(std::string&& data) {
    return expr(Binary(std::move(data)));
}
|
||||
|
||||
// Construct a BINARY pseudo-type term from a NUL-terminated C string.
Term binary(const char* data) {
    return expr(Binary(data));
}
|
||||
|
||||
// Visitor that reports whether a term's datum contains r.row (IMPLICIT_VAR)
// outside of any nested FUNC, in which case the term must be wrapped in an
// anonymous function before being sent to the server.
struct {
    bool operator() (const Object& object) {
        for (const auto& it : object) {
            if (it.second.apply<bool>(*this)) {
                return true;
            }
        }
        return false;
    }
    bool operator() (const Array& array) {
        int type = *array[0].get_number();
        if (type == static_cast<int>(TT::IMPLICIT_VAR)) {
            return true;
        }
        if (type == static_cast<int>(TT::FUNC)) {
            // An explicit function introduces its own scope; r.row inside it
            // refers to that function, so no wrapping is needed here.
            return false;
        }
        for (const auto& it : *array[1].get_array()) {
            if (it.apply<bool>(*this)) {
                return true;
            }
        }
        if (array.size() == 3) {
            // Also scan the optargs object.
            return array[2].apply<bool>(*this);
        } else {
            return false;
        }
    }
    template <class T>
    bool operator() (T) {
        return false;
    }
} needs_func_wrap;
|
||||
|
||||
Term Term::func_wrap(Term&& term) {
|
||||
if (term.datum.apply<bool>(needs_func_wrap)) {
|
||||
return Term(TT::FUNC, {expr(Array{new_var_id(term.free_vars)}), std::move(term)});
|
||||
}
|
||||
return term;
|
||||
}
|
||||
|
||||
Term Term::func_wrap(const Term& term) {
|
||||
if (term.datum.apply<bool>(needs_func_wrap)) {
|
||||
// TODO return Term(TT::FUNC, {expr(Array{new_var_id(Term.free_vars)}), Term.copy()});
|
||||
return Term(Nil());
|
||||
}
|
||||
return term;
|
||||
}
|
||||
|
||||
// Build an object from alternating key/value terms. If every key is a unique
// string literal the object is constructed client-side; otherwise (odd arg
// count, non-string key, or duplicate key) fall back to the server-side
// OBJECT command, which reports the error or evaluates dynamic keys.
Term Term::make_object(std::vector<Term>&& args) {
    if (args.size() % 2 != 0) {
        return Term(TT::OBJECT, std::move(args));
    }
    std::set<std::string> keys;
    for (auto it = args.begin(); it != args.end() && it + 1 != args.end(); it += 2) {
        std::string* key = it->datum.get_string();
        if (!key || keys.count(*key)) {
            return Term(TT::OBJECT, std::move(args));
        }
        keys.insert(*key);
    }
    Term ret{Nil()};
    Object object;
    for (auto it = args.begin(); it != args.end(); it += 2) {
        std::string* key = it->datum.get_string();
        object.emplace(std::move(*key), ret.alpha_rename(std::move(*(it + 1))));
    }
    ret.datum = std::move(object);
    return ret;
}
|
||||
|
||||
// Build a BINARY term. A literal string datum is encoded client-side as a
// Binary pseudo-type; anything else is wrapped in the server-side BINARY
// conversion command.
Term Term::make_binary(Term&& term) {
    std::string* string = term.datum.get_string();
    if (string) {
        return expr(Binary(std::move(*string)));
    }
    // std::vector<Term>{term} would copy: initializer_list elements are
    // const and cannot be moved from. Move the argument into the vector
    // explicitly, since the parameter is an rvalue we own.
    std::vector<Term> args;
    args.emplace_back(std::move(term));
    return Term(TT::BINARY, std::move(args));
}
|
||||
|
||||
Term::Term(OptArgs&& optargs) : datum(Nil()) {
|
||||
Object oargs;
|
||||
for (auto& it : optargs) {
|
||||
oargs.emplace(it.first, alpha_rename(std::move(it.second)));
|
||||
}
|
||||
datum = std::move(oargs);
|
||||
}
|
||||
|
||||
// Convenience factory for an empty optional-arguments map.
OptArgs optargs() {
    return OptArgs{};
}
|
||||
|
||||
// Global singleton terms for ReQL's month and weekday constants.
Term january(TT::JANUARY, {});
Term february(TT::FEBRUARY, {});
Term march(TT::MARCH, {});
Term april(TT::APRIL, {});
Term may(TT::MAY, {});
Term june(TT::JUNE, {});
Term july(TT::JULY, {});
Term august(TT::AUGUST, {});
Term september(TT::SEPTEMBER, {});
Term october(TT::OCTOBER, {});
Term november(TT::NOVEMBER, {});
Term december(TT::DECEMBER, {});
Term monday(TT::MONDAY, {});
Term tuesday(TT::TUESDAY, {});
Term wednesday(TT::WEDNESDAY, {});
Term thursday(TT::THURSDAY, {});
Term friday(TT::FRIDAY, {});
Term saturday(TT::SATURDAY, {});
Term sunday(TT::SUNDAY, {});
|
||||
|
||||
// Return a deep copy of this term (relies on the defaulted copy constructor).
Term Term::copy() const {
    return *this;
}
|
||||
|
||||
// Return a copy of the wire-format datum encoding this term.
Datum Term::get_datum() const {
    return datum;
}
|
||||
|
||||
}
|
592
ext/librethinkdbxx/src/term.h
Normal file
592
ext/librethinkdbxx/src/term.h
Normal file
@ -0,0 +1,592 @@
|
||||
#pragma once
|
||||
|
||||
#include "datum.h"
|
||||
#include "connection.h"
|
||||
#include "protocol_defs.h"
|
||||
#include "cursor.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
using TT = Protocol::Term::TermType;
|
||||
|
||||
class Term;
|
||||
class Var;
|
||||
|
||||
// An alias for the Term constructor
|
||||
template <class T>
|
||||
Term expr(T&&);
|
||||
|
||||
int gen_var_id();
|
||||
|
||||
// Can be used as the last argument to some ReQL commands that expect named arguments
|
||||
using OptArgs = std::map<std::string, Term>;
|
||||
|
||||
// Represents a ReQL Term (RethinkDB Query Language)
|
||||
// Designed to be used with r-value *this
|
||||
class Term {
|
||||
public:
|
||||
Term(const Term& other) = default;
|
||||
Term(Term&& other) = default;
|
||||
Term& operator= (const Term& other) = default;
|
||||
Term& operator= (Term&& other) = default;
|
||||
|
||||
explicit Term(Datum&&);
|
||||
explicit Term(const Datum&);
|
||||
explicit Term(OptArgs&&);
|
||||
|
||||
// Create a copy of the Term
|
||||
Term copy() const;
|
||||
|
||||
Term(std::function<Term()> f) : datum(Nil()) { set_function<std::function<Term()>>(f); }
|
||||
Term(std::function<Term(Var)> f) : datum(Nil()) { set_function<std::function<Term(Var)>, 0>(f); }
|
||||
Term(std::function<Term(Var, Var)> f) : datum(Nil()) { set_function<std::function<Term(Var, Var)>, 0, 1>(f); }
|
||||
Term(std::function<Term(Var, Var, Var)> f) : datum(Nil()) { set_function<std::function<Term(Var, Var, Var)>, 0, 1, 2>(f); }
|
||||
Term(Protocol::Term::TermType type, std::vector<Term>&& args) : datum(Array()) {
|
||||
Array dargs;
|
||||
for (auto& it : args) {
|
||||
dargs.emplace_back(alpha_rename(std::move(it)));
|
||||
}
|
||||
datum = Datum(Array{ type, std::move(dargs) });
|
||||
}
|
||||
|
||||
Term(Protocol::Term::TermType type, std::vector<Term>&& args, OptArgs&& optargs) : datum(Array()) {
|
||||
Array dargs;
|
||||
for (auto& it : args) {
|
||||
dargs.emplace_back(alpha_rename(std::move(it)));
|
||||
}
|
||||
Object oargs;
|
||||
for (auto& it : optargs) {
|
||||
oargs.emplace(it.first, alpha_rename(std::move(it.second)));
|
||||
}
|
||||
datum = Array{ type, std::move(dargs), std::move(oargs) };
|
||||
}
|
||||
|
||||
// Used internally to support row
|
||||
static Term func_wrap(Term&&);
|
||||
static Term func_wrap(const Term&);
|
||||
|
||||
|
||||
// These macros are used to define most ReQL commands
|
||||
// * Cn represents a method with n arguments
|
||||
// * COn represents a method with n arguments and optional named arguments
|
||||
// * C_ represents a method with any number of arguments
|
||||
// Each method is implemented twice, once with r-value *this, and once with const *this
|
||||
// The third argument, wrap, allows converting arguments into functions if they contain row
|
||||
|
||||
#define C0(name, type) \
|
||||
Term name() && { return Term(TT::type, std::vector<Term>{ std::move(*this) }); } \
|
||||
Term name() const & { return Term(TT::type, std::vector<Term>{ *this }); }
|
||||
#define C1(name, type, wrap) \
|
||||
template <class T> \
|
||||
Term name(T&& a) && { return Term(TT::type, std::vector<Term>{ std::move(*this), wrap(expr(std::forward<T>(a))) }); } \
|
||||
template <class T> \
|
||||
Term name(T&& a) const & { return Term(TT::type, std::vector<Term>{ *this, wrap(expr(std::forward<T>(a))) }); }
|
||||
#define C2(name, type) \
|
||||
template <class T, class U> Term name(T&& a, U&& b) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
expr(std::forward<T>(a)), expr(std::forward<U>(b)) }); } \
|
||||
template <class T, class U> Term name(T&& a, U&& b) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
expr(std::forward<T>(a)), expr(std::forward<U>(b)) }); }
|
||||
#define C_(name, type, wrap) \
|
||||
template <class ...T> Term name(T&& ...a) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
wrap(expr(std::forward<T>(a)))... }); } \
|
||||
template <class ...T> Term name(T&& ...a) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
wrap(expr(std::forward<T>(a)))... }); }
|
||||
#define CO0(name, type) \
|
||||
Term name(OptArgs&& optarg = {}) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this) }, std::move(optarg)); } \
|
||||
Term name(OptArgs&& optarg = {}) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this }, std::move(optarg)); }
|
||||
#define CO1(name, type, wrap) \
|
||||
template <class T> Term name(T&& a, OptArgs&& optarg = {}) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
wrap(expr(std::forward<T>(a))) }, std::move(optarg)); } \
|
||||
template <class T> Term name(T&& a, OptArgs&& optarg = {}) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
wrap(expr(std::forward<T>(a))) }, std::move(optarg)); }
|
||||
#define CO2(name, type, wrap) \
|
||||
template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))) }, std::move(optarg)); } \
|
||||
template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))) }, std::move(optarg)); }
|
||||
#define CO3(name, type, wrap) \
|
||||
template <class T, class U, class V> Term name(T&& a, U&& b, V&& c, OptArgs&& optarg = {}) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \
|
||||
wrap(expr(std::forward<V>(c))) }, std::move(optarg)); } \
|
||||
template <class T, class U, class V> Term name(T&& a, U&& b, V&& c, OptArgs&& optarg = {}) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \
|
||||
wrap(expr(std::forward<V>(c)))}, std::move(optarg)); }
|
||||
#define CO4(name, type, wrap) \
|
||||
template <class T, class U, class V, class W> Term name(T&& a, U&& b, V&& c, W&& d, OptArgs&& optarg = {}) && { \
|
||||
return Term(TT::type, std::vector<Term>{ std::move(*this), \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \
|
||||
wrap(expr(std::forward<V>(c))), wrap(expr(std::forward<W>(d))) }, std::move(optarg)); } \
|
||||
template <class T, class U, class V, class W> Term name(T&& a, U&& b, V&& c, W&& d, OptArgs&& optarg = {}) const & { \
|
||||
return Term(TT::type, std::vector<Term>{ *this, \
|
||||
wrap(expr(std::forward<T>(a))), wrap(expr(std::forward<U>(b))), \
|
||||
wrap(expr(std::forward<V>(c))), wrap(expr(std::forward<W>(d))) }, std::move(optarg)); }
|
||||
#define CO_(name, type, wrap) \
|
||||
C_(name, type, wrap) \
|
||||
CO0(name, type) \
|
||||
CO1(name, type, wrap) \
|
||||
CO2(name, type, wrap) \
|
||||
CO3(name, type, wrap) \
|
||||
CO4(name, type, wrap)
|
||||
#define no_wrap(x) x
|
||||
|
||||
CO1(table_create, TABLE_CREATE, no_wrap)
|
||||
C1(table_drop, TABLE_DROP, no_wrap)
|
||||
C0(table_list, TABLE_LIST)
|
||||
CO1(index_create, INDEX_CREATE, no_wrap)
|
||||
CO2(index_create, INDEX_CREATE, func_wrap)
|
||||
C1(index_drop, INDEX_DROP, no_wrap)
|
||||
C0(index_list, INDEX_LIST)
|
||||
CO2(index_rename, INDEX_RENAME, no_wrap)
|
||||
C_(index_status, INDEX_STATUS, no_wrap)
|
||||
C_(index_wait, INDEX_WAIT, no_wrap)
|
||||
CO0(changes, CHANGES)
|
||||
CO1(insert, INSERT, no_wrap)
|
||||
CO1(update, UPDATE, func_wrap)
|
||||
CO1(replace, REPLACE, func_wrap)
|
||||
CO0(delete_, DELETE)
|
||||
C0(sync, SYNC)
|
||||
CO1(table, TABLE, no_wrap)
|
||||
C1(get, GET, no_wrap)
|
||||
CO_(get_all, GET_ALL, no_wrap)
|
||||
CO2(between, BETWEEN, no_wrap)
|
||||
CO1(filter, FILTER, func_wrap)
|
||||
C2(inner_join, INNER_JOIN)
|
||||
C2(outer_join, OUTER_JOIN)
|
||||
CO2(eq_join, EQ_JOIN, func_wrap)
|
||||
C0(zip, ZIP)
|
||||
C_(map, MAP, func_wrap)
|
||||
C_(with_fields, WITH_FIELDS, no_wrap)
|
||||
C1(concat_map, CONCAT_MAP, func_wrap)
|
||||
CO_(order_by, ORDER_BY, func_wrap)
|
||||
C1(skip, SKIP, no_wrap)
|
||||
C1(limit, LIMIT, no_wrap)
|
||||
CO1(slice, SLICE, no_wrap)
|
||||
CO2(slice, SLICE, no_wrap)
|
||||
C1(nth, NTH, no_wrap)
|
||||
C1(offsets_of, OFFSETS_OF, func_wrap)
|
||||
C0(is_empty, IS_EMPTY)
|
||||
CO_(union_, UNION, no_wrap)
|
||||
C1(sample, SAMPLE, no_wrap)
|
||||
CO_(group, GROUP, func_wrap)
|
||||
C0(ungroup, UNGROUP)
|
||||
C1(reduce, REDUCE, no_wrap)
|
||||
CO2(fold, FOLD, no_wrap)
|
||||
C0(count, COUNT)
|
||||
C1(count, COUNT, func_wrap)
|
||||
C0(sum, SUM)
|
||||
C1(sum, SUM, func_wrap)
|
||||
C0(avg, AVG)
|
||||
C1(avg, AVG, func_wrap)
|
||||
C1(min, MIN, func_wrap)
|
||||
CO0(min, MIN)
|
||||
C1(max, MAX, func_wrap)
|
||||
CO0(max, MAX)
|
||||
CO0(distinct, DISTINCT)
|
||||
C_(contains, CONTAINS, func_wrap)
|
||||
C_(pluck, PLUCK, no_wrap)
|
||||
C_(without, WITHOUT, no_wrap)
|
||||
C_(merge, MERGE, func_wrap)
|
||||
C1(append, APPEND, no_wrap)
|
||||
C1(prepend, PREPEND, no_wrap)
|
||||
C1(difference, DIFFERENCE, no_wrap)
|
||||
C1(set_insert, SET_INSERT, no_wrap)
|
||||
C1(set_union, SET_UNION, no_wrap)
|
||||
C1(set_intersection, SET_INTERSECTION, no_wrap)
|
||||
C1(set_difference, SET_DIFFERENCE, no_wrap)
|
||||
C1(operator[], BRACKET, no_wrap)
|
||||
C1(get_field, GET_FIELD, no_wrap)
|
||||
C_(has_fields, HAS_FIELDS, no_wrap)
|
||||
C2(insert_at, INSERT_AT)
|
||||
C2(splice_at, SPLICE_AT)
|
||||
C1(delete_at, DELETE_AT, no_wrap)
|
||||
C2(delete_at, DELETE_AT)
|
||||
C2(change_at, CHANGE_AT)
|
||||
C0(keys, KEYS)
|
||||
C1(match, MATCH, no_wrap)
|
||||
C0(split, SPLIT)
|
||||
C1(split, SPLIT, no_wrap)
|
||||
C2(split, SPLIT)
|
||||
C0(upcase, UPCASE)
|
||||
C0(downcase, DOWNCASE)
|
||||
C_(add, ADD, no_wrap)
|
||||
C1(operator+, ADD, no_wrap)
|
||||
C_(sub, SUB, no_wrap)
|
||||
C1(operator-, SUB, no_wrap)
|
||||
C_(mul, MUL, no_wrap)
|
||||
C1(operator*, MUL, no_wrap)
|
||||
C_(div, DIV, no_wrap)
|
||||
C1(operator/, DIV, no_wrap)
|
||||
C1(mod, MOD, no_wrap)
|
||||
C1(operator%, MOD, no_wrap)
|
||||
C_(and_, AND, no_wrap)
|
||||
C1(operator&&, AND, no_wrap)
|
||||
C_(or_, OR, no_wrap)
|
||||
C1(operator||, OR, no_wrap)
|
||||
C1(eq, EQ, no_wrap)
|
||||
C1(operator==, EQ, no_wrap)
|
||||
C1(ne, NE, no_wrap)
|
||||
C1(operator!=, NE, no_wrap)
|
||||
C1(gt, GT, no_wrap)
|
||||
C1(operator>, GT, no_wrap)
|
||||
C1(ge, GE, no_wrap)
|
||||
C1(operator>=, GE, no_wrap)
|
||||
C1(lt, LT, no_wrap)
|
||||
C1(operator<, LT, no_wrap)
|
||||
C1(le, LE, no_wrap)
|
||||
C1(operator<=, LE, no_wrap)
|
||||
C0(not_, NOT)
|
||||
C0(operator!, NOT)
|
||||
C1(in_timezone, IN_TIMEZONE, no_wrap)
|
||||
C0(timezone, TIMEZONE)
|
||||
CO2(during, DURING, no_wrap)
|
||||
C0(date, DATE)
|
||||
C0(time_of_day, TIME_OF_DAY)
|
||||
C0(year, YEAR)
|
||||
C0(month, MONTH)
|
||||
C0(day, DAY)
|
||||
C0(day_of_week, DAY_OF_WEEK)
|
||||
C0(day_of_year, DAY_OF_YEAR)
|
||||
C0(hours, HOURS)
|
||||
C0(minutes, MINUTES)
|
||||
C0(seconds, SECONDS)
|
||||
C0(to_iso8601, TO_ISO8601)
|
||||
C0(to_epoch_time, TO_EPOCH_TIME)
|
||||
C1(for_each, FOR_EACH, func_wrap)
|
||||
C1(default_, DEFAULT, no_wrap)
|
||||
CO1(js, JAVASCRIPT, no_wrap)
|
||||
C1(coerce_to, COERCE_TO, no_wrap)
|
||||
C0(type_of, TYPE_OF)
|
||||
C0(info, INFO)
|
||||
C0(to_json, TO_JSON_STRING)
|
||||
C0(to_json_string, TO_JSON_STRING)
|
||||
C1(distance, DISTANCE, no_wrap)
|
||||
C0(fill, FILL)
|
||||
C0(to_geojson, TO_GEOJSON)
|
||||
CO1(get_intersecting, GET_INTERSECTING, no_wrap)
|
||||
CO1(get_nearest, GET_NEAREST, no_wrap)
|
||||
C1(includes, INCLUDES, no_wrap)
|
||||
C1(intersects, INTERSECTS, no_wrap)
|
||||
C1(polygon_sub, POLYGON_SUB, no_wrap)
|
||||
C0(config, CONFIG)
|
||||
C0(rebalance, REBALANCE)
|
||||
CO0(reconfigure, RECONFIGURE)
|
||||
C0(status, STATUS)
|
||||
CO0(wait, WAIT)
|
||||
C0(floor, FLOOR)
|
||||
C0(ceil, CEIL)
|
||||
C0(round, ROUND)
|
||||
C0(values, VALUES)
|
||||
|
||||
// The expansion of this macro fails to compile on some versions of GCC and Clang:
|
||||
// C_(operator(), FUNCALL, no_wrap)
|
||||
// The std::enable_if makes the error go away
|
||||
|
||||
// $doc(do)
|
||||
|
||||
template <class T, class ...U>
|
||||
typename std::enable_if<!std::is_same<T, Var>::value, Term>::type
|
||||
operator() (T&& a, U&& ...b) && {
|
||||
return Term(TT::FUNCALL, std::vector<Term>{
|
||||
std::move(*this),
|
||||
expr(std::forward<T>(a)),
|
||||
expr(std::forward<U>(b))... });
|
||||
}
|
||||
template <class T, class ...U>
|
||||
typename std::enable_if<!std::is_same<T, Var>::value, Term>::type
|
||||
operator() (T&& a, U&& ...b) const & {
|
||||
return Term(TT::FUNCALL, std::vector<Term>{
|
||||
*this,
|
||||
expr(std::forward<T>(a)),
|
||||
expr(std::forward<U>(b))... });
|
||||
}
|
||||
|
||||
#undef C0
|
||||
#undef C1
|
||||
#undef C2
|
||||
#undef C_
|
||||
#undef CO0
|
||||
#undef CO1
|
||||
#undef CO2
|
||||
|
||||
// Send the term to the server and return the results.
|
||||
// Errors returned by the server are thrown.
|
||||
Cursor run(Connection&, OptArgs&& args = {});
|
||||
|
||||
// $doc(do)
|
||||
template <class ...T>
|
||||
Term do_(T&& ...a) && {
|
||||
auto list = { std::move(*this), Term::func_wrap(expr(std::forward<T>(a)))... };
|
||||
std::vector<Term> args;
|
||||
args.reserve(list.size() + 1);
|
||||
args.emplace_back(func_wrap(std::move(*(list.end()-1))));
|
||||
for (auto it = list.begin(); it + 1 != list.end(); ++it) {
|
||||
args.emplace_back(std::move(*it));
|
||||
}
|
||||
return Term(TT::FUNCALL, std::move(args));
|
||||
}
|
||||
|
||||
// Adds optargs to an already built term
|
||||
Term opt(OptArgs&& optargs) && {
|
||||
return Term(std::move(*this), std::move(optargs));
|
||||
}
|
||||
|
||||
// Used internally to implement object()
|
||||
static Term make_object(std::vector<Term>&&);
|
||||
|
||||
// Used internally to implement array()
|
||||
static Term make_binary(Term&&);
|
||||
|
||||
Datum get_datum() const;
|
||||
|
||||
private:
|
||||
friend class Var;
|
||||
friend class Connection;
|
||||
friend struct Query;
|
||||
|
||||
template <int _>
|
||||
Var mkvar(std::vector<int>& vars);
|
||||
|
||||
template <class F, int ...N>
|
||||
void set_function(F);
|
||||
|
||||
Datum alpha_rename(Term&&);
|
||||
|
||||
Term(Term&& orig, OptArgs&& optargs);
|
||||
|
||||
std::map<int, int*> free_vars;
|
||||
Datum datum;
|
||||
};
|
||||
|
||||
// A term representing null
|
||||
Term nil();
|
||||
|
||||
template <class T>
Term expr(T&& a) {
    // Wrap an arbitrary value into a Term via Term's converting
    // constructors (datum, optargs, or lambda overloads above).
    return Term(std::forward<T>(a));
}
|
||||
|
||||
// Represents a ReQL variable.
|
||||
// This type is passed to functions used in ReQL queries.
|
||||
class Var {
public:
    // Convert to a term
    // Builds a VAR term referencing this variable's id and records the id
    // in the term's free_vars map so an enclosing FUNC term can bind it.
    Term operator*() const {
        Term term(TT::VAR, std::vector<Term>{expr(*id)});
        term.free_vars = {{*id, id}};
        return term;
    }

    // Non-owning pointer: `id_` points into the ids vector managed by
    // Term::set_function and must outlive this Var.
    Var(int* id_) : id(id_) { }
private:
    int* id;
};
|
||||
|
||||
template <int N>
Var Term::mkvar(std::vector<int>& vars) {
    // Allocate a fresh variable id and hand out a pointer to its slot.
    int id = gen_var_id();
    vars.push_back(id);
    // NOTE(review): this pointer aims into `vars`' storage; it stays valid
    // only because set_function() reserves capacity for all ids before the
    // first mkvar call, so later push_backs cannot reallocate.
    return Var(&*vars.rbegin());
}
|
||||
|
||||
template <class F, int ...N>
void Term::set_function(F f) {
    // One variable id per argument index in N...; reserve first so the
    // pointers handed out by mkvar (into `vars`) remain stable.
    std::vector<int> vars;
    vars.reserve(sizeof...(N));
    std::vector<Var> args = { mkvar<N>(vars)... };
    // Invoke the user callback with the variables to build the body term.
    Term body = f(args[N] ...);

    // Partition the body's free variables: ids whose slot pointer lies
    // inside our `vars` array are bound by this function; any others
    // propagate outward into this term's free_vars.
    // NOTE(review): for a zero-argument function `vars` is empty, making
    // these begin()/end()-1 dereferences questionable — confirm callers
    // never hit that path.
    int* low = &*vars.begin();
    int* high = &*(vars.end() - 1);
    for (auto it = body.free_vars.begin(); it != body.free_vars.end(); ) {
        if (it->second >= low && it->second <= high) {
            if (it->first != *it->second) {
                throw Error("Internal error: variable index mis-match");
            }
            ++it;
        } else {
            free_vars.emplace(*it);
            ++it;
        }
    }
    // Encode as FUNC(MAKE_ARRAY(var ids), body).
    datum = Array{TT::FUNC, Array{Array{TT::MAKE_ARRAY, vars}, body.datum}};
}
|
||||
|
||||
// These macros are similar to those defined above, but for top-level ReQL operations
|
||||
|
||||
#define C0(name) Term name();
|
||||
#define C0_IMPL(name, type) Term name() { return Term(TT::type, std::vector<Term>{}); }
|
||||
#define CO0(name) Term name(OptArgs&& optargs = {});
|
||||
#define CO0_IMPL(name, type) Term name(OptArgs&& optargs) { return Term(TT::type, std::vector<Term>{}, std::move(optargs)); }
|
||||
#define C1(name, type, wrap) template <class T> Term name(T&& a) { \
|
||||
return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a))) }); }
|
||||
#define C2(name, type) template <class T, class U> Term name(T&& a, U&& b) { \
|
||||
return Term(TT::type, std::vector<Term>{ expr(std::forward<T>(a)), expr(std::forward<U>(b)) }); }
|
||||
#define C3(name, type) template <class A, class B, class C> \
|
||||
Term name(A&& a, B&& b, C&& c) { return Term(TT::type, std::vector<Term>{ \
|
||||
expr(std::forward<A>(a)), expr(std::forward<B>(b)), expr(std::forward<C>(c)) }); }
|
||||
#define C4(name, type) template <class A, class B, class C, class D> \
|
||||
Term name(A&& a, B&& b, C&& c, D&& d) { return Term(TT::type, std::vector<Term>{ \
|
||||
expr(std::forward<A>(a)), expr(std::forward<B>(b)), \
|
||||
expr(std::forward<C>(c)), expr(std::forward<D>(d))}); }
|
||||
#define C7(name, type) template <class A, class B, class C, class D, class E, class F, class G> \
|
||||
Term name(A&& a, B&& b, C&& c, D&& d, E&& e, F&& f, G&& g) { return Term(TT::type, std::vector<Term>{ \
|
||||
expr(std::forward<A>(a)), expr(std::forward<B>(b)), expr(std::forward<C>(c)), \
|
||||
expr(std::forward<D>(d)), expr(std::forward<E>(e)), expr(std::forward<F>(f)), \
|
||||
expr(std::forward<G>(g))}); }
|
||||
#define C_(name, type, wrap) template <class ...T> Term name(T&& ...a) { \
|
||||
return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a)))... }); }
|
||||
#define CO1(name, type, wrap) template <class T> Term name(T&& a, OptArgs&& optarg = {}) { \
|
||||
return Term(TT::type, std::vector<Term>{ wrap(expr(std::forward<T>(a)))}, std::move(optarg)); }
|
||||
#define CO2(name, type) template <class T, class U> Term name(T&& a, U&& b, OptArgs&& optarg = {}) { \
|
||||
return Term(TT::type, std::vector<Term>{ expr(std::forward<T>(a)), expr(std::forward<U>(b))}, std::move(optarg)); }
|
||||
#define func_wrap Term::func_wrap
|
||||
|
||||
C1(db_create, DB_CREATE, no_wrap)
|
||||
C1(db_drop, DB_DROP, no_wrap)
|
||||
C0(db_list)
|
||||
CO1(table_create, TABLE_CREATE, no_wrap)
|
||||
C1(table_drop, TABLE_DROP, no_wrap)
|
||||
C0(table_list)
|
||||
C1(db, DB, no_wrap)
|
||||
CO1(table, TABLE, no_wrap)
|
||||
C_(add, ADD, no_wrap)
|
||||
C2(sub, SUB)
|
||||
C_(mul, MUL, no_wrap)
|
||||
C_(div, DIV, no_wrap)
|
||||
C2(mod, MOD)
|
||||
C_(and_, AND, no_wrap)
|
||||
C_(or_, OR, no_wrap)
|
||||
C2(eq, EQ)
|
||||
C2(ne, NE)
|
||||
C2(gt, GT)
|
||||
C2(ge, GE)
|
||||
C2(lt, LT)
|
||||
C2(le, LE)
|
||||
C1(not_, NOT, no_wrap)
|
||||
CO0(random)
|
||||
CO1(random, RANDOM, no_wrap)
|
||||
CO2(random, RANDOM)
|
||||
C0(now)
|
||||
C4(time, TIME)
|
||||
C7(time, TIME)
|
||||
C1(epoch_time, EPOCH_TIME, no_wrap)
|
||||
CO1(iso8601, ISO8601, no_wrap)
|
||||
CO1(js, JAVASCRIPT, no_wrap)
|
||||
C1(args, ARGS, no_wrap)
|
||||
C_(branch, BRANCH, no_wrap)
|
||||
C0(range)
|
||||
C1(range, RANGE, no_wrap)
|
||||
C2(range, RANGE)
|
||||
C0(error)
|
||||
C1(error, ERROR, no_wrap)
|
||||
C1(json, JSON, no_wrap)
|
||||
CO1(http, HTTP, func_wrap)
|
||||
C0(uuid)
|
||||
C1(uuid, UUID, no_wrap)
|
||||
CO2(circle, CIRCLE)
|
||||
C1(geojson, GEOJSON, no_wrap)
|
||||
C_(line, LINE, no_wrap)
|
||||
C2(point, POINT)
|
||||
C_(polygon, POLYGON, no_wrap)
|
||||
C_(array, MAKE_ARRAY, no_wrap)
|
||||
C1(desc, DESC, func_wrap)
|
||||
C1(asc, ASC, func_wrap)
|
||||
C0(literal)
|
||||
C1(literal, LITERAL, no_wrap)
|
||||
C1(type_of, TYPE_OF, no_wrap)
|
||||
C_(map, MAP, func_wrap)
|
||||
C1(floor, FLOOR, no_wrap)
|
||||
C1(ceil, CEIL, no_wrap)
|
||||
C1(round, ROUND, no_wrap)
|
||||
C_(union_, UNION, no_wrap)
|
||||
C_(group, GROUP, func_wrap)
|
||||
C1(count, COUNT, no_wrap)
|
||||
C_(count, COUNT, func_wrap)
|
||||
C1(sum, SUM, no_wrap)
|
||||
C_(sum, SUM, func_wrap)
|
||||
C1(avg, AVG, no_wrap)
|
||||
C_(avg, AVG, func_wrap)
|
||||
C1(min, MIN, no_wrap)
|
||||
C_(min, MIN, func_wrap)
|
||||
C1(max, MAX, no_wrap)
|
||||
C_(max, MAX, func_wrap)
|
||||
C1(distinct, DISTINCT, no_wrap)
|
||||
C1(contains, CONTAINS, no_wrap)
|
||||
C_(contains, CONTAINS, func_wrap)
|
||||
|
||||
#undef C0
|
||||
#undef C1
|
||||
#undef C2
|
||||
#undef C3
|
||||
#undef C4
|
||||
#undef C7
|
||||
#undef C_
|
||||
#undef CO1
|
||||
#undef CO2
|
||||
#undef func_wrap
|
||||
|
||||
// $doc(do)
|
||||
template <class R, class ...T>
Term do_(R&& a, T&& ...b) {
    // Free-function form of do_: wraps the first argument as a Term and
    // forwards the rest to Term::do_ (which moves the function argument
    // to the front, per the FUNCALL wire format).
    return expr(std::forward<R>(a)).do_(std::forward<T>(b)...);
}
|
||||
|
||||
// $doc(object)
|
||||
template <class ...T>
Term object(T&& ...a) {
    // Build an object term from the given arguments.
    // NOTE(review): pairing of keys and values is delegated to
    // Term::make_object — confirm the expected argument layout there.
    return Term::make_object(std::vector<Term>{ expr(std::forward<T>(a))... });
}
|
||||
|
||||
// $doc(binary)
|
||||
template <class T>
Term binary(T&& a) {
    // Wrap a value as a binary term (see Term::make_binary).
    return Term::make_binary(expr(std::forward<T>(a)));
}
|
||||
|
||||
// Construct an empty optarg
|
||||
OptArgs optargs();
|
||||
|
||||
// Construct an optarg made out of pairs of arguments
|
||||
// For example: optargs("k1", v1, "k2", v2)
|
||||
template <class V, class ...T>
OptArgs optargs(const char* key, V&& val, T&& ...rest) {
    // Recurse over the tail first, then insert this pair. Because the
    // tail is built first and map::emplace does not overwrite, a
    // duplicated key keeps the entry from the later arguments.
    OptArgs opts = optargs(rest...);
    opts.emplace(key, expr(std::forward<V>(val)));
    return opts;
}
|
||||
|
||||
extern Term row;
|
||||
extern Term maxval;
|
||||
extern Term minval;
|
||||
extern Term january;
|
||||
extern Term february;
|
||||
extern Term march;
|
||||
extern Term april;
|
||||
extern Term may;
|
||||
extern Term june;
|
||||
extern Term july;
|
||||
extern Term august;
|
||||
extern Term september;
|
||||
extern Term october;
|
||||
extern Term november;
|
||||
extern Term december;
|
||||
extern Term monday;
|
||||
extern Term tuesday;
|
||||
extern Term wednesday;
|
||||
extern Term thursday;
|
||||
extern Term friday;
|
||||
extern Term saturday;
|
||||
extern Term sunday;
|
||||
}
|
47
ext/librethinkdbxx/src/types.cc
Normal file
47
ext/librethinkdbxx/src/types.cc
Normal file
@ -0,0 +1,47 @@
|
||||
#include <cstdlib>
|
||||
|
||||
#include "types.h"
|
||||
#include "error.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
// Parse a UTC offset of the form "[+|-]HH:MM" into seconds relative to
// UTC. Returns false on malformed input. Characters after the fifth
// position are ignored, matching the parser's historical behaviour.
bool Time::parse_utc_offset(const std::string& string, double* offset) {
    const char* s = string.c_str();
    double sign = 1;
    if (*s == '-') {
        sign = -1;
        ++s;
    } else if (*s == '+') {
        ++s;
    } else if (*s == 0) {
        return false;
    }
    // Expect exactly "HH:MM": a digit at every position except index 2.
    for (int i = 0; i < 5; ++i) {
        if (s[i] == 0) return false;
        if (i == 2) continue;
        if (s[i] < '0' || s[i] > '9') return false;
    }
    if (s[2] != ':') return false;
    *offset = sign * ((s[0] - '0') * 36000 + (s[1] - '0') * 3600 + (s[3] - '0') * 600 + (s[4] - '0') * 60);
    return true;
}
|
||||
|
||||
double Time::parse_utc_offset(const std::string& string) {
|
||||
double out;
|
||||
if (!parse_utc_offset(string, &out)) {
|
||||
throw Error("invalid utc offset `%s'", string.c_str());
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
// Format a UTC offset (seconds relative to UTC) as "+HH:MM" / "-HH:MM".
// Fixes two defects in the previous version:
//  * the sign was derived from `int hour = offset / 3600`, which truncates
//    -1800 to 0, so a negative half-hour offset printed as "+00:30";
//  * `buf[n] = 0` used snprintf's would-be length and could write past the
//    buffer when the formatted text was truncated.
std::string Time::utc_offset_string(double offset) {
    const char sign = (offset < 0) ? '-' : '+';
    const int total_minutes = std::abs(static_cast<int>(offset / 60));
    char buf[8];
    // snprintf bounds and null-terminates the output itself.
    snprintf(buf, sizeof(buf), "%c%02d:%02d", sign, total_minutes / 60, total_minutes % 60);
    return std::string(buf);
}
|
||||
|
||||
}
|
53
ext/librethinkdbxx/src/types.h
Normal file
53
ext/librethinkdbxx/src/types.h
Normal file
@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <ctime>
|
||||
#include <string>
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
class Datum;
|
||||
|
||||
// Represents a null datum
|
||||
struct Nil { };
|
||||
|
||||
using Array = std::vector<Datum>;
|
||||
using Object = std::map<std::string, Datum>;
|
||||
|
||||
// Represents a string of bytes. Plain std::strings are passed on to the server as utf-8 strings
|
||||
struct Binary {
    // Two binaries compare equal iff their raw bytes are identical.
    bool operator== (const Binary& other) const {
        return data == other.data;
    }

    Binary(const std::string& data_) : data(data_) { }
    Binary(std::string&& data_) : data(std::move(data_)) { }
    // Raw byte payload; unlike plain std::string it is not treated as
    // UTF-8 text by the driver (see comment above this struct).
    std::string data;
};
|
||||
|
||||
// Represents a point in time as
|
||||
// * A floating amount of seconds since the UNIX epoch
|
||||
// * And a timezone offset represented as seconds relative to UTC
|
||||
struct Time {
    // epoch_time_: seconds since the UNIX epoch (fractional allowed).
    // utc_offset_: timezone offset in seconds relative to UTC (default 0).
    Time(double epoch_time_, double utc_offset_ = 0) :
        epoch_time(epoch_time_), utc_offset(utc_offset_) { }

    // Current wall-clock time with a zero (UTC) offset.
    static Time now() {
        return Time(time(NULL));
    }

    // Parse "[+|-]HH:MM" into seconds. The bool form reports failure via
    // its return value; the double form throws Error on invalid input
    // (implementations in types.cc).
    static bool parse_utc_offset(const std::string&, double*);
    static double parse_utc_offset(const std::string&);
    // Format an offset in seconds back into "+HH:MM" / "-HH:MM" form.
    static std::string utc_offset_string(double);

    double epoch_time;  // seconds since the UNIX epoch
    double utc_offset;  // seconds relative to UTC
};
|
||||
|
||||
// Not implemented
|
||||
class Point;
|
||||
class Line;
|
||||
class Polygon;
|
||||
|
||||
}
|
153
ext/librethinkdbxx/src/utils.cc
Normal file
153
ext/librethinkdbxx/src/utils.cc
Normal file
@ -0,0 +1,153 @@
|
||||
#include "utils.h"
|
||||
#include "error.h"
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
size_t utf8_encode(unsigned int code, char* buf) {
|
||||
if (!(code & ~0x7F)) {
|
||||
buf[0] = code;
|
||||
return 1;
|
||||
} else if (!(code & ~0x7FF)) {
|
||||
buf[0] = 0xC0 | (code >> 6);
|
||||
buf[1] = 0x80 | (code & 0x3F);
|
||||
return 2;
|
||||
} else if (!(code & ~0xFFFF)) {
|
||||
buf[0] = 0xE0 | (code >> 12);
|
||||
buf[1] = 0x80 | ((code >> 6) & 0x3F);
|
||||
buf[2] = 0x80 | (code & 0x3F);
|
||||
return 3;
|
||||
} else if (!(code & ~0x1FFFFF)) {
|
||||
buf[0] = 0xF0 | (code >> 18);
|
||||
buf[1] = 0x80 | ((code >> 12) & 0x3F);
|
||||
buf[2] = 0x80 | ((code >> 6) & 0x3F);
|
||||
buf[3] = 0x80 | (code & 0x3F);
|
||||
return 4;
|
||||
} else if (!(code & ~0x3FFFFFF)) {
|
||||
buf[0] = 0xF8 | (code >> 24);
|
||||
buf[1] = 0x80 | ((code >> 18) & 0x3F);
|
||||
buf[2] = 0x80 | ((code >> 12) & 0x3F);
|
||||
buf[3] = 0x80 | ((code >> 6) & 0x3F);
|
||||
buf[4] = 0x80 | (code & 0x3F);
|
||||
return 5;
|
||||
} else if (!(code & ~0x7FFFFFFF)) {
|
||||
buf[0] = 0xFC | (code >> 30);
|
||||
buf[1] = 0x80 | ((code >> 24) & 0x3F);
|
||||
buf[2] = 0x80 | ((code >> 18) & 0x3F);
|
||||
buf[3] = 0x80 | ((code >> 12) & 0x3F);
|
||||
buf[4] = 0x80 | ((code >> 6) & 0x3F);
|
||||
buf[5] = 0x80 | (code & 0x3F);
|
||||
return 6;
|
||||
} else {
|
||||
throw Error("Invalid unicode codepoint %ud", code);
|
||||
}
|
||||
}
|
||||
|
||||
// Map one base64 alphabet character to its 6-bit value. Returns false
// (leaving *out untouched) for any character outside the alphabet,
// including padding '=' and whitespace.
bool base64_decode(char c, int* out) {
    if ('A' <= c && c <= 'Z') {
        *out = c - 'A';
        return true;
    }
    if ('a' <= c && c <= 'z') {
        *out = c - 'a' + 26;
        return true;
    }
    if ('0' <= c && c <= '9') {
        *out = c - '0' + 52;
        return true;
    }
    if (c == '+') {
        *out = 62;
        return true;
    }
    if (c == '/') {
        *out = 63;
        return true;
    }
    return false;
}
|
||||
|
||||
// Decode a base64 string into `out`. Characters outside the base64
// alphabet ('=', whitespace, etc.) are silently skipped. Returns false
// only when the final group holds exactly one alphabet character, which
// cannot represent a whole byte.
bool base64_decode(const std::string& in, std::string& out) {
    out.clear();
    out.reserve(in.size() * 3 / 4);
    auto read = in.begin();
    while (true) {
        int c[4];
        int end = 4;  // number of alphabet characters found in this group
        for (int i = 0; i < 4; i++) {
            while (true) {
                if (read == in.end()) {
                    c[i] = 0;
                    end = i;
                    i = 3;  // also terminates the outer for-loop
                    break;
                } else if (base64_decode(*read, &c[i])) {
                    ++read;
                    break;
                } else {
                    ++read;  // skip non-alphabet characters
                }
            }
        }
        if (end == 1) return false;
        // Reassemble 4 x 6 bits into up to 3 output bytes.
        int val = c[0] << 18 | c[1] << 12 | c[2] << 6 | c[3];
        if (end > 1) out.append(1, val >> 16);
        if (end > 2) out.append(1, val >> 8 & 0xFF);
        if (end > 3) out.append(1, val & 0xFF);
        if (end != 4) break;  // short final group: input exhausted
    }
    return true;
}
|
||||
|
||||
char base64_encode(unsigned int c) {
|
||||
if (c < 26) {
|
||||
return 'A' + c;
|
||||
} else if (c < 52) {
|
||||
return 'a' + c - 26;
|
||||
} else if (c < 62) {
|
||||
return '0' + c - 52;
|
||||
} else if (c == 62) {
|
||||
return '+';
|
||||
} else if (c == 63) {
|
||||
return '/';
|
||||
} else {
|
||||
throw Error("unreachable: base64 encoding %d", c);
|
||||
}
|
||||
}
|
||||
|
||||
// Encode one group of up to three input bytes (c[0..n-1]) and append the
// resulting base64 characters, padded with '='/'==' for short groups.
// When n < 3 the expressions below still read c[n]; the string overload
// guarantees that slot is zero-filled, and callers must do the same.
void base64_encode(unsigned int* c, int n, std::string& out) {
    if (n == 0) {
        return;
    }
    out.append(1, base64_encode(c[0] >> 2));
    out.append(1, base64_encode((c[0] & 0x3) << 4 | c[1] >> 4));
    if (n == 1) {
        out.append("==");
        return;
    }
    out.append(1, base64_encode((c[1] & 0xF) << 2 | c[2] >> 6));
    if (n == 2) {
        out.append("=");
        return;
    }
    out.append(1, base64_encode(c[2] & 0x3F));
}
|
||||
|
||||
// Base64-encode a byte string, inserting a newline after every 16 groups
// (48 input bytes -> 64 output characters), MIME-style line wrapping.
std::string base64_encode(const std::string& in) {
    std::string out;
    // 4/3 expansion plus one newline per 48 input bytes, plus padding.
    out.reserve(in.size() * 4 / 3 + in.size() / 48 + 3);
    auto read = in.begin();
    while (true) {
        for (int group = 0; group < 16; ++group) {
            unsigned int c[3];
            int i = 0;
            for (; i < 3; ++i) {
                if (read == in.end()) {
                    // Zero-fill the first missing byte: the group encoder
                    // reads c[i] (but nothing beyond it) when i < 3.
                    c[i] = 0;
                    break;
                } else {
                    c[i] = static_cast<unsigned char>(*read++);
                }
            }
            base64_encode(c, i, out);
            if (i != 3) {
                // Short (or empty) final group: input is exhausted.
                return out;
            }
        }
        out.append("\n");
    }
}
|
||||
|
||||
}
|
19
ext/librethinkdbxx/src/utils.h
Normal file
19
ext/librethinkdbxx/src/utils.h
Normal file
@ -0,0 +1,19 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <string>
|
||||
|
||||
namespace RethinkDB {
|
||||
|
||||
// The size of the longest UTF-8 encoded unicode codepoint
|
||||
const size_t max_utf8_encoded_size = 6;
|
||||
|
||||
// Decode a base64 string. Returns false on failure.
|
||||
bool base64_decode(const std::string& in, std::string& out);
|
||||
std::string base64_encode(const std::string&);
|
||||
|
||||
// Encodes a single unicode codepoint into UTF-8. Returns the number of bytes written.
|
||||
// Does not add a trailing null byte
|
||||
size_t utf8_encode(unsigned int, char*);
|
||||
|
||||
}
|
58
ext/librethinkdbxx/test/bench.cc
Normal file
58
ext/librethinkdbxx/test/bench.cc
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
#include <signal.h>
|
||||
#include <ctime>
|
||||
#include <chrono>
|
||||
#include <rethinkdb.h>
|
||||
|
||||
namespace R = RethinkDB;
|
||||
std::unique_ptr<R::Connection> conn;
|
||||
|
||||
int main() {
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
try {
|
||||
conn = R::connect();
|
||||
} catch(const R::Error& error) {
|
||||
printf("FAILURE: could not connect to localhost:28015: %s\n", error.message.c_str());
|
||||
return 1;
|
||||
}
|
||||
|
||||
try {
|
||||
printf("running test...\n");
|
||||
auto start = std::chrono::steady_clock::now();
|
||||
R::Datum d = R::range(1, 1000000)
|
||||
.map([]() { return R::object("test", "hello", "data", "world"); })
|
||||
.run(*conn);
|
||||
auto end = std::chrono::steady_clock::now();
|
||||
auto diff = end - start;
|
||||
|
||||
printf("result size: %d\n", (int)d.get_array()->size());
|
||||
printf("completed in %f ms\n", std::chrono::duration<double, std::milli>(diff).count());
|
||||
} catch (const R::Error& error) {
|
||||
printf("FAILURE: uncaught exception: %s\n", error.message.c_str());
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
#include <iostream>
|
||||
#include <rethinkdb.h>
|
||||
|
||||
namespace R = RethinkDB;
|
||||
|
||||
// Smoke test: connect to a local RethinkDB server and print the name of
// every database it hosts. Exits non-zero if the connection fails.
int main() {
    auto conn = R::connect();
    if (!conn) {
        std::cerr << "Could not connect to server\n";
        return 1;
    }

    std::cout << "Connected" << std::endl;
    // db_list() yields a cursor of database-name datums.
    R::Cursor databases = R::db_list().run(*conn);
    for (R::Datum const& db : databases) {
        std::cout << *db.get_string() << '\n';
    }

    return 0;
}
|
||||
|
||||
|
11
ext/librethinkdbxx/test/gen_index_cxx.py
Normal file
11
ext/librethinkdbxx/test/gen_index_cxx.py
Normal file
@ -0,0 +1,11 @@
|
||||
"""Generate the upstream-test index translation unit.

For each test source path given on the command line, emits an extern
declaration and a call (preceded by clean_slate()) inside a single
run_upstream_tests() function.
"""
from sys import argv
from re import sub

lines = ['#include "testlib.h"', "void run_upstream_tests() {"]
for path in argv[1:]:
    # "upstream/foo.yaml.cc" -> "upstream_foo" (strip extension, / -> _)
    name = sub('/', '_', path.split('.')[0])
    lines.append(" extern void %s();" % name)
    lines.append(" clean_slate();")
    lines.append(" %s();" % name)
lines.append("}")
print("\n".join(lines))
|
114
ext/librethinkdbxx/test/test.cc
Normal file
114
ext/librethinkdbxx/test/test.cc
Normal file
@ -0,0 +1,114 @@
|
||||
#include <signal.h>
|
||||
|
||||
#include <ctime>
|
||||
|
||||
#include "testlib.h"
|
||||
|
||||
extern void run_upstream_tests();
|
||||
|
||||
void test_json(const char* string, const char* ret = "") {
|
||||
TEST_EQ(R::Datum::from_json(string).as_json().c_str(), ret[0] ? ret : string);
|
||||
}
|
||||
|
||||
void test_json_parse_print() {
|
||||
enter_section("json");
|
||||
test_json("-0.0", "-0.0");
|
||||
test_json("null");
|
||||
test_json("1.2");
|
||||
test_json("1.2e20", "1.2e+20");
|
||||
test_json("true");
|
||||
test_json("false");
|
||||
test_json("\"\"");
|
||||
test_json("\"\\u1234\"", "\"\u1234\"");
|
||||
test_json("\"\\\"\"");
|
||||
test_json("\"foobar\"");
|
||||
test_json("[]");
|
||||
test_json("[1]");
|
||||
test_json("[1,2,3,4]");
|
||||
test_json("{}");
|
||||
test_json("{\"a\":1}");
|
||||
test_json("{\"a\":1,\"b\":2,\"c\":3}");
|
||||
exit_section();
|
||||
}
|
||||
|
||||
void test_reql() {
|
||||
enter_section("reql");
|
||||
TEST_EQ((R::expr(1) + 2).run(*conn), R::Datum(3));
|
||||
TEST_EQ(R::range(4).count().run(*conn), R::Datum(4));
|
||||
TEST_EQ(R::js("Math.abs")(-1).run(*conn), 1);
|
||||
exit_section();
|
||||
}
|
||||
|
||||
void test_cursor() {
|
||||
enter_section("cursor");
|
||||
R::Cursor cursor = R::range(10000).run(*conn);
|
||||
TEST_EQ(cursor.next(), 0);
|
||||
R::Array array = cursor.to_array();
|
||||
TEST_EQ(array.size(), 9999);
|
||||
TEST_EQ(*array.begin(), 1);
|
||||
TEST_EQ(*array.rbegin(), 9999);
|
||||
int i = 0;
|
||||
R::range(3).run(*conn).each([&i](R::Datum&& datum){
|
||||
TEST_EQ(datum, i++); });
|
||||
exit_section();
|
||||
}
|
||||
|
||||
void test_encode(const char* str, const char* b) {
|
||||
TEST_EQ(R::base64_encode(str), b);
|
||||
}
|
||||
|
||||
void test_decode(const char* b, const char* str) {
|
||||
std::string out;
|
||||
TEST_EQ(R::base64_decode(b, out), true);
|
||||
TEST_EQ(out, str);
|
||||
}
|
||||
|
||||
#define TEST_B64(a, b) test_encode(a, b); test_decode(b, a)
|
||||
|
||||
void test_binary() {
|
||||
enter_section("base64");
|
||||
TEST_B64("", "");
|
||||
TEST_B64("foo", "Zm9v");
|
||||
exit_section();
|
||||
}
|
||||
|
||||
void test_issue28() {
|
||||
enter_section("issue #28");
|
||||
std::vector<std::string> expected{ "rethinkdb", "test" };
|
||||
std::vector<std::string> dbs;
|
||||
R::Cursor databases = R::db_list().run(*conn);
|
||||
for (R::Datum const& db : databases) {
|
||||
dbs.push_back(*db.get_string());
|
||||
}
|
||||
|
||||
TEST_EQ(dbs, expected);
|
||||
exit_section();
|
||||
}
|
||||
|
||||
int main() {
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
srand(time(NULL));
|
||||
try {
|
||||
conn = R::connect();
|
||||
} catch(const R::Error& error) {
|
||||
printf("FAILURE: could not connect to localhost:28015: %s\n", error.message.c_str());
|
||||
return 1;
|
||||
}
|
||||
try {
|
||||
//test_binary();
|
||||
//test_json_parse_print();
|
||||
//test_reql();
|
||||
//test_cursor();
|
||||
test_issue28();
|
||||
run_upstream_tests();
|
||||
} catch (const R::Error& error) {
|
||||
printf("FAILURE: uncaught expception: %s\n", error.message.c_str());
|
||||
return 1;
|
||||
}
|
||||
if (!failed) {
|
||||
printf("SUCCESS: %d tests passed\n", count);
|
||||
} else {
|
||||
printf("DONE: %d of %d tests failed\n", failed, count);
|
||||
return 1;
|
||||
}
|
||||
}
|
356
ext/librethinkdbxx/test/testlib.cc
Normal file
356
ext/librethinkdbxx/test/testlib.cc
Normal file
@ -0,0 +1,356 @@
|
||||
#include <algorithm>
|
||||
#include <regex>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "testlib.h"
|
||||
|
||||
int verbosity = 0;
|
||||
|
||||
int failed = 0;
|
||||
int count = 0;
|
||||
std::vector<std::pair<const char*, bool>> section;
|
||||
|
||||
std::unique_ptr<R::Connection> conn;
|
||||
|
||||
// std::string to_string(const R::Cursor&) {
|
||||
// return "<Cursor>";
|
||||
// }
|
||||
|
||||
std::string to_string(const R::Term& query) {
|
||||
return to_string(query.get_datum());
|
||||
}
|
||||
|
||||
std::string to_string(const R::Datum& datum) {
|
||||
return datum.as_json();
|
||||
}
|
||||
|
||||
std::string to_string(const R::Object& object) {
|
||||
auto it = object.find("special");
|
||||
if (it != object.end()) {
|
||||
std::string type = *(it->second).get_string();
|
||||
auto bag = object.find(type);
|
||||
if (bag != object.end()) {
|
||||
return to_string((R::Datum)bag->second);
|
||||
}
|
||||
}
|
||||
|
||||
return to_string((R::Datum)object);
|
||||
}
|
||||
|
||||
std::string to_string(const R::Error& error) {
|
||||
return "Error(\"" + error.message + "\")";
|
||||
}
|
||||
|
||||
void enter_section(const char* name) {
|
||||
if (verbosity == 0) {
|
||||
section.emplace_back(name, true);
|
||||
} else {
|
||||
printf("%sSection %s\n", indent(), name);
|
||||
section.emplace_back(name, false);
|
||||
}
|
||||
}
|
||||
|
||||
void section_cleanup() {
|
||||
R::db("test").table_list().for_each([=](R::Var table) {
|
||||
return R::db("test").table_drop(*table);
|
||||
}).run(*conn);
|
||||
}
|
||||
|
||||
void exit_section() {
|
||||
section.pop_back();
|
||||
}
|
||||
|
||||
std::string to_string(const err& error) {
|
||||
return "Error(\"" + error.convert_type() + ": " + error.message + "\")";
|
||||
}
|
||||
|
||||
bool equal(const R::Error& a, const err& b) {
|
||||
// @TODO: I think the proper solution to this proble is to in fact create
|
||||
// a hierarchy of exception types. This would not only simplify these
|
||||
// error cases, but could be of great use to the user.
|
||||
std::string error_type = b.convert_type();
|
||||
if (error_type == "ReqlServerCompileError" &&
|
||||
a.message.find("ReqlCompileError") != std::string::npos) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return b.trim_message(a.message) == (error_type + ": " + b.message);
|
||||
}
|
||||
|
||||
bool match(const char* pattern, const char* string) {
|
||||
return std::regex_match(string, std::regex(pattern));
|
||||
}
|
||||
|
||||
bool equal(const R::Error& a, const err_regex& b) {
|
||||
if (b.message == "Object keys must be strings" &&
|
||||
a.message == "runtime error: Expected type STRING but found NUMBER.") {
|
||||
return true;
|
||||
}
|
||||
return match(b.regex().c_str(), a.message.c_str());
|
||||
}
|
||||
|
||||
std::string to_string(const err_regex& error) {
|
||||
return "err_regex(" + error.type + ", " + error.message + ")";
|
||||
}
|
||||
|
||||
R::Object partial(R::Object&& object) {
|
||||
return R::Object{{"special", "partial"}, {"partial", std::move(object)}};
|
||||
}
|
||||
|
||||
R::Datum uuid() {
|
||||
return R::Object{{"special", "uuid"}};
|
||||
}
|
||||
|
||||
R::Object arrlen(int n, R::Datum&& datum) {
|
||||
return R::Object{{"special", "arrlen"},{"len",n},{"of",datum}};
|
||||
}
|
||||
|
||||
R::Object arrlen(int n) {
|
||||
return R::Object{{"special", "arrlen"},{"len",n}};
|
||||
}
|
||||
|
||||
std::string repeat(std::string&& s, int n) {
|
||||
std::string string;
|
||||
string.reserve(n * s.size());
|
||||
for (int i = 0; i < n; ++i) {
|
||||
string.append(s);
|
||||
}
|
||||
return string;
|
||||
}
|
||||
|
||||
R::Term fetch(R::Cursor& cursor, int count, double timeout) {
|
||||
// printf("fetch(..., %d, %lf)\n", count, timeout);
|
||||
R::Array array;
|
||||
int deadline = time(NULL) + int(timeout);
|
||||
for (int i = 0; count == -1 || i < count; ++i) {
|
||||
// printf("fetching next (%d)\n", i);
|
||||
time_t now = time(NULL);
|
||||
if (now > deadline) break;
|
||||
|
||||
try {
|
||||
array.emplace_back(cursor.next(deadline - now));
|
||||
// printf("got %s\n", write_datum(array[array.size()-1]).c_str());
|
||||
} catch (const R::Error &e) {
|
||||
if (e.message != "next: No more data") {
|
||||
throw e; // rethrow
|
||||
}
|
||||
|
||||
break;
|
||||
} catch (const R::TimeoutException &e){
|
||||
// printf("fetch timeout\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return expr(std::move(array));
|
||||
}
|
||||
|
||||
R::Object bag(R::Array&& array) {
|
||||
return R::Object{{"special", "bag"}, {"bag", std::move(array)}};
|
||||
};
|
||||
|
||||
R::Object bag(R::Datum&& d) {
|
||||
return R::Object{{"special", "bag"}, {"bag", std::move(d)}};
|
||||
};
|
||||
|
||||
std::string string_key(const R::Datum& datum) {
|
||||
const std::string* string = datum.get_string();
|
||||
if (string) return *string;
|
||||
return datum.as_json();
|
||||
}
|
||||
|
||||
bool falsey(R::Datum&& datum) {
|
||||
bool* boolean = datum.get_boolean();
|
||||
if (boolean) return !*boolean;
|
||||
double* number = datum.get_number();
|
||||
if (number) return *number == 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool equal(const R::Datum& got, const R::Datum& expected) {
|
||||
const std::string* string = expected.get_string();
|
||||
if (string) {
|
||||
const R::Binary* binary = got.get_binary();
|
||||
if (binary) {
|
||||
return *binary == R::Binary(*string);
|
||||
}
|
||||
}
|
||||
if (expected.get_object() && expected.get_field("$reql_type$")) {
|
||||
if (!got.get_field("$reql_type$")) {
|
||||
R::Datum datum = got.to_raw();
|
||||
if (datum.get_field("$reql_type$")) {
|
||||
return equal(datum, expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (got.get_object() && got.get_field("$reql_type$")) {
|
||||
const std::string* type = got.get_field("$reql_type$")->get_string();
|
||||
if (type && *type == "GROUPED_DATA" &&
|
||||
(!expected.get_object() || !expected.get_field("$reql_type$"))) {
|
||||
const R::Array* data = got.get_field("data")->get_array();
|
||||
R::Object object;
|
||||
for (R::Datum it : *data) {
|
||||
object.emplace(string_key(it.extract_nth(0)), it.extract_nth(1));
|
||||
}
|
||||
return equal(object, expected);
|
||||
}
|
||||
}
|
||||
do {
|
||||
if (!expected.get_object()) break;
|
||||
if(!expected.get_field("special")) break;
|
||||
const std::string* type = expected.get_field("special")->get_string();
|
||||
if (!type) break;
|
||||
if (*type == "bag") {
|
||||
const R::Datum* bag_datum = expected.get_field("bag");
|
||||
if (!bag_datum || !bag_datum->get_array()) {
|
||||
break;
|
||||
}
|
||||
R::Array bag = *bag_datum->get_array();
|
||||
const R::Array* array = got.get_array();
|
||||
if (!array) {
|
||||
return false;
|
||||
}
|
||||
if (bag.size() != array->size()) {
|
||||
return false;
|
||||
}
|
||||
for (const auto& it : *array) {
|
||||
auto ref = std::find(bag.begin(), bag.end(), it);
|
||||
if (ref == bag.end()) return false;
|
||||
bag.erase(ref);
|
||||
}
|
||||
return true;
|
||||
} else if (*type == "arrlen") {
|
||||
const R::Datum* len_datum = expected.get_field("len");
|
||||
if (!len_datum) break;
|
||||
const double *len = len_datum->get_number();
|
||||
if (!len) break;
|
||||
const R::Array* array = got.get_array();
|
||||
if (!array) break;
|
||||
return array->size() == *len;
|
||||
} else if (*type == "partial") {
|
||||
const R::Object* object = got.get_object();
|
||||
if (object) {
|
||||
const R::Datum* partial_datum = expected.get_field("partial");
|
||||
if (!partial_datum) break;
|
||||
const R::Object* partial = partial_datum->get_object();
|
||||
if (!partial) break;
|
||||
for (const auto& it : *partial) {
|
||||
if (!object->count(it.first) || !equal((*object).at(it.first), it.second)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
const R::Array* array = got.get_array();
|
||||
if (array) {
|
||||
const R::Datum* partial_datum = expected.get_field("partial");
|
||||
if (!partial_datum) break;
|
||||
const R::Array* partial = partial_datum->get_array();
|
||||
if (!partial) break;
|
||||
|
||||
for (const auto& want : *partial) {
|
||||
bool match = false;
|
||||
for (const auto& have : *array) {
|
||||
if (equal(have, want)) {
|
||||
match = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (match == false) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} else if(*type == "uuid") {
|
||||
const std::string* string = got.get_string();
|
||||
if (string && string->size() == 36) {
|
||||
return true;
|
||||
}
|
||||
} else if (*type == "regex") {
|
||||
const R::Datum* regex_datum = expected.get_field("regex");
|
||||
if (!regex_datum) break;
|
||||
const std::string* regex = regex_datum->get_string();
|
||||
if (!regex) break;
|
||||
const std::string* str = got.get_string();
|
||||
if (!str) break;
|
||||
return match(regex->c_str(), str->c_str());
|
||||
}
|
||||
} while(0);
|
||||
const R::Object* got_object = got.get_object();
|
||||
const R::Object* expected_object = expected.get_object();
|
||||
if (got_object && expected_object) {
|
||||
R::Object have = *got_object;
|
||||
for (const auto& it : *expected_object) {
|
||||
auto other = have.find(it.first);
|
||||
if (other == have.end()) return false;
|
||||
if (!equal(other->second, it.second)) return false;
|
||||
have.erase(other);
|
||||
}
|
||||
for (auto& it : have) {
|
||||
if (!falsey(std::move(it.second))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
const R::Array* got_array = got.get_array();
|
||||
const R::Array* expected_array = expected.get_array();
|
||||
if (got_array && expected_array) {
|
||||
if (got_array->size() != expected_array->size()) return false;
|
||||
for (R::Array::const_iterator i = got_array->begin(), j = expected_array->begin();
|
||||
i < got_array->end();
|
||||
i++, j++) {
|
||||
if(!equal(*i, *j)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return got == expected;
|
||||
}
|
||||
|
||||
R::Object partial(R::Array&& array) {
|
||||
return R::Object{{"special", "partial"}, {"partial", std::move(array)}};
|
||||
}
|
||||
|
||||
R::Object regex(const char* pattern) {
|
||||
return R::Object{{"special", "regex"}, {"regex", pattern}};
|
||||
}
|
||||
|
||||
void clean_slate() {
|
||||
R::table_list().for_each([](R::Var t){ return R::table_drop(*t); });
|
||||
R::db("rethinkdb").table("_debug_scratch").delete_().run(*conn);
|
||||
}
|
||||
|
||||
const char* indent() {
|
||||
static const char spaces[] = " ";
|
||||
return spaces + sizeof(spaces) - 1 - 2 * section.size();
|
||||
}
|
||||
|
||||
std::string truncate(std::string&& string) {
|
||||
if (string.size() > 200) {
|
||||
return string.substr(0, 197) + "...";
|
||||
}
|
||||
return string;
|
||||
}
|
||||
|
||||
int len(const R::Datum& d) {
|
||||
const R::Array* arr = d.get_array();
|
||||
if (!arr) throw ("testlib: len: expected an array but got " + to_string(d));
|
||||
return arr->size();
|
||||
}
|
||||
|
||||
R::Term wait(int n) {
|
||||
std::this_thread::sleep_for(std::chrono::seconds(n));
|
||||
return R::expr(n);
|
||||
}
|
||||
|
||||
R::Datum nil = R::Nil();
|
||||
|
||||
R::Array append(R::Array lhs, R::Array rhs) {
|
||||
if (lhs.empty()) {
|
||||
return rhs;
|
||||
}
|
||||
lhs.reserve(lhs.size() + rhs.size());
|
||||
std::move(std::begin(rhs), std::end(rhs), std::back_inserter(lhs));
|
||||
return lhs;
|
||||
}
|
231
ext/librethinkdbxx/test/testlib.h
Normal file
231
ext/librethinkdbxx/test/testlib.h
Normal file
@ -0,0 +1,231 @@
|
||||
#pragma once
|
||||
|
||||
#include <sstream>
|
||||
#include <cstdio>
|
||||
#include <stack>
|
||||
#include <cmath>
|
||||
#include <regex>
|
||||
|
||||
#include <rethinkdb.h>
|
||||
|
||||
namespace R = RethinkDB;
|
||||
|
||||
extern std::vector<std::pair<const char*, bool>> section;
|
||||
extern int failed;
|
||||
extern int count;
|
||||
extern std::unique_ptr<R::Connection> conn;
|
||||
extern int verbosity;
|
||||
|
||||
const char* indent();
|
||||
|
||||
void enter_section(const char* name);
|
||||
void section_cleanup();
|
||||
void exit_section();
|
||||
|
||||
#define TEST_DO(code) \
|
||||
if (verbosity > 1) fprintf(stderr, "%sTEST: %s\n", indent(), #code); \
|
||||
code
|
||||
|
||||
#define TEST_EQ(code, expected) \
|
||||
do { \
|
||||
if (verbosity > 1) fprintf(stderr, "%sTEST: %s\n", indent(), #code); \
|
||||
try { test_eq(#code, (code), (expected)); } \
|
||||
catch (const R::Error& error) { test_eq(#code, error, (expected)); } \
|
||||
} while (0)
|
||||
|
||||
struct err {
|
||||
err(const char* type_, std::string message_, R::Array&& backtrace_ = {}) :
|
||||
type(type_), message(message_), backtrace(std::move(backtrace_)) { }
|
||||
|
||||
std::string convert_type() const {
|
||||
return type;
|
||||
}
|
||||
|
||||
static std::string trim_message(std::string msg) {
|
||||
size_t i = msg.find(":\n");
|
||||
if (i != std::string::npos) {
|
||||
return msg.substr(0, i + 1);
|
||||
}
|
||||
return msg;
|
||||
}
|
||||
|
||||
std::string type;
|
||||
std::string message;
|
||||
R::Array backtrace;
|
||||
};
|
||||
|
||||
struct err_regex {
|
||||
err_regex(const char* type_, const char* message_, R::Array&& backtrace_ = {}) :
|
||||
type(type_), message(message_), backtrace(std::move(backtrace_)) { }
|
||||
std::string type;
|
||||
std::string message;
|
||||
R::Array backtrace;
|
||||
std::string regex() const {
|
||||
return type + ": " + message;
|
||||
}
|
||||
};
|
||||
|
||||
R::Object regex(const char* pattern);
|
||||
|
||||
bool match(const char* pattern, const char* string);
|
||||
|
||||
R::Object partial(R::Object&& object);
|
||||
R::Object partial(R::Array&& array);
|
||||
R::Datum uuid();
|
||||
R::Object arrlen(int n, R::Datum&& datum);
|
||||
R::Object arrlen(int n);
|
||||
R::Term new_table();
|
||||
std::string repeat(std::string&& s, int n);
|
||||
R::Term fetch(R::Cursor& cursor, int count = -1, double timeout = 1);
|
||||
R::Object bag(R::Array&& array);
|
||||
R::Object bag(R::Datum&& d);
|
||||
|
||||
struct temp_table {
|
||||
temp_table() {
|
||||
char chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
|
||||
char name_[15] = "temp_";
|
||||
for (unsigned int i = 5; i + 1 < sizeof name_; ++i) {
|
||||
name_[i] = chars[random() % (sizeof chars - 1)];
|
||||
}
|
||||
name_[14] = 0;
|
||||
R::table_create(name_).run(*conn);
|
||||
name = name_;
|
||||
}
|
||||
|
||||
~temp_table() {
|
||||
try {
|
||||
R::table_drop(name).run(*conn);
|
||||
} catch (const R::Error &e) {
|
||||
if(!strstr(e.message.c_str(), "does not exist")){
|
||||
printf("error dropping temp_table: %s\n", e.message.c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
R::Term table() { return R::table(name); }
|
||||
std::string name;
|
||||
};
|
||||
|
||||
void clean_slate();
|
||||
|
||||
// std::string to_string(const R::Cursor&);
|
||||
std::string to_string(const R::Term&);
|
||||
std::string to_string(const R::Datum&);
|
||||
std::string to_string(const R::Error&);
|
||||
std::string to_string(const err_regex&);
|
||||
std::string to_string(const err&);
|
||||
|
||||
bool equal(const R::Datum&, const R::Datum&);
|
||||
bool equal(const R::Error&, const err_regex&);
|
||||
bool equal(const R::Error&, const err&);
|
||||
|
||||
template <class T>
|
||||
bool equal(const T& a, const err& b) {
|
||||
return false;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool equal(const T& a, const err_regex& b) {
|
||||
return false;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool equal(const R::Error& a, const T& b) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string truncate(std::string&&);
|
||||
|
||||
template <class T, class U>
|
||||
void test_eq(const char* code, const T val, const U expected) {
|
||||
|
||||
try {
|
||||
count ++;
|
||||
if (!equal(val, expected)) {
|
||||
failed++;
|
||||
for (auto& it : section) {
|
||||
if (it.second) {
|
||||
printf("%sSection: %s\n", indent(), it.first);
|
||||
it.second = false;
|
||||
}
|
||||
}
|
||||
try {
|
||||
printf("%sFAILURE in ‘%s’:\n%s Expected: ‘%s’\n%s but got: ‘%s’\n",
|
||||
indent(), code,
|
||||
indent(), truncate(to_string(expected)).c_str(),
|
||||
indent(), truncate(to_string(val)).c_str());
|
||||
} catch (const R::Error& e) {
|
||||
printf("%sFAILURE: Failed to print failure description: %s\n", indent(), e.message.c_str());
|
||||
} catch (...) {
|
||||
printf("%sFAILURE: Failed to print failure description\n", indent());
|
||||
}
|
||||
}
|
||||
} catch (const std::regex_error& rx_err) {
|
||||
printf("%sSKIP: error with regex (likely a buggy regex implementation): %s\n", indent(), rx_err.what());
|
||||
}
|
||||
}
|
||||
|
||||
template <class U>
|
||||
void test_eq(const char* code, const R::Cursor& val, const U expected) {
|
||||
try {
|
||||
R::Datum result = val.to_datum();
|
||||
test_eq(code, result, expected);
|
||||
} catch (R::Error& error) {
|
||||
test_eq(code, error, expected);
|
||||
}
|
||||
}
|
||||
|
||||
int len(const R::Datum&);
|
||||
|
||||
R::Term wait(int n);
|
||||
|
||||
#define PacificTimeZone() (-7 * 3600)
|
||||
#define UTCTimeZone() (0)
|
||||
|
||||
extern R::Datum nil;
|
||||
|
||||
inline R::Cursor maybe_run(R::Cursor& c, R::Connection&, R::OptArgs&& o = {}) {
|
||||
return std::move(c);
|
||||
}
|
||||
|
||||
inline R::Cursor maybe_run(R::Term q, R::Connection& c, R::OptArgs&& o = {}) {
|
||||
return q.run(c, std::move(o));
|
||||
}
|
||||
|
||||
inline int operator+(R::Datum a, int b) {
|
||||
return a.extract_number() + b;
|
||||
}
|
||||
|
||||
inline R::Array operator*(R::Array arr, int n) {
|
||||
R::Array ret;
|
||||
for(int i = 0; i < n; i++) {
|
||||
for(const auto& it: arr) {
|
||||
ret.push_back(it);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
inline R::Array array_range(int x, int y) {
|
||||
R::Array ret;
|
||||
for(int i = x; i < y; ++i) {
|
||||
ret.push_back(i);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <class F>
|
||||
inline R::Array array_map(F f, R::Array a){
|
||||
R::Array ret;
|
||||
for(R::Datum& d: a) {
|
||||
ret.push_back(f(d.extract_number()));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
R::Array append(R::Array lhs, R::Array rhs);
|
||||
|
||||
template <class T>
|
||||
std::string str(T x){
|
||||
return to_string(x);
|
||||
}
|
575
ext/librethinkdbxx/test/upstream/aggregation.yaml
Normal file
575
ext/librethinkdbxx/test/upstream/aggregation.yaml
Normal file
@ -0,0 +1,575 @@
|
||||
desc: Tests that manipulation data in tables
|
||||
table_variable_name: tbl tbl2 tbl3 tbl4
|
||||
tests:
|
||||
|
||||
# Set up some data
|
||||
- cd: r.range(100).for_each(tbl.insert({'id':r.row, 'a':r.row.mod(4)}))
|
||||
rb: tbl.insert((0..99).map{ |i| { :id => i, :a => i % 4 } })
|
||||
ot: {'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':100}
|
||||
|
||||
- cd: r.range(100).for_each(tbl2.insert({'id':r.row, 'a':r.row.mod(4)}))
|
||||
rb: tbl2.insert((0..99).map{ |i| { :id => i, :b => i % 4 } })
|
||||
ot: {'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':100}
|
||||
|
||||
- cd: r.range(100).for_each(tbl3.insert({'id':r.row, 'a':r.row.mod(4), 'b':{'c':r.row.mod(5)}}))
|
||||
rb: tbl3.insert((0..99).map{ |i| { :id => i, :a => i % 4, :b => { :c => i % 5 } } })
|
||||
ot: {'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':100}
|
||||
|
||||
- def:
|
||||
cd: time1 = 1375115782.24
|
||||
js: time1 = 1375115782.24 * 1000
|
||||
|
||||
- def:
|
||||
cd: time2 = 1375147296.68
|
||||
js: time2 = 1375147296.68 * 1000
|
||||
|
||||
- cd:
|
||||
- tbl4.insert({'id':0, 'time':r.epoch_time(time1)})
|
||||
- tbl4.insert({'id':1, 'time':r.epoch_time(time2)})
|
||||
ot: {'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':1}
|
||||
|
||||
# GMR
|
||||
|
||||
- cd: tbl.sum('a')
|
||||
ot: 150
|
||||
- rb: tbl.map{|row| row['a']}.sum()
|
||||
py: tbl.map(lambda row:row['a']).sum()
|
||||
js: tbl.map(function(row){return row('a')}).sum()
|
||||
ot: 150
|
||||
- cd: tbl.group('a').sum('id')
|
||||
ot:
|
||||
cd: ({0:1200, 1:1225, 2:1250, 3:1275})
|
||||
js: ([{'group':0,'reduction':1200},{'group':1,'reduction':1225},{'group':2,'reduction':1250},{'group':3,'reduction':1275}])
|
||||
- cd: tbl.avg('a')
|
||||
ot: 1.5
|
||||
- rb: tbl.map{|row| row['a']}.avg()
|
||||
py: tbl.map(lambda row:row['a']).avg()
|
||||
js: tbl.map(function(row){return row('a')}).avg()
|
||||
ot: 1.5
|
||||
- cd: tbl.group('a').avg('id')
|
||||
ot:
|
||||
cd: {0:48, 1:49, 2:50, 3:51}
|
||||
js: [{'group':0,'reduction':48},{'group':1,'reduction':49},{'group':2,'reduction':50},{'group':3,'reduction':51}]
|
||||
- cd: tbl.min('a')['a']
|
||||
js: tbl.min('a')('a')
|
||||
ot: 0
|
||||
- cd: tbl.order_by('id').min('a')
|
||||
ot: {'a':0, 'id':0}
|
||||
- rb: tbl.map{|row| row['a']}.min()
|
||||
py: tbl.map(lambda row:row['a']).min()
|
||||
js: tbl.map(function(row){return row('a')}).min()
|
||||
ot: 0
|
||||
- cd: tbl.group('a').min('id')
|
||||
ot:
|
||||
cd: {0:{'a':0, 'id':0}, 1:{'a':1, 'id':1}, 2:{'a':2, 'id':2}, 3:{'a':3, 'id':3}}
|
||||
js: [{'group':0,'reduction':{'a':0, 'id':0}},{'group':1,'reduction':{'a':1, 'id':1}},{'group':2,'reduction':{'a':2, 'id':2}},{'group':3,'reduction':{'a':3, 'id':3}}]
|
||||
- cd: tbl.order_by('id').max('a')
|
||||
ot: {'a':3, 'id':3}
|
||||
- rb: tbl.map{|row| row['a']}.max()
|
||||
py: tbl.map(lambda row:row['a']).max()
|
||||
js: tbl.map(function(row){return row('a')}).max()
|
||||
ot: 3
|
||||
- cd: tbl.group('a').max('id')
|
||||
ot:
|
||||
cd: {0:{'a':0, 'id':96}, 1:{'a':1, 'id':97}, 2:{'a':2, 'id':98}, 3:{'a':3, 'id':99}}
|
||||
js: [{'group':0,'reduction':{'a':0, 'id':96}},{'group':1,'reduction':{'a':1, 'id':97}},{'group':2,'reduction':{'a':2, 'id':98}},{'group':3,'reduction':{'a':3, 'id':99}}]
|
||||
|
||||
- cd: tbl.min()
|
||||
ot: {"a":0, "id":0}
|
||||
- cd: tbl.group('a').min()
|
||||
ot:
|
||||
cd: {0:{"a":0, "id":0}, 1:{"a":1, "id":1}, 2:{"a":2, "id":2}, 3:{"a":3, "id":3}}
|
||||
js: [{'group':0,'reduction':{"a":0,"id":0}},{'group':1,'reduction':{"a":1,"id":1}},{'group':2,'reduction':{"a":2,"id":2}},{'group':3,'reduction':{"a":3,"id":3}}]
|
||||
- cd: tbl.max()
|
||||
ot: {"a":3, "id":99}
|
||||
- cd: tbl.group('a').max()
|
||||
ot:
|
||||
cd: {0:{'a':0, 'id':96}, 1:{'a':1, 'id':97}, 2:{'a':2, 'id':98}, 3:{'a':3, 'id':99}}
|
||||
js: [{'group':0,'reduction':{"a":0,"id":96}},{'group':1,'reduction':{"a":1,"id":97}},{'group':2,'reduction':{"a":2,"id":98}},{'group':3,'reduction':{"a":3,"id":99}}]
|
||||
|
||||
- rb: tbl.sum{|row| row['a']}
|
||||
py:
|
||||
- tbl.sum(lambda row:row['a'])
|
||||
- tbl.sum(r.row['a'])
|
||||
js:
|
||||
- tbl.sum(function(row){return row('a')})
|
||||
- tbl.sum(r.row('a'))
|
||||
ot: 150
|
||||
- rb: tbl.map{|row| row['a']}.sum()
|
||||
py: tbl.map(lambda row:row['a']).sum()
|
||||
js: tbl.map(function(row){return row('a')}).sum()
|
||||
ot: 150
|
||||
- rb: tbl.group{|row| row['a']}.sum{|row| row['id']}
|
||||
py: tbl.group(lambda row:row['a']).sum(lambda row:row['id'])
|
||||
js: tbl.group(function(row){return row('a')}).sum(function(row){return row('id')})
|
||||
ot:
|
||||
cd: {0:1200, 1:1225, 2:1250, 3:1275}
|
||||
js: [{'group':0,'reduction':1200},{'group':1,'reduction':1225},{'group':2,'reduction':1250},{'group':3,'reduction':1275}]
|
||||
- rb:
|
||||
- tbl.avg{|row| row['a']}
|
||||
py:
|
||||
- tbl.avg(lambda row:row['a'])
|
||||
- tbl.avg(r.row['a'])
|
||||
js:
|
||||
- tbl.avg(function(row){return row('a')})
|
||||
- tbl.avg(r.row('a'))
|
||||
ot: 1.5
|
||||
- rb: tbl.map{|row| row['a']}.avg()
|
||||
py: tbl.map(lambda row:row['a']).avg()
|
||||
js: tbl.map(function(row){return row('a')}).avg()
|
||||
ot: 1.5
|
||||
- rb: tbl.group{|row| row['a']}.avg{|row| row['id']}
|
||||
py: tbl.group(lambda row:row['a']).avg(lambda row:row['id'])
|
||||
js: tbl.group(function(row){return row('a')}).avg(function(row){return row('id')})
|
||||
ot:
|
||||
cd: {0:48, 1:49, 2:50, 3:51}
|
||||
js: [{'group':0,'reduction':48},{'group':1,'reduction':49},{'group':2,'reduction':50},{'group':3,'reduction':51}]
|
||||
- rb: tbl.order_by(r.desc('id')).min{|row| row['a']}
|
||||
py:
|
||||
- tbl.order_by(r.desc('id')).min(lambda row:row['a'])
|
||||
- tbl.order_by(r.desc('id')).min(r.row['a'])
|
||||
js:
|
||||
- tbl.order_by(r.desc('id')).min(function(row){return row('a')})
|
||||
- tbl.order_by(r.desc('id')).min(r.row('a'))
|
||||
ot: {'a':0, 'id':96}
|
||||
- rb:
|
||||
- tbl.order_by(r.desc('id')).min{|row| row['a']}['a']
|
||||
py:
|
||||
- tbl.order_by(r.desc('id')).min(lambda row:row['a'])['a']
|
||||
- tbl.order_by(r.desc('id')).min(r.row['a'])['a']
|
||||
js:
|
||||
- tbl.order_by(r.desc('id')).min(function(row){return row('a')})('a')
|
||||
- tbl.order_by(r.desc('id')).min(r.row('a'))('a')
|
||||
ot: 0
|
||||
- rb: tbl.map{|row| row['a']}.min()
|
||||
py: tbl.map(lambda row:row['a']).min()
|
||||
js: tbl.map(function(row){return row('a')}).min()
|
||||
ot: 0
|
||||
- rb: tbl.group{|row| row['a']}.min{|row| row['id']}['id']
|
||||
py: tbl.group(lambda row:row['a']).min(lambda row:row['id'])['id']
|
||||
js: tbl.group(function(row){return row('a')}).min(function(row){return row('id')})('id')
|
||||
ot:
|
||||
cd: {0:0, 1:1, 2:2, 3:3}
|
||||
js: [{'group':0,'reduction':0},{'group':1,'reduction':1},{'group':2,'reduction':2},{'group':3,'reduction':3}]
|
||||
- rb:
|
||||
- tbl.max{|row| row['a']}['a']
|
||||
py:
|
||||
- tbl.max(lambda row:row['a'])['a']
|
||||
- tbl.max(r.row['a'])['a']
|
||||
js:
|
||||
- tbl.max(function(row){return row('a')})('a')
|
||||
- tbl.max(r.row('a'))('a')
|
||||
ot: 3
|
||||
- rb: tbl.map{|row| row['a']}.max()
|
||||
py: tbl.map(lambda row:row['a']).max()
|
||||
js: tbl.map(function(row){return row('a')}).max()
|
||||
ot: 3
|
||||
- rb: tbl.group{|row| row['a']}.max{|row| row['id']}['id']
|
||||
py: tbl.group(lambda row:row['a']).max(lambda row:row['id'])['id']
|
||||
js: tbl.group(function(row){return row('a')}).max(function(row){return row('id')})('id')
|
||||
ot:
|
||||
cd: {0:96, 1:97, 2:98, 3:99}
|
||||
js: [{'group':0,'reduction':96},{'group':1,'reduction':97},{'group':2,'reduction':98},{'group':3,'reduction':99}]
|
||||
|
||||
- rb: tbl.group{|row| row[:a]}.map{|row| row[:id]}.reduce{|a,b| a+b}
|
||||
py: tbl.group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a+b)
|
||||
js: tbl.group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
ot:
|
||||
cd: {0:1200, 1:1225, 2:1250, 3:1275}
|
||||
js: [{'group':0,'reduction':1200},{'group':1,'reduction':1225},{'group':2,'reduction':1250},{'group':3,'reduction':1275}]
|
||||
|
||||
- rb: tbl.group{|row| row[:a]}.map{|row| row[:id]}.reduce{|a,b| a+b}
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
py:
|
||||
- tbl.group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a+b)
|
||||
- tbl.group(r.row['a']).map(r.row['id']).reduce(lambda a,b:a + b)
|
||||
js:
|
||||
- tbl.group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.group(r.row('a')).map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 1200], [1, 1225], [2, 1250], [3, 1275]]}
|
||||
|
||||
- cd: r.expr([{'a':1}]).filter(true).limit(1).group('a')
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[1, [{'a':1}]]]}
|
||||
|
||||
# GMR
|
||||
- cd: tbl.group('a').type_of()
|
||||
ot: "GROUPED_STREAM"
|
||||
- cd: tbl.group('a').count().type_of()
|
||||
ot: "GROUPED_DATA"
|
||||
- cd: tbl.group('a').coerce_to('ARRAY').type_of()
|
||||
ot: "GROUPED_DATA"
|
||||
|
||||
- rb: tbl.orderby(index:'id').filter{|row| row['id'].lt(10)}.group('a').map{|row| row['id']}.coerce_to('ARRAY')
|
||||
py: tbl.order_by(index='id').filter(lambda row:row['id'] < 10).group('a').map(lambda row:row['id']).coerce_to('ARRAY')
|
||||
js: tbl.orderBy({index:'id'}).filter(function(row){return row('id').lt(10)}).group('a').map(function(row){return row('id')}).coerce_to('ARRAY')
|
||||
ot:
|
||||
cd: {0:[0,4,8],1:[1,5,9],2:[2,6],3:[3,7]}
|
||||
js: [{'group':0,'reduction':[0,4,8]},{'group':1,'reduction':[1,5,9]},{'group':2,'reduction':[2,6]},{'group':3,'reduction':[3,7]}]
|
||||
|
||||
- rb: tbl.filter{|row| row['id'].lt(10)}.group('a').count().do{|x| x*x}
|
||||
py: tbl.filter(lambda row:row['id'] < 10).group('a').count().do(lambda x:x*x)
|
||||
js: tbl.filter(function(row){return row('id').lt(10)}).group('a').count().do(function(x){return x.mul(x)})
|
||||
ot:
|
||||
cd: {0:9,1:9,2:4,3:4}
|
||||
js: [{'group':0,'reduction':9},{'group':1,'reduction':9},{'group':2,'reduction':4},{'group':3,'reduction':4}]
|
||||
|
||||
- rb: tbl.union(tbl).group('a').map{|x| x['id']}.reduce{|a,b| a+b}
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
py:
|
||||
- tbl.union(tbl).group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a + b)
|
||||
- tbl.union(tbl).group(r.row['a']).map(r.row['id']).reduce(lambda a,b:a + b)
|
||||
js:
|
||||
- tbl.union(tbl).group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.union(tbl).group(r.row('a')).map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 2400], [1, 2450], [2, 2500], [3, 2550]]}
|
||||
|
||||
# GMR
|
||||
- rb: tbl.coerce_to("array").union(tbl).group('a').map{|x| x['id']}.reduce{|a,b| a+b}
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
py:
|
||||
- tbl.coerce_to("array").union(tbl).group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a + b)
|
||||
- tbl.coerce_to("array").union(tbl).group(r.row['a']).map(r.row['id']).reduce(lambda a,b:a + b)
|
||||
js:
|
||||
- tbl.coerce_to("array").union(tbl).group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.coerce_to("array").union(tbl).group(r.row('a')).map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 2400], [1, 2450], [2, 2500], [3, 2550]]}
|
||||
|
||||
# GMR
|
||||
- rb: tbl.union(tbl.coerce_to("array")).group('a').map{|x| x['id']}.reduce{|a,b| a+b}
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
py:
|
||||
- tbl.union(tbl.coerce_to("array")).group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a + b)
|
||||
- tbl.union(tbl.coerce_to("array")).group(r.row['a']).map(r.row['id']).reduce(lambda a,b:a + b)
|
||||
js:
|
||||
- tbl.union(tbl.coerce_to("array")).group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.union(tbl.coerce_to("array")).group(r.row('a')).map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 2400], [1, 2450], [2, 2500], [3, 2550]]}
|
||||
|
||||
- py:
|
||||
- tbl.group(lambda row:row['a']).map(lambda row:row['id']).reduce(lambda a,b:a + b)
|
||||
- tbl.group(r.row['a']).map(r.row['id']).reduce(lambda a,b:a + b)
|
||||
js:
|
||||
- tbl.group(function(row){return row('a')}).map(function(row){return row('id')}).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.group(r.row('a')).map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
- tbl.group('a').map(r.row('id')).reduce(function(a,b){return a.add(b)})
|
||||
rb: tbl.group('a').map{|x| x['id']}.reduce{|a,b| a+b}
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 1200], [1, 1225], [2, 1250], [3, 1275]]}
|
||||
|
||||
# undefined...
|
||||
- js:
|
||||
- tbl.group(function(row){})
|
||||
- tbl.map(function(row){})
|
||||
- tbl.reduce(function(row){})
|
||||
- tbl.group(r.row('a')).group(function(row){})
|
||||
- tbl.group(r.row('a')).map(function(row){})
|
||||
- tbl.group(r.row('a')).reduce(function(row){})
|
||||
- tbl.map(r.row('id')).group(function(row){})
|
||||
- tbl.map(r.row('id')).map(function(row){})
|
||||
- tbl.map(r.row('id')).reduce(function(row){})
|
||||
- tbl.reduce(function(a,b){return a+b}).group(function(row){})
|
||||
- tbl.reduce(function(a,b){return a+b}).map(function(row){})
|
||||
- tbl.reduce(function(a,b){return a+b}).reduce(function(row){})
|
||||
ot: err('ReqlDriverCompileError', 'Anonymous function returned `undefined`. Did you forget a `return`?', [0])
|
||||
|
||||
# GroupBy
|
||||
|
||||
# COUNT
|
||||
|
||||
- cd: tbl.group('a').count()
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 25], [1, 25], [2, 25], [3, 25]]}
|
||||
|
||||
# SUM
|
||||
- cd: tbl.group('a').sum('id')
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 1200], [1, 1225], [2, 1250], [3, 1275]]}
|
||||
|
||||
# AVG
|
||||
- cd: tbl.group('a').avg('id')
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 48], [1, 49], [2, 50], [3, 51]]}
|
||||
|
||||
# Pattern Matching
|
||||
- rb: tbl3.group{|row| row['b']['c']}.count()
|
||||
py: tbl3.group(lambda row:row['b']['c']).count()
|
||||
js: tbl3.group(function(row){return row('b')('c')}).count()
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 20], [1, 20], [2, 20], [3, 20], [4, 20]]}
|
||||
|
||||
# Multiple keys
|
||||
- rb: tbl.group('a', lambda {|row| row['id']%3}).count()
|
||||
py: tbl.group('a', lambda row:row['id'].mod(3)).count()
|
||||
js: tbl.group('a', function(row){return row('id').mod(3)}).count()
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[[0, 0], 9], [[0, 1], 8], [[0, 2], 8], [[1, 0], 8], [[1, 1], 9], [[1, 2], 8], [[2, 0], 8], [[2, 1], 8], [[2, 2], 9], [[3, 0], 9], [[3, 1], 8], [[3, 2], 8]]}
|
||||
|
||||
# Grouping by time
|
||||
- rb: tbl4.group('time').coerce_to('array')
|
||||
runopts:
|
||||
time_format: 'raw'
|
||||
ot:
|
||||
rb: {{"$reql_type$":"TIME","epoch_time":1375115782.24,"timezone":"+00:00"}:[{"id":0,"time":{"$reql_type$":"TIME","epoch_time":1375115782.24,"timezone":"+00:00"}}],{"$reql_type$":"TIME","epoch_time":1375147296.68,"timezone":"+00:00"}:[{"id":1,"time":{"$reql_type$":"TIME","epoch_time":1375147296.68,"timezone":"+00:00"}}]}
|
||||
py: {frozenset([('$reql_type$','TIME'),('timezone','+00:00'),('epoch_time',1375115782.24)]):[{'id':0,'time':{'timezone':'+00:00','$reql_type$':'TIME','epoch_time':1375115782.24}}],frozenset([('$reql_type$','TIME'),('timezone','+00:00'),('epoch_time',1375147296.68)]):[{'id':1,'time':{'timezone':'+00:00','$reql_type$':'TIME','epoch_time':1375147296.68}}]}
|
||||
js: [{'group':{"$reql_type$":"TIME","epoch_time":1375115782240,"timezone":"+00:00"},'reduction':[{"id":0,"time":{"$reql_type$":"TIME","epoch_time":1375115782240,"timezone":"+00:00"}}]},{'group':{"$reql_type$":"TIME","epoch_time":1375147296680,"timezone":"+00:00"},'reduction':[{"id":1,"time":{"$reql_type$":"TIME","epoch_time":1375147296680,"timezone":"+00:00"}}]}]
|
||||
|
||||
# Distinct
|
||||
- py: tbl.map(lambda row:row['a']).distinct().count()
|
||||
js: tbl.map(function(row) { return row('a'); }).distinct().count()
|
||||
rb: tbl.map{ |row| row[:a] }.distinct.count
|
||||
ot: 4
|
||||
|
||||
- cd: tbl.distinct().type_of()
|
||||
ot: "STREAM"
|
||||
|
||||
- cd: tbl.distinct().count()
|
||||
ot: 100
|
||||
|
||||
- cd: tbl.distinct({index:'id'}).type_of()
|
||||
py: tbl.distinct(index='id').type_of()
|
||||
ot: "STREAM"
|
||||
|
||||
- cd: tbl.distinct({index:'id'}).count()
|
||||
py: tbl.distinct(index='id').count()
|
||||
ot: 100
|
||||
|
||||
- cd: tbl.index_create('a')
|
||||
ot: {'created':1}
|
||||
|
||||
- rb: tbl.index_create('m', multi:true){|row| [row['a'], row['a']]}
|
||||
ot: {'created':1}
|
||||
|
||||
- rb: tbl.index_create('m2', multi:true){|row| [1, 2]}
|
||||
ot: {'created':1}
|
||||
|
||||
- cd: tbl.index_wait('a').pluck('index', 'ready')
|
||||
ot: [{'index':'a','ready':true}]
|
||||
|
||||
- rb: tbl.index_wait('m').pluck('index', 'ready')
|
||||
ot: [{'index':'m','ready':true}]
|
||||
|
||||
- rb: tbl.index_wait('m2').pluck('index', 'ready')
|
||||
ot: [{'index':'m2','ready':true}]
|
||||
|
||||
- cd: tbl.between(0, 1, {index:'a'}).distinct().count()
|
||||
py: tbl.between(0, 1, index='a').distinct().count()
|
||||
ot: 25
|
||||
|
||||
- cd: tbl.between(0, 1, {index:'a'}).distinct({index:'id'}).count()
|
||||
py: tbl.between(0, 1, index='a').distinct(index='id').count()
|
||||
ot: 25
|
||||
|
||||
- rb: tbl.between(0, 1, {index:'m'}).count()
|
||||
ot: 50
|
||||
|
||||
- rb: tbl.between(0, 1, {index:'m'}).distinct().count()
|
||||
ot: 25
|
||||
|
||||
- rb: tbl.orderby({index:'m'}).count()
|
||||
ot: 200
|
||||
|
||||
- rb: tbl.orderby({index:'m'}).distinct().count()
|
||||
ot: 100
|
||||
|
||||
- rb: tbl.orderby({index:r.desc('m')}).count()
|
||||
ot: 200
|
||||
|
||||
- rb: tbl.orderby({index:r.desc('m')}).distinct().count()
|
||||
ot: 100
|
||||
|
||||
- rb: tbl.between(1, 3, {index:'m2'}).count()
|
||||
ot: 200
|
||||
|
||||
- rb: tbl.between(1, 3, {index:'m2'}).distinct().count()
|
||||
ot: 100
|
||||
|
||||
- rb: tbl.between(1, 3, {index:'m2'}).orderby(index:r.desc('m2')).distinct().count()
|
||||
ot: 100
|
||||
|
||||
- rb: tbl.between(0, 1, {index:'m'}).count()
|
||||
ot: 50
|
||||
|
||||
- rb: tbl.between(0, 1, {index:'m'}).distinct().count()
|
||||
ot: 25
|
||||
|
||||
- cd: tbl.distinct({index:'a'}).type_of()
|
||||
py: tbl.distinct(index='a').type_of()
|
||||
ot: "STREAM"
|
||||
|
||||
- cd: tbl.distinct({index:'a'}).count()
|
||||
py: tbl.distinct(index='a').count()
|
||||
ot: 4
|
||||
|
||||
- cd: tbl.group()
|
||||
ot: err('ReqlQueryLogicError', 'Cannot group by nothing.', [])
|
||||
|
||||
- py: tbl.group(index='id').count()
|
||||
js: tbl.group({index:'id'}).count()
|
||||
cd: tbl.group(index:'id').count
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: ({'$reql_type$':'GROUPED_DATA', 'data':[[0, 1], [1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 1], [8, 1], [9, 1], [10, 1], [11, 1], [12, 1], [13, 1], [14, 1], [15, 1], [16, 1], [17, 1], [18, 1], [19, 1], [20, 1], [21, 1], [22, 1], [23, 1], [24, 1], [25, 1], [26, 1], [27, 1], [28, 1], [29, 1], [30, 1], [31, 1], [32, 1], [33, 1], [34, 1], [35, 1], [36, 1], [37, 1], [38, 1], [39, 1], [40, 1], [41, 1], [42, 1], [43, 1], [44, 1], [45, 1], [46, 1], [47, 1], [48, 1], [49, 1], [50, 1], [51, 1], [52, 1], [53, 1], [54, 1], [55, 1], [56, 1], [57, 1], [58, 1], [59, 1], [60, 1], [61, 1], [62, 1], [63, 1], [64, 1], [65, 1], [66, 1], [67, 1], [68, 1], [69, 1], [70, 1], [71, 1], [72, 1], [73, 1], [74, 1], [75, 1], [76, 1], [77, 1], [78, 1], [79, 1], [80, 1], [81, 1], [82, 1], [83, 1], [84, 1], [85, 1], [86, 1], [87, 1], [88, 1], [89, 1], [90, 1], [91, 1], [92, 1], [93, 1], [94, 1], [95, 1], [96, 1], [97, 1], [98, 1], [99, 1]]})
|
||||
|
||||
- py: tbl.group(index='a').count()
|
||||
js: tbl.group({index:'a'}).count()
|
||||
rb: tbl.group(index:'a').count
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[0, 25], [1, 25], [2, 25], [3, 25]]}
|
||||
|
||||
- py: tbl.group('a', index='id').count()
|
||||
js: tbl.group('a', {index:'id'}).count()
|
||||
rb: tbl.group('a', index:'id').count
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[[0, 0], 1], [[0, 4], 1], [[0, 8], 1], [[0, 12], 1], [[0, 16], 1], [[0, 20], 1], [[0, 24], 1], [[0, 28], 1], [[0, 32], 1], [[0, 36], 1], [[0, 40], 1], [[0, 44], 1], [[0, 48], 1], [[0, 52], 1], [[0, 56], 1], [[0, 60], 1], [[0, 64], 1], [[0, 68], 1], [[0, 72], 1], [[0, 76], 1], [[0, 80], 1], [[0, 84], 1], [[0, 88], 1], [[0, 92], 1], [[0, 96], 1], [[1, 1], 1], [[1, 5], 1], [[1, 9], 1], [[1, 13], 1], [[1, 17], 1], [[1, 21], 1], [[1, 25], 1], [[1, 29], 1], [[1, 33], 1], [[1, 37], 1], [[1, 41], 1], [[1, 45], 1], [[1, 49], 1], [[1, 53], 1], [[1, 57], 1], [[1, 61], 1], [[1, 65], 1], [[1, 69], 1], [[1, 73], 1], [[1, 77], 1], [[1, 81], 1], [[1, 85], 1], [[1, 89], 1], [[1, 93], 1], [[1, 97], 1], [[2, 2], 1], [[2, 6], 1], [[2, 10], 1], [[2, 14], 1], [[2, 18], 1], [[2, 22], 1], [[2, 26], 1], [[2, 30], 1], [[2, 34], 1], [[2, 38], 1], [[2, 42], 1], [[2, 46], 1], [[2, 50], 1], [[2, 54], 1], [[2, 58], 1], [[2, 62], 1], [[2, 66], 1], [[2, 70], 1], [[2, 74], 1], [[2, 78], 1], [[2, 82], 1], [[2, 86], 1], [[2, 90], 1], [[2, 94], 1], [[2, 98], 1], [[3, 3], 1], [[3, 7], 1], [[3, 11], 1], [[3, 15], 1], [[3, 19], 1], [[3, 23], 1], [[3, 27], 1], [[3, 31], 1], [[3, 35], 1], [[3, 39], 1], [[3, 43], 1], [[3, 47], 1], [[3, 51], 1], [[3, 55], 1], [[3, 59], 1], [[3, 63], 1], [[3, 67], 1], [[3, 71], 1], [[3, 75], 1], [[3, 79], 1], [[3, 83], 1], [[3, 87], 1], [[3, 91], 1], [[3, 95], 1], [[3, 99], 1]]}
|
||||
|
||||
- py: tbl.group('a', index='a').count()
|
||||
js: tbl.group('a', {index:'a'}).count()
|
||||
rb: tbl.group('a', index:'a').count
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[[0, 0], 25], [[1, 1], 25], [[2, 2], 25], [[3, 3], 25]]}
|
||||
|
||||
- rb: tbl.group('a', lambda {|row| 'f'}, lambda {|row| []}, lambda {|row| [{}, [0], null, 0]}, multi:true).count
|
||||
py: tbl.group('a', lambda row:'f', lambda row:[], lambda row:[{}, [0], null, 0], multi=True).count()
|
||||
js: tbl.group('a', function(row){return 'f';}, function(row){return [];}, function(row){return [{}, [0], null, 0];}, {multi:true}).count()
|
||||
runopts:
|
||||
group_format: 'raw'
|
||||
ot: {'$reql_type$':'GROUPED_DATA', 'data':[[[0, "f", null, [0]], 25], [[0, "f", null, null], 25], [[0, "f", null, 0], 25], [[0, "f", null, {}], 25], [[1, "f", null, [0]], 25], [[1, "f", null, null], 25], [[1, "f", null, 0], 25], [[1, "f", null, {}], 25], [[2, "f", null, [0]], 25], [[2, "f", null, null], 25], [[2, "f", null, 0], 25], [[2, "f", null, {}], 25], [[3, "f", null, [0]], 25], [[3, "f", null, null], 25], [[3, "f", null, 0], 25], [[3, "f", null, {}], 25]]}
|
||||
|
||||
- cd: tbl.group('a').count().ungroup()
|
||||
ot: [{'group':0, 'reduction':25}, {'group':1, 'reduction':25}, {'group':2, 'reduction':25}, {'group':3, 'reduction':25}]
|
||||
|
||||
- cd: tbl.group('a').ungroup()['group']
|
||||
js: tbl.group('a').ungroup()('group')
|
||||
ot: [0, 1, 2, 3]
|
||||
|
||||
- py: tbl.order_by(index='id').limit(16).group('a','a').map(r.row['id']).sum().ungroup()
|
||||
js: tbl.order_by({index:'id'}).limit(16).group('a','a').map(r.row('id')).sum().ungroup()
|
||||
rb: tbl.order_by(index:'id').limit(16).group('a','a').map{|row| row['id']}.sum().ungroup()
|
||||
ot: [{'group':[0,0],'reduction':24},{'group':[1,1],'reduction':28},{'group':[2,2],'reduction':32},{'group':[3,3],'reduction':36}]
|
||||
|
||||
- cd: tbl.group('a', null).count().ungroup()
|
||||
ot: [{'group':[0,null],'reduction':25},{'group':[1,null],'reduction':25},{'group':[2,null],'reduction':25},{'group':[3,null],'reduction':25}]
|
||||
|
||||
- py: tbl.group('a', lambda row:[1,'two'], multi=True).count().ungroup()
|
||||
js: tbl.group('a', function(row){return [1,'two']},{multi:true}).count().ungroup()
|
||||
rb: tbl.group('a', lambda {|row| [1,'two']}, multi:true).count().ungroup()
|
||||
ot: [{'group':[0,1],'reduction':25},{'group':[0,'two'],'reduction':25},{'group':[1,1],'reduction':25},{'group':[1,'two'],'reduction':25},{'group':[2,1],'reduction':25},{'group':[2,'two'],'reduction':25},{'group':[3,1],'reduction':25},{'group':[3,'two'],'reduction':25}]
|
||||
|
||||
# proper test for seq.count()
|
||||
- cd: tbl.count()
|
||||
ot: 100
|
||||
|
||||
- js: tbl.filter(r.row('a').ne(1).and(r.row('id').gt(10))).update({'b':r.row('a').mul(10)})
|
||||
py: tbl.filter(r.row['a'].ne(1).and_(r.row['id'].gt(10))).update({'b':r.row['a'] * 10})
|
||||
rb: tbl.filter{|row| row['a'].ne(1).and(row['id'].gt(10))}.update{|row| {'b'=>row['a'] * 10}}
|
||||
ot: partial({'errors':0, 'replaced':67})
|
||||
|
||||
- cd: tbl.group('b').count()
|
||||
ot:
|
||||
cd: {null:33, 0:22, 20:22, 30:23}
|
||||
js: [{"group":null, "reduction":33}, {"group":0, "reduction":22}, {"group":20, "reduction":22}, {"group":30, "reduction":23}]
|
||||
|
||||
- cd: tbl.group('a').sum('b')
|
||||
ot:
|
||||
cd: {0:0, 2:440, 3:690}
|
||||
js: [{"group":0, "reduction":0}, {"group":2, "reduction":440}, {"group":3, "reduction":690}]
|
||||
|
||||
- cd: tbl.group('a').avg('b')
|
||||
ot:
|
||||
cd: {0:0, 2:20, 3:30}
|
||||
js: [{"group":0, "reduction":0}, {"group":2, "reduction":20}, {"group":3, "reduction":30}]
|
||||
|
||||
- cd: tbl.order_by('id').group('a').min('b')
|
||||
ot:
|
||||
cd: {0:{"a":0, "b":0, "id":12}, 2:{"a":2, "b":20, "id":14}, 3:{"a":3, "b":30, "id":11}}
|
||||
js: [{"group":0, "reduction":{"a":0, "b":0, "id":12}}, {"group":2, "reduction":{"a":2, "b":20, "id":14}}, {"group":3, "reduction":{"a":3, "b":30, "id":11}}]
|
||||
|
||||
- cd: tbl.order_by('id').group('a').min('id')
|
||||
ot:
|
||||
cd: {0:{"a":0, "id":0}, 1:{"a":1, "id":1}, 2:{"a":2, "id":2}, 3:{"a":3, "id":3}}
|
||||
js: [{"group":0, "reduction":{"a":0, "id":0}}, {"group":1, "reduction":{"a":1, "id":1}}, {"group":2, "reduction":{"a":2, "id":2}}, {"group":3, "reduction":{"a":3, "id":3}}]
|
||||
|
||||
- cd: tbl.order_by('id').group('a').max('b')
|
||||
ot:
|
||||
cd: {0:{"a":0, "b":0, "id":12}, 2:{"a":2, "b":20, "id":14}, 3:{"a":3, "b":30, "id":11}}
|
||||
js: [{"group":0, "reduction":{"a":0,"b":0, "id":12}}, {"group":2, "reduction":{"a":2, "b":20, "id":14}}, {"group":3, "reduction":{"a":3, "b":30, "id":11}}]
|
||||
|
||||
- cd: tbl.min()
|
||||
ot: {'a':0,'id':0}
|
||||
- py: tbl.min(index='id')
|
||||
rb: tbl.min(index:'id')
|
||||
js: tbl.min({index:'id'})
|
||||
ot: {'a':0,'id':0}
|
||||
- py: tbl.min(index='a')
|
||||
rb: tbl.min(index:'a')
|
||||
js: tbl.min({index:'a'})
|
||||
ot: {'a':0,'id':0}
|
||||
|
||||
- cd: tbl.max().without('b')
|
||||
ot: {'a':3,'id':99}
|
||||
- py: tbl.max(index='id').without('b')
|
||||
rb: tbl.max(index:'id').without('b')
|
||||
js: tbl.max({index:'id'}).without('b')
|
||||
ot: {'a':3,'id':99}
|
||||
- py: tbl.max(index='a').without('b')
|
||||
rb: tbl.max(index:'a').without('b')
|
||||
js: tbl.max({index:'a'}).without('b')
|
||||
ot: {'a':3,'id':99}
|
||||
|
||||
|
||||
# Infix
|
||||
|
||||
- cd: r.group([ 1, 1, 2 ], r.row).count().ungroup()
|
||||
rb: r.group([ 1, 1, 2 ]) {|row| row}.count().ungroup()
|
||||
ot: [ {'group': 1, 'reduction': 2}, {'group': 2, 'reduction': 1} ]
|
||||
- cd:
|
||||
- r.count([ 1, 2 ])
|
||||
- r.count([ 1, 2 ], r.row.gt(0))
|
||||
rb:
|
||||
- r.count([ 1, 2 ])
|
||||
- r.count([ 1, 2 ]) {|row| row.gt(0)}
|
||||
ot: 2
|
||||
- cd:
|
||||
- r.sum([ 1, 2 ])
|
||||
- r.sum([ 1, 2 ], r.row)
|
||||
rb: r.sum([ 1, 2 ])
|
||||
ot: 3
|
||||
- cd:
|
||||
- r.avg([ 1, 2 ])
|
||||
- r.avg([ 1, 2 ], r.row)
|
||||
rb: r.avg([ 1, 2 ])
|
||||
ot: 1.5
|
||||
- cd:
|
||||
- r.min([ 1, 2 ])
|
||||
- r.min([ 1, 2 ], r.row)
|
||||
rb: r.min([ 1, 2 ])
|
||||
ot: 1
|
||||
- cd:
|
||||
- r.max([ 1, 2 ])
|
||||
- r.max([ 1, 2 ], r.row)
|
||||
rb: r.max([ 1, 2 ])
|
||||
ot: 2
|
||||
- cd: r.distinct([ 1, 1 ])
|
||||
ot: [ 1 ]
|
||||
- cd:
|
||||
- r.contains([ 1, 2 ])
|
||||
- r.contains([ 1, 2 ], r.row.gt(0))
|
||||
rb:
|
||||
- r.contains([ 1, 2 ])
|
||||
- r.contains([ 1, 2 ]) {|row| row.gt(0)}
|
||||
ot: true
|
316
ext/librethinkdbxx/test/upstream/arity.yaml
Normal file
316
ext/librethinkdbxx/test/upstream/arity.yaml
Normal file
@ -0,0 +1,316 @@
|
||||
desc: Test the arity of every function
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# TODO: add test for slice (should require one or two arguments)
|
||||
|
||||
# Set up some data
|
||||
- def: db = r.db('test')
|
||||
- def: obj = r.expr({'a':1})
|
||||
- def: array = r.expr([1])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 0 arguments but found 1.", [])
|
||||
cd: r.db_list(1)
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
cd:
|
||||
- tbl.zip(1)
|
||||
- tbl.is_empty(1)
|
||||
- obj.keys(1)
|
||||
|
||||
- cd: tbl.distinct(1)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
js: err("ReqlCompileError", "Expected 0 arguments (not including options) but found 1.", [])
|
||||
|
||||
- cd: tbl.delete(1)
|
||||
ot:
|
||||
js: err("ReqlCompileError", "Expected 0 arguments (not including options) but found 1.", [])
|
||||
cd: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
|
||||
- rb: db.table_list(1)
|
||||
ot: err("ReqlCompileError", "Expected between 0 and 1 arguments but found 2.", [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 1 argument but found 0.", [])
|
||||
cd:
|
||||
- r.db_create()
|
||||
- r.db_drop()
|
||||
- r.db()
|
||||
- r.floor()
|
||||
- r.ceil()
|
||||
- r.round()
|
||||
|
||||
- cd: r.error()
|
||||
ot: err("ReqlQueryLogicError", "Empty ERROR term outside a default block.", [])
|
||||
|
||||
- cd: r.js()
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 1 argument but found 0.", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [])
|
||||
|
||||
- cd: r.expr()
|
||||
ot:
|
||||
py3.3: err_regex('TypeError', '.* missing 1 required positional argument.*', [])
|
||||
py3.4: err_regex('TypeError', '.* missing 1 required positional argument.*', [])
|
||||
py3.5: err_regex('TypeError', '.* missing 1 required positional argument.*', [])
|
||||
py: err_regex('TypeError', ".* takes at least 1 (?:positional )?argument \(0 given\)", [])
|
||||
js: err("ReqlCompileError", "Expected between 1 and 2 arguments but found 0.", [])
|
||||
rb: err("ArgumentError", 'wrong number of arguments (0 for 1)', [])
|
||||
rb2: err("ArgumentError", 'wrong number of arguments (0 for 1..2)', [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 2 arguments but found 1.", [])
|
||||
cd:
|
||||
- tbl.concat_map()
|
||||
- tbl.skip()
|
||||
- tbl.limit()
|
||||
- array.append()
|
||||
- array.prepend()
|
||||
- array.difference()
|
||||
- array.set_insert()
|
||||
- array.set_union()
|
||||
- array.set_intersection()
|
||||
- array.set_difference()
|
||||
- tbl.nth()
|
||||
- tbl.for_each()
|
||||
- tbl.get()
|
||||
- r.expr([]).sample()
|
||||
- tbl.offsets_of()
|
||||
- ot: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
cd:
|
||||
- r.db_create(1,2)
|
||||
- r.db_drop(1,2)
|
||||
- r.db(1,2)
|
||||
- r.floor(1, 2)
|
||||
- r.ceil(1, 2)
|
||||
- r.round(1, 2)
|
||||
|
||||
- cd: tbl.filter()
|
||||
ot:
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [])
|
||||
cd: err("ReqlCompileError", "Expected 2 arguments but found 1.", [])
|
||||
|
||||
- cd: r.error(1, 2)
|
||||
ot: err("ReqlCompileError", "Expected between 0 and 1 arguments but found 2.", [])
|
||||
|
||||
- cd: db.table_drop()
|
||||
ot: err("ReqlQueryLogicError", "Expected type DATUM but found DATABASE:", [])
|
||||
|
||||
|
||||
- cd: db.table_create()
|
||||
ot:
|
||||
cd: err("ReqlQueryLogicError", "Expected type DATUM but found DATABASE:", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [])
|
||||
|
||||
- cd: r.js(1,2)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 2.", [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 2 arguments but found 3.", [])
|
||||
cd:
|
||||
- tbl.concat_map(1,2)
|
||||
- tbl.skip(1,2)
|
||||
- tbl.limit(1,2)
|
||||
- array.append(1,2)
|
||||
- array.prepend(1,2)
|
||||
- array.difference([], [])
|
||||
- array.set_insert(1,2)
|
||||
- array.set_union([1],[2])
|
||||
- array.set_intersection([1],[2])
|
||||
- array.set_difference([1],[2])
|
||||
- tbl.nth(1,2)
|
||||
- tbl.for_each(1,2)
|
||||
- tbl.get(1,2)
|
||||
- r.expr([]).sample(1,2)
|
||||
- tbl.offsets_of(1,2)
|
||||
|
||||
- cd: tbl.filter(1,2,3)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 2 arguments but found 4.", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 3.", [])
|
||||
|
||||
- cd: db.table_drop(1,2)
|
||||
ot: err("ReqlCompileError", "Expected between 1 and 2 arguments but found 3.", [])
|
||||
|
||||
- cd: r.expr([]).delete_at()
|
||||
ot: err("ReqlCompileError", "Expected between 2 and 3 arguments but found 1.", [])
|
||||
|
||||
- cd: db.table_create(1,2)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected between 1 and 2 arguments but found 3.", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 2.", [])
|
||||
|
||||
- cd: tbl.count(1,2)
|
||||
ot: err("ReqlCompileError", "Expected between 1 and 2 arguments but found 3.", [])
|
||||
|
||||
- ot:
|
||||
cd: err("ReqlCompileError", "Expected 2 arguments but found 1.", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [])
|
||||
cd:
|
||||
- tbl.update()
|
||||
- tbl.replace()
|
||||
- tbl.insert()
|
||||
|
||||
- cd: db.table()
|
||||
ot:
|
||||
cd: err("ReqlQueryLogicError", "Expected type DATUM but found DATABASE:", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [])
|
||||
|
||||
- cd: tbl.reduce()
|
||||
ot: err("ReqlCompileError", "Expected 2 arguments but found 1.", [])
|
||||
|
||||
- cd: tbl.eq_join()
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 3 arguments but found 1.", [])
|
||||
js: err("ReqlCompileError", "Expected 2 arguments (not including options) but found 0.", [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 3 arguments but found 1.", [])
|
||||
cd:
|
||||
- tbl.inner_join()
|
||||
- tbl.outer_join()
|
||||
- r.expr([]).insert_at()
|
||||
- r.expr([]).splice_at()
|
||||
- r.expr([]).change_at()
|
||||
|
||||
- cd: tbl.eq_join(1)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 3 arguments but found 2.", [])
|
||||
js: err("ReqlCompileError", "Expected 2 arguments (not including options) but found 1.", [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 3 arguments but found 2.", [])
|
||||
cd:
|
||||
- tbl.inner_join(1)
|
||||
- tbl.outer_join(1)
|
||||
- r.expr([]).insert_at(1)
|
||||
- r.expr([]).splice_at(1)
|
||||
- r.expr([]).change_at(1)
|
||||
|
||||
- cd: tbl.eq_join(1,2,3,4)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 3 arguments but found 5.", [])
|
||||
js: err("ReqlCompileError", "Expected 2 arguments (not including options) but found 4.", [])
|
||||
|
||||
- ot: err("ReqlCompileError", "Expected 3 arguments but found 4.", [])
|
||||
cd:
|
||||
- tbl.inner_join(1,2,3)
|
||||
- tbl.outer_join(1,2,3)
|
||||
- r.expr([]).insert_at(1, 2, 3)
|
||||
- r.expr([]).splice_at(1, 2, 3)
|
||||
- r.expr([]).change_at(1, 2, 3)
|
||||
|
||||
- cd: tbl.map()
|
||||
ot:
|
||||
cd: err('ReqlCompileError', "Expected 2 or more arguments but found 1.", [])
|
||||
js: err('ReqlCompileError', "Expected 1 or more arguments but found 0.", [])
|
||||
|
||||
- cd: r.branch(1,2)
|
||||
ot: err("ReqlCompileError", "Expected 3 or more arguments but found 2.", [])
|
||||
- cd: r.branch(1,2,3,4)
|
||||
ot: err("ReqlQueryLogicError", "Cannot call `branch` term with an even number of arguments.", [])
|
||||
|
||||
- cd: r.expr({})[1,2]
|
||||
js: r.expr({})(1,2)
|
||||
ot:
|
||||
js: err('ReqlCompileError', "Expected 1 argument but found 2.", [])
|
||||
py: err('ReqlQueryLogicError', 'Expected NUMBER or STRING as second argument to `bracket` but found ARRAY.')
|
||||
rb: err('ArgumentError', 'wrong number of arguments (2 for 1)')
|
||||
|
||||
- cd: tbl.insert([{'id':0},{'id':1},{'id':2},{'id':3},{'id':4},{'id':5},{'id':6},{'id':7},{'id':8},{'id':9}]).get_field('inserted')
|
||||
ot: 10
|
||||
|
||||
- cd: tbl.get_all(0, 1, 2).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
- cd: tbl.get_all(r.args([]), 0, 1, 2).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
- cd: tbl.get_all(r.args([0]), 1, 2).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
- cd: tbl.get_all(r.args([0, 1]), 2).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
- cd: tbl.get_all(r.args([0, 1, 2])).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
- cd: tbl.get_all(r.args([0]), 1, r.args([2])).get_field('id')
|
||||
ot: bag([0, 1, 2])
|
||||
|
||||
# Make sure partial-evaluation still works
|
||||
|
||||
- cd: r.branch(true, 1, r.error("a"))
|
||||
ot: 1
|
||||
|
||||
- cd: r.branch(r.args([true, 1]), r.error("a"))
|
||||
ot: 1
|
||||
|
||||
- cd: r.expr(true).branch(1, 2)
|
||||
ot: 1
|
||||
|
||||
- cd: r.branch(r.args([true, 1, r.error("a")]))
|
||||
ot: err("ReqlUserError", "a", [])
|
||||
|
||||
# Make sure our grouped data hack still works
|
||||
|
||||
- rb: tbl.group{|row| row['id'] % 2}.count({'id':0}).ungroup()
|
||||
py: tbl.group(lambda row:row['id'].mod(2)).count({'id':0}).ungroup()
|
||||
js: tbl.group(r.row('id').mod(2)).count({'id':0}).ungroup()
|
||||
ot: ([{'group':0, 'reduction':1}])
|
||||
|
||||
- rb: tbl.group{|row| row['id'] % 2}.count(r.args([{'id':0}])).ungroup()
|
||||
py: tbl.group(r.row['id'].mod(2)).count(r.args([{'id':0}])).ungroup()
|
||||
js: tbl.group(r.row('id').mod(2)).count(r.args([{'id':0}])).ungroup()
|
||||
ot: ([{'group':0, 'reduction':1}])
|
||||
|
||||
# Make sure `r.literal` still works
|
||||
|
||||
- cd: r.expr({'a':{'b':1}}).merge(r.args([{'a':r.literal({'c':1})}]))
|
||||
ot: ({'a':{'c':1}})
|
||||
|
||||
- cd: r.http("httpbin.org/get","bad_param")
|
||||
ot:
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 2.", [])
|
||||
rb: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
py: err_regex('TypeError', ".*takes exactly 1 argument \(2 given\)", [])
|
||||
py3.0: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3.1: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3.2: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3: err_regex('TypeError', ".*takes 1 positional argument but 2 were given", [])
|
||||
|
||||
- cd: r.binary("1", "2")
|
||||
ot:
|
||||
py: err_regex('TypeError', ".*takes exactly 1 argument \(2 given\)", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
rb: err("ReqlCompileError", "Expected 1 argument but found 2.", [])
|
||||
py3.0: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3.1: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3.2: err_regex('TypeError', ".*takes exactly 1 positional argument \(2 given\)", [])
|
||||
py3: err_regex('TypeError', ".*takes 1 positional argument but 2 were given", [])
|
||||
- cd: r.binary()
|
||||
ot:
|
||||
py: err_regex('TypeError', ".*takes exactly 1 argument \(0 given\)", [])
|
||||
js: err("ReqlCompileError", "Expected 1 argument but found 0.", [])
|
||||
rb: err("ReqlCompileError", "Expected 1 argument but found 0.", [])
|
||||
py3.0: err_regex('TypeError', ".*takes exactly 1 positional argument \(0 given\)", [])
|
||||
py3.1: err_regex('TypeError', ".*takes exactly 1 positional argument \(0 given\)", [])
|
||||
py3.2: err_regex('TypeError', ".*takes exactly 1 argument \(0 given\)", [])
|
||||
py3: err_regex('TypeError', ".* missing 1 required positional argument.*", [])
|
||||
|
||||
# TODO: Math and logic
|
||||
# TODO: Upper bound on optional arguments
|
||||
# TODO: between, merge, slice
|
||||
|
||||
- cd: tbl.index_rename('idx')
|
||||
ot:
|
||||
cd: err('ReqlCompileError','Expected 3 arguments but found 2.',[])
|
||||
js: err('ReqlCompileError','Expected 2 arguments (not including options) but found 1.',[])
|
||||
|
||||
- cd: tbl.index_rename('idx','idx2','idx3')
|
||||
ot:
|
||||
cd: err('ReqlCompileError','Expected 3 arguments but found 4.',[])
|
||||
js: err('ReqlCompileError','Expected 2 arguments (not including options) but found 3.',[])
|
||||
|
||||
- cd:
|
||||
- r.now('foo')
|
||||
- r.now(r.args([1,2,3]))
|
||||
ot: err('ReqlCompileError','NOW does not accept any args.')
|
142
ext/librethinkdbxx/test/upstream/changefeeds/edge.yaml
Normal file
142
ext/librethinkdbxx/test/upstream/changefeeds/edge.yaml
Normal file
@ -0,0 +1,142 @@
|
||||
desc: Test edge cases of changefeed operations
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
- def: common_prefix = r.expr([0,1,2,3,4,5,6,7,8])
|
||||
|
||||
- js: tbl.indexCreate('sindex', function (row) { return common_prefix.append(row('value')); })
|
||||
py: tbl.index_create('sindex', lambda row:common_prefix.append(row['value']))
|
||||
rb: tbl.index_create('sindex'){ |row| common_prefix.append(row['value']) }
|
||||
ot: ({'created':1})
|
||||
- cd: tbl.index_wait('sindex')
|
||||
|
||||
# create target values
|
||||
- cd: pre = r.range(7).coerce_to('array').add(r.range(10,70).coerce_to('array')).append(100).map(r.row.coerce_to('string'))
|
||||
rb: pre = r.range(7).coerce_to('array').add(r.range(10,70).coerce_to('array')).append(100).map{ |row| row.coerce_to('string') }
|
||||
- cd: mid = r.range(2,9).coerce_to('array').add(r.range(20,90).coerce_to('array')).map(r.row.coerce_to('string'))
|
||||
rb: mid = r.range(2,9).coerce_to('array').add(r.range(20,90).coerce_to('array')).map{ |row| row.coerce_to('string') }
|
||||
- cd: post = r.range(3,10).coerce_to('array').add(r.range(30,100).coerce_to('array')).map(r.row.coerce_to('string'))
|
||||
rb: post = r.range(3,10).coerce_to('array').add(r.range(30,100).coerce_to('array')).map{ |row| row.coerce_to('string') }
|
||||
|
||||
- cd: erroredres = r.range(2).coerce_to('array').add(r.range(10, 20).coerce_to('array')).append(100).map(r.row.coerce_to('string'))
|
||||
rb: erroredres = r.range(2).coerce_to('array').add(r.range(10, 20).coerce_to('array')).append(100).map{ |val| val.coerce_to('string') }
|
||||
|
||||
# Start overlapping changefeeds
|
||||
- js: pre_changes = tbl.between(r.minval, commonPrefix.append('7'), {index:'sindex'}).changes({squash:false}).limit(pre.length)('new_val')('value')
|
||||
py: pre_changes = tbl.between(r.minval, common_prefix.append('7'), index='sindex').changes(squash=False).limit(len(pre))['new_val']['value']
|
||||
rb: pre_changes = tbl.between(r.minval, common_prefix.append('7'), index:'sindex').changes(squash:false).limit(pre.length)['new_val']['value']
|
||||
- js: mid_changes = tbl.between(commonPrefix.append('2'), common_prefix.append('9'), {index:'sindex'}).changes({squash:false}).limit(post.length)('new_val')('value')
|
||||
py: mid_changes = tbl.between(common_prefix.append('2'), common_prefix.append('9'), index='sindex').changes(squash=False).limit(len(post))['new_val']['value']
|
||||
rb: mid_changes = tbl.between(common_prefix.append('2'), common_prefix.append('9'), index:'sindex').changes(squash:false).limit(post.length)['new_val']['value']
|
||||
- js: post_changes = tbl.between(commonPrefix.append('3'), r.maxval, {index:'sindex'}).changes({squash:false}).limit(mid.length)('new_val')('value')
|
||||
py: post_changes = tbl.between(common_prefix.append('3'), r.maxval, index='sindex').changes(squash=False).limit(len(mid))['new_val']['value']
|
||||
rb: post_changes = tbl.between(common_prefix.append('3'), r.maxval, index:'sindex').changes(squash:false).limit(mid.length)['new_val']['value']
|
||||
|
||||
# Start changefeeds with non-existence errors
|
||||
|
||||
- js: premap_changes1 = tbl.map(r.branch(r.row('value').lt('2'), r.row, r.row("dummy"))).changes({squash:false}).limit(erroredres.length)('new_val')('value')
|
||||
py: premap_changes1 = tbl.map(r.branch(r.row['value'].lt('2'), r.row, r.row["dummy"])).changes(squash=False).limit(len(erroredres))['new_val']['value']
|
||||
rb: premap_changes1 = tbl.map{ |row| r.branch(row['value'].lt('2'), row, row["dummy"]) }.changes(squash:false).limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: postmap_changes1 = tbl.changes({squash:false}).map(r.branch(r.row('new_val')('value').lt('2'), r.row, r.row("dummy"))).limit(erroredres.length)('new_val')('value')
|
||||
py: postmap_changes1 = tbl.changes(squash=False).map(r.branch(r.row['new_val']['value'].lt('2'), r.row, r.row["dummy"])).limit(len(erroredres))['new_val']['value']
|
||||
rb: postmap_changes1 = tbl.changes(squash:false).map{ |row| r.branch(row['new_val']['value'].lt('2'), row, row["dummy"]) }.limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: prefilter_changes1 = tbl.filter(r.branch(r.row('value').lt('2'), true, r.row("dummy"))).changes({squash:false}).limit(erroredres.length)('new_val')('value')
|
||||
py: prefilter_changes1 = tbl.filter(r.branch(r.row['value'].lt('2'), True, r.row["dummy"])).changes(squash=False).limit(len(erroredres))['new_val']['value']
|
||||
rb: prefilter_changes1 = tbl.filter{ |row| r.branch(row['value'].lt('2'), true, row["dummy"]) }.changes(squash:false).limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: postfilter_changes1 = tbl.changes({squash:false}).filter(r.branch(r.row('new'+'_'+'val')('value').lt('2'), true, r.row("dummy"))).limit(erroredres.length)('new_val')('value')
|
||||
py: postfilter_changes1 = tbl.changes(squash=False).filter(r.branch(r.row['new_val']['value'].lt('2'), True, r.row["dummy"])).limit(len(erroredres))['new_val']['value']
|
||||
rb: postfilter_changes1 = tbl.changes(squash:false).filter{ |row| r.branch(row['new_val']['value'].lt('2'), true, row["dummy"]) }.limit(erroredres.length)['new_val']['value']
|
||||
|
||||
# Start changefeeds with runtime errors
|
||||
|
||||
- js: premap_changes2 = tbl.map(r.branch(r.row('value').lt('2'), r.row, r.expr([]).nth(1))).changes({squash:false}).limit(erroredres.length)('new_val')('value')
|
||||
py: premap_changes2 = tbl.map(r.branch(r.row['value'].lt('2'), r.row, r.expr([])[1])).changes(squash=False).limit(len(erroredres))['new_val']['value']
|
||||
rb: premap_changes2 = tbl.map{ |row| r.branch(row['value'].lt('2'), row, r.expr([])[1]) }.changes(squash:false).limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: postmap_changes2 = tbl.changes({squash:false}).map(r.branch(r.row('new'+'_'+'val')('value').lt('2'), r.row, r.expr([]).nth(1))).limit(erroredres.length)('new_val')('value')
|
||||
py: postmap_changes2 = tbl.changes(squash=False).map(r.branch(r.row['new_val']['value'].lt('2'), r.row, r.expr([])[1])).limit(len(erroredres))['new_val']['value']
|
||||
rb: postmap_changes2 = tbl.changes(squash:false).map{ |row| r.branch(row['new_val']['value'].lt('2'), row, r.expr([])[1]) }.limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: prefilter_changes2 = tbl.filter(r.branch(r.row('value').lt('2'), true, r.expr([]).nth(1))).changes({squash:false}).limit(erroredres.length)('new_val')('value')
|
||||
py: prefilter_changes2 = tbl.filter(r.branch(r.row['value'].lt('2'), True, r.expr([])[1])).changes(squash=False).limit(len(erroredres))['new_val']['value']
|
||||
rb: prefilter_changes2 = tbl.filter{ |row| r.branch(row['value'].lt('2'), true, r.expr([])[1]) }.changes(squash:false).limit(erroredres.length)['new_val']['value']
|
||||
|
||||
- js: postfilter_changes2 = tbl.changes({squash:false}).filter(r.branch(r.row('new'+'_'+'val')('value').lt('2'), true, r.expr([]).nth(1))).limit(erroredres.length)('new_val')('value')
|
||||
py: postfilter_changes2 = tbl.changes(squash=False).filter(r.branch(r.row['new_val']['value'].lt('2'), True, r.expr([])[1])).limit(len(erroredres))['new_val']['value']
|
||||
rb: postfilter_changes2 = tbl.changes(squash:false).filter{ |row| r.branch(row['new_val']['value'].lt('2'), true, r.expr([])[1]) }.limit(erroredres.length)['new_val']['value']
|
||||
|
||||
# Start non-deterministic changefeeds - very small chance of these hanging due to not enough results
|
||||
- def:
|
||||
py: nondetermmap = r.branch(r.random().gt(0.5), r.row, r.error("dummy"))
|
||||
js: nondetermmap = function (row) { return r.branch(r.random().gt(0.5), row, r.error("dummy")); }
|
||||
rb: nondetermmap = Proc.new { |row| r.branch(r.random().gt(0.5), row, r.error("dummy")) }
|
||||
- def:
|
||||
py: nondetermfilter = lambda row:r.random().gt(0.5)
|
||||
js: nondetermfilter = function (row) { return r.random().gt(0.5); }
|
||||
rb: nondetermfilter = Proc.new { |row| r.random().gt(0.5) }
|
||||
|
||||
- rb: tbl.map(nondetermmap).changes(squash:false)
|
||||
js: tbl.map(nondetermmap).changes({squash:false})
|
||||
py: tbl.map(nondetermmap).changes(squash=False)
|
||||
ot: err('ReqlQueryLogicError', 'Cannot call `changes` after a non-deterministic function.')
|
||||
|
||||
- rb: postmap_changes3 = tbl.changes(squash:false).map(nondetermmap).limit(100)
|
||||
js: postmap_changes3 = tbl.changes({squash:false}).map(nondetermmap).limit(100)
|
||||
py: postmap_changes3 = tbl.changes(squash=False).map(nondetermmap).limit(100)
|
||||
|
||||
- rb: tbl.filter(nondetermfilter).changes(squash:false)
|
||||
js: tbl.filter(nondetermfilter).changes({squash:false})
|
||||
py: tbl.filter(nondetermfilter).changes(squash=False)
|
||||
ot: err('ReqlQueryLogicError', 'Cannot call `changes` after a non-deterministic function.')
|
||||
|
||||
- rb: postfilter_changes3 = tbl.changes(squash:false).filter(nondetermfilter).limit(4)
|
||||
js: postfilter_changes3 = tbl.changes({squash:false}).filter(nondetermfilter).limit(4)
|
||||
py: postfilter_changes3 = tbl.changes(squash=False).filter(nondetermfilter).limit(4)
|
||||
|
||||
# Insert several rows that will and will not be returned
|
||||
- cd: tbl.insert(r.range(101).map({'id':r.uuid().coerce_to('binary').slice(0,r.random(4,24)).coerce_to('string'),'value':r.row.coerce_to('string')}))
|
||||
rb: tbl.insert(r.range(101).map{ |row| {'id'=>r.uuid().coerce_to('binary').slice(0,r.random(4,24)).coerce_to('string'),'value'=>row.coerce_to('string')}})
|
||||
ot: ({'skipped':0,'deleted':0,'unchanged':0,'errors':0,'replaced':0,'inserted':101})
|
||||
|
||||
# Check that our limited watchers have been satified
|
||||
- cd: pre_changes
|
||||
ot: bag(pre)
|
||||
|
||||
- cd: mid_changes
|
||||
ot: bag(mid)
|
||||
|
||||
- cd: post_changes
|
||||
ot: bag(post)
|
||||
|
||||
- cd: premap_changes1
|
||||
ot: bag(erroredres)
|
||||
|
||||
- cd: premap_changes2
|
||||
ot: bag(erroredres)
|
||||
|
||||
- cd: postmap_changes1
|
||||
ot: err('ReqlNonExistenceError', "No attribute `dummy` in object:")
|
||||
|
||||
- cd: postmap_changes2
|
||||
ot: err('ReqlNonExistenceError', "Index out of bounds:" + " 1")
|
||||
|
||||
- cd: postmap_changes3
|
||||
ot: err('ReqlUserError', "dummy")
|
||||
|
||||
- cd: prefilter_changes1
|
||||
ot: bag(erroredres)
|
||||
|
||||
- cd: prefilter_changes2
|
||||
ot: bag(erroredres)
|
||||
|
||||
- cd: postfilter_changes1
|
||||
ot: bag(erroredres)
|
||||
|
||||
- cd: postfilter_changes2
|
||||
ot: bag(erroredres)
|
||||
|
||||
- ot: arrlen(postfilter_changes3)
|
||||
ot: 4
|
27
ext/librethinkdbxx/test/upstream/changefeeds/geo.rb.yaml
Normal file
27
ext/librethinkdbxx/test/upstream/changefeeds/geo.rb.yaml
Normal file
@ -0,0 +1,27 @@
|
||||
desc: Geo indexed changefeed operations
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
- rb: tbl.index_create('L', {geo: true})
|
||||
ot: partial({'created': 1})
|
||||
|
||||
- rb: tbl.index_wait().count
|
||||
ot: 1
|
||||
|
||||
- def: obj11 = {id: "11", L: r.point(1,1)}
|
||||
- def: obj12 = {id: "12", L: r.point(1,2)}
|
||||
- def: obj21 = {id: "21", L: r.point(2,1)}
|
||||
- def: obj22 = {id: "22", L: r.point(2,2)}
|
||||
|
||||
# A distance of 130,000 meters from 1,1 is enough to cover 1,2 and 2,1 (~110km
|
||||
# distance) but not 2,2 (~150km distance.)
|
||||
#
|
||||
# This is useful because the S2LatLngRect bounding box passed to the shards contains
|
||||
# 2,2 yet it should not be returned in the changefeed results.
|
||||
- rb: feed = tbl.get_intersecting(r.circle(r.point(1,1), 130000), {index: "L"}).get_field("id").changes(include_initial: true)
|
||||
|
||||
- rb: tbl.insert([obj11, obj12, obj21, obj22])
|
||||
ot: partial({'errors': 0, 'inserted': 4})
|
||||
|
||||
- rb: fetch(feed, 3)
|
||||
ot: bag([{"new_val" => "11", "old_val" => nil}, {"new_val" => "12", "old_val" => nil}, {"new_val" => "21", "old_val" => nil}])
|
||||
|
38
ext/librethinkdbxx/test/upstream/changefeeds/idxcopy.yaml
Normal file
38
ext/librethinkdbxx/test/upstream/changefeeds/idxcopy.yaml
Normal file
@ -0,0 +1,38 @@
|
||||
desc: Test duplicate indexes with squashing
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
- cd: tbl.index_create('a')
|
||||
ot: partial({'created':1})
|
||||
- cd: tbl.index_wait('a')
|
||||
|
||||
- py: feed = tbl.order_by(index='a').limit(10).changes(squash=2)
|
||||
rb: feed = tbl.orderby(index:'a').limit(10).changes(squash:2).limit(9)
|
||||
js: feed = tbl.orderBy({index:'a'}).limit(10).changes({squash:2}).limit(9)
|
||||
runopts:
|
||||
# limit the number of pre-fetched rows
|
||||
max_batch_rows: 1
|
||||
|
||||
- py: tbl.insert(r.range(0, 12).map({'id':r.row, 'a':5}))
|
||||
rb: tbl.insert(r.range(0, 12).map{|row| {'id':row, 'a':5}})
|
||||
js: tbl.insert(r.range(0, 12).map(function(row){ return {'id':row, 'a':5}; }))
|
||||
ot: partial({'inserted':12, 'errors':0})
|
||||
|
||||
- py: tbl.get_all(1, 8, 9, index='id').delete()
|
||||
rb: tbl.get_all(1, 8, 9, index:'id').delete()
|
||||
js: tbl.get_all(1, 8, 9, {index:'id'}).delete()
|
||||
ot: partial({'deleted':3, 'errors':0})
|
||||
|
||||
# should be replaced with a noreplyWait
|
||||
- cd: wait(2)
|
||||
|
||||
- cd: fetch(feed)
|
||||
ot: bag([
|
||||
{"new_val":{"a":5, "id":0}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":2}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":3}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":4}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":5}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":6}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":7}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":10}, "old_val":nil},
|
||||
{"new_val":{"a":5, "id":11}, "old_val":nil}])
|
@ -0,0 +1,58 @@
|
||||
desc: Test `include_states`
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
- py: tbl.changes(squash=true, include_states=true).limit(1)
|
||||
rb: tbl.changes(squash:true, include_states:true).limit(1)
|
||||
js: tbl.changes({squash:true, includeStates:true}).limit(1)
|
||||
ot: [{'state':'ready'}]
|
||||
|
||||
- py: tbl.get(0).changes(squash=true, include_states=true, include_initial=true).limit(3)
|
||||
rb: tbl.get(0).changes(squash:true, include_states:true, include_initial:true).limit(3)
|
||||
js: tbl.get(0).changes({squash:true, includeStates:true, includeInitial:true}).limit(3)
|
||||
ot: [{'state':'initializing'}, {'new_val':null}, {'state':'ready'}]
|
||||
|
||||
- py: tbl.order_by(index='id').limit(10).changes(squash=true, include_states=true, include_initial=true).limit(2)
|
||||
rb: tbl.order_by(index:'id').limit(10).changes(squash:true, include_states:true, include_initial:true).limit(2)
|
||||
js: tbl.orderBy({index:'id'}).limit(10).changes({squash:true, includeStates:true, includeInitial:true}).limit(2)
|
||||
ot: [{'state':'initializing'}, {'state':'ready'}]
|
||||
|
||||
- cd: tbl.insert({'id':1})
|
||||
|
||||
- py: tbl.order_by(index='id').limit(10).changes(squash=true, include_states=true, include_initial=true).limit(3)
|
||||
rb: tbl.order_by(index:'id').limit(10).changes(squash:true, include_states:true, include_initial:true).limit(3)
|
||||
js: tbl.orderBy({index:'id'}).limit(10).changes({squash:true, includeStates:true, includeInitial:true}).limit(3)
|
||||
ot: [{'state':'initializing'}, {'new_val':{'id':1}}, {'state':'ready'}]
|
||||
|
||||
- py: tblchanges = tbl.changes(squash=true, include_states=true)
|
||||
rb: tblchanges = tbl.changes(squash:true, include_states:true)
|
||||
js: tblchanges = tbl.changes({squash:true, includeStates:true})
|
||||
|
||||
- cd: tbl.insert({'id':2})
|
||||
|
||||
- cd: fetch(tblchanges, 2)
|
||||
ot: [{'state':'ready'},{'new_val':{'id':2},'old_val':null}]
|
||||
|
||||
- py: getchanges = tbl.get(2).changes(include_states=true, include_initial=true)
|
||||
rb: getchanges = tbl.get(2).changes(include_states:true, include_initial:true)
|
||||
js: getchanges = tbl.get(2).changes({includeStates:true, includeInitial:true})
|
||||
|
||||
- cd: tbl.get(2).update({'a':1})
|
||||
|
||||
- cd: fetch(getchanges, 4)
|
||||
ot: [{'state':'initializing'}, {'new_val':{'id':2}}, {'state':'ready'}, {'old_val':{'id':2},'new_val':{'id':2,'a':1}}]
|
||||
|
||||
- py: limitchanges = tbl.order_by(index='id').limit(10).changes(include_states=true, include_initial=true)
|
||||
rb: limitchanges = tbl.order_by(index:'id').limit(10).changes(include_states:true, include_initial:true)
|
||||
js: limitchanges = tbl.orderBy({index:'id'}).limit(10).changes({includeStates:true, includeInitial:true})
|
||||
|
||||
- py: limitchangesdesc = tbl.order_by(index=r.desc('id')).limit(10).changes(include_states=true, include_initial=true)
|
||||
rb: limitchangesdesc = tbl.order_by(index:r.desc('id')).limit(10).changes(include_states:true, include_initial:true)
|
||||
js: limitchangesdesc = tbl.orderBy({index:r.desc('id')}).limit(10).changes({includeStates:true, includeInitial:true})
|
||||
|
||||
- cd: tbl.insert({'id':3})
|
||||
|
||||
- cd: fetch(limitchanges, 5)
|
||||
ot: [{'state':'initializing'}, {'new_val':{'id':1}}, {'new_val':{'a':1, 'id':2}}, {'state':'ready'}, {'old_val':null, 'new_val':{'id':3}}]
|
||||
|
||||
- cd: fetch(limitchangesdesc, 5)
|
||||
ot: [{'state':'initializing'}, {'new_val':{'a':1, 'id':2}}, {'new_val':{'id':1}}, {'state':'ready'}, {'old_val':null, 'new_val':{'id':3}}]
|
147
ext/librethinkdbxx/test/upstream/changefeeds/point.yaml
Normal file
147
ext/librethinkdbxx/test/upstream/changefeeds/point.yaml
Normal file
@ -0,0 +1,147 @@
|
||||
desc: Test point changebasics
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# -- basic
|
||||
|
||||
# start a feed
|
||||
|
||||
- cd: basic = tbl.get(1).changes({include_initial:true})
|
||||
py: basic = tbl.get(1).changes(include_initial=True)
|
||||
|
||||
# - inital return
|
||||
|
||||
- cd: fetch(basic, 1)
|
||||
ot: [{'new_val':null}]
|
||||
|
||||
# - inserts
|
||||
|
||||
- cd: tbl.insert({'id':1})
|
||||
ot: partial({'errors':0, 'inserted':1})
|
||||
|
||||
- cd: fetch(basic, 1)
|
||||
ot: [{'old_val':null, 'new_val':{'id':1}}]
|
||||
|
||||
# - updates
|
||||
|
||||
- cd: tbl.get(1).update({'update':1})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
|
||||
- cd: fetch(basic, 1)
|
||||
ot: [{'old_val':{'id':1}, 'new_val':{'id':1,'update':1}}]
|
||||
|
||||
# - deletions
|
||||
|
||||
- cd: tbl.get(1).delete()
|
||||
ot: partial({'errors':0, 'deleted':1})
|
||||
|
||||
- cd: fetch(basic, 1)
|
||||
ot: [{'old_val':{'id':1,'update':1}, 'new_val':null}]
|
||||
|
||||
# - closing
|
||||
|
||||
- cd: basic.close()
|
||||
rb: def pass; end
|
||||
# the ruby test driver currently has to mangle cursors, so we can't close them properly
|
||||
|
||||
# -- filter
|
||||
|
||||
- py: filter = tbl.get(1).changes(squash=false,include_initial=True).filter(r.row['new_val']['update'].gt(2))['new_val']['update']
|
||||
rb: filter = tbl.get(1).changes(squash:false,include_initial:true).filter{|row| row['new_val']['update'].gt(2)}['new_val']['update']
|
||||
js: filter = tbl.get(1).changes({squash:false,include_initial:true}).filter(r.row('new_val')('update').gt(2))('new_val')('update')
|
||||
|
||||
- cd: tbl.insert({'id':1, 'update':1})
|
||||
- cd: tbl.get(1).update({'update':4})
|
||||
- cd: tbl.get(1).update({'update':1})
|
||||
- cd: tbl.get(1).update({'update':7})
|
||||
|
||||
- cd: fetch(filter, 2)
|
||||
ot: [4,7]
|
||||
|
||||
# -- pluck on values
|
||||
|
||||
- py: pluck = tbl.get(3).changes(squash=false,include_initial=True).pluck({'new_val':['red', 'blue']})['new_val']
|
||||
rb: pluck = tbl.get(3).changes(squash:false,include_initial:true).pluck({'new_val':['red', 'blue']})['new_val']
|
||||
js: pluck = tbl.get(3).changes({squash:false,include_initial:true}).pluck({'new_val':['red', 'blue']})('new_val')
|
||||
|
||||
- cd: tbl.insert({'id':3, 'red':1, 'green':1})
|
||||
ot: partial({'errors':0, 'inserted':1})
|
||||
- cd: tbl.get(3).update({'blue':2, 'green':3})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
- cd: tbl.get(3).update({'green':4})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
- cd: tbl.get(3).update({'blue':4})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
|
||||
- cd: fetch(pluck, 4)
|
||||
ot: [{'red': 1}, {'blue': 2, 'red': 1}, {'blue': 2, 'red': 1}, {'blue': 4, 'red': 1}]
|
||||
|
||||
# -- virtual tables
|
||||
|
||||
# - rethinkdb._debug_scratch
|
||||
|
||||
- def: dtbl = r.db('rethinkdb').table('_debug_scratch')
|
||||
|
||||
- cd: debug = dtbl.get(1).changes({include_initial:true})
|
||||
py: debug = dtbl.get(1).changes(include_initial=True)
|
||||
|
||||
- cd: fetch(debug, 1)
|
||||
ot: [{'new_val':null}]
|
||||
|
||||
- cd: dtbl.insert({'id':1})
|
||||
ot: partial({'errors':0, 'inserted':1})
|
||||
- cd: fetch(debug, 1)
|
||||
ot: [{'old_val':null, 'new_val':{'id':1}}]
|
||||
|
||||
- cd: dtbl.get(1).update({'update':1})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
- cd: fetch(debug, 1)
|
||||
ot: [{'old_val':{'id':1}, 'new_val':{'id':1,'update':1}}]
|
||||
|
||||
- cd: dtbl.get(1).delete()
|
||||
ot: partial({'errors':0, 'deleted':1})
|
||||
- cd: fetch(debug, 1)
|
||||
ot: [{'old_val':{'id':1,'update':1}, 'new_val':null}]
|
||||
|
||||
- cd: dtbl.insert({'id':5, 'red':1, 'green':1})
|
||||
ot: {'skipped':0, 'deleted':0, 'unchanged':0, 'errors':0, 'replaced':0, 'inserted':1}
|
||||
- py: dtblPluck = dtbl.get(5).changes(include_initial=True).pluck({'new_val':['red', 'blue']})['new_val']
|
||||
rb: dtblPluck = dtbl.get(5).changes(include_initial:true).pluck({'new_val':['red', 'blue']})['new_val']
|
||||
js: dtblPluck = dtbl.get(5).changes({include_initial:true}).pluck({'new_val':['red', 'blue']})('new_val')
|
||||
|
||||
# disabled because inital value is not being reported correctly, so goes missing. see #3723
|
||||
- cd: fetch(dtblPluck, 1)
|
||||
ot: [{'red':1}]
|
||||
|
||||
- cd: dtbl.get(5).update({'blue':2, 'green':3})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
|
||||
- cd: fetch(dtblPluck, 1)
|
||||
ot: [{'blue':2, 'red':1}]
|
||||
|
||||
# - rethinkdb.table_status bad optargs
|
||||
|
||||
# disabled, re-enable once #3725 is done
|
||||
# - py: r.db('rethinkdb').table('table_status').changes(squash=False)
|
||||
# rb: r.db('rethinkdb').table('table_status').changes(squash:False)
|
||||
# js: r.db('rethinkdb').table('table_status').changes({squash:False})
|
||||
# ot: err('ReqlRuntimeError', 'replace with error message decided in \#3725')
|
||||
|
||||
# - rethinkdb.table_status
|
||||
|
||||
- cd: tableId = tbl.info()['id']
|
||||
js: tableId = tbl.info()('id')
|
||||
|
||||
- cd: rtblPluck = r.db('rethinkdb').table('table_status').get(tableId).changes({include_initial:true})
|
||||
py: rtblPluck = r.db('rethinkdb').table('table_status').get(tableId).changes(include_initial=True)
|
||||
- cd: fetch(rtblPluck, 1)
|
||||
ot: partial([{'new_val':partial({'db':'test'})}])
|
||||
|
||||
- py: tbl.reconfigure(shards=3, replicas=1)
|
||||
rb: tbl.reconfigure(shards:3, replicas:1)
|
||||
js: tbl.reconfigure({shards:3, replicas:1})
|
||||
- py: fetch(rtblPluck, 1, 2)
|
||||
js: fetch(rtblPluck, 1, 2)
|
||||
rb: fetch(rtblPluck, 1)
|
||||
ot: partial([{'old_val':partial({'db':'test'}), 'new_val':partial({'db':'test'})}])
|
||||
|
50
ext/librethinkdbxx/test/upstream/changefeeds/sindex.yaml
Normal file
50
ext/librethinkdbxx/test/upstream/changefeeds/sindex.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
desc: Test basic changefeed operations
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# Fill in some data
|
||||
- rb: tbl.index_create('a')
|
||||
ot: partial({'created':1})
|
||||
|
||||
- rb: tbl.index_wait().count
|
||||
ot: 1
|
||||
|
||||
- rb: tbl.insert([{id:1, a:8}, {id:2, a:7}])
|
||||
ot: partial({'errors':0, 'inserted':2})
|
||||
|
||||
- rb: idmin = tbl.min(index:'id').changes(squash:false, include_initial:true).limit(2)
|
||||
- rb: idmax = tbl.max(index:'id').changes(squash:false, include_initial:true).limit(2)
|
||||
- rb: amin = tbl.min(index:'a').changes(squash:false, include_initial:true).limit(2)
|
||||
- rb: amax = tbl.max(index:'a').changes(squash:false, include_initial:true).limit(2)
|
||||
|
||||
- rb: idmin2 = tbl.min(index:'id').changes(squash:true, include_initial:true).limit(2)
|
||||
- rb: idmax2 = tbl.max(index:'id').changes(squash:true, include_initial:true).limit(2)
|
||||
- rb: amin2 = tbl.min(index:'a').changes(squash:true, include_initial:true).limit(2)
|
||||
- rb: amax2 = tbl.max(index:'a').changes(squash:true, include_initial:true).limit(2)
|
||||
|
||||
- rb: tbl.insert([{id:0, a:9}, {id:3, a:6}])
|
||||
ot: partial({'errors':0, 'inserted':2})
|
||||
|
||||
- rb: idmin.to_a
|
||||
ot: ([{"new_val"=>{"a"=>8, "id"=>1}}, {"new_val"=>{"a"=>9, "id"=>0}, "old_val"=>{"a"=>8, "id"=>1}}])
|
||||
|
||||
- rb: idmax.to_a
|
||||
ot: ([{"new_val"=>{"a"=>7, "id"=>2}}, {"new_val"=>{"a"=>6, "id"=>3}, "old_val"=>{"a"=>7, "id"=>2}}])
|
||||
|
||||
- rb: amin.to_a
|
||||
ot: ([{"new_val"=>{"a"=>7, "id"=>2}}, {"new_val"=>{"a"=>6, "id"=>3}, "old_val"=>{"a"=>7, "id"=>2}}])
|
||||
|
||||
- rb: amax.to_a
|
||||
ot: ([{"new_val"=>{"a"=>8, "id"=>1}}, {"new_val"=>{"a"=>9, "id"=>0}, "old_val"=>{"a"=>8, "id"=>1}}])
|
||||
|
||||
- rb: idmin2.to_a
|
||||
ot: ([{"new_val"=>{"a"=>8, "id"=>1}}, {"new_val"=>{"a"=>9, "id"=>0}, "old_val"=>{"a"=>8, "id"=>1}}])
|
||||
|
||||
- rb: idmax2.to_a
|
||||
ot: ([{"new_val"=>{"a"=>7, "id"=>2}}, {"new_val"=>{"a"=>6, "id"=>3}, "old_val"=>{"a"=>7, "id"=>2}}])
|
||||
|
||||
- rb: amin2.to_a
|
||||
ot: ([{"new_val"=>{"a"=>7, "id"=>2}}, {"new_val"=>{"a"=>6, "id"=>3}, "old_val"=>{"a"=>7, "id"=>2}}])
|
||||
|
||||
- rb: amax2.to_a
|
||||
ot: ([{"new_val"=>{"a"=>8, "id"=>1}}, {"new_val"=>{"a"=>9, "id"=>0}, "old_val"=>{"a"=>8, "id"=>1}}])
|
62
ext/librethinkdbxx/test/upstream/changefeeds/squash.yaml
Normal file
62
ext/librethinkdbxx/test/upstream/changefeeds/squash.yaml
Normal file
@ -0,0 +1,62 @@
|
||||
desc: Test changefeed squashing
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# Check type
|
||||
|
||||
- py: tbl.changes(squash=true).type_of()
|
||||
rb: tbl.changes(squash:true).type_of()
|
||||
js: tbl.changes({squash:true}).typeOf()
|
||||
ot: ("STREAM")
|
||||
|
||||
# comparison changes
|
||||
|
||||
- cd: normal_changes = tbl.changes().limit(2)
|
||||
|
||||
- py: false_squash_changes = tbl.changes(squash=False).limit(2)
|
||||
js: false_squash_changes = tbl.changes({squash:false}).limit(2)
|
||||
rb: false_squash_changes = tbl.changes(squash:false).limit(2)
|
||||
|
||||
- py: long_squash_changes = tbl.changes(squash=0.5).limit(1)
|
||||
js: long_squash_changes = tbl.changes({squash:0.5}).limit(1)
|
||||
rb: long_squash_changes = tbl.changes(squash:0.5).limit(1)
|
||||
|
||||
- py: squash_changes = tbl.changes(squash=true).limit(1)
|
||||
js: squash_changes = tbl.changes({squash:true}).limit(1)
|
||||
rb: squash_changes = tbl.changes(squash:true).limit(1)
|
||||
|
||||
- cd: tbl.insert({'id':100})['inserted']
|
||||
js: tbl.insert({'id':100})('inserted')
|
||||
ot: 1
|
||||
|
||||
- cd: tbl.get(100).update({'a':1})['replaced']
|
||||
js: tbl.get(100).update({'a':1})('replaced')
|
||||
ot: 1
|
||||
|
||||
- cd: normal_changes
|
||||
ot: ([{'new_val':{'id':100}, 'old_val':null},
|
||||
{'new_val':{'a':1, 'id':100}, 'old_val':{'id':100}}])
|
||||
|
||||
- cd: false_squash_changes
|
||||
ot: ([{'new_val':{'id':100}, 'old_val':null},
|
||||
{'new_val':{'a':1, 'id':100}, 'old_val':{'id':100}}])
|
||||
|
||||
- cd: long_squash_changes
|
||||
ot: ([{'new_val':{'a':1, 'id':100}, 'old_val':null}])
|
||||
|
||||
- cd: squash_changes
|
||||
ot:
|
||||
js: ([{'new_val':{'a':1, 'id':100}, 'old_val':null}])
|
||||
cd: ([{'new_val':{'id':100}, 'old_val':null}])
|
||||
|
||||
# Bad squash values
|
||||
|
||||
- py: tbl.changes(squash=null)
|
||||
rb: tbl.changes(squash:null)
|
||||
js: tbl.changes({squash:null})
|
||||
ot: err('ReqlQueryLogicError', 'Expected BOOL or NUMBER but found NULL.')
|
||||
|
||||
- py: tbl.changes(squash=-10)
|
||||
rb: tbl.changes(squash:-10)
|
||||
js: tbl.changes({squash:-10})
|
||||
ot: err('ReqlQueryLogicError', 'Expected BOOL or a positive NUMBER but found a negative NUMBER.')
|
101
ext/librethinkdbxx/test/upstream/changefeeds/table.yaml
Normal file
101
ext/librethinkdbxx/test/upstream/changefeeds/table.yaml
Normal file
@ -0,0 +1,101 @@
|
||||
desc: Test changefeeds on a table
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# ==== regular tables
|
||||
|
||||
# - start feeds
|
||||
|
||||
- cd: all = tbl.changes()
|
||||
|
||||
# - note: no initial values from table changefeeds
|
||||
|
||||
# - inserts
|
||||
|
||||
- cd: tbl.insert([{'id':1}, {'id':2}])
|
||||
ot: partial({'errors':0, 'inserted':2})
|
||||
- cd: fetch(all, 2)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':1}}, {'old_val':null, 'new_val':{'id':2}}])
|
||||
|
||||
# - updates
|
||||
|
||||
- cd: tbl.get(1).update({'version':1})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
- cd: fetch(all, 1)
|
||||
ot: [{'old_val':{'id':1}, 'new_val':{'id':1, 'version':1}}]
|
||||
|
||||
# - deletions
|
||||
|
||||
- cd: tbl.get(1).delete()
|
||||
ot: partial({'errors':0, 'deleted':1})
|
||||
- cd: fetch(all, 1)
|
||||
ot: [{'old_val':{'id':1, 'version':1}, 'new_val':null}]
|
||||
|
||||
# - pluck on values
|
||||
|
||||
- cd: pluck = tbl.changes().pluck({'new_val':['version']})
|
||||
- cd: tbl.insert([{'id':5, 'version':5}])
|
||||
ot: partial({'errors':0, 'inserted':1})
|
||||
- cd: fetch(pluck, 1)
|
||||
ot: [{'new_val':{'version':5}}]
|
||||
|
||||
# - order by
|
||||
|
||||
- cd: tbl.changes().order_by('id')
|
||||
ot: err('ReqlQueryLogicError', "Cannot call a terminal (`reduce`, `count`, etc.) on an infinite stream (such as a changefeed).")
|
||||
#
|
||||
# ToDo: enable this when #4067 is done
|
||||
#
|
||||
# - js: orderedLimit = tbl.changes().limit(5).order_by(r.desc('id'))('new_val')('id')
|
||||
# cd: orderedLimit = tbl.changes().limit(5).order_by(r.desc('id'))['new_val']['id']
|
||||
# - js: tbl.range(100, 105).map(function (row) { return {'id':row} })
|
||||
# py: tbl.range(100, 105).map({'id':r.row})
|
||||
# rb: tbl.range(100, 105).map{|row| {'id':row}}
|
||||
# - cd: fetch(orderedLimit)
|
||||
# ot: [104, 103, 102, 101, 100]
|
||||
|
||||
# - changes overflow
|
||||
|
||||
- cd: overflow = tbl.changes()
|
||||
runopts:
|
||||
changefeed_queue_size: 100
|
||||
# add enough entries to make sure we get the overflow error
|
||||
- js: tbl.insert(r.range(200).map(function(x) { return({}); }))
|
||||
py: tbl.insert(r.range(200).map(lambda x: {}))
|
||||
rb: tbl.insert(r.range(200).map{|x| {}})
|
||||
- cd: fetch(overflow, 90)
|
||||
ot: partial([{'error': regex('Changefeed cache over array size limit, skipped \d+ elements.')}])
|
||||
|
||||
# ==== virtual tables
|
||||
|
||||
- def: vtbl = r.db('rethinkdb').table('_debug_scratch')
|
||||
- cd: allVirtual = vtbl.changes()
|
||||
|
||||
# - inserts
|
||||
|
||||
- cd: vtbl.insert([{'id':1}, {'id':2}])
|
||||
ot: partial({'errors':0, 'inserted':2})
|
||||
- cd: fetch(allVirtual, 2)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':1}}, {'old_val':null, 'new_val':{'id':2}}])
|
||||
|
||||
# - updates
|
||||
|
||||
- cd: vtbl.get(1).update({'version':1})
|
||||
ot: partial({'errors':0, 'replaced':1})
|
||||
- cd: fetch(allVirtual, 1)
|
||||
ot: [{'old_val':{'id':1}, 'new_val':{'id':1, 'version':1}}]
|
||||
|
||||
# - deletions
|
||||
|
||||
- cd: vtbl.get(1).delete()
|
||||
ot: partial({'errors':0, 'deleted':1})
|
||||
- cd: fetch(allVirtual, 1)
|
||||
ot: [{'old_val':{'id':1, 'version':1}, 'new_val':null}]
|
||||
|
||||
# - pluck on values
|
||||
|
||||
- cd: vpluck = vtbl.changes().pluck({'new_val':['version']})
|
||||
- cd: vtbl.insert([{'id':5, 'version':5}])
|
||||
ot: partial({'errors':0, 'inserted':1})
|
||||
- cd: fetch(vpluck, 1)
|
||||
ot: [{'new_val':{'version':5}}]
|
297
ext/librethinkdbxx/test/upstream/control.yaml
Normal file
297
ext/librethinkdbxx/test/upstream/control.yaml
Normal file
@ -0,0 +1,297 @@
|
||||
desc: Tests RQL control flow structures
|
||||
table_variable_name: tbl, tbl2
|
||||
tests:
|
||||
|
||||
## FunCall
|
||||
|
||||
- py: r.expr(1).do(lambda v: v * 2)
|
||||
js: r.expr(1).do(function(v) { return v.mul(2); })
|
||||
rb: r.expr(1).do{|v| v * 2 }
|
||||
ot: 2
|
||||
|
||||
- py: r.expr([0, 1, 2]).do(lambda v: v.append(3))
|
||||
js: r([0, 1, 2]).do(function(v) { return v.append(3); })
|
||||
rb: r([0, 1, 2]).do{ |v| v.append(3) }
|
||||
ot: [0, 1, 2, 3]
|
||||
|
||||
- py: r.do(1, 2, lambda x, y: x + y)
|
||||
js: r.do(1, 2, function(x, y) { return x.add(y); })
|
||||
rb: r.do(1, 2) {|x, y| x + y}
|
||||
ot: 3
|
||||
|
||||
- py: r.do(lambda: 1)
|
||||
js: r.do(function() { return 1; })
|
||||
rb: r.do{1}
|
||||
ot: 1
|
||||
|
||||
# do error cases
|
||||
- py: r.do(1, 2, lambda x: x)
|
||||
js: r.do(1, 2, function(x) { return x; })
|
||||
rb: r.do(1, 2) {|x| x}
|
||||
ot: err("ReqlQueryLogicError", 'Expected function with 2 arguments but found function with 1 argument.', [1])
|
||||
|
||||
- py: r.do(1, 2, 3, lambda x, y: x + y)
|
||||
js: r.do(1, 2, 3, function(x, y) { return x.add(y); })
|
||||
rb: r.do(1, 2, 3) {|x, y| x + y}
|
||||
ot: err("ReqlQueryLogicError", 'Expected function with 3 arguments but found function with 2 arguments.', [1])
|
||||
|
||||
- cd: r.do(1)
|
||||
ot: 1
|
||||
|
||||
- js: r.do(1, function(x) {})
|
||||
ot: err("ReqlDriverCompileError", 'Anonymous function returned `undefined`. Did you forget a `return`?', [1])
|
||||
|
||||
- js: r.do(1, function(x) { return undefined; })
|
||||
ot: err("ReqlDriverCompileError", 'Anonymous function returned `undefined`. Did you forget a `return`?', [1])
|
||||
|
||||
- cd: r.do()
|
||||
ot:
|
||||
cd: err("ReqlCompileError", 'Expected 1 or more arguments but found 0.', [1])
|
||||
|
||||
# FunCall errors
|
||||
|
||||
- py: r.expr('abc').do(lambda v: v.append(3))
|
||||
js: r('abc').do(function(v) { return v.append(3); })
|
||||
rb: r('abc').do{ |v| v.append(3) }
|
||||
ot: err("ReqlQueryLogicError", "Expected type ARRAY but found STRING.", [1, 0])
|
||||
|
||||
- py: r.expr('abc').do(lambda v: v + 3)
|
||||
js: r('abc').do(function(v) { return v.add(3); })
|
||||
rb: r('abc').do{ |v| v + 3 }
|
||||
ot: err("ReqlQueryLogicError", "Expected type STRING but found NUMBER.", [1, 1])
|
||||
|
||||
- py: r.expr('abc').do(lambda v: v + 'def') + 3
|
||||
js: r('abc').do(function(v) { return v.add('def'); }).add(3)
|
||||
rb: r('abc').do{ |v| v + 'def' } + 3
|
||||
ot: err("ReqlQueryLogicError", "Expected type STRING but found NUMBER.", [1])
|
||||
|
||||
- py: r.expr(0).do(lambda a,b: a + b)
|
||||
js: r(0).do(function(a,b) { return a.add(b); })
|
||||
rb: r(0).do{ |a, b| a + b }
|
||||
ot: err("ReqlQueryLogicError", 'Expected function with 1 argument but found function with 2 arguments.', [1])
|
||||
|
||||
- py: r.do(1, 2, lambda a: a)
|
||||
js: r.do(1,2, function(a) { return a; })
|
||||
rb: r.do(1, 2) { |a| a }
|
||||
ot: err("ReqlQueryLogicError", 'Expected function with 2 arguments but found function with 1 argument.', [1])
|
||||
|
||||
- cd: r.expr(5).do(r.row)
|
||||
rb: r(5).do{ |row| row }
|
||||
ot: 5
|
||||
|
||||
## Branch
|
||||
|
||||
- cd: r.branch(True, 1, 2)
|
||||
ot: 1
|
||||
- cd: r.branch(False, 1, 2)
|
||||
ot: 2
|
||||
- cd: r.branch(1, 'c', False)
|
||||
ot: ("c")
|
||||
- cd: r.branch(null, {}, [])
|
||||
ot: ([])
|
||||
|
||||
- cd: r.branch(r.db('test'), 1, 2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type DATUM but found DATABASE:", [])
|
||||
- cd: r.branch(tbl, 1, 2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type DATUM but found TABLE:", [])
|
||||
- cd: r.branch(r.error("a"), 1, 2)
|
||||
ot: err("ReqlUserError", "a", [])
|
||||
|
||||
- cd: r.branch([], 1, 2)
|
||||
ot: 1
|
||||
- cd: r.branch({}, 1, 2)
|
||||
ot: 1
|
||||
- cd: r.branch("a", 1, 2)
|
||||
ot: 1
|
||||
- cd: r.branch(1.2, 1, 2)
|
||||
ot: 1
|
||||
|
||||
- cd: r.branch(True, 1, True, 2, 3)
|
||||
ot: 1
|
||||
- cd: r.branch(True, 1, False, 2, 3)
|
||||
ot: 1
|
||||
- cd: r.branch(False, 1, True, 2, 3)
|
||||
ot: 2
|
||||
- cd: r.branch(False, 1, False, 2, 3)
|
||||
ot: 3
|
||||
|
||||
- cd: r.branch(True, 1, True, 2)
|
||||
ot: err("ReqlQueryLogicError", "Cannot call `branch` term with an even number of arguments.")
|
||||
|
||||
# r.error()
|
||||
- cd: r.error('Hello World')
|
||||
ot: err("ReqlUserError", "Hello World", [0])
|
||||
|
||||
- cd: r.error(5)
|
||||
# we might want to allow this eventually
|
||||
ot: err("ReqlQueryLogicError", "Expected type STRING but found NUMBER.", [0])
|
||||
|
||||
# r.filter
|
||||
- cd: r.expr([1, 2, 3]).filter()
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 2 arguments but found 1.", [0])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 0.", [0])
|
||||
- cd: r.expr([1, 2, 3]).filter(1, 2)
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Expected 2 arguments but found 3.", [0])
|
||||
js: err("ReqlCompileError", "Expected 1 argument (not including options) but found 2.", [0])
|
||||
|
||||
# r.js()
|
||||
- cd: r.js('1 + 1')
|
||||
ot: 2
|
||||
|
||||
- cd: r.js('1 + 1; 2 + 2')
|
||||
ot: 4
|
||||
|
||||
- cd: r.do(1, 2, r.js('(function(a, b) { return a + b; })'))
|
||||
ot: 3
|
||||
|
||||
- cd: r.expr(1).do(r.js('(function(x) { return x + 1; })'))
|
||||
ot: 2
|
||||
|
||||
- cd: r.expr('foo').do(r.js('(function(x) { return x + "bar"; })'))
|
||||
ot: 'foobar'
|
||||
|
||||
# js timeout optarg shouldn't be triggered
|
||||
- cd: r.js('1 + 2', {timeout:1.2})
|
||||
py: r.js('1 + 2', timeout=1.2)
|
||||
ot: 3
|
||||
|
||||
# js error cases
|
||||
- cd: r.js('(function() { return 1; })')
|
||||
ot: err("ReqlQueryLogicError", "Query result must be of type DATUM, GROUPED_DATA, or STREAM (got FUNCTION).", [0])
|
||||
|
||||
- cd: r.js('function() { return 1; }')
|
||||
ot: err("ReqlQueryLogicError", "SyntaxError: Unexpected token (", [0])
|
||||
|
||||
# Play with the number of arguments in the JS function
|
||||
- cd: r.do(1, 2, r.js('(function(a) { return a; })'))
|
||||
ot: 1
|
||||
|
||||
- cd: r.do(1, 2, r.js('(function(a, b, c) { return a; })'))
|
||||
ot: 1
|
||||
|
||||
- cd: r.do(1, 2, r.js('(function(a, b, c) { return c; })'))
|
||||
ot: err("ReqlQueryLogicError", "Cannot convert javascript `undefined` to ql::datum_t.", [0])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).filter(r.js('(function(a) { return a >= 2; })'))
|
||||
ot: ([2, 3])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).map(r.js('(function(a) { return a + 1; })'))
|
||||
ot: ([2, 3, 4])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).map(r.js('1'))
|
||||
ot: err("ReqlQueryLogicError", "Expected type FUNCTION but found DATUM:", [0])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).filter(r.js('(function(a) {})'))
|
||||
ot: err("ReqlQueryLogicError", "Cannot convert javascript `undefined` to ql::datum_t.", [0])
|
||||
|
||||
# What happens if we pass static values to things that expect functions
|
||||
- cd: r.expr([1, 2, 3]).map(1)
|
||||
ot: err("ReqlQueryLogicError", "Expected type FUNCTION but found DATUM:", [0])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).filter('foo')
|
||||
ot: ([1, 2, 3])
|
||||
- cd: r.expr([1, 2, 4]).filter([])
|
||||
ot: ([1, 2, 4])
|
||||
- cd: r.expr([1, 2, 3]).filter(null)
|
||||
ot: ([])
|
||||
|
||||
- cd: r.expr([1, 2, 4]).filter(False)
|
||||
rb: r([1, 2, 4]).filter(false)
|
||||
ot: ([])
|
||||
|
||||
# forEach
|
||||
- cd: tbl.count()
|
||||
ot: 0
|
||||
|
||||
# Insert three elements
|
||||
- js: r([1, 2, 3]).forEach(function (row) { return tbl.insert({ id:row }) })
|
||||
py: r.expr([1, 2, 3]).for_each(lambda row:tbl.insert({ 'id':row }))
|
||||
rb: r([1, 2, 3]).for_each{ |row| tbl.insert({ :id => row }) }
|
||||
ot: ({'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':3})
|
||||
|
||||
- cd: tbl.count()
|
||||
ot: 3
|
||||
|
||||
# Update each row to add additional attribute
|
||||
- js: r([1, 2, 3]).forEach(function (row) { return tbl.update({ foo:row }) })
|
||||
py: r.expr([1,2,3]).for_each(lambda row:tbl.update({'foo':row}))
|
||||
rb: r.expr([1,2,3]).for_each{ |row| tbl.update({ :foo => row }) }
|
||||
ot: ({'deleted':0.0,'replaced':9,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':0.0})
|
||||
|
||||
# Insert three more elements (and error on three)
|
||||
- js: r([1, 2, 3]).forEach(function (row) { return [tbl.insert({ id:row }), tbl.insert({ id:row.mul(10) })] })
|
||||
py: r.expr([1,2,3]).for_each(lambda row:[tbl.insert({ 'id':row }), tbl.insert({ 'id':row*10 })])
|
||||
rb: r.expr([1,2,3]).for_each{ |row| [tbl.insert({ :id => row}), tbl.insert({ :id => row*10})] }
|
||||
ot: {'first_error':"Duplicate primary key `id`:\n{\n\t\"foo\":\t3,\n\t\"id\":\t1\n}\n{\n\t\"id\":\t1\n}",'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':3,'skipped':0.0,'inserted':3}
|
||||
|
||||
- cd: tbl.count()
|
||||
ot: 6
|
||||
|
||||
- cd: tableCount = tbl2.count()
|
||||
- cd: r.expr([1, 2, 3]).for_each( tbl2.insert({}) )
|
||||
ot: ({'deleted':0.0,'replaced':0.0,'generated_keys':arrlen(3,uuid()),'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':3})
|
||||
# inserts only a single document per #3700
|
||||
- cd: tbl2.count()
|
||||
ot: tableCount + 1
|
||||
|
||||
# We have six elements, update them 6*2*3=36 times
|
||||
- js: r([1, 2, 3]).forEach(function (row) { return [tbl.update({ foo:row }), tbl.update({ bar:row })] })
|
||||
py: r.expr([1,2,3]).for_each(lambda row:[tbl.update({'foo':row}), tbl.update({'bar':row})])
|
||||
rb: r.expr([1,2,3]).for_each{ |row| [tbl.update({:foo => row}), tbl.update({:bar => row})]}
|
||||
ot: ({'deleted':0.0,'replaced':36,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':0.0})
|
||||
|
||||
# forEach negative cases
|
||||
- cd: r.expr([1, 2, 3]).for_each( tbl2.insert({ 'id':r.row }) )
|
||||
rb: r([1, 2, 3]).for_each{ |row| tbl2.insert({ 'id':row }) }
|
||||
ot: ({'deleted':0.0,'replaced':0.0,'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':3})
|
||||
|
||||
- cd: r.expr([1, 2, 3]).for_each(1)
|
||||
ot: err("ReqlQueryLogicError", "FOR_EACH expects one or more basic write queries. Expected type ARRAY but found NUMBER.", [0])
|
||||
|
||||
- py: r.expr([1, 2, 3]).for_each(lambda x:x)
|
||||
js: r([1, 2, 3]).forEach(function (x) { return x; })
|
||||
rb: r([1, 2, 3]).for_each{ |x| x }
|
||||
ot: err("ReqlQueryLogicError", "FOR_EACH expects one or more basic write queries. Expected type ARRAY but found NUMBER.", [1, 1])
|
||||
|
||||
- cd: r.expr([1, 2, 3]).for_each(r.row)
|
||||
rb: r([1, 2, 3]).for_each{ |row| row }
|
||||
ot: err("ReqlQueryLogicError", "FOR_EACH expects one or more basic write queries. Expected type ARRAY but found NUMBER.", [1, 1])
|
||||
|
||||
- js: r([1, 2, 3]).forEach(function (row) { return tbl; })
|
||||
py: r.expr([1, 2, 3]).for_each(lambda row:tbl)
|
||||
rb: r([1, 2, 3]).for_each{ |row| tbl }
|
||||
ot: err("ReqlQueryLogicError", "FOR_EACH expects one or more basic write queries.", [1, 1])
|
||||
|
||||
# This is only relevant in JS -- what happens when we return undefined
|
||||
- js: r([1, 2, 3]).forEach(function (row) {})
|
||||
ot: err("ReqlDriverCompileError", 'Anonymous function returned `undefined`. Did you forget a `return`?', [1])
|
||||
|
||||
# Make sure write queries can't be nested into stream ops
|
||||
- cd: r.expr(1).do(tbl.insert({'foo':r.row}))
|
||||
rb: r(1).do{ |row| tbl.insert({ :foo => row }) }
|
||||
ot: ({'deleted':0.0,'replaced':0.0,'generated_keys':arrlen(1,uuid()),'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':1})
|
||||
|
||||
- py: r.expr([1, 2])[0].do(tbl.insert({'foo':r.row}))
|
||||
js: r.expr([1, 2]).nth(0).do(tbl.insert({'foo':r.row}))
|
||||
rb: r([1, 2])[0].do{ |row| tbl.insert({ :foo => row }) }
|
||||
ot: ({'deleted':0.0,'replaced':0.0,'generated_keys':arrlen(1,uuid()),'unchanged':0.0,'errors':0.0,'skipped':0.0,'inserted':1})
|
||||
|
||||
- cd: r.expr([1, 2]).map(tbl.insert({'foo':r.row}))
|
||||
rb: r([1, 2]).map{ |row| tbl.insert({ :foo => row }) }
|
||||
ot: err('ReqlCompileError', 'Cannot nest writes or meta ops in stream operations. Use FOR_EACH instead.', [0])
|
||||
|
||||
- cd: r.expr([1, 2]).map(r.db('test').table_create('table_create_failure'))
|
||||
ot: err('ReqlCompileError', 'Cannot nest writes or meta ops in stream operations. Use FOR_EACH instead.', [0])
|
||||
|
||||
- cd: r.expr([1, 2]).map(tbl.insert({'foo':r.row}).get_field('inserted'))
|
||||
rb: r.expr([1, 2]).map{|x| tbl.insert({'foo':x}).get_field('inserted')}
|
||||
ot: err('ReqlCompileError', 'Cannot nest writes or meta ops in stream operations. Use FOR_EACH instead.', [0])
|
||||
|
||||
- cd: r.expr([1, 2]).map(tbl.insert({'foo':r.row}).get_field('inserted').add(5))
|
||||
rb: r.expr([1, 2]).map{|x| tbl.insert({'foo':x}).get_field('inserted').add(5)}
|
||||
ot: err('ReqlCompileError', 'Cannot nest writes or meta ops in stream operations. Use FOR_EACH instead.', [0])
|
||||
|
||||
- cd: r.expr(1).do(r.db('test').table_create('table_create_success'))
|
||||
ot: partial({'tables_created':1})
|
133
ext/librethinkdbxx/test/upstream/datum/array.yaml
Normal file
133
ext/librethinkdbxx/test/upstream/datum/array.yaml
Normal file
@ -0,0 +1,133 @@
|
||||
desc: Tests conversion to and from the RQL array type
|
||||
tests:
|
||||
- cd:
|
||||
- r.expr([])
|
||||
- r([])
|
||||
py: r.expr([])
|
||||
ot: []
|
||||
|
||||
- py: r.expr([1])
|
||||
js: r([1])
|
||||
rb: r([1])
|
||||
ot: [1]
|
||||
|
||||
- py: r.expr([1,2,3,4,5])
|
||||
js: r([1,2,3,4,5])
|
||||
rb: r.expr([1,2,3,4,5])
|
||||
ot: [1,2,3,4,5]
|
||||
|
||||
- cd: r.expr([]).type_of()
|
||||
ot: 'ARRAY'
|
||||
|
||||
# test coercions
|
||||
- cd:
|
||||
- r.expr([1, 2]).coerce_to('string')
|
||||
- r.expr([1, 2]).coerce_to('STRING')
|
||||
ot: '[1,2]'
|
||||
|
||||
- cd: r.expr([1, 2]).coerce_to('array')
|
||||
ot: [1, 2]
|
||||
|
||||
- cd: r.expr([1, 2]).coerce_to('number')
|
||||
ot: err('ReqlQueryLogicError', 'Cannot coerce ARRAY to NUMBER.', [0])
|
||||
|
||||
- cd: r.expr([['a', 1], ['b', 2]]).coerce_to('object')
|
||||
ot: {'a':1,'b':2}
|
||||
|
||||
- cd: r.expr([[]]).coerce_to('object')
|
||||
ot: err('ReqlQueryLogicError', 'Expected array of size 2, but got size 0.')
|
||||
|
||||
- cd: r.expr([['1',2,3]]).coerce_to('object')
|
||||
ot: err('ReqlQueryLogicError', 'Expected array of size 2, but got size 3.')
|
||||
|
||||
# Nested expression
|
||||
- cd: r.expr([r.expr(1)])
|
||||
ot: [1]
|
||||
|
||||
- cd: r.expr([1,3,4]).insert_at(1, 2)
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([2,3]).insert_at(0, 1)
|
||||
ot: [1,2,3]
|
||||
- cd: r.expr([1,2,3]).insert_at(-1, 4)
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([1,2,3]).insert_at(3, 4)
|
||||
ot: [1,2,3,4]
|
||||
- py: r.expr(3).do(lambda x: r.expr([1,2,3]).insert_at(x, 4))
|
||||
- js: r.expr(3).do(function (x) { return r.expr([1,2,3]).insert_at(x, 4); })
|
||||
- rb: r.expr(3).do{|x| r.expr([1,2,3]).insert_at(x, 4)}
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([1,2,3]).insert_at(4, 5)
|
||||
ot: err('ReqlNonExistenceError', 'Index `4` out of bounds for array of size: `3`.', [0])
|
||||
- cd: r.expr([1,2,3]).insert_at(-5, -1)
|
||||
ot: err('ReqlNonExistenceError', 'Index out of bounds: -5', [0])
|
||||
- cd: r.expr([1,2,3]).insert_at(1.5, 1)
|
||||
ot: err('ReqlQueryLogicError', 'Number not an integer: 1.5', [0])
|
||||
- cd: r.expr([1,2,3]).insert_at(null, 1)
|
||||
ot: err('ReqlNonExistenceError', 'Expected type NUMBER but found NULL.', [0])
|
||||
|
||||
- cd: r.expr([1,4]).splice_at(1, [2,3])
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([3,4]).splice_at(0, [1,2])
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([1,2]).splice_at(2, [3,4])
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([1,2]).splice_at(-1, [3,4])
|
||||
ot: [1,2,3,4]
|
||||
- py: r.expr(2).do(lambda x: r.expr([1,2]).splice_at(x, [3,4]))
|
||||
- js: r.expr(2).do(function (x) { return r.expr([1,2]).splice_at(x, [3,4]); })
|
||||
- rb: r.expr(2).do{|x| r.expr([1,2]).splice_at(x, [3,4])}
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([1,2]).splice_at(3, [3,4])
|
||||
ot: err('ReqlNonExistenceError', 'Index `3` out of bounds for array of size: `2`.', [0])
|
||||
- cd: r.expr([1,2]).splice_at(-4, [3,4])
|
||||
ot: err('ReqlNonExistenceError', 'Index out of bounds: -4', [0])
|
||||
- cd: r.expr([1,2,3]).splice_at(1.5, [1])
|
||||
ot: err('ReqlQueryLogicError', 'Number not an integer: 1.5', [0])
|
||||
- cd: r.expr([1,2,3]).splice_at(null, [1])
|
||||
ot: err('ReqlNonExistenceError', 'Expected type NUMBER but found NULL.', [0])
|
||||
- cd: r.expr([1,4]).splice_at(1, 2)
|
||||
ot: err('ReqlQueryLogicError', 'Expected type ARRAY but found NUMBER.', [0])
|
||||
|
||||
- cd: r.expr([1,2,3,4]).delete_at(0)
|
||||
ot: [2,3,4]
|
||||
- py: r.expr(0).do(lambda x: r.expr([1,2,3,4]).delete_at(x))
|
||||
- js: r.expr(0).do(function (x) { return r.expr([1,2,3,4]).delete_at(x); })
|
||||
- rb: r.expr(0).do{|x| r.expr([1,2,3,4]).delete_at(x)}
|
||||
ot: [2,3,4]
|
||||
- cd: r.expr([1,2,3,4]).delete_at(-1)
|
||||
ot: [1,2,3]
|
||||
- cd: r.expr([1,2,3,4]).delete_at(1,3)
|
||||
ot: [1,4]
|
||||
- cd: r.expr([1,2,3,4]).delete_at(4,4)
|
||||
ot: [1,2,3,4]
|
||||
- cd: r.expr([]).delete_at(0,0)
|
||||
ot: []
|
||||
- cd: r.expr([1,2,3,4]).delete_at(1,-1)
|
||||
ot: [1,4]
|
||||
- cd: r.expr([1,2,3,4]).delete_at(4)
|
||||
ot: err('ReqlNonExistenceError', 'Index `4` out of bounds for array of size: `4`.', [0])
|
||||
- cd: r.expr([1,2,3,4]).delete_at(-5)
|
||||
ot: err('ReqlNonExistenceError', 'Index out of bounds: -5', [0])
|
||||
- cd: r.expr([1,2,3]).delete_at(1.5)
|
||||
ot: err('ReqlQueryLogicError', 'Number not an integer: 1.5', [0])
|
||||
- cd: r.expr([1,2,3]).delete_at(null)
|
||||
ot: err('ReqlNonExistenceError', 'Expected type NUMBER but found NULL.', [0])
|
||||
|
||||
- cd: r.expr([0,2,3]).change_at(0, 1)
|
||||
ot: [1,2,3]
|
||||
- py: r.expr(1).do(lambda x: r.expr([0,2,3]).change_at(0,x))
|
||||
- js: r.expr(1).do(function (x) { return r.expr([0,2,3]).change_at(0,x); })
|
||||
- rb: r.expr(1).do{|x| r.expr([0,2,3]).change_at(0,x)}
|
||||
ot: [1,2,3]
|
||||
- cd: r.expr([1,0,3]).change_at(1, 2)
|
||||
ot: [1,2,3]
|
||||
- cd: r.expr([1,2,0]).change_at(2, 3)
|
||||
ot: [1,2,3]
|
||||
- cd: r.expr([1,2,3]).change_at(3, 4)
|
||||
ot: err('ReqlNonExistenceError', 'Index `3` out of bounds for array of size: `3`.', [0])
|
||||
- cd: r.expr([1,2,3,4]).change_at(-5, 1)
|
||||
ot: err('ReqlNonExistenceError', 'Index out of bounds: -5', [0])
|
||||
- cd: r.expr([1,2,3]).change_at(1.5, 1)
|
||||
ot: err('ReqlQueryLogicError', 'Number not an integer: 1.5', [0])
|
||||
- cd: r.expr([1,2,3]).change_at(null, 1)
|
||||
ot: err('ReqlNonExistenceError', 'Expected type NUMBER but found NULL.', [0])
|
363
ext/librethinkdbxx/test/upstream/datum/binary.yaml
Normal file
363
ext/librethinkdbxx/test/upstream/datum/binary.yaml
Normal file
@ -0,0 +1,363 @@
|
||||
desc: Tests of converstion to and from the RQL binary type
|
||||
tests:
|
||||
|
||||
# Short binary data from 0 to 12 characters
|
||||
# Not fully implemented for JS as comparing Buffer objects is non-trivial
|
||||
- def:
|
||||
rb: s = "".force_encoding('BINARY')
|
||||
py: s = b''
|
||||
js: s = Buffer("", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 0
|
||||
|
||||
- def:
|
||||
rb: s = "\x00".force_encoding('BINARY')
|
||||
py: s = b'\x00'
|
||||
js: s = Buffer("\x00", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 1
|
||||
|
||||
- def:
|
||||
rb: s = "\x00\x42".force_encoding('BINARY')
|
||||
py: s = b'\x00\x42'
|
||||
js: s = Buffer("\x00\x42", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 2
|
||||
|
||||
- def:
|
||||
rb: s = "\x00\xfe\x7a".force_encoding('BINARY')
|
||||
py: s = b'\x00\xfe\x7a'
|
||||
js: s = Buffer("\x00\xfe\x7a", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 3
|
||||
|
||||
- def:
|
||||
rb: s = "\xed\xfe\x00\xba".force_encoding('BINARY')
|
||||
py: s = b'\xed\xfe\x00\xba'
|
||||
js: s = Buffer("\xed\xfe\x00\xba", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 4
|
||||
|
||||
- def:
|
||||
rb: s = "\x50\xf9\x00\x77\xf9".force_encoding('BINARY')
|
||||
py: s = b'\x50\xf9\x00\x77\xf9'
|
||||
js: s = Buffer("\x50\xf9\x00\x77\xf9", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 5
|
||||
|
||||
- def:
|
||||
rb: s = "\x2f\xe3\xb5\x57\x00\x92".force_encoding('BINARY')
|
||||
py: s = b'\x2f\xe3\xb5\x57\x00\x92'
|
||||
js: s = Buffer("\x2f\xe3\xb5\x57\x00\x92", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 6
|
||||
|
||||
- def:
|
||||
rb: s = "\xa9\x43\x54\xe9\x00\xf8\xfb".force_encoding('BINARY')
|
||||
py: s = b'\xa9\x43\x54\xe9\x00\xf8\xfb'
|
||||
js: s = Buffer("\xa9\x43\x54\xe9\x00\xf8\xfb", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 7
|
||||
|
||||
- def:
|
||||
rb: s = "\x57\xbb\xe5\x82\x8b\xd3\x00\xf9".force_encoding('BINARY')
|
||||
py: s = b'\x57\xbb\xe5\x82\x8b\xd3\x00\xf9'
|
||||
js: s = Buffer("\x57\xbb\xe5\x82\x8b\xd3\x00\xf9", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 8
|
||||
|
||||
- def:
|
||||
rb: s = "\x44\x1b\x3e\x00\x13\x19\x29\x2a\xbf".force_encoding('BINARY')
|
||||
py: s = b'\x44\x1b\x3e\x00\x13\x19\x29\x2a\xbf'
|
||||
js: s = Buffer("\x44\x1b\x3e\x00\x13\x19\x29\x2a\xbf", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 9
|
||||
|
||||
- def:
|
||||
rb: s = "\x8a\x1d\x09\x00\x5d\x60\x6b\x2e\x70\xd9".force_encoding('BINARY')
|
||||
py: s = b'\x8a\x1d\x09\x00\x5d\x60\x6b\x2e\x70\xd9'
|
||||
js: s = Buffer("\x8a\x1d\x09\x00\x5d\x60\x6b\x2e\x70\xd9", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 10
|
||||
|
||||
- def:
|
||||
rb: s = "\x00\xaf\x47\x4b\x38\x99\x14\x8d\x8f\x10\x51".force_encoding('BINARY')
|
||||
py: s = b'\x00\xaf\x47\x4b\x38\x99\x14\x8d\x8f\x10\x51'
|
||||
js: s = Buffer("\x00\xaf\x47\x4b\x38\x99\x14\x8d\x8f\x10\x51", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 11
|
||||
|
||||
- def:
|
||||
cd: s = "\x45\x39\x00\xf7\xc2\x37\xfd\xe0\x38\x82\x40\xa9".force_encoding('BINARY')
|
||||
py: s = b'\x45\x39\x00\xf7\xc2\x37\xfd\xe0\x38\x82\x40\xa9'
|
||||
js: s = Buffer("\x45\x39\x00\xf7\xc2\x37\xfd\xe0\x38\x82\x40\xa9", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
- cd: r.binary(s).count()
|
||||
ot: 12
|
||||
|
||||
# Test comparisons
|
||||
# Binary objects to use, in order of increasing value
|
||||
- def:
|
||||
js: a = Buffer("\x00", 'binary')
|
||||
rb: a = "\x00".force_encoding('BINARY')
|
||||
py: a = b'\x00'
|
||||
- def:
|
||||
js: b = Buffer("\x00\x01", 'binary')
|
||||
rb: b = "\x00\x01".force_encoding('BINARY')
|
||||
py: b = b'\x00\x01'
|
||||
- def:
|
||||
js: c = Buffer("\x01", 'binary')
|
||||
rb: c = "\x01".force_encoding('BINARY')
|
||||
py: c = b'\x01'
|
||||
- def:
|
||||
js: d = Buffer("\x70\x22", 'binary')
|
||||
rb: d = "\x70\x22".force_encoding('BINARY')
|
||||
py: d = b'\x70\x22'
|
||||
- def:
|
||||
js: e = Buffer("\x80", 'binary')
|
||||
rb: e = "\x80".force_encoding('BINARY')
|
||||
py: e = b'\x80'
|
||||
- def:
|
||||
js: f = Buffer("\xFE", 'binary')
|
||||
rb: f = "\xFE".force_encoding('BINARY')
|
||||
py: f = b'\xFE'
|
||||
|
||||
# a -> a
|
||||
- cd: r.binary(a).eq(r.binary(a))
|
||||
ot: true
|
||||
- cd: r.binary(a).le(r.binary(a))
|
||||
ot: true
|
||||
- cd: r.binary(a).ge(r.binary(a))
|
||||
ot: true
|
||||
- cd: r.binary(a).ne(r.binary(a))
|
||||
ot: false
|
||||
- cd: r.binary(a).lt(r.binary(a))
|
||||
ot: false
|
||||
- cd: r.binary(a).gt(r.binary(a))
|
||||
ot: false
|
||||
|
||||
# a -> b
|
||||
- cd: r.binary(a).ne(r.binary(b))
|
||||
ot: true
|
||||
- cd: r.binary(a).lt(r.binary(b))
|
||||
ot: true
|
||||
- cd: r.binary(a).le(r.binary(b))
|
||||
ot: true
|
||||
- cd: r.binary(a).ge(r.binary(b))
|
||||
ot: false
|
||||
- cd: r.binary(a).gt(r.binary(b))
|
||||
ot: false
|
||||
- cd: r.binary(a).eq(r.binary(b))
|
||||
ot: false
|
||||
|
||||
# b -> c
|
||||
- cd: r.binary(b).ne(r.binary(c))
|
||||
ot: true
|
||||
- cd: r.binary(b).lt(r.binary(c))
|
||||
ot: true
|
||||
- cd: r.binary(b).le(r.binary(c))
|
||||
ot: true
|
||||
- cd: r.binary(b).ge(r.binary(c))
|
||||
ot: false
|
||||
- cd: r.binary(b).gt(r.binary(c))
|
||||
ot: false
|
||||
- cd: r.binary(b).eq(r.binary(c))
|
||||
ot: false
|
||||
|
||||
# c -> d
|
||||
- cd: r.binary(c).ne(r.binary(d))
|
||||
ot: true
|
||||
- cd: r.binary(c).lt(r.binary(d))
|
||||
ot: true
|
||||
- cd: r.binary(c).le(r.binary(d))
|
||||
ot: true
|
||||
- cd: r.binary(c).ge(r.binary(d))
|
||||
ot: false
|
||||
- cd: r.binary(c).gt(r.binary(d))
|
||||
ot: false
|
||||
- cd: r.binary(c).eq(r.binary(d))
|
||||
ot: false
|
||||
|
||||
# d -> e
|
||||
- cd: r.binary(d).ne(r.binary(e))
|
||||
ot: true
|
||||
- cd: r.binary(d).lt(r.binary(e))
|
||||
ot: true
|
||||
- cd: r.binary(d).le(r.binary(e))
|
||||
ot: true
|
||||
- cd: r.binary(d).ge(r.binary(e))
|
||||
ot: false
|
||||
- cd: r.binary(d).gt(r.binary(e))
|
||||
ot: false
|
||||
- cd: r.binary(d).eq(r.binary(e))
|
||||
ot: false
|
||||
|
||||
# e -> f
|
||||
- cd: r.binary(e).ne(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(e).lt(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(e).le(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(e).ge(r.binary(f))
|
||||
ot: false
|
||||
- cd: r.binary(e).gt(r.binary(f))
|
||||
ot: false
|
||||
- cd: r.binary(e).eq(r.binary(f))
|
||||
ot: false
|
||||
|
||||
# f -> f
|
||||
- cd: r.binary(f).eq(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(f).le(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(f).ge(r.binary(f))
|
||||
ot: true
|
||||
- cd: r.binary(f).ne(r.binary(f))
|
||||
ot: false
|
||||
- cd: r.binary(f).lt(r.binary(f))
|
||||
ot: false
|
||||
- cd: r.binary(f).gt(r.binary(f))
|
||||
ot: false
|
||||
|
||||
# Test encodings
|
||||
- py:
|
||||
cd: r.binary(u'イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム'.encode('utf-8'))
|
||||
ot: u'イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム'.encode('utf-8')
|
||||
py3:
|
||||
cd: r.binary(str('イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム').encode('utf-8'))
|
||||
ot: str('イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム').encode('utf-8')
|
||||
- py:
|
||||
cd: r.binary(u'ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ'.encode('utf-16'))
|
||||
ot: u'ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ'.encode('utf-16')
|
||||
py3:
|
||||
cd: r.binary(str('ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ').encode('utf-16'))
|
||||
ot: str('ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ').encode('utf-16')
|
||||
- py:
|
||||
cd: r.binary(u'lorem ipsum'.encode('ascii'))
|
||||
ot: u'lorem ipsum'.encode('ascii')
|
||||
py3:
|
||||
cd: r.binary(str('lorem ipsum').encode('ascii'))
|
||||
ot: str('lorem ipsum').encode('ascii')
|
||||
|
||||
# Test coercions
|
||||
- py: r.binary(b'foo').coerce_to('string')
|
||||
ot: 'foo'
|
||||
- py:
|
||||
cd: r.binary(u'イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム'.encode('utf-8')).coerce_to('string')
|
||||
ot: u'イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム'
|
||||
py3:
|
||||
cd: r.binary(str('イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム').encode('utf-8')).coerce_to('string')
|
||||
ot: str('イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム')
|
||||
- py:
|
||||
cd: r.binary(u'lorem ipsum'.encode('ascii')).coerce_to('string')
|
||||
ot: u'lorem ipsum'
|
||||
py3:
|
||||
cd: r.binary(str('lorem ipsum').encode('ascii')).coerce_to('string')
|
||||
ot: str('lorem ipsum')
|
||||
|
||||
- py: r.expr('foo').coerce_to('binary')
|
||||
ot: b'foo'
|
||||
|
||||
- cd: r.binary(a).coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- py: r.binary(b'foo').coerce_to('binary')
|
||||
ot: b'foo'
|
||||
|
||||
# Test slice
|
||||
- py: r.binary(b'abcdefg').slice(-3,-1)
|
||||
ot: b'ef'
|
||||
- py: r.binary(b'abcdefg').slice(0, 2)
|
||||
ot: b'ab'
|
||||
- py: r.binary(b'abcdefg').slice(3, -1)
|
||||
ot: b'def'
|
||||
- py: r.binary(b'abcdefg').slice(-5, 5)
|
||||
ot: b'cde'
|
||||
- py: r.binary(b'abcdefg').slice(-8, 2)
|
||||
ot: b'ab'
|
||||
- py: r.binary(b'abcdefg').slice(5, 7)
|
||||
ot: b'fg'
|
||||
|
||||
# Left side out-of-bound should clamp to index 0
|
||||
- py: r.binary(b'abcdefg').slice(-9, 2)
|
||||
ot: b'ab'
|
||||
|
||||
# Right side out-of-bound should return the valid subset of the range
|
||||
- py: r.binary(b'abcdefg').slice(5, 9)
|
||||
ot: b'fg'
|
||||
|
||||
# Test binary_format optarg
|
||||
- cd: r.binary(b)
|
||||
runopts:
|
||||
binary_format: "native"
|
||||
ot: b
|
||||
- cd: r.binary(b)
|
||||
runopts:
|
||||
binary_format: "raw"
|
||||
ot: {'$reql_type$':'BINARY','data':'AAE='}
|
||||
|
||||
# Test r.binary of nested terms
|
||||
- cd: r.binary(r.expr("data"))
|
||||
ot:
|
||||
js: Buffer("data", "binary")
|
||||
rb: "data"
|
||||
py: b"data"
|
||||
|
||||
- cd: r.binary(r.expr({}))
|
||||
ot: err('ReqlQueryLogicError', 'Expected type STRING but found OBJECT.', [])
|
||||
|
||||
- cd: r.binary(r.expr([]))
|
||||
ot: err('ReqlQueryLogicError', 'Expected type STRING but found ARRAY.', [])
|
||||
|
||||
# Test errors
|
||||
|
||||
# Missing 'data' field
|
||||
- py: r.expr({'$reql_type$':'BINARY'})
|
||||
rb: r.expr({'$reql_type$':'BINARY'})
|
||||
ot: err('ReqlQueryLogicError','Invalid binary pseudotype:'+' lacking `data` key.',[])
|
||||
|
||||
# Invalid base64 format
|
||||
- py: r.expr({'$reql_type$':'BINARY','data':'ABCDEFGH==AA'})
|
||||
ot: err('ReqlQueryLogicError','Invalid base64 format, data found after padding character \'=\'.',[])
|
||||
- py: r.expr({'$reql_type$':'BINARY','data':'ABCDEF==$'})
|
||||
ot: err('ReqlQueryLogicError','Invalid base64 format, data found after padding character \'=\'.',[])
|
||||
- py: r.expr({'$reql_type$':'BINARY','data':'A^CDEFGH'})
|
||||
ot: err('ReqlQueryLogicError','Invalid base64 character found:'+' \'^\'.',[])
|
||||
- py: r.expr({'$reql_type$':'BINARY','data':'ABCDE'})
|
||||
ot: err('ReqlQueryLogicError','Invalid base64 length:'+' 1 character remaining, cannot decode a full byte.',[])
|
||||
|
||||
# Invalid coercions
|
||||
- cd: r.binary(a).coerce_to('array')
|
||||
ot: err('ReqlQueryLogicError','Cannot coerce BINARY to ARRAY.',[])
|
||||
- cd: r.binary(a).coerce_to('object')
|
||||
ot: err('ReqlQueryLogicError','Cannot coerce BINARY to OBJECT.',[])
|
||||
- cd: r.binary(a).coerce_to('number')
|
||||
ot: err('ReqlQueryLogicError','Cannot coerce BINARY to NUMBER.',[])
|
||||
- cd: r.binary(a).coerce_to('nu'+'ll')
|
||||
ot: err('ReqlQueryLogicError','Cannot coerce BINARY to NULL.',[])
|
47
ext/librethinkdbxx/test/upstream/datum/bool.yaml
Normal file
47
ext/librethinkdbxx/test/upstream/datum/bool.yaml
Normal file
@ -0,0 +1,47 @@
|
||||
desc: Tests of conversion to and from the RQL bool type
|
||||
tests:
|
||||
- py: r.expr(True)
|
||||
js:
|
||||
- r.expr(true)
|
||||
- r(true)
|
||||
rb: r true
|
||||
ot: true
|
||||
|
||||
- py: r.expr(False)
|
||||
js:
|
||||
- r.expr(false)
|
||||
- r(false)
|
||||
rb: r false
|
||||
ot: false
|
||||
|
||||
- cd: r.expr(False).type_of()
|
||||
ot: 'BOOL'
|
||||
|
||||
# test coercions
|
||||
- cd: r.expr(True).coerce_to('string')
|
||||
ot: 'true'
|
||||
|
||||
- cd: r.expr(True).coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- cd: r.expr(False).coerce_to('bool')
|
||||
ot: False
|
||||
|
||||
- cd: r.expr(null).coerce_to('bool')
|
||||
ot: False
|
||||
|
||||
- cd: r.expr(0).coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- cd: r.expr('false').coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- cd: r.expr('foo').coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- cd: r.expr([]).coerce_to('bool')
|
||||
ot: True
|
||||
|
||||
- cd: r.expr({}).coerce_to('bool')
|
||||
ot: True
|
||||
|
18
ext/librethinkdbxx/test/upstream/datum/null.yaml
Normal file
18
ext/librethinkdbxx/test/upstream/datum/null.yaml
Normal file
@ -0,0 +1,18 @@
|
||||
desc: Tests of conversion to and from the RQL null type
|
||||
tests:
|
||||
- cd:
|
||||
- r(null)
|
||||
- r.expr(null)
|
||||
py: r.expr(null)
|
||||
ot: (null)
|
||||
|
||||
- cd: r.expr(null).type_of()
|
||||
rb: r(null).type_of()
|
||||
ot: 'NULL'
|
||||
|
||||
# test coercions
|
||||
- cd: r.expr(null).coerce_to('string')
|
||||
ot: 'null'
|
||||
|
||||
- cd: r.expr(null).coerce_to('null')
|
||||
ot: null
|
125
ext/librethinkdbxx/test/upstream/datum/number.yaml
Normal file
125
ext/librethinkdbxx/test/upstream/datum/number.yaml
Normal file
@ -0,0 +1,125 @@
|
||||
# desc will be included in a comment to help identify test groups
|
||||
desc: Tests of conversion to and from the RQL number type
|
||||
tests:
|
||||
|
||||
# Simple integers
|
||||
- cd: r.expr(1)
|
||||
js:
|
||||
- r(1)
|
||||
- r.expr(1)
|
||||
rb:
|
||||
- r 1
|
||||
- r(1)
|
||||
- r.expr(1)
|
||||
ot: 1
|
||||
- cd: r.expr(-1)
|
||||
js:
|
||||
- r(-1)
|
||||
- r.expr(-1)
|
||||
rb:
|
||||
- r -1
|
||||
- r(-1)
|
||||
- r.expr(-1)
|
||||
ot: -1
|
||||
- cd: r.expr(0)
|
||||
js:
|
||||
- r(0)
|
||||
- r.expr(0)
|
||||
rb:
|
||||
- r 0
|
||||
- r(0)
|
||||
- r.expr(0)
|
||||
ot: 0
|
||||
|
||||
# Floats
|
||||
- cd: r.expr(1.0)
|
||||
js:
|
||||
- r(1.0)
|
||||
- r.expr(1.0)
|
||||
rb:
|
||||
- r 1.0
|
||||
- r(1.0)
|
||||
- r.expr(1.0)
|
||||
ot: 1.0
|
||||
- cd: r.expr(1.5)
|
||||
js:
|
||||
- r(1.5)
|
||||
- r.expr(1.5)
|
||||
rb:
|
||||
- r 1.5
|
||||
- r(1.5)
|
||||
- r.expr(1.5)
|
||||
ot: 1.5
|
||||
- cd: r.expr(-0.5)
|
||||
js:
|
||||
- r(-0.5)
|
||||
- r.expr(-0.5)
|
||||
rb:
|
||||
- r -0.5
|
||||
- r(-0.5)
|
||||
- r.expr(-0.5)
|
||||
ot: -0.5
|
||||
- cd: r.expr(67498.89278)
|
||||
js:
|
||||
- r(67498.89278)
|
||||
- r.expr(67498.89278)
|
||||
rb:
|
||||
- r 67498.89278
|
||||
- r(67498.89278)
|
||||
- r.expr(67498.89278)
|
||||
ot: 67498.89278
|
||||
|
||||
# Big numbers
|
||||
- cd: r.expr(1234567890)
|
||||
js:
|
||||
- r(1234567890)
|
||||
- r.expr(1234567890)
|
||||
rb:
|
||||
- r 1234567890
|
||||
- r(1234567890)
|
||||
- r.expr(1234567890)
|
||||
ot: 1234567890
|
||||
|
||||
- cd: r.expr(-73850380122423)
|
||||
js:
|
||||
- r.expr(-73850380122423)
|
||||
- r(-73850380122423)
|
||||
rb:
|
||||
- r -73850380122423
|
||||
- r.expr(-73850380122423)
|
||||
- r(-73850380122423)
|
||||
ot: -73850380122423
|
||||
|
||||
# Test that numbers round-trip correctly
|
||||
- py:
|
||||
cd: r.expr(1234567890123456789012345678901234567890)
|
||||
ot: float(1234567890123456789012345678901234567890)
|
||||
js:
|
||||
cd: r.expr(1234567890123456789012345678901234567890)
|
||||
ot: 1234567890123456789012345678901234567890
|
||||
- cd: r.expr(123.4567890123456789012345678901234567890)
|
||||
ot: 123.4567890123456789012345678901234567890
|
||||
|
||||
- cd: r.expr(1).type_of()
|
||||
ot: 'NUMBER'
|
||||
|
||||
# test coercions
|
||||
- cd: r.expr(1).coerce_to('string')
|
||||
ot: '1'
|
||||
|
||||
- cd: r.expr(1).coerce_to('number')
|
||||
ot: 1
|
||||
|
||||
# The drivers now convert to an int (where relevant) if we think the result
|
||||
# looks like an int (result % 1.0 == 0.0)
|
||||
- py: r.expr(1.0)
|
||||
rb: r 1.0
|
||||
ot: int_cmp(1)
|
||||
|
||||
- py: r.expr(45)
|
||||
rb: r 45
|
||||
ot: int_cmp(45)
|
||||
|
||||
- py: r.expr(1.2)
|
||||
rb: r 1.2
|
||||
ot: float_cmp(1.2)
|
85
ext/librethinkdbxx/test/upstream/datum/object.yaml
Normal file
85
ext/librethinkdbxx/test/upstream/datum/object.yaml
Normal file
@ -0,0 +1,85 @@
|
||||
desc: Tests conversion to and from the RQL object type
|
||||
tests:
|
||||
- cd:
|
||||
- r({})
|
||||
- r.expr({})
|
||||
py: r.expr({})
|
||||
ot: {}
|
||||
- cd:
|
||||
- r({a:1})
|
||||
- r.expr({'a':1})
|
||||
py: r.expr({'a':1})
|
||||
ot: {'a':1}
|
||||
- cd:
|
||||
- r({a:1, b:'two', c:True})
|
||||
- r.expr({'a':1, 'b':'two', 'c':True})
|
||||
py: r.expr({'a':1, 'b':'two', 'c':True})
|
||||
ot: {'a':1, 'b':'two', 'c':True}
|
||||
|
||||
# Nested expressions
|
||||
- cd: r.expr({'a':r.expr(1)})
|
||||
ot: {'a':1}
|
||||
|
||||
- cd: r.expr({'a':{'b':[{'c':2}, 'a', 4]}})
|
||||
ot: {'a':{'b':[{'c':2}, 'a', 4]}}
|
||||
|
||||
- cd: r.expr({'a':1}).type_of()
|
||||
ot: 'OBJECT'
|
||||
|
||||
# test coercions
|
||||
- cd: r.expr({'a':1}).coerce_to('string')
|
||||
ot:
|
||||
cd: '{"a":1}'
|
||||
|
||||
- cd: r.expr({'a':1}).coerce_to('object')
|
||||
ot: {'a':1}
|
||||
|
||||
- cd: r.expr({'a':1}).coerce_to('array')
|
||||
ot: [['a',1]]
|
||||
|
||||
# Error cases
|
||||
- cd: r.expr({12:'a'})
|
||||
# JavaScript auto-converts keys for us
|
||||
js:
|
||||
ot: err_regex("ReqlCompileError", "Object keys must be strings.*")
|
||||
|
||||
- cd: r.expr({'a':{12:'b'}})
|
||||
# JavaScript auto-converts keys for us
|
||||
js:
|
||||
ot: err_regex("ReqlCompileError", "Object keys must be strings.*")
|
||||
|
||||
- js: r({'a':undefined})
|
||||
ot: err("ReqlCompileError", "Object field 'a' may not be undefined")
|
||||
|
||||
- js: r({'a':{'b':undefined}})
|
||||
ot: err("ReqlCompileError", "Object field 'b' may not be undefined")
|
||||
|
||||
- cd: r.expr({}, "foo")
|
||||
ot:
|
||||
cd: err("ReqlCompileError", "Second argument to `r.expr` must be a number.")
|
||||
js: err("ReqlCompileError", "Second argument to `r.expr` must be a number or undefined.")
|
||||
|
||||
- js: r.expr({}, NaN)
|
||||
ot: err("ReqlCompileError", "Second argument to `r.expr` must be a number or undefined.")
|
||||
|
||||
# r.object
|
||||
- cd: r.object()
|
||||
ot: {}
|
||||
|
||||
- cd: r.object('a', 1, 'b', 2)
|
||||
ot: {'a':1,'b':2}
|
||||
|
||||
- cd: r.object('c'+'d', 3)
|
||||
ot: {'cd':3}
|
||||
|
||||
- cd: r.object('o','d','d')
|
||||
ot: err("ReqlQueryLogicError", "OBJECT expects an even number of arguments (but found 3).", [])
|
||||
|
||||
- cd: r.object(1, 1)
|
||||
ot: err("ReqlQueryLogicError","Expected type STRING but found NUMBER.",[])
|
||||
|
||||
- cd: r.object('e', 4, 'e', 5)
|
||||
ot: err("ReqlQueryLogicError","Duplicate key \"e\" in object. (got 4 and 5 as values)",[])
|
||||
|
||||
- cd: r.object('g', r.db('test'))
|
||||
ot: err("ReqlQueryLogicError","Expected type DATUM but found DATABASE:",[])
|
329
ext/librethinkdbxx/test/upstream/datum/string.yaml
Normal file
329
ext/librethinkdbxx/test/upstream/datum/string.yaml
Normal file
@ -0,0 +1,329 @@
|
||||
desc: Tests of converstion to and from the RQL string type
|
||||
tests:
|
||||
|
||||
- def:
|
||||
cd: japanese_hello = 'こんにちは'
|
||||
# Python supports unicode strings with the u'' pattern, except 3.0-3.2
|
||||
py: japanese_hello = u'こんにちは'
|
||||
py3.0: japanese_hello = 'こんにちは'
|
||||
py3.1: japanese_hello = 'こんにちは'
|
||||
py3.2: japanese_hello = 'こんにちは'
|
||||
|
||||
# Simple strings
|
||||
- cd:
|
||||
- r('str')
|
||||
- r.expr('str')
|
||||
py: r.expr('str')
|
||||
ot: "str"
|
||||
- cd:
|
||||
- r("str")
|
||||
- r.expr("str")
|
||||
py: r.expr("str")
|
||||
ot: "str"
|
||||
|
||||
# Unicode
|
||||
|
||||
- cd:
|
||||
py:
|
||||
cd: r.expr(u'str')
|
||||
ot: u'str'
|
||||
py3.0: r.expr('str')
|
||||
py3.1: r.expr('str')
|
||||
py3.2: r.expr('str')
|
||||
ot: 'str'
|
||||
|
||||
- cd: r.expr(japanese_hello)
|
||||
ot:
|
||||
cd: 'こんにちは'
|
||||
py: u'こんにちは'
|
||||
py3.0: 'こんにちは'
|
||||
py3.1: 'こんにちは'
|
||||
py3.2: 'こんにちは'
|
||||
|
||||
- cd: r.expr('foo').type_of()
|
||||
ot: 'STRING'
|
||||
|
||||
# test coercions
|
||||
- cd: r.expr('foo').coerce_to('string')
|
||||
ot: 'foo'
|
||||
- cd: r.expr('-1.2').coerce_to('NUMBER')
|
||||
ot: -1.2
|
||||
- cd: r.expr('--1.2').coerce_to('NUMBER')
|
||||
ot: err("ReqlQueryLogicError", "Could not coerce `--1.2` to NUMBER.", [])
|
||||
- cd: r.expr('-1.2-').coerce_to('NUMBER')
|
||||
ot: err("ReqlQueryLogicError", "Could not coerce `-1.2-` to NUMBER.", [])
|
||||
- cd: r.expr('0xa').coerce_to('NUMBER')
|
||||
ot: 10
|
||||
- cd: r.expr('inf').coerce_to('NUMBER')
|
||||
ot: err("ReqlQueryLogicError", "Non-finite number: inf", [])
|
||||
|
||||
# count is defined as the number of unicode codepoints
|
||||
- cd: r.expr('hello, world!').count()
|
||||
ot: 13
|
||||
- cd: r.expr(japanese_hello).count()
|
||||
ot: 5
|
||||
|
||||
# slice is defined on unicode codepoints
|
||||
- cd: r.expr('hello').slice(1)
|
||||
ot: 'ello'
|
||||
- cd: r.expr('hello').slice(-1)
|
||||
ot: 'o'
|
||||
- cd: r.expr('hello').slice(-4,3)
|
||||
ot: 'el'
|
||||
- cd: r.expr('hello').slice(-99)
|
||||
ot: 'hello'
|
||||
- cd: r.expr('hello').slice(0)
|
||||
ot: 'hello'
|
||||
- cd: r.expr(japanese_hello).slice(1)
|
||||
ot:
|
||||
cd: 'んにちは'
|
||||
py: u'んにちは'
|
||||
py3.0: 'んにちは'
|
||||
py3.1: 'んにちは'
|
||||
py3.2: 'んにちは'
|
||||
- cd: r.expr(japanese_hello).slice(1,2)
|
||||
ot:
|
||||
cd: 'ん'
|
||||
py: u'ん'
|
||||
py3.0: 'ん'
|
||||
py3.1: 'ん'
|
||||
py3.2: 'ん'
|
||||
- cd: r.expr(japanese_hello).slice(-3)
|
||||
ot:
|
||||
cd: 'にちは'
|
||||
py: u'にちは'
|
||||
py3.0: 'にちは'
|
||||
py3.1: 'にちは'
|
||||
py3.2: 'にちは'
|
||||
|
||||
# This is how these edge cases are handled in Python.
|
||||
- cd: r.expr('').split()
|
||||
ot: []
|
||||
- cd: r.expr('').split(null)
|
||||
ot: []
|
||||
- cd: r.expr('').split(' ')
|
||||
ot: ['']
|
||||
- cd: r.expr('').split('')
|
||||
ot: []
|
||||
- cd: r.expr('').split(null, 5)
|
||||
ot: []
|
||||
- cd: r.expr('').split(' ', 5)
|
||||
ot: ['']
|
||||
- cd: r.expr('').split('', 5)
|
||||
ot: []
|
||||
|
||||
- cd: r.expr('aaaa bbbb cccc ').split()
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(null)
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' ')
|
||||
ot: ['aaaa', 'bbbb', '', 'cccc', '']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('')
|
||||
ot: ['a', 'a', 'a', 'a', ' ', 'b', 'b', 'b', 'b', ' ', ' ', 'c', 'c', 'c', 'c', ' ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('b')
|
||||
ot: ['aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('bb')
|
||||
ot: ['aaaa ', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' bbbb ')
|
||||
ot: ['aaaa', 'cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split('bb')
|
||||
ot: ['aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ')
|
||||
ot: ['aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ')
|
||||
ot: ['aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(null, 3)
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' ', 5)
|
||||
ot: ['aaaa', 'bbbb', '', 'cccc', '']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('', 5)
|
||||
ot: ['a', 'a', 'a', 'a', ' ', 'bbbb cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('b', 5)
|
||||
ot: ['aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('bb', 3)
|
||||
ot: ['aaaa ', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split('bb', 6)
|
||||
ot: ['aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 3)
|
||||
ot: ['aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(null, 2)
|
||||
ot: ['aaaa', 'bbbb', 'cccc ']
|
||||
- cd: r.expr("a b ").split(null, 2)
|
||||
ot: ["a", "b"]
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' ', 4)
|
||||
ot: ['aaaa', 'bbbb', '', 'cccc', '']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('', 4)
|
||||
ot: ['a', 'a', 'a', 'a', ' bbbb cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('b', 4)
|
||||
ot: ['aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('bb', 2)
|
||||
ot: ['aaaa ', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' bbbb ', 1)
|
||||
ot: ['aaaa', 'cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split('bb', 5)
|
||||
ot: ['aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 1)
|
||||
ot: ['aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(null, 1)
|
||||
ot: ['aaaa', 'bbbb cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' ', 2)
|
||||
ot: ['aaaa', 'bbbb', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('', 2)
|
||||
ot: ['a', 'a', 'aa bbbb cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('b', 2)
|
||||
ot: ['aaaa ', '', 'bb cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split('bb', 2)
|
||||
ot: ['aaaa ', '', ' cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc ').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc ']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split('bb', 2)
|
||||
ot: ['aaaa ', '', ' cccc b d bb e bbbb f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr('aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: ['aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr(' ').split()
|
||||
ot: []
|
||||
- cd: r.expr(' ').split(null)
|
||||
ot: []
|
||||
- cd: r.expr(' ').split(' ')
|
||||
ot: ['', '', '']
|
||||
- cd: r.expr(' ').split(null, 5)
|
||||
ot: []
|
||||
- cd: r.expr(' ').split(' ', 5)
|
||||
ot: ['', '', '']
|
||||
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split()
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(null)
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' ')
|
||||
ot: ['', '', 'aaaa', 'bbbb', '', 'cccc', '']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('b')
|
||||
ot: [' aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('bb')
|
||||
ot: [' aaaa ', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' bbbb ')
|
||||
ot: [' aaaa', 'cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split('bb')
|
||||
ot: [' aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ')
|
||||
ot: [' aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ')
|
||||
ot: [' aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(null, 3)
|
||||
ot: ['aaaa', 'bbbb', 'cccc']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' ', 5)
|
||||
ot: ['', '', 'aaaa', 'bbbb', '', 'cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('b', 5)
|
||||
ot: [' aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('bb', 3)
|
||||
ot: [' aaaa ', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split('bb', 6)
|
||||
ot: [' aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 3)
|
||||
ot: [' aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(null, 2)
|
||||
ot: ['aaaa', 'bbbb', 'cccc ']
|
||||
- cd: r.expr("a b ").split(null, 2)
|
||||
ot: ["a", "b"]
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' ', 4)
|
||||
ot: ['', '', 'aaaa', 'bbbb', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('b', 4)
|
||||
ot: [' aaaa ', '', '', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('bb', 2)
|
||||
ot: [' aaaa ', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' bbbb ', 1)
|
||||
ot: [' aaaa', 'cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split('bb', 5)
|
||||
ot: [' aaaa ', '', ' cccc b d ', ' e ', '', ' f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 1)
|
||||
ot: [' aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(null, 1)
|
||||
ot: ['aaaa', 'bbbb cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' ', 2)
|
||||
ot: ['', '', 'aaaa bbbb cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('b', 2)
|
||||
ot: [' aaaa ', '', 'bb cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split('bb', 2)
|
||||
ot: [' aaaa ', '', ' cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc ').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc ']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split('bb', 2)
|
||||
ot: [' aaaa ', '', ' cccc b d bb e bbbb f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc b d bb e bbbb f']
|
||||
- cd: r.expr(' aaaa bbbb cccc b d bb e bbbb f').split(' bbbb ', 2)
|
||||
ot: [' aaaa', 'cccc b d bb e', 'f']
|
||||
|
||||
- cd: r.expr("abc-dEf-GHJ").upcase()
|
||||
ot: "ABC-DEF-GHJ"
|
||||
- cd: r.expr("abc-dEf-GHJ").downcase()
|
||||
ot: "abc-def-ghj"
|
||||
|
||||
# Same 3.0-3.2 caveats
|
||||
- py:
|
||||
cd: r.expr(u"f\u00e9oo").split("")
|
||||
ot: [u"f", u"\u00e9", u"o", u"o"]
|
||||
py3.0: r.expr("f\u00e9oo").split("")
|
||||
py3.1: r.expr("f\u00e9oo").split("")
|
||||
py3.2: r.expr("f\u00e9oo").split("")
|
||||
cd: r.expr("f\u00e9oo").split("")
|
||||
ot: ["f", "\u00e9", "o", "o"]
|
||||
|
||||
- py:
|
||||
cd: r.expr(u"fe\u0301oo").split("")
|
||||
ot: [u"f", u"e\u0301", u"o", u"o"]
|
||||
py3.0: r.expr("fe\u0301oo").split("")
|
||||
py3.1: r.expr("fe\u0301oo").split("")
|
||||
py3.2: r.expr("fe\u0301oo").split("")
|
||||
cd: r.expr("fe\u0301oo").split("")
|
||||
ot: ["f", "e\u0301", "o", "o"]
|
||||
|
||||
## Unicode spacing characters.
|
||||
|
||||
## original set from previous work:
|
||||
- cd: r.expr("foo bar\tbaz\nquux\rfred\u000bbarney\u000cwilma").split()
|
||||
py:
|
||||
cd: r.expr(u"foo bar\tbaz\nquux\rfred\u000bbarney\u000cwilma").split()
|
||||
ot: ["foo", "bar", "baz", "quux", "fred", "barney", "wilma"]
|
||||
py3.0: r.expr("foo bar\tbaz\nquux\rfred\u000bbarney\u000cwilma").split()
|
||||
py3.1: r.expr("foo bar\tbaz\nquux\rfred\u000bbarney\u000cwilma").split()
|
||||
py3.2: r.expr("foo bar\tbaz\nquux\rfred\u000bbarney\u000cwilma").split()
|
||||
ot: ["foo", "bar", "baz", "quux", "fred", "barney", "wilma"]
|
||||
|
||||
## some specialized Unicode horrors:
|
||||
## - U+00A0 is nonbreaking space and is in the Zs category
|
||||
## - U+0085 is the next line character and is not in the Zs category but is considered whitespace
|
||||
## - U+2001 is em quad space and is in the Zs category
|
||||
## - U+200B is a zero width space and is not in the Zs category and is not considered whitespace
|
||||
## - U+2060 is a word joining zero width nonbreaking space and is NOT in any of the Z categories
|
||||
## - U+2028 is a line separator and is in the Zl category
|
||||
## - U+2029 is a paragraph separator and is in the Zp category
|
||||
- py:
|
||||
cd: r.expr(u"foo\u00a0bar\u2001baz\u2060quux\u2028fred\u2028barney\u2029wilma\u0085betty\u200b").split()
|
||||
ot: ["foo", "bar", u"baz\u2060quux", "fred", "barney", "wilma", u"betty\u200b"]
|
||||
py3.0: r.expr("foo\u00a0bar\u2001baz\u2060quux\u2028fred\u2028barney\u2029wilma\u0085betty\u200b").split()
|
||||
py3.1: r.expr("foo\u00a0bar\u2001baz\u2060quux\u2028fred\u2028barney\u2029wilma\u0085betty\u200b").split()
|
||||
py3.2: r.expr("foo\u00a0bar\u2001baz\u2060quux\u2028fred\u2028barney\u2029wilma\u0085betty\u200b").split()
|
||||
cd: r.expr("foo\u00a0bar\u2001baz\u2060quux\u2028fred\u2028barney\u2029wilma\u0085betty\u200b").split()
|
||||
ot: ["foo", "bar", "baz\u2060quux", "fred", "barney", "wilma", "betty\u200b"]
|
14
ext/librethinkdbxx/test/upstream/datum/typeof.yaml
Normal file
14
ext/librethinkdbxx/test/upstream/datum/typeof.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
desc: These tests test the type of command
|
||||
tests:
|
||||
|
||||
# Method form
|
||||
- cd: r.expr(null).type_of()
|
||||
ot: 'NULL'
|
||||
|
||||
# Prefix form
|
||||
- cd: r.type_of(null)
|
||||
ot: 'NULL'
|
||||
|
||||
# Error cases
|
||||
- js: r(null).typeOf(1)
|
||||
ot: err('ReqlCompileError', 'Expected 1 argument but found 2.', [0])
|
20
ext/librethinkdbxx/test/upstream/datum/uuid.yaml
Normal file
20
ext/librethinkdbxx/test/upstream/datum/uuid.yaml
Normal file
@ -0,0 +1,20 @@
|
||||
desc: Test that UUIDs work
|
||||
tests:
|
||||
- cd: r.uuid()
|
||||
ot: uuid()
|
||||
- cd: r.expr(r.uuid())
|
||||
ot: uuid()
|
||||
- cd: r.type_of(r.uuid())
|
||||
ot: 'STRING'
|
||||
- cd: r.uuid().ne(r.uuid())
|
||||
ot: true
|
||||
- cd: r.uuid('magic')
|
||||
ot: ('97dd10a5-4fc4-554f-86c5-0d2c2e3d5330')
|
||||
- cd: r.uuid('magic').eq(r.uuid('magic'))
|
||||
ot: true
|
||||
- cd: r.uuid('magic').ne(r.uuid('beans'))
|
||||
ot: true
|
||||
- py: r.expr([1,2,3,4,5,6,7,8,9,10]).map(lambda u:r.uuid()).distinct().count()
|
||||
js: r([1,2,3,4,5,6,7,8,9,10]).map(function(u) {return r.uuid();}).distinct().count()
|
||||
rb: r.expr([1,2,3,4,5,6,7,8,9,10]).map {|u| r.uuid()}.distinct().count()
|
||||
ot: 10
|
270
ext/librethinkdbxx/test/upstream/default.yaml
Normal file
270
ext/librethinkdbxx/test/upstream/default.yaml
Normal file
@ -0,0 +1,270 @@
|
||||
desc: Tests r.default
|
||||
tests:
|
||||
- cd: r.expr(1).default(2)
|
||||
ot: 1
|
||||
- cd: r.expr(null).default(2)
|
||||
ot: 2
|
||||
- cd: r.expr({})['b'].default(2)
|
||||
js: r.expr({})('b').default(2)
|
||||
ot: 2
|
||||
- cd: r.expr(r.expr('a')['b']).default(2)
|
||||
js: r.expr(r.expr('a')('b')).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Cannot perform bracket on a non-object non-sequence `\"a\"`.", [])
|
||||
- rb: r.expr([]).reduce{|a,b| a+b}.default(2)
|
||||
py: r.expr([]).reduce(lambda a,b:a+b).default(2)
|
||||
js: r.expr([]).reduce(function(a,b){return a+b}).default(2)
|
||||
ot: 2
|
||||
- rb: r.expr([]).union([]).reduce{|a,b| a+b}.default(2)
|
||||
py: r.expr([]).union([]).reduce(lambda a,b:a+b).default(2)
|
||||
js: r.expr([]).union([]).reduce(function(a,b){return a+b}).default(2)
|
||||
ot: 2
|
||||
- rb: r.expr('a').reduce{|a,b| a+b}.default(2)
|
||||
py: r.expr('a').reduce(lambda a,b:a+b).default(2)
|
||||
js: r.expr('a').reduce(function(a,b){return a+b}).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Cannot convert STRING to SEQUENCE", [])
|
||||
- cd: (r.expr(null) + 5).default(2)
|
||||
js: (r.expr(null).add(5)).default(2)
|
||||
ot: 2
|
||||
- cd: (5 + r.expr(null)).default(2)
|
||||
js: (r.expr(5).add(null)).default(2)
|
||||
ot: 2
|
||||
- cd: (5 - r.expr(null)).default(2)
|
||||
js: (r.expr(5).sub(null)).default(2)
|
||||
ot: 2
|
||||
- cd: (r.expr(null) - 5).default(2)
|
||||
js: (r.expr(null).sub(5)).default(2)
|
||||
ot: 2
|
||||
- cd: (r.expr('a') + 5).default(2)
|
||||
js: (r.expr('a').add(5)).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type STRING but found NUMBER.", [])
|
||||
- cd: (5 + r.expr('a')).default(2)
|
||||
js: (r.expr(5).add('a')).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found STRING.", [])
|
||||
- cd: (r.expr('a') - 5).default(2)
|
||||
js: (r.expr('a').sub(5)).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found STRING.", [])
|
||||
- cd: (5 - r.expr('a')).default(2)
|
||||
js: (r.expr(5).sub('a')).default(2)
|
||||
ot: err("ReqlQueryLogicError", "Expected type NUMBER but found STRING.", [])
|
||||
|
||||
- cd: r.expr(1).default(r.error())
|
||||
ot: 1
|
||||
- cd: r.expr(null).default(r.error())
|
||||
ot: (null)
|
||||
- cd: r.expr({})['b'].default(r.error())
|
||||
js: r.expr({})('b').default(r.error())
|
||||
ot: err("ReqlNonExistenceError", "No attribute `b` in object:", [])
|
||||
- rb: r.expr([]).reduce{|a,b| a+b}.default(r.error)
|
||||
py: r.expr([]).reduce(lambda a,b:a+b).default(r.error)
|
||||
js: r.expr([]).reduce(function(a,b){return a+b}).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Cannot reduce over an empty stream.", [])
|
||||
- rb: r.expr([]).union([]).reduce{|a,b| a+b}.default(r.error)
|
||||
py: r.expr([]).union([]).reduce(lambda a,b:a+b).default(r.error)
|
||||
js: r.expr([]).union([]).reduce(function(a,b){return a+b}).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Cannot reduce over an empty stream.", [])
|
||||
- cd: (r.expr(null) + 5).default(r.error)
|
||||
js: (r.expr(null).add(5)).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Expected type NUMBER but found NULL.", [])
|
||||
- cd: (5 + r.expr(null)).default(r.error)
|
||||
js: (r.expr(5).add(null)).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Expected type NUMBER but found NULL.", [])
|
||||
- cd: (5 - r.expr(null)).default(r.error)
|
||||
js: (r.expr(5).sub(null)).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Expected type NUMBER but found NULL.", [])
|
||||
- cd: (r.expr(null) - 5).default(r.error)
|
||||
js: (r.expr(null).sub(5)).default(r.error)
|
||||
ot: err("ReqlNonExistenceError", "Expected type NUMBER but found NULL.", [])
|
||||
|
||||
- rb: r.expr(1).default{|e| e}
|
||||
py: r.expr(1).default(lambda e:e)
|
||||
js: r.expr(1).default(function(e){return e})
|
||||
ot: 1
|
||||
- cd: r.expr(null).default{|e| e}
|
||||
py: r.expr(null).default(lambda e:e)
|
||||
js: r.expr(null).default(function(e){return e})
|
||||
ot: (null)
|
||||
- cd: r.expr({})['b'].default{|e| e}
|
||||
py: r.expr({})['b'].default(lambda e:e)
|
||||
js: r.expr({})('b').default(function(e){return e})
|
||||
ot: "No attribute `b` in object:\n{}"
|
||||
- cd: r.expr([]).reduce{|a,b| a+b}.default{|e| e}
|
||||
py: r.expr([]).reduce(lambda a,b:a+b).default(lambda e:e)
|
||||
js: r.expr([]).reduce(function(a,b){return a+b}).default(function(e){return e})
|
||||
ot: ("Cannot reduce over an empty stream.")
|
||||
- cd: r.expr([]).union([]).reduce{|a,b| a+b}.default{|e| e}
|
||||
py: r.expr([]).union([]).reduce(lambda a,b:a+b).default(lambda e:e)
|
||||
js: r.expr([]).union([]).reduce(function(a,b){return a+b}).default(function(e){return e})
|
||||
ot: ("Cannot reduce over an empty stream.")
|
||||
- cd: (r.expr(null) + 5).default{|e| e}
|
||||
py: (r.expr(null) + 5).default(lambda e:e)
|
||||
js: (r.expr(null).add(5)).default(function(e){return e})
|
||||
ot: ("Expected type NUMBER but found NULL.")
|
||||
- cd: (5 + r.expr(null)).default{|e| e}
|
||||
py: (5 + r.expr(null)).default(lambda e:e)
|
||||
js: (r.expr(5).add(null)).default(function(e){return e})
|
||||
ot: ("Expected type NUMBER but found NULL.")
|
||||
- cd: (5 - r.expr(null)).default{|e| e}
|
||||
py: (5 - r.expr(null)).default(lambda e:e)
|
||||
js: (r.expr(5).sub(null)).default(function(e){return e})
|
||||
ot: ("Expected type NUMBER but found NULL.")
|
||||
- cd: (r.expr(null) - 5).default{|e| e}
|
||||
py: (r.expr(null) - 5).default(lambda e:e)
|
||||
js: (r.expr(null).sub(5)).default(function(e){return e})
|
||||
ot: ("Expected type NUMBER but found NULL.")
|
||||
|
||||
- def: arr = r.expr([{'a':1},{'a':null},{}]).order_by('a')
|
||||
|
||||
- cd: arr.filter{|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1))
|
||||
js: arr.filter(function(x){return x('a').eq(1)})
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter(:default => false){|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1), default=False)
|
||||
js: arr.filter(function(x){return x('a').eq(1)}, {'default':false})
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter(:default => true){|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1), default=True)
|
||||
js: arr.filter(function(x){return x('a').eq(1)}, {'default':true})
|
||||
ot: [{}, {'a':1}]
|
||||
# `null` compares not equal to 1 with no error
|
||||
- cd: arr.filter(:default => r.js('true')){|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1), default=r.js('true'))
|
||||
js: arr.filter(function(x) { return x('a').eq(1) }, { 'default':r.js('true') })
|
||||
ot: [{}, {'a':1}]
|
||||
- cd: arr.filter(:default => r.js('false')){|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1), default=r.js('false'))
|
||||
js: arr.filter(function(x) { return x('a').eq(1) }, { 'default':r.js('false') })
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter(:default => r.error){|x| x['a'].eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].eq(1), default=r.error())
|
||||
js: arr.filter(function(x){return x('a').eq(1)}, {'default':r.error()})
|
||||
ot: err("ReqlNonExistenceError", "No attribute `a` in object:", [])
|
||||
|
||||
- cd: r.expr(false).do{|d| arr.filter(:default => d){|x| x['a'].eq(1)}}
|
||||
py: r.expr(False).do(lambda d:arr.filter(lambda x:x['a'].eq(1), default=d))
|
||||
js: r.expr(false).do(function(d){return arr.filter(function(x){return x('a').eq(1)}, {default:d})})
|
||||
ot: [{'a':1}]
|
||||
- cd: r.expr(true).do{|d| arr.filter(:default => d){|x| x['a'].eq(1)}}.orderby('a')
|
||||
py: r.expr(True).do(lambda d:arr.filter(lambda x:x['a'].eq(1), default=d)).order_by('a')
|
||||
js: r.expr(true).do(function(d){return arr.filter(function(x){return x('a').eq(1)}, {default:d})}).orderBy('a')
|
||||
ot: [{}, {'a':1}]
|
||||
# `null` compares not equal to 1 with no error
|
||||
|
||||
- cd: arr.filter{|x| x['a'].default(0).eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].default(0).eq(1))
|
||||
js: arr.filter(function(x){return x('a').default(0).eq(1)})
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter{|x| x['a'].default(1).eq(1)}.orderby('a')
|
||||
py: arr.filter(lambda x:x['a'].default(1).eq(1)).order_by('a')
|
||||
js: arr.filter(function(x){return x('a').default(1).eq(1)}).orderBy('a')
|
||||
ot: ([{}, {'a':null}, {'a':1}])
|
||||
- cd: arr.filter{|x| x['a'].default(r.error).eq(1)}
|
||||
py: arr.filter(lambda x:x['a'].default(r.error()).eq(1))
|
||||
js: arr.filter(function(x){return x('a').default(r.error()).eq(1)})
|
||||
ot: [{'a':1}]
|
||||
# gets caught by `filter` default
|
||||
|
||||
- cd: r.expr(0).do{|i| arr.filter{|x| x['a'].default(i).eq(1)}}
|
||||
py: r.expr(0).do(lambda i:arr.filter(lambda x:x['a'].default(i).eq(1)))
|
||||
js: r.expr(0).do(function(i){return arr.filter(function(x){return x('a').default(i).eq(1)})})
|
||||
ot: [{'a':1}]
|
||||
- cd: r.expr(1).do{|i| arr.filter{|x| x['a'].default(i).eq(1)}}.orderby('a')
|
||||
py: r.expr(1).do(lambda i:arr.filter(lambda x:x['a'].default(i).eq(1))).order_by('a')
|
||||
js: r.expr(1).do(function(i){return arr.filter(function(x){return x('a').default(i).eq(1)})}).orderBy('a')
|
||||
ot: ([{},{'a':null},{'a':1}])
|
||||
|
||||
- cd: arr.filter{|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: arr.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)))
|
||||
js: arr.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))})
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter(:default => false){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: arr.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=False)
|
||||
js: arr.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:false})
|
||||
ot: [{'a':1}]
|
||||
- cd: arr.filter(:default => true){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}.orderby('a')
|
||||
py: arr.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=True).order_by('a')
|
||||
js: arr.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:true}).orderBy('a')
|
||||
ot: ([{}, {'a':null}, {'a':1}])
|
||||
- cd: arr.filter(:default => r.error){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: arr.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=r.error())
|
||||
js: arr.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:r.error()})
|
||||
ot: err("ReqlNonExistenceError", "No attribute `a` in object:", [])
|
||||
|
||||
- cd: r.table_create('default_test')
|
||||
ot: partial({'tables_created':1})
|
||||
|
||||
- cd: r.table('default_test').insert(arr)
|
||||
ot: ({'deleted':0,'replaced':0,'generated_keys':arrlen(3,uuid()),'unchanged':0,'errors':0,'skipped':0,'inserted':3})
|
||||
|
||||
- def: tbl = r.table('default_test').order_by('a').pluck('a')
|
||||
|
||||
- cd: tbl.filter{|x| x['a'].eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].eq(1))
|
||||
js: tbl.filter(function(x){return x('a').eq(1)})
|
||||
ot: [{'a':1}]
|
||||
- cd: tbl.filter(:default => false){|x| x['a'].eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].eq(1), default=False)
|
||||
js: tbl.filter(function(x){return x('a').eq(1)}, {'default':false})
|
||||
ot: [{'a':1}]
|
||||
- cd: tbl.filter(:default => true){|x| x['a'].eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].eq(1), default=True)
|
||||
js: tbl.filter(function(x){return x('a').eq(1)}, {'default':true})
|
||||
ot: [{}, {'a':1}]
|
||||
# `null` compares not equal to 1 with no error
|
||||
- cd: tbl.filter(:default => r.error){|x| x['a'].eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].eq(1), default=r.error())
|
||||
js: tbl.filter(function(x){return x('a').eq(1)}, {'default':r.error()})
|
||||
ot: err("ReqlNonExistenceError", "No attribute `a` in object:", [])
|
||||
|
||||
- cd: r.expr(false).do{|d| tbl.filter(:default => d){|x| x['a'].eq(1)}}
|
||||
py: r.expr(False).do(lambda d:tbl.filter(lambda x:x['a'].eq(1), default=d))
|
||||
js: r.expr(false).do(function(d){return tbl.filter(function(x){return x('a').eq(1)}, {default:d})})
|
||||
ot: [{'a':1}]
|
||||
- cd: r.expr(true).do{|d| tbl.filter(:default => d){|x| x['a'].eq(1)}}.orderby('a')
|
||||
py: r.expr(True).do(lambda d:tbl.filter(lambda x:x['a'].eq(1), default=d)).order_by('a')
|
||||
js: r.expr(true).do(function(d){return tbl.filter(function(x){return x('a').eq(1)}, {default:d})}).orderBy('a')
|
||||
ot: [{}, {'a':1}]
|
||||
# `null` compares not equal to 1 with no error
|
||||
|
||||
- cd: tbl.filter{|x| x['a'].default(0).eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].default(0).eq(1))
|
||||
js: tbl.filter(function(x){return x('a').default(0).eq(1)})
|
||||
ot: [{'a':1}]
|
||||
- cd: tbl.filter{|x| x['a'].default(1).eq(1)}.orderby('a')
|
||||
py: tbl.filter(lambda x:x['a'].default(1).eq(1)).order_by('a')
|
||||
js: tbl.filter(function(x){return x('a').default(1).eq(1)}).orderBy('a')
|
||||
ot: ([{}, {'a':null}, {'a':1}])
|
||||
- cd: tbl.filter{|x| x['a'].default(r.error).eq(1)}
|
||||
py: tbl.filter(lambda x:x['a'].default(r.error()).eq(1))
|
||||
js: tbl.filter(function(x){return x('a').default(r.error()).eq(1)})
|
||||
ot: [{'a':1}]
|
||||
# gets caught by `filter` default
|
||||
|
||||
- cd: r.expr(0).do{|i| tbl.filter{|x| x['a'].default(i).eq(1)}}
|
||||
py: r.expr(0).do(lambda i:tbl.filter(lambda x:x['a'].default(i).eq(1)))
|
||||
js: r.expr(0).do(function(i){return tbl.filter(function(x){return x('a').default(i).eq(1)})})
|
||||
ot: [{'a':1}]
|
||||
- cd: r.expr(1).do{|i| tbl.filter{|x| x['a'].default(i).eq(1)}}.orderby('a')
|
||||
py: r.expr(1).do(lambda i:tbl.filter(lambda x:x['a'].default(i).eq(1))).order_by('a')
|
||||
js: r.expr(1).do(function(i){return tbl.filter(function(x){return x('a').default(i).eq(1)})}).orderBy('a')
|
||||
ot: ([{},{'a':null},{'a':1}])
|
||||
|
||||
- cd: tbl.filter{|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: tbl.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)))
|
||||
js: tbl.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))})
|
||||
ot: [{'a':1}]
|
||||
- cd: tbl.filter(:default => false){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: tbl.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=False)
|
||||
js: tbl.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:false})
|
||||
ot: [{'a':1}]
|
||||
- cd: tbl.filter(:default => true){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}.orderby('a')
|
||||
py: tbl.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=True).order_by('a')
|
||||
js: tbl.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:true}).orderBy('a')
|
||||
ot: ([{}, {'a':null}, {'a':1}])
|
||||
- cd: tbl.filter(:default => r.error){|x| x['a'].eq(1).or(x['a']['b'].eq(2))}
|
||||
py: tbl.filter(lambda x:r.or_(x['a'].eq(1), x['a']['b'].eq(2)), default=r.error())
|
||||
js: tbl.filter(function(x){return x('a').eq(1).or(x('a')('b').eq(2))}, {default:r.error()})
|
||||
ot: err("ReqlNonExistenceError", "No attribute `a` in object:", [])
|
||||
|
||||
- cd: r.table_drop('default_test')
|
||||
ot: partial({'tables_dropped':1})
|
64
ext/librethinkdbxx/test/upstream/geo/constructors.yaml
Normal file
64
ext/librethinkdbxx/test/upstream/geo/constructors.yaml
Normal file
@ -0,0 +1,64 @@
|
||||
desc: Test geo constructors
|
||||
tests:
|
||||
# Point
|
||||
- cd: r.point(0, 0)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[0, 0], 'type':'Point'})
|
||||
- cd: r.point(0, -90)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[0, -90], 'type':'Point'})
|
||||
- cd: r.point(0, 90)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[0, 90], 'type':'Point'})
|
||||
- cd: r.point(-180, 0)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[-180, 0], 'type':'Point'})
|
||||
- cd: r.point(180, 0)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[180, 0], 'type':'Point'})
|
||||
- cd: r.point(0, -91)
|
||||
ot: err('ReqlQueryLogicError', 'Latitude must be between -90 and 90. Got -91.', [0])
|
||||
- cd: r.point(0, 91)
|
||||
ot: err('ReqlQueryLogicError', 'Latitude must be between -90 and 90. Got 91.', [0])
|
||||
- cd: r.point(-181, 0)
|
||||
ot: err('ReqlQueryLogicError', 'Longitude must be between -180 and 180. Got -181.', [0])
|
||||
- cd: r.point(181, 0)
|
||||
ot: err('ReqlQueryLogicError', 'Longitude must be between -180 and 180. Got 181.', [0])
|
||||
|
||||
# Line
|
||||
- cd: r.line()
|
||||
ot: err('ReqlCompileError', 'Expected 2 or more arguments but found 0.', [0])
|
||||
- cd: r.line([0,0])
|
||||
ot: err('ReqlCompileError', 'Expected 2 or more arguments but found 1.', [0])
|
||||
- cd: r.line([0,0], [0,0])
|
||||
ot: err('ReqlQueryLogicError', 'Invalid LineString. Are there antipodal or duplicate vertices?', [0])
|
||||
- cd: r.line([0,0], [0,1])
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[0,0], [0,1]], 'type':'LineString'})
|
||||
- cd: r.line([0,0], [1])
|
||||
ot: err('ReqlQueryLogicError', 'Expected point coordinate pair. Got 1 element array instead of a 2 element one.', [0])
|
||||
- cd: r.line([0,0], [1,0,0])
|
||||
ot: err('ReqlQueryLogicError', 'Expected point coordinate pair. Got 3 element array instead of a 2 element one.', [0])
|
||||
- cd: r.line([0,0], [0,1], [0,0])
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[0,0], [0,1], [0,0]], 'type':'LineString'})
|
||||
- cd: r.line(r.point(0,0), r.point(0,1), r.point(0,0))
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[0,0], [0,1], [0,0]], 'type':'LineString'})
|
||||
- cd: r.line(r.point(0,0), r.point(1,0), r.line([0,0], [1,0]))
|
||||
ot: err('ReqlQueryLogicError', 'Expected geometry of type `Point` but found `LineString`.', [0])
|
||||
|
||||
# Polygon
|
||||
- cd: r.polygon()
|
||||
ot: err('ReqlCompileError', 'Expected 3 or more arguments but found 0.', [0])
|
||||
- cd: r.polygon([0,0])
|
||||
ot: err('ReqlCompileError', 'Expected 3 or more arguments but found 1.', [0])
|
||||
- cd: r.polygon([0,0], [0,0])
|
||||
ot: err('ReqlCompileError', 'Expected 3 or more arguments but found 2.', [0])
|
||||
- cd: r.polygon([0,0], [0,0], [0,0], [0,0])
|
||||
ot: err('ReqlQueryLogicError', 'Invalid LinearRing. Are there antipodal or duplicate vertices? Is it self-intersecting?', [0])
|
||||
- cd: r.polygon([0,0], [0,1], [1,0])
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0,0], [0,1], [1,0], [0,0]]], 'type':'Polygon'})
|
||||
- cd: r.polygon([0,0], [0,1], [1,0], [0,0])
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0,0], [0,1], [1,0], [0,0]]], 'type':'Polygon'})
|
||||
- cd: r.polygon([0,0], [0,1], [1,0], [-1,0.5])
|
||||
ot: err('ReqlQueryLogicError', 'Invalid LinearRing. Are there antipodal or duplicate vertices? Is it self-intersecting?', [0])
|
||||
- cd: r.polygon([0,0], [0,1], [0])
|
||||
ot: err('ReqlQueryLogicError', 'Expected point coordinate pair. Got 1 element array instead of a 2 element one.', [0])
|
||||
- cd: r.polygon([0,0], [0,1], [0,1,0])
|
||||
ot: err('ReqlQueryLogicError', 'Expected point coordinate pair. Got 3 element array instead of a 2 element one.', [0])
|
||||
- cd: r.polygon(r.point(0,0), r.point(0,1), r.line([0,0], [0,1]))
|
||||
ot: err('ReqlQueryLogicError', 'Expected geometry of type `Point` but found `LineString`.', [0])
|
||||
|
31
ext/librethinkdbxx/test/upstream/geo/geojson.yaml
Normal file
31
ext/librethinkdbxx/test/upstream/geo/geojson.yaml
Normal file
@ -0,0 +1,31 @@
|
||||
desc: Test geoJSON conversion
|
||||
tests:
|
||||
# Basic conversion
|
||||
- cd: r.geojson({'coordinates':[0, 0], 'type':'Point'})
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[0, 0], 'type':'Point'})
|
||||
- cd: r.geojson({'coordinates':[[0,0], [0,1]], 'type':'LineString'})
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[0,0], [0,1]], 'type':'LineString'})
|
||||
- cd: r.geojson({'coordinates':[[[0,0], [0,1], [1,0], [0,0]]], 'type':'Polygon'})
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0,0], [0,1], [1,0], [0,0]]], 'type':'Polygon'})
|
||||
|
||||
# Wrong / missing fields
|
||||
- cd: r.geojson({'coordinates':[[], 0], 'type':'Point'})
|
||||
ot: err('ReqlQueryLogicError', 'Expected type NUMBER but found ARRAY.', [0])
|
||||
- cd: r.geojson({'coordinates':true, 'type':'Point'})
|
||||
ot: err('ReqlQueryLogicError', 'Expected type ARRAY but found BOOL.', [0])
|
||||
- cd: r.geojson({'type':'Point'})
|
||||
ot: err('ReqlNonExistenceError', 'No attribute `coordinates` in object:', [0])
|
||||
- cd: r.geojson({'coordinates':[0, 0]})
|
||||
ot: err('ReqlNonExistenceError', 'No attribute `type` in object:', [0])
|
||||
- cd: r.geojson({'coordinates':[0, 0], 'type':'foo'})
|
||||
ot: err('ReqlQueryLogicError', 'Unrecognized GeoJSON type `foo`.', [0])
|
||||
- cd: r.geojson({'coordinates':[0, 0], 'type':'Point', 'foo':'wrong'})
|
||||
ot: err('ReqlQueryLogicError', 'Unrecognized field `foo` found in geometry object.', [0])
|
||||
|
||||
# Unsupported features
|
||||
- cd: r.geojson({'coordinates':[0, 0], 'type':'Point', 'crs':null})
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[0, 0], 'type':'Point', 'crs':null})
|
||||
- js: r.geojson({'coordinates':[0, 0], 'type':'Point', 'crs':{'type':'name', 'properties':{'name':'test'}}})
|
||||
ot: err('ReqlQueryLogicError', 'Non-default coordinate reference systems are not supported in GeoJSON objects. Make sure the `crs` field of the geometry is null or non-existent.', [0])
|
||||
- cd: r.geojson({'coordinates':[0, 0], 'type':'MultiPoint'})
|
||||
ot: err('ReqlQueryLogicError', 'GeoJSON type `MultiPoint` is not supported.', [0])
|
208
ext/librethinkdbxx/test/upstream/geo/indexing.yaml
Normal file
208
ext/librethinkdbxx/test/upstream/geo/indexing.yaml
Normal file
@ -0,0 +1,208 @@
|
||||
desc: Test ReQL interface to geo indexes
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
- def: rows = [{'id':0, 'g':r.point(10,10), 'm':[r.point(0,0),r.point(1,0),r.point(2,0)]},
|
||||
{'id':1, 'g':r.polygon([0,0], [0,1], [1,1], [1,0])},
|
||||
{'id':2, 'g':r.line([0.000002,-1], [-0.000001,1])}]
|
||||
|
||||
- cd: tbl.insert(rows)
|
||||
ot: ({'deleted':0,'inserted':3,'skipped':0,'errors':0,'replaced':0,'unchanged':0})
|
||||
|
||||
- rb: tbl.index_create('g', :geo=>true)
|
||||
py: tbl.index_create('g', geo=true)
|
||||
js: tbl.indexCreate('g', {'geo':true})
|
||||
ot: {'created':1}
|
||||
- rb: tbl.index_create('m', :geo=>true, :multi=>true)
|
||||
py: tbl.index_create('m', geo=true, multi=true)
|
||||
js: tbl.indexCreate('m', {'geo':true, 'multi':true})
|
||||
ot: {'created':1}
|
||||
- cd: tbl.index_create('other')
|
||||
ot: {'created':1}
|
||||
# r.point is deterministic and can be used in an index function
|
||||
- rb: tbl.index_create('point_det'){ |x| r.point(x, x) }
|
||||
py: tbl.index_create('point_det', lambda x: r.point(x, x) )
|
||||
js: tbl.indexCreate('point_det', function(x) {return r.point(x, x);} )
|
||||
ot: {'created':1}
|
||||
|
||||
- cd: tbl.index_wait()
|
||||
|
||||
# r.line (and friends) are non-deterministic across servers and should be disallowed
|
||||
# in index functions
|
||||
- rb: tbl.index_create('point_det'){ |x| r.line(x, x) }
|
||||
py: tbl.index_create('point_det', lambda x: r.line(x, x) )
|
||||
js: tbl.indexCreate('point_det', function(x) {return r.line(x, x);} )
|
||||
ot: err('ReqlQueryLogicError', 'Could not prove function deterministic. Index functions must be deterministic.')
|
||||
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'other'}).count()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='other').count()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'other').count()
|
||||
ot: err('ReqlQueryLogicError', 'Index `other` is not a geospatial index. get_intersecting can only be used with a geospatial index.', [0])
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'missing'}).count()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='missing').count()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'missing').count()
|
||||
ot: err_regex('ReqlOpFailedError', 'Index `missing` was not found on table `[a-zA-Z0-9_]+.[a-zA-Z0-9_]+`[.]', [0])
|
||||
- cd: tbl.get_intersecting(r.point(0,0)).count()
|
||||
ot: err('ReqlQueryLogicError', 'get_intersecting requires an index argument.', [0])
|
||||
- js: tbl.get_all(0, {'index':'g'}).count()
|
||||
py: tbl.get_all(0, index='g').count()
|
||||
rb: tbl.get_all(0, :index=>'g').count()
|
||||
ot: err('ReqlQueryLogicError', 'Index `g` is a geospatial index. Only get_nearest and get_intersecting can use a geospatial index.', [0])
|
||||
- js: tbl.between(0, 1, {'index':'g'}).count()
|
||||
py: tbl.between(0, 1, index='g').count()
|
||||
rb: tbl.between(0, 1, :index=>'g').count()
|
||||
ot: err('ReqlQueryLogicError', 'Index `g` is a geospatial index. Only get_nearest and get_intersecting can use a geospatial index.', [0])
|
||||
- js: tbl.order_by({'index':'g'}).count()
|
||||
py: tbl.order_by(index='g').count()
|
||||
rb: tbl.order_by(:index=>'g').count()
|
||||
ot: err('ReqlQueryLogicError', 'Index `g` is a geospatial index. Only get_nearest and get_intersecting can use a geospatial index.', [0])
|
||||
- js: tbl.between(0, 1).get_intersecting(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.between(0, 1).get_intersecting(r.point(0,0), index='g').count()
|
||||
rb: tbl.between(0, 1).get_intersecting(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found TABLE_SLICE:', [0])
|
||||
py: err('AttributeError', "'Between' object has no attribute 'get_intersecting'")
|
||||
- js: tbl.get_all(0).get_intersecting(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.get_all(0).get_intersecting(r.point(0,0), index='g').count()
|
||||
rb: tbl.get_all(0).get_intersecting(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found SELECTION:', [0])
|
||||
py: err('AttributeError', "'GetAll' object has no attribute 'get_intersecting'")
|
||||
- js: tbl.order_by({'index':'id'}).get_intersecting(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.order_by(index='id').get_intersecting(r.point(0,0), index='g').count()
|
||||
rb: tbl.order_by(:index=>'id').get_intersecting(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found TABLE_SLICE:', [0])
|
||||
py: err('AttributeError', "'OrderBy' object has no attribute 'get_intersecting'")
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'id'}).count()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='id').count()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'id').count()
|
||||
ot: err('ReqlQueryLogicError', 'get_intersecting cannot use the primary index.', [0])
|
||||
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='g').count()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'g').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(10,10), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.point(10,10), index='g').count()
|
||||
rb: tbl.get_intersecting(r.point(10,10), :index=>'g').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(0.5,0.5), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.point(0.5,0.5), index='g').count()
|
||||
rb: tbl.get_intersecting(r.point(0.5,0.5), :index=>'g').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(20,20), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.point(20,20), index='g').count()
|
||||
rb: tbl.get_intersecting(r.point(20,20), :index=>'g').count()
|
||||
ot: 0
|
||||
- js: tbl.get_intersecting(r.polygon([0,0], [1,0], [1,1], [0,1]), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.polygon([0,0], [1,0], [1,1], [0,1]), index='g').count()
|
||||
rb: tbl.get_intersecting(r.polygon([0,0], [1,0], [1,1], [0,1]), :index=>'g').count()
|
||||
ot: 2
|
||||
- js: tbl.get_intersecting(r.line([0,0], [10,10]), {'index':'g'}).count()
|
||||
py: tbl.get_intersecting(r.line([0,0], [10,10]), index='g').count()
|
||||
rb: tbl.get_intersecting(r.line([0,0], [10,10]), :index=>'g').count()
|
||||
ot: 3
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'g'}).type_of()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='g').type_of()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'g').type_of()
|
||||
ot: ("SELECTION<STREAM>")
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'g'}).filter(true).type_of()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='g').filter(true).type_of()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'g').filter(true).type_of()
|
||||
ot: ("SELECTION<STREAM>")
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'g'}).map(r.row).type_of()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='g').map(r.row).type_of()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'g').map{|x|x}.type_of()
|
||||
ot: ("STREAM")
|
||||
|
||||
- js: tbl.get_intersecting(r.point(0,0), {'index':'m'}).count()
|
||||
py: tbl.get_intersecting(r.point(0,0), index='m').count()
|
||||
rb: tbl.get_intersecting(r.point(0,0), :index=>'m').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(1,0), {'index':'m'}).count()
|
||||
py: tbl.get_intersecting(r.point(1,0), index='m').count()
|
||||
rb: tbl.get_intersecting(r.point(1,0), :index=>'m').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(2,0), {'index':'m'}).count()
|
||||
py: tbl.get_intersecting(r.point(2,0), index='m').count()
|
||||
rb: tbl.get_intersecting(r.point(2,0), :index=>'m').count()
|
||||
ot: 1
|
||||
- js: tbl.get_intersecting(r.point(3,0), {'index':'m'}).count()
|
||||
py: tbl.get_intersecting(r.point(3,0), index='m').count()
|
||||
rb: tbl.get_intersecting(r.point(3,0), :index=>'m').count()
|
||||
ot: 0
|
||||
# The document is emitted once for each match.
|
||||
- js: tbl.get_intersecting(r.polygon([0,0], [0,1], [1,1], [1,0]), {'index':'m'}).count()
|
||||
py: tbl.get_intersecting(r.polygon([0,0], [0,1], [1,1], [1,0]), index='m').count()
|
||||
rb: tbl.get_intersecting(r.polygon([0,0], [0,1], [1,1], [1,0]), :index=>'m').count()
|
||||
ot: 2
|
||||
|
||||
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'other'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='other')
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'other')
|
||||
ot: err('ReqlQueryLogicError', 'Index `other` is not a geospatial index. get_nearest can only be used with a geospatial index.', [0])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'missing'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='missing')
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'missing')
|
||||
ot: err_regex('ReqlOpFailedError', 'Index `missing` was not found on table `[a-zA-Z0-9_]+.[a-zA-Z0-9_]+`[.]', [0])
|
||||
- cd: tbl.get_nearest(r.point(0,0))
|
||||
ot: err('ReqlQueryLogicError', 'get_nearest requires an index argument.', [0])
|
||||
- js: tbl.between(0, 1).get_nearest(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.between(0, 1).get_nearest(r.point(0,0), index='g').count()
|
||||
rb: tbl.between(0, 1).get_nearest(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found TABLE_SLICE:', [0])
|
||||
py: err('AttributeError', "'Between' object has no attribute 'get_nearest'")
|
||||
- js: tbl.get_all(0).get_nearest(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.get_all(0).get_nearest(r.point(0,0), index='g').count()
|
||||
rb: tbl.get_all(0).get_nearest(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found SELECTION:', [0])
|
||||
py: err('AttributeError', "'GetAll' object has no attribute 'get_nearest'")
|
||||
- js: tbl.order_by({'index':'id'}).get_nearest(r.point(0,0), {'index':'g'}).count()
|
||||
py: tbl.order_by(index='id').get_nearest(r.point(0,0), index='g').count()
|
||||
rb: tbl.order_by(:index=>'id').get_nearest(r.point(0,0), :index=>'g').count()
|
||||
ot:
|
||||
cd: err('ReqlQueryLogicError', 'Expected type TABLE but found TABLE_SLICE:', [0])
|
||||
py: err('AttributeError', "'OrderBy' object has no attribute 'get_nearest'")
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'id'}).count()
|
||||
py: tbl.get_nearest(r.point(0,0), index='id').count()
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'id').count()
|
||||
ot: err('ReqlQueryLogicError', 'get_nearest cannot use the primary index.', [0])
|
||||
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g'}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='g').pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g').pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0,'doc':{'id':1}},{'dist':0.055659745396754216,'doc':{'id':2}}])
|
||||
- js: tbl.get_nearest(r.point(-0.000001,1), {'index':'g'}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(-0.000001,1), index='g').pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(-0.000001,1), :index=>'g').pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0,'doc':{'id':2}},{'dist':0.11130264976984369,'doc':{'id':1}}])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g', 'max_dist':1565110}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='g', max_dist=1565110).pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g', :max_dist=>1565110).pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0,'doc':{'id':1}},{'dist':0.055659745396754216,'doc':{'id':2}},{'dist':1565109.0992178896,'doc':{'id':0}}])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g', 'max_dist':1565110, 'max_results':2}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='g', max_dist=1565110, max_results=2).pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g', :max_dist=>1565110, :max_results=>2).pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0,'doc':{'id':1}},{'dist':0.055659745396754216,'doc':{'id':2}}])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g', 'max_dist':10000000}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='g', max_dist=10000000).pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g', :max_dist=>10000000).pluck('dist', {'doc':'id'})
|
||||
ot: err('ReqlQueryLogicError', 'The distance has become too large for continuing the indexed nearest traversal. Consider specifying a smaller `max_dist` parameter. (Radius must be smaller than a quarter of the circumference along the minor axis of the reference ellipsoid. Got 10968937.995244588703m, but must be smaller than 9985163.1855612862855m.)', [0])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g', 'max_dist':1566, 'unit':'km'}).pluck('dist', {'doc':'id'})
|
||||
py: tbl.get_nearest(r.point(0,0), index='g', max_dist=1566, unit='km').pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g', :max_dist=>1566, :unit=>'km').pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0,'doc':{'id':1}},{'dist':0.00005565974539675422,'doc':{'id':2}},{'dist':1565.1090992178897,'doc':{'id':0}}])
|
||||
- py: tbl.get_nearest(r.point(0,0), index='g', max_dist=1, geo_system='unit_sphere').pluck('dist', {'doc':'id'})
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g', :max_dist=>1, :geo_system=>'unit_sphere').pluck('dist', {'doc':'id'})
|
||||
ot: ([{'dist':0, 'doc':{'id':1}}, {'dist':8.726646259990191e-09, 'doc':{'id':2}}, {'dist':0.24619691677893205, 'doc':{'id':0}}])
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g'}).type_of()
|
||||
py: tbl.get_nearest(r.point(0,0), index='g').type_of()
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g').type_of()
|
||||
ot: ("ARRAY")
|
||||
- js: tbl.get_nearest(r.point(0,0), {'index':'g'}).map(r.row).type_of()
|
||||
py: tbl.get_nearest(r.point(0,0), index='g').map(r.row).type_of()
|
||||
rb: tbl.get_nearest(r.point(0,0), :index=>'g').map{|x|x}.type_of()
|
||||
ot: ("ARRAY")
|
119
ext/librethinkdbxx/test/upstream/geo/intersection_inclusion.yaml
Normal file
119
ext/librethinkdbxx/test/upstream/geo/intersection_inclusion.yaml
Normal file
@ -0,0 +1,119 @@
|
||||
desc: Test intersects and includes semantics
|
||||
tests:
|
||||
# Intersects
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.point(1.5,1.5))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.point(2.5,2.5))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.point(1.5,1.5))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.point(1.05,1.05))
|
||||
ot: true
|
||||
# Our current semantics: we define polygons as closed, so points that are exactly *on* the outline of a polygon do intersect
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.point(2,2))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.point(2,1.5))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.line([1.5,1.5], [2,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.line([1.5,1.5], [2,1.5]))
|
||||
ot: true
|
||||
# (...with holes in the polygon being closed with respect to the polygon, i.e. the set cut out is open)
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.point(1.1,1.1))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.point(1.5,1.1))
|
||||
ot: true
|
||||
# ... lines are interpreted as closed sets as well, so even if they meet only at their end points, we consider them as intersecting.
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.line([2,2], [3,3]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.line([2,1.5], [3,3]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.line([1.5,1.5], [3,3]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.polygon([1.2,1.2], [1.8,1.2], [1.8,1.8], [1.2,1.8]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.polygon([1.5,1.5], [2.5,1.5], [2.5,2.5], [1.5,2.5]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.polygon([1.2,1.2], [1.8,1.2], [1.8,1.8], [1.2,1.8]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).intersects(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9]))
|
||||
ot: false
|
||||
# Polygons behave like lines in that respect
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.polygon([2,1.1], [3,1.1], [3,1.9], [2,1.9]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).intersects(r.polygon([2,2], [3,2], [3,3], [2,3]))
|
||||
ot: false
|
||||
- cd: r.point(1,1).intersects(r.point(1.5,1.5))
|
||||
ot: false
|
||||
- cd: r.point(1,1).intersects(r.point(1,1))
|
||||
ot: true
|
||||
- cd: r.line([1,1], [2,1]).intersects(r.point(1,1))
|
||||
ot: true
|
||||
# This one currently fails due to numeric precision problems.
|
||||
#- cd: r.line([1,0], [2,0]).intersects(r.point(1.5,0))
|
||||
# ot: true
|
||||
- cd: r.line([1,1], [1,2]).intersects(r.point(1,1.8))
|
||||
ot: true
|
||||
- cd: r.line([1,0], [2,0]).intersects(r.point(1.8,0))
|
||||
ot: true
|
||||
- cd: r.line([1,1], [2,1]).intersects(r.point(1.5,1.5))
|
||||
ot: false
|
||||
- cd: r.line([1,1], [2,1]).intersects(r.line([2,1], [3,1]))
|
||||
ot: true
|
||||
# intersects on an array/stream
|
||||
- cd: r.expr([r.point(1, 0), r.point(3,0), r.point(2, 0)]).intersects(r.line([0,0], [2, 0])).count()
|
||||
ot: 2
|
||||
|
||||
# Includes
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.point(1.5,1.5))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.point(2.5,2.5))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.point(1.5,1.5))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.point(1.05,1.05))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.point(2,2))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.point(2,1.5))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([1.5,1.5], [2,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([1.5,1.5], [2,1.5]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.point(1.1,1.1))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.point(1.5,1.1))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([2,2], [3,3]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([2,1.5], [2,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([2,1], [2,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.line([1.5,1.5], [3,3]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([1,1], [2,1], [2,2], [1,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([1.2,1.2], [1.8,1.2], [1.8,1.8], [1.2,1.8]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([1.5,1.5], [2,1.5], [2,2], [1.5,2]))
|
||||
ot: true
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([1.5,1.5], [2.5,1.5], [2.5,2.5], [1.5,2.5]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.polygon([1.2,1.2], [1.8,1.2], [1.8,1.8], [1.2,1.8]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).polygon_sub(r.polygon([1.1,1.1], [1.9,1.1], [1.9,1.9], [1.1,1.9])).includes(r.polygon([1.1,1.1], [2,1.1], [2,2], [1.1,2]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([2,1.1], [3,1.1], [3,1.9], [2,1.9]))
|
||||
ot: false
|
||||
- cd: r.polygon([1,1], [2,1], [2,2], [1,2]).includes(r.polygon([2,2], [3,2], [3,3], [2,3]))
|
||||
ot: false
|
||||
# includes on an array/stream
|
||||
- cd: r.expr([r.polygon([0,0], [1,1], [1,0]), r.polygon([0,1], [1,2], [1,1])]).includes(r.point(0,0)).count()
|
||||
ot: 1
|
||||
# Wrong geometry type arguments (the first one must be a polygon)
|
||||
- cd: r.point(0,0).includes(r.point(0,0))
|
||||
ot: err('ReqlQueryLogicError', 'Expected geometry of type `Polygon` but found `Point`.')
|
||||
- cd: r.line([0,0], [0,1]).includes(r.point(0,0))
|
||||
ot: err('ReqlQueryLogicError', 'Expected geometry of type `Polygon` but found `LineString`.')
|
97
ext/librethinkdbxx/test/upstream/geo/operations.yaml
Normal file
97
ext/librethinkdbxx/test/upstream/geo/operations.yaml
Normal file
@ -0,0 +1,97 @@
|
||||
desc: Test basic geometry operators
|
||||
tests:
|
||||
# Distance
|
||||
# coerce_to('STRING') because the test utility has some issues with rounding and I'm too lazy to investigate that now.
|
||||
- cd: r.distance(r.point(-122, 37), r.point(-123, 37)).coerce_to('STRING')
|
||||
ot: ("89011.26253835332")
|
||||
- cd: r.distance(r.point(-122, 37), r.point(-122, 36)).coerce_to('STRING')
|
||||
ot: ("110968.30443995494")
|
||||
- cd: r.distance(r.point(-122, 37), r.point(-122, 36)).eq(r.distance(r.point(-122, 36), r.point(-122, 37)))
|
||||
ot: true
|
||||
- cd: r.point(-122, 37).distance(r.point(-123, 37)).coerce_to('STRING')
|
||||
ot: ("89011.26253835332")
|
||||
- def: someDist = r.distance(r.point(-122, 37), r.point(-123, 37))
|
||||
js: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), {unit:'m'}))
|
||||
py: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), unit='m'))
|
||||
rb: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), :unit=>'m'))
|
||||
ot: true
|
||||
- js: someDist.mul(1.0/1000.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), {unit:'km'}))
|
||||
py: someDist.mul(1.0/1000.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), unit='km'))
|
||||
rb: someDist.mul(1.0/1000.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), :unit=>'km'))
|
||||
ot: true
|
||||
- js: someDist.mul(1.0/1609.344).eq(r.distance(r.point(-122, 37), r.point(-123, 37), {unit:'mi'}))
|
||||
py: someDist.mul(1.0/1609.344).eq(r.distance(r.point(-122, 37), r.point(-123, 37), unit='mi'))
|
||||
rb: someDist.mul(1.0/1609.344).eq(r.distance(r.point(-122, 37), r.point(-123, 37), :unit=>'mi'))
|
||||
ot: true
|
||||
- js: someDist.mul(1.0/0.3048).eq(r.distance(r.point(-122, 37), r.point(-123, 37), {unit:'ft'}))
|
||||
py: someDist.mul(1.0/0.3048).eq(r.distance(r.point(-122, 37), r.point(-123, 37), unit='ft'))
|
||||
rb: someDist.mul(1.0/0.3048).eq(r.distance(r.point(-122, 37), r.point(-123, 37), :unit=>'ft'))
|
||||
ot: true
|
||||
- js: someDist.mul(1.0/1852.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), {unit:'nm'}))
|
||||
py: someDist.mul(1.0/1852.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), unit='nm'))
|
||||
rb: someDist.mul(1.0/1852.0).eq(r.distance(r.point(-122, 37), r.point(-123, 37), :unit=>'nm'))
|
||||
ot: true
|
||||
- js: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), {'geo_system':'WGS84'}))
|
||||
py: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), geo_system='WGS84'))
|
||||
rb: someDist.eq(r.distance(r.point(-122, 37), r.point(-123, 37), :geo_system=>'WGS84'))
|
||||
ot: true
|
||||
# Mearth is a small planet, just 1/10th of earth's size.
|
||||
- js: someDist.div(10).eq(r.distance(r.point(-122, 37), r.point(-123, 37), {'geo_system':{'a':637813.7, 'f':(1.0/298.257223563)}}))
|
||||
py: someDist.div(10).eq(r.distance(r.point(-122, 37), r.point(-123, 37), geo_system={'a':637813.7, 'f':(1.0/298.257223563)}))
|
||||
rb: someDist.div(10).eq(r.distance(r.point(-122, 37), r.point(-123, 37), :geo_system=>{'a':637813.7, 'f':(1.0/298.257223563)}))
|
||||
ot: true
|
||||
- py: r.distance(r.point(-122, 37), r.point(-123, 37), geo_system='unit_sphere').coerce_to('STRING')
|
||||
rb: r.distance(r.point(-122, 37), r.point(-123, 37), :geo_system=>'unit_sphere').coerce_to('STRING')
|
||||
js: r.distance(r.point(-122, 37), r.point(-123, 37), {'geo_system':'unit_sphere'}).coerce_to('STRING')
|
||||
ot: ("0.01393875509649327")
|
||||
- cd: r.distance(r.point(0, 0), r.point(0, 0)).coerce_to('STRING')
|
||||
ot: ("0")
|
||||
# These two give the earth's circumference through the poles
|
||||
- cd: r.distance(r.point(0, 0), r.point(180, 0)).mul(2).coerce_to('STRING')
|
||||
ot: ("40007862.917250897")
|
||||
- cd: r.distance(r.point(0, -90), r.point(0, 90)).mul(2).coerce_to('STRING')
|
||||
ot: ("40007862.917250897")
|
||||
- cd: r.distance(r.point(0, 0), r.line([0,0], [0,1])).coerce_to('STRING')
|
||||
ot: ("0")
|
||||
- cd: r.distance(r.line([0,0], [0,1]), r.point(0, 0)).coerce_to('STRING')
|
||||
ot: ("0")
|
||||
- cd: r.distance(r.point(0, 0), r.line([0.1,0], [1,0])).eq(r.distance(r.point(0, 0), r.point(0.1, 0)))
|
||||
ot: true
|
||||
- cd: r.distance(r.point(0, 0), r.line([5,-1], [4,2])).coerce_to('STRING')
|
||||
ot: ("492471.4990055255")
|
||||
- cd: r.distance(r.point(0, 0), r.polygon([5,-1], [4,2], [10,10])).coerce_to('STRING')
|
||||
ot: ("492471.4990055255")
|
||||
- cd: r.distance(r.point(0, 0), r.polygon([0,-1], [0,1], [10,10])).coerce_to('STRING')
|
||||
ot: ("0")
|
||||
- cd: r.distance(r.point(0.5, 0.5), r.polygon([0,-1], [0,1], [10,10])).coerce_to('STRING')
|
||||
ot: ("0")
|
||||
|
||||
# Fill
|
||||
- js: r.circle([0,0], 1, {fill:false}).eq(r.circle([0,0], 1, {fill:true}))
|
||||
py: r.circle([0,0], 1, fill=false).eq(r.circle([0,0], 1, fill=true))
|
||||
rb: r.circle([0,0], 1, :fill=>false).eq(r.circle([0,0], 1, :fill=>true))
|
||||
ot: false
|
||||
- js: r.circle([0,0], 1, {fill:false}).fill().eq(r.circle([0,0], 1, {fill:true}))
|
||||
py: r.circle([0,0], 1, fill=false).fill().eq(r.circle([0,0], 1, fill=true))
|
||||
rb: r.circle([0,0], 1, :fill=>false).fill().eq(r.circle([0,0], 1, :fill=>true))
|
||||
ot: true
|
||||
|
||||
# Subtraction
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0.1,0.1], [0.9,0.1], [0.9,0.9], [0.1,0.9]))
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0,0],[1,0],[1,1],[0,1],[0,0]],[[0.1,0.1],[0.9,0.1],[0.9,0.9],[0.1,0.9],[0.1,0.1]]], 'type':'Polygon'})
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0.1,0.9], [0.9,0.0], [0.9,0.9], [0.1,0.9]))
|
||||
ot: err('ReqlQueryLogicError', 'The second argument to `polygon_sub` is not contained in the first one.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0,0], [2,0], [2,2], [0,2]))
|
||||
ot: err('ReqlQueryLogicError', 'The second argument to `polygon_sub` is not contained in the first one.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0,-2], [1,-2], [-1,1], [0,-1]))
|
||||
ot: err('ReqlQueryLogicError', 'The second argument to `polygon_sub` is not contained in the first one.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0,-1], [1,-1], [1,0], [0,0]))
|
||||
ot: err('ReqlQueryLogicError', 'The second argument to `polygon_sub` is not contained in the first one.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0.1,-1], [0.9,-1], [0.9,0.5], [0.1,0.5]))
|
||||
ot: err('ReqlQueryLogicError', 'The second argument to `polygon_sub` is not contained in the first one.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0,0],[0.1,0.9],[0.9,0.9],[0.9,0.1]))
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0,0],[1,0],[1,1],[0,1],[0,0]],[[0,0],[0.1,0.9],[0.9,0.9],[0.9,0.1],[0,0]]], 'type':'Polygon'})
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.polygon([0,0],[0.1,0.9],[0.9,0.9],[0.9,0.1]).polygon_sub(r.polygon([0.2,0.2],[0.5,0.8],[0.8,0.2])))
|
||||
ot: err('ReqlQueryLogicError', 'Expected a Polygon with only an outer shell. This one has holes.', [0])
|
||||
- cd: r.polygon([0,0], [1,0], [1,1], [0,1]).polygon_sub(r.line([0,0],[0.9,0.1],[0.9,0.9],[0.1,0.9]))
|
||||
ot: err('ReqlQueryLogicError', 'Expected a Polygon but found a LineString.', [])
|
50
ext/librethinkdbxx/test/upstream/geo/primitives.yaml
Normal file
50
ext/librethinkdbxx/test/upstream/geo/primitives.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
desc: Test geometric primitive constructors
|
||||
tests:
|
||||
# Circle
|
||||
- js: r.circle([0,0], 1, {num_vertices:3})
|
||||
py: r.circle([0,0], 1, num_vertices=3)
|
||||
rb: r.circle([0,0], 1, :num_vertices=>3)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]]], 'type':'Polygon'})
|
||||
|
||||
- js: r.circle(r.point(0,0), 1, {num_vertices:3})
|
||||
py: r.circle(r.point(0,0), 1, num_vertices=3)
|
||||
rb: r.circle(r.point(0,0), 1, :num_vertices=>3)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]]], 'type':'Polygon'})
|
||||
|
||||
- js: r.circle([0,0], 1, {num_vertices:3, fill:false})
|
||||
py: r.circle([0,0], 1, num_vertices=3, fill=false)
|
||||
rb: r.circle([0,0], 1, :num_vertices=>3, :fill=>false)
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]], 'type':'LineString'})
|
||||
|
||||
- js: r.circle([0,0], 14000000, {num_vertices:3})
|
||||
py: r.circle([0,0], 14000000, num_vertices=3)
|
||||
rb: r.circle([0,0], 14000000, :num_vertices=>3)
|
||||
ot: err('ReqlQueryLogicError', 'Radius must be smaller than a quarter of the circumference along the minor axis of the reference ellipsoid. Got 14000000m, but must be smaller than 9985163.1855612862855m.', [0])
|
||||
|
||||
- js: r.circle([0,0], 1, {num_vertices:3, geo_system:'WGS84'})
|
||||
py: r.circle([0,0], 1, num_vertices=3, geo_system='WGS84')
|
||||
rb: r.circle([0,0], 1, :num_vertices=>3, :geo_system=>'WGS84')
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]]], 'type':'Polygon'})
|
||||
|
||||
- js: r.circle([0,0], 2, {num_vertices:3, geo_system:'unit_'+'sphere'})
|
||||
py: r.circle([0,0], 2, num_vertices=3, geo_system='unit_sphere')
|
||||
rb: r.circle([0,0], 2, :num_vertices=>3, :geo_system=>'unit_sphere')
|
||||
ot: err('ReqlQueryLogicError', 'Radius must be smaller than a quarter of the circumference along the minor axis of the reference ellipsoid. Got 2m, but must be smaller than 1.570796326794896558m.', [0])
|
||||
|
||||
- js: r.circle([0,0], 0.1, {num_vertices:3, geo_system:'unit_'+'sphere'})
|
||||
py: r.circle([0,0], 0.1, num_vertices=3, geo_system='unit_sphere')
|
||||
rb: r.circle([0,0], 0.1, :num_vertices=>3, :geo_system=>'unit_sphere')
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -5.729577951308232], [-4.966092947444857, 2.861205754495701], [4.966092947444857, 2.861205754495701], [0, -5.729577951308232]]], 'type':'Polygon'})
|
||||
testopts:
|
||||
precision: 0.0000000000001
|
||||
|
||||
- js: r.circle([0,0], 1.0/1000.0, {num_vertices:3, unit:'km'})
|
||||
py: r.circle([0,0], 1.0/1000.0, num_vertices=3, unit='km')
|
||||
rb: r.circle([0,0], 1.0/1000.0, :num_vertices=>3, :unit=>'km')
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]]], 'type':'Polygon'})
|
||||
|
||||
- js: r.circle([0,0], 1.0/1609.344, {num_vertices:3, unit:'mi'})
|
||||
py: r.circle([0,0], 1.0/1609.344, num_vertices=3, unit='mi')
|
||||
rb: r.circle([0,0], 1.0/1609.344, :num_vertices=>3, :unit=>'mi')
|
||||
ot: ({'$reql_type$':'GEOMETRY', 'coordinates':[[[0, -9.04369477050382e-06], [-7.779638566553426e-06, 4.5218473852518965e-06], [7.779638566553426e-06, 4.5218473852518965e-06], [0, -9.04369477050382e-06]]], 'type':'Polygon'})
|
||||
|
133
ext/librethinkdbxx/test/upstream/joins.yaml
Normal file
133
ext/librethinkdbxx/test/upstream/joins.yaml
Normal file
@ -0,0 +1,133 @@
|
||||
desc: Tests that manipulation data in tables
|
||||
table_variable_name: tbl, tbl2, senders, receivers, messages, otbl, otbl2
|
||||
tests:
|
||||
|
||||
# Setup some more tables
|
||||
|
||||
- py: r.db('test').table_create('test3', primary_key='foo')
|
||||
rb: r.db('test').table_create('test3', {:primary_key=>'foo'})
|
||||
js: r.db('test').tableCreate('test3', {'primaryKey':'foo'})
|
||||
ot: partial({'tables_created':1})
|
||||
- def: tbl3 = r.db('test').table('test3')
|
||||
|
||||
- py: tbl.insert(r.range(0, 100).map({'id':r.row, 'a':r.row % 4}))
|
||||
rb: tbl.insert(r.range(0, 100).map{|row| {'id':row, a:row % 4}})
|
||||
js: tbl.insert(r.range(0, 100).map(function (row) { return {'id':row, 'a':row.mod(4)}; }))
|
||||
ot: partial({'errors':0, 'inserted':100})
|
||||
|
||||
- py: tbl2.insert(r.range(0, 100).map({'id':r.row, 'b':r.row % 4}))
|
||||
rb: tbl2.insert(r.range(0, 100).map{|row| {'id':row, b:row % 4}})
|
||||
js: tbl2.insert(r.range(0, 100).map(function (row) { return {'id':row, 'b':row.mod(4)}; }))
|
||||
ot: partial({'errors':0, 'inserted':100})
|
||||
|
||||
- py: tbl3.insert(r.range(0, 100).map({'foo':r.row, 'b':r.row % 4}))
|
||||
rb: tbl3.insert(r.range(0, 100).map{|row| {'foo':row, b:row % 4}})
|
||||
js: tbl3.insert(r.range(0, 100).map(function (row) { return {'foo':row, 'b':row.mod(4)}; }))
|
||||
ot: partial({'errors':0, 'inserted':100})
|
||||
|
||||
- py: otbl.insert(r.range(1,100).map({'id': r.row, 'a': r.row}))
|
||||
- py: otbl2.insert(r.range(1,100).map({'id': r.row, 'b': 2 * r.row}))
|
||||
|
||||
# Inner-Join
|
||||
|
||||
- def:
|
||||
py: ij = tbl.inner_join(tbl2, lambda x,y:x['a'] == y['b']).zip()
|
||||
js: ij = tbl.innerJoin(tbl2, function(x, y) { return x('a').eq(y('b')); }).zip()
|
||||
rb: ij = tbl.inner_join(tbl2){ |x, y| x[:a].eq y[:b] }.zip
|
||||
- cd: ij.count()
|
||||
ot: 2500
|
||||
- py: ij.filter(lambda row:row['a'] != row['b']).count()
|
||||
js: ij.filter(function(row) { return row('a').ne(row('b')); }).count()
|
||||
rb: ij.filter{ |row| row[:a].ne row[:b] }.count
|
||||
ot: 0
|
||||
|
||||
# Outer-Join
|
||||
- def:
|
||||
py: oj = tbl.outer_join(tbl2, lambda x,y:x['a'] == y['b']).zip()
|
||||
js: oj = tbl.outerJoin(tbl2, function(x, y) { return x('a').eq(y('b')); }).zip()
|
||||
rb: oj = tbl.outer_join(tbl2){ |x, y| x[:a].eq y[:b] }.zip
|
||||
- cd: oj.count()
|
||||
ot: 2500
|
||||
- py: oj.filter(lambda row:row['a'] != row['b']).count()
|
||||
js: oj.filter(function(row) { return row('a').ne(row('b')); }).count()
|
||||
rb: oj.filter{ |row| row[:a].ne row[:b] }.count
|
||||
ot: 0
|
||||
|
||||
# Ordered eq_join
|
||||
- py: blah = otbl.order_by("id").eq_join(r.row['id'], otbl2, ordered=True).zip()
|
||||
ot: [{'id': i, 'a': i, 'b': i * 2} for i in range(1, 100)]
|
||||
- py: blah = otbl.order_by(r.desc("id")).eq_join(r.row['id'], otbl2, ordered=True).zip()
|
||||
ot: [{'id': i, 'a': i, 'b': i * 2} for i in range(99, 0, -1)]
|
||||
- py: blah = otbl.order_by("id").eq_join(r.row['a'], otbl2, ordered=True).zip()
|
||||
ot: [{'id': i, 'a': i, 'b': i * 2} for i in range(1, 100)]
|
||||
|
||||
# Eq-Join
|
||||
- cd: tbl.eq_join('a', tbl2).zip().count()
|
||||
ot: 100
|
||||
|
||||
- cd: tbl.eq_join('fake', tbl2).zip().count()
|
||||
ot: 0
|
||||
|
||||
- py: tbl.eq_join(lambda x:x['a'], tbl2).zip().count()
|
||||
rb: tbl.eq_join(lambda{|x| x['a']}, tbl2).zip().count()
|
||||
js: tbl.eq_join(function(x) { return x('a'); }, tbl2).zip().count()
|
||||
ot: 100
|
||||
|
||||
- py: tbl.eq_join(lambda x:x['fake'], tbl2).zip().count()
|
||||
rb: tbl.eq_join(lambda{|x| x['fake']}, tbl2).zip().count()
|
||||
js: tbl.eq_join(function(x) { return x('fake'); }, tbl2).zip().count()
|
||||
ot: 0
|
||||
|
||||
- py: tbl.eq_join(lambda x:null, tbl2).zip().count()
|
||||
rb: tbl.eq_join(lambda{|x| null}, tbl2).zip().count()
|
||||
js: tbl.eq_join(function(x) { return null; }, tbl2).zip().count()
|
||||
ot: 0
|
||||
|
||||
- py: tbl.eq_join(lambda x:x['a'], tbl2).count()
|
||||
rb: tbl.eq_join(lambda {|x| x[:a]}, tbl2).count()
|
||||
js: tbl.eq_join(function(x) { return x('a'); }, tbl2).count()
|
||||
ot: 100
|
||||
|
||||
# eqjoin where id isn't a primary key
|
||||
- cd: tbl.eq_join('a', tbl3).zip().count()
|
||||
ot: 100
|
||||
|
||||
- py: tbl.eq_join(lambda x:x['a'], tbl3).count()
|
||||
rb: tbl.eq_join(lambda {|x| x[:a]}, tbl3).count()
|
||||
js: tbl.eq_join(function(x) { return x('a'); }, tbl3).count()
|
||||
ot: 100
|
||||
|
||||
# eq_join with r.row
|
||||
- py: tbl.eq_join(r.row['a'], tbl2).count()
|
||||
js: tbl.eq_join(r.row('a'), tbl2).count()
|
||||
ot: 100
|
||||
|
||||
# test an inner-join condition where inner-join differs from outer-join
|
||||
- def: left = r.expr([{'a':1},{'a':2},{'a':3}])
|
||||
- def: right = r.expr([{'b':2},{'b':3}])
|
||||
|
||||
- py: left.inner_join(right, lambda l, r:l['a'] == r['b']).zip()
|
||||
js: left.innerJoin(right, function(l, r) { return l('a').eq(r('b')); }).zip()
|
||||
rb: left.inner_join(right){ |lt, rt| lt[:a].eq(rt[:b]) }.zip
|
||||
ot: [{'a':2,'b':2},{'a':3,'b':3}]
|
||||
|
||||
# test an outer-join condition where outer-join differs from inner-join
|
||||
- py: left.outer_join(right, lambda l, r:l['a'] == r['b']).zip()
|
||||
js: left.outerJoin(right, function(l, r) { return l('a').eq(r('b')); }).zip()
|
||||
rb: left.outer_join(right){ |lt, rt| lt[:a].eq(rt[:b]) }.zip
|
||||
ot: [{'a':1},{'a':2,'b':2},{'a':3,'b':3}]
|
||||
|
||||
- rb: senders.insert({id:1, sender:'Sender One'})['inserted']
|
||||
ot: 1
|
||||
- rb: receivers.insert({id:1, receiver:'Receiver One'})['inserted']
|
||||
ot: 1
|
||||
- rb: messages.insert([{id:10, sender_id:1, receiver_id:1, msg:'Message One'}, {id:20, sender_id:1, receiver_id:1, msg:'Message Two'}, {id:30, sender_id:1, receiver_id:1, msg:'Message Three'}])['inserted']
|
||||
ot: 3
|
||||
|
||||
- rb: messages.orderby(index:'id').eq_join('sender_id', senders).without({right:{id:true}}).zip.eq_join('receiver_id', receivers).without({right:{id:true}}).zip
|
||||
ot: [{'id':10,'msg':'Message One','receiver':'Receiver One','receiver_id':1,'sender':'Sender One','sender_id':1},{'id':20,'msg':'Message Two','receiver':'Receiver One','receiver_id':1,'sender':'Sender One','sender_id':1},{'id':30,'msg':'Message Three','receiver':'Receiver One','receiver_id':1,'sender':'Sender One','sender_id':1}]
|
||||
|
||||
# Clean up
|
||||
|
||||
- cd: r.db('test').table_drop('test3')
|
||||
ot: partial({'tables_dropped':1})
|
74
ext/librethinkdbxx/test/upstream/json.yaml
Normal file
74
ext/librethinkdbxx/test/upstream/json.yaml
Normal file
@ -0,0 +1,74 @@
|
||||
desc: Tests RQL json parsing
|
||||
tests:
|
||||
|
||||
- cd: r.json("[1,2,3]")
|
||||
ot: [1,2,3]
|
||||
|
||||
- cd: r.json("1")
|
||||
ot: 1
|
||||
|
||||
- cd: r.json("{}")
|
||||
ot: {}
|
||||
|
||||
- cd: r.json('"foo"')
|
||||
ot: "foo"
|
||||
|
||||
- cd: r.json("[1,2")
|
||||
ot: err("ReqlQueryLogicError", 'Failed to parse "[1,2" as JSON:' + ' Missing a comma or \']\' after an array element.', [0])
|
||||
|
||||
- cd: r.json("[1,2,3]").to_json_string()
|
||||
ot: '[1,2,3]'
|
||||
|
||||
- js: r.json("[1,2,3]").toJSON()
|
||||
py: r.json("[1,2,3]").to_json()
|
||||
ot: '[1,2,3]'
|
||||
|
||||
- cd: r.json("{\"foo\":4}").to_json_string()
|
||||
ot: '{"foo":4}'
|
||||
|
||||
- js: r.json("{\"foo\":4}").toJSON()
|
||||
py: r.json("{\"foo\":4}").to_json()
|
||||
ot: '{"foo":4}'
|
||||
|
||||
# stress test: data is from http://www.mockaroo.com/
|
||||
- def: text = '[{"id":1,"first_name":"Harry","last_name":"Riley","email":"hriley0@usgs.gov","country":"Andorra","ip_address":"221.25.65.136"},{"id":2,"first_name":"Bonnie","last_name":"Anderson","email":"banderson1@list-manage.com","country":"Tuvalu","ip_address":"116.162.43.150"},{"id":3,"first_name":"Marie","last_name":"Schmidt","email":"mschmidt2@diigo.com","country":"Iraq","ip_address":"181.105.59.57"},{"id":4,"first_name":"Phillip","last_name":"Willis","email":"pwillis3@com.com","country":"Montenegro","ip_address":"24.223.139.156"}]'
|
||||
- def: sorted = '[{"country":"Andorra","email":"hriley0@usgs.gov","first_name":"Harry","id":1,"ip_address":"221.25.65.136","last_name":"Riley"},{"country":"Tuvalu","email":"banderson1@list-manage.com","first_name":"Bonnie","id":2,"ip_address":"116.162.43.150","last_name":"Anderson"},{"country":"Iraq","email":"mschmidt2@diigo.com","first_name":"Marie","id":3,"ip_address":"181.105.59.57","last_name":"Schmidt"},{"country":"Montenegro","email":"pwillis3@com.com","first_name":"Phillip","id":4,"ip_address":"24.223.139.156","last_name":"Willis"}]'
|
||||
|
||||
- cd: r.json(text).to_json_string()
|
||||
ot: sorted
|
||||
|
||||
- cd: r.expr(r.minval).to_json_string()
|
||||
ot: err('ReqlQueryLogicError', 'Cannot convert `r.minval` to JSON.')
|
||||
|
||||
- cd: r.expr(r.maxval).to_json_string()
|
||||
ot: err('ReqlQueryLogicError', 'Cannot convert `r.maxval` to JSON.')
|
||||
|
||||
- cd: r.expr(r.minval).coerce_to('string')
|
||||
ot: err('ReqlQueryLogicError', 'Cannot convert `r.minval` to JSON.')
|
||||
|
||||
- cd: r.expr(r.maxval).coerce_to('string')
|
||||
ot: err('ReqlQueryLogicError', 'Cannot convert `r.maxval` to JSON.')
|
||||
|
||||
- cd: r.time(2014,9,11, 'Z')
|
||||
runopts:
|
||||
time_format: 'raw'
|
||||
ot: {'timezone':'+00:00','$reql_type$':'TIME','epoch_time':1410393600}
|
||||
|
||||
- cd: r.time(2014,9,11, 'Z').to_json_string()
|
||||
ot: '{"$reql_type$":"TIME","epoch_time":1410393600,"timezone":"+00:00"}'
|
||||
|
||||
- cd: r.point(0,0)
|
||||
ot: {'$reql_type$':'GEOMETRY','coordinates':[0,0],'type':'Point'}
|
||||
|
||||
- cd: r.point(0,0).to_json_string()
|
||||
ot: '{"$reql_type$":"GEOMETRY","coordinates":[0,0],"type":"Point"}'
|
||||
|
||||
- def:
|
||||
rb: s = "\x66\x6f\x6f".force_encoding('BINARY')
|
||||
py: s = b'\x66\x6f\x6f'
|
||||
js: s = Buffer("\x66\x6f\x6f", 'binary')
|
||||
- cd: r.binary(s)
|
||||
ot: s
|
||||
|
||||
- cd: r.expr("foo").coerce_to("binary").to_json_string()
|
||||
ot: '{"$reql_type$":"BINARY","data":"Zm9v"}'
|
128
ext/librethinkdbxx/test/upstream/limits.yaml
Normal file
128
ext/librethinkdbxx/test/upstream/limits.yaml
Normal file
@ -0,0 +1,128 @@
|
||||
desc: Tests array limit variations
|
||||
table_variable_name: tbl
|
||||
tests:
|
||||
|
||||
# test simplistic array limits
|
||||
- cd: r.expr([1,1,1,1]).union([1, 1, 1, 1])
|
||||
runopts:
|
||||
array_limit: 8
|
||||
ot: [1,1,1,1,1,1,1,1]
|
||||
- cd: r.expr([1,2,3,4]).union([5, 6, 7, 8])
|
||||
runopts:
|
||||
array_limit: 4
|
||||
ot: err("ReqlResourceLimitError", "Array over size limit `4`.", [0])
|
||||
|
||||
# test array limits on query creation
|
||||
- cd: r.expr([1,2,3,4,5,6,7,8])
|
||||
runopts:
|
||||
array_limit: 4
|
||||
ot: err("ReqlResourceLimitError", "Array over size limit `4`.", [0])
|
||||
|
||||
# test bizarre array limits
|
||||
- cd: r.expr([1,2,3,4,5,6,7,8])
|
||||
runopts:
|
||||
array_limit: -1
|
||||
ot: err("ReqlQueryLogicError", "Illegal array size limit `-1`. (Must be >= 1.)", [])
|
||||
|
||||
- cd: r.expr([1,2,3,4,5,6,7,8])
|
||||
runopts:
|
||||
array_limit: 0
|
||||
ot: err("ReqlQueryLogicError", "Illegal array size limit `0`. (Must be >= 1.)", [])
|
||||
|
||||
# make enormous > 100,000 element array
|
||||
- def: ten_l = r.expr([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
|
||||
- def:
|
||||
js: ten_f = function(l) { return ten_l }
|
||||
py: ten_f = lambda l:list(range(1,11))
|
||||
- def:
|
||||
js: huge_l = r.expr(ten_l).concatMap(ten_f).concatMap(ten_f).concatMap(ten_f).concatMap(ten_f)
|
||||
py: huge_l = r.expr(ten_l).concat_map(ten_f).concat_map(ten_f).concat_map(ten_f).concat_map(ten_f)
|
||||
rb: huge_l = r.expr(ten_l).concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}.concat_map {|l| ten_l}
|
||||
- cd: huge_l.append(1).count()
|
||||
runopts:
|
||||
array_limit: 100001
|
||||
ot: 100001
|
||||
|
||||
# attempt to insert enormous array
|
||||
- cd: tbl.insert({'id':0, 'array':huge_l.append(1)})
|
||||
runopts:
|
||||
array_limit: 100001
|
||||
ot: partial({'errors':1, 'first_error':"Array too large for disk writes (limit 100,000 elements)."})
|
||||
|
||||
- cd: tbl.get(0)
|
||||
runopts:
|
||||
array_limit: 100001
|
||||
ot: (null)
|
||||
|
||||
# attempt to read array that violates limit from disk
|
||||
- cd: tbl.insert({'id':1, 'array':ten_l})
|
||||
ot: ({'deleted':0,'replaced':0,'unchanged':0,'errors':0,'skipped':0,'inserted':1})
|
||||
- cd: tbl.get(1)
|
||||
runopts:
|
||||
array_limit: 4
|
||||
ot: ({'array':[1,2,3,4,5,6,7,8,9,10],'id':1})
|
||||
|
||||
|
||||
# Test that the changefeed queue size actually causes changes to be sent early.
|
||||
- cd: tbl.delete().get_field('deleted')
|
||||
ot: 1
|
||||
|
||||
- cd: c = tbl.changes({squash:1000000, changefeed_queue_size:10})
|
||||
py: c = tbl.changes(squash=1000000, changefeed_queue_size=10)
|
||||
|
||||
- cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}]).get_field('inserted')
|
||||
ot: 7
|
||||
- py: fetch(c, 7)
|
||||
rb: fetch(c, 7)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':0}},
|
||||
{'old_val':null, 'new_val':{'id':1}},
|
||||
{'old_val':null, 'new_val':{'id':2}},
|
||||
{'old_val':null, 'new_val':{'id':3}},
|
||||
{'old_val':null, 'new_val':{'id':4}},
|
||||
{'old_val':null, 'new_val':{'id':5}},
|
||||
{'old_val':null, 'new_val':{'id':6}}])
|
||||
|
||||
- cd: tbl.insert([{'id':7}, {'id':8}, {'id':9}, {'id':10}, {'id':11}, {'id':12}, {'id':13}]).get_field('inserted')
|
||||
ot: 7
|
||||
- py: fetch(c, 7)
|
||||
rb: fetch(c, 7)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':7}},
|
||||
{'old_val':null, 'new_val':{'id':8}},
|
||||
{'old_val':null, 'new_val':{'id':9}},
|
||||
{'old_val':null, 'new_val':{'id':10}},
|
||||
{'old_val':null, 'new_val':{'id':11}},
|
||||
{'old_val':null, 'new_val':{'id':12}},
|
||||
{'old_val':null, 'new_val':{'id':13}}])
|
||||
|
||||
- cd: tbl.delete().get_field('deleted')
|
||||
ot: 14
|
||||
|
||||
- cd: c2 = tbl.changes({squash:1000000})
|
||||
py: c2 = tbl.changes(squash=1000000)
|
||||
runopts:
|
||||
changefeed_queue_size: 10
|
||||
|
||||
|
||||
- cd: tbl.insert([{'id':0}, {'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}]).get_field('inserted')
|
||||
ot: 7
|
||||
- py: fetch(c2, 7)
|
||||
rb: fetch(c2, 7)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':0}},
|
||||
{'old_val':null, 'new_val':{'id':1}},
|
||||
{'old_val':null, 'new_val':{'id':2}},
|
||||
{'old_val':null, 'new_val':{'id':3}},
|
||||
{'old_val':null, 'new_val':{'id':4}},
|
||||
{'old_val':null, 'new_val':{'id':5}},
|
||||
{'old_val':null, 'new_val':{'id':6}}])
|
||||
|
||||
- cd: tbl.insert([{'id':7}, {'id':8}, {'id':9}, {'id':10}, {'id':11}, {'id':12}, {'id':13}]).get_field('inserted')
|
||||
ot: 7
|
||||
- py: fetch(c2, 7)
|
||||
rb: fetch(c2, 7)
|
||||
ot: bag([{'old_val':null, 'new_val':{'id':7}},
|
||||
{'old_val':null, 'new_val':{'id':8}},
|
||||
{'old_val':null, 'new_val':{'id':9}},
|
||||
{'old_val':null, 'new_val':{'id':10}},
|
||||
{'old_val':null, 'new_val':{'id':11}},
|
||||
{'old_val':null, 'new_val':{'id':12}},
|
||||
{'old_val':null, 'new_val':{'id':13}}])
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user